| prompt | completion | api |
|---|---|---|
| stringlengths 19 to 879k | stringlengths 3 to 53.8k | stringlengths 8 to 59 |
#!python3
#
# Copyright (C) 2014-2015 <NAME>. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
"""
PYPOWER-Dynamics
Voltage Source Converter Model Class
Average model of a VSC in voltage-control mode (i.e. controlled voltage source behind an impedance).
"""
import numpy as np
class vsc_average:
def __init__(self, ID, gen_no, Rl, Xl, dynopt):
self.id = ID
self.gen_no = gen_no
self.opt = dynopt['iopt']
self.signals = {}
self.states = {}
self.states0 = {}
self.dsteps = {}
self.params = {}
self.params['Rl'] = Rl
self.params['Xl'] = Xl
self.params['fn'] = dynopt['fn']
# Equivalent Norton impedance for Ybus modification
self.Yg = 1 / (Rl + 1j * Xl)
def initialise(self, vt0, S0):
"""
Initialise converter emf based on load flow voltage and grid current injection
"""
# Calculate initial armature current
Ia0 = np.conj(S0 / vt0)
phi0 = np.angle(Ia0)
# Calculate steady state machine emf (i.e. voltage behind synchronous reactance)
Edq0 = vt0 + (self.params['Rl'] + 1j * self.params['Xl']) * Ia0
delta0 = np.angle(Edq0)
# Convert currents to rotor reference frame
Id0 = np.abs(Ia0) * np.sin(delta0 - phi0)
Iq0 = np.abs(Ia0) * np.cos(delta0 - phi0)
# Initialise signals, states and parameters
self.signals['Vt'] = np.abs(vt0)
self.signals['Edq'] = Edq0
self.signals['Ed'] = np.real(Edq0)
self.signals['Eq'] = np.imag(Edq0)
self.signals['Id'] = Id0
self.signals['Iq'] = Iq0
self.states['delta'] = delta0
self.states['omega'] = 1
def calc_currents(self, vt):
"""
Solve grid current injections (in network reference frame)
"""
Edq = self.signals['Ed'] + 1j * self.signals['Eq']
delta = np.angle(Edq)
# Calculate terminal voltage in dq reference frame
Vd = np.abs(vt) * np.sin(self.states['delta'] - np.angle(vt))
Vq = np.abs(vt) * np.cos(self.states['delta'] - np.angle(vt))
# Calculate Id and Iq (Norton equivalent current injection in dq frame)
Ia = (Edq - vt) / (self.params['Rl'] + 1j * self.params['Xl'])
phi = np.angle(Ia)
Id = np.abs(Ia) * np.sin(delta - phi)
Iq = np.abs(Ia) * np.cos(delta - phi)
# Calculate machine current injection (Norton equivalent current injection in network frame)
In = (Iq - 1j * Id) * np.exp(1j * (self.states['delta']))
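# Hedged completion (assumption, not in the original excerpt): the source is
# truncated here, so the remaining lines of calc_currents below are a sketch.
# Mirroring initialise(), the dq quantities are stored back on the signals
# dict and the Norton current injection is returned for the network solution.
self.signals['Vt'] = np.abs(vt)
self.signals['Id'] = Id
self.signals['Iq'] = Iq
return In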
from __future__ import division, print_function, absolute_import
import numpy as np
from copy import deepcopy
from ipsolver._constraints import (NonlinearConstraint,
LinearConstraint,
BoxConstraint)
from ipsolver._canonical_constraint import (_parse_constraint,
to_canonical,
empty_canonical_constraint)
from numpy.testing import (TestCase, assert_array_almost_equal,
assert_array_equal, assert_array_less,
assert_raises, assert_equal, assert_,
run_module_suite, assert_allclose, assert_warns,
dec)
class TestParseConstraint(TestCase):
def test_equality_constraint(self):
kind = ("equals", [10, 20, 30])
eq, ineq, val_eq, val_ineq, sign, fun_len = _parse_constraint(kind)
assert_array_equal(eq, [0, 1, 2])
assert_array_equal(val_eq, [10, 20, 30])
assert_array_equal(ineq, [])
assert_array_equal(val_ineq, [])
assert_array_equal(sign, [])
def test_greater_constraint(self):
kind = ("greater", [10, 20, 30])
eq, ineq, val_eq, val_ineq, sign, fun_len = _parse_constraint(kind)
assert_array_equal(eq, [])
assert_array_equal(val_eq, [])
assert_array_equal(ineq, [0, 1, 2])
assert_array_equal(val_ineq, [10, 20, 30])
assert_array_equal(sign, [-1, -1, -1])
kind = ("greater", [10, np.inf, 30])
eq, ineq, val_eq, val_ineq, sign, fun_len = _parse_constraint(kind)
assert_array_equal(eq, [])
assert_array_equal(val_eq, [])
assert_array_equal(ineq, [0, 2])
assert_array_equal(val_ineq, [10, 30])
assert_array_equal(sign, [-1, -1])
def test_less_constraint(self):
kind = ("less", [10, 20, 30])
eq, ineq, val_eq, val_ineq, sign, fun_len = _parse_constraint(kind)
assert_array_equal(eq, [])
assert_array_equal(val_eq, [])
assert_array_equal(ineq, [0, 1, 2])
assert_array_equal(val_ineq, [10, 20, 30])
assert_array_equal(sign, [1, 1, 1])
kind = ("less", [10, np.inf, 30])
eq, ineq, val_eq, val_ineq, sign, fun_len = _parse_constraint(kind)
assert_array_equal(eq, [])
assert_array_equal(val_eq, [])
assert_array_equal(ineq, [0, 2])
assert_array_equal(val_ineq, [10, 30])
assert_array_equal(sign, [1, 1])
def test_interval_constraint(self):
kind = ("interval", [10, 20, 30], [50, 60, 70])
eq, ineq, val_eq, val_ineq, sign, fun_len = _parse_constraint(kind)
assert_array_equal(eq, [])
assert_array_equal(val_eq, [])
assert_array_equal(ineq, [0, 1, 2, 0, 1, 2])
assert_array_equal(val_ineq, [10, 20, 30, 50, 60, 70])
assert_array_equal(sign, [-1, -1, -1, 1, 1, 1])
kind = ("interval", [10, 20, 30], [50, 20, 70])
eq, ineq, val_eq, val_ineq, sign, fun_len = _parse_constraint(kind)
assert_array_equal(eq, [1])
assert_array_equal(val_eq, [20])
assert_array_equal(ineq, [0, 2, 0, 2])
assert_array_equal(val_ineq, [10, 30, 50, 70])
assert_array_equal(sign, [-1, -1, 1, 1])
kind = ("interval", [10, 20, 30], [50, 20, np.inf])
eq, ineq, val_eq, val_ineq, sign, fun_len = _parse_constraint(kind)
assert_array_equal(eq, [1])
assert_array_equal(val_eq, [20])
assert_array_equal(ineq, [0, 2, 0])
assert_array_equal(val_ineq, [10, 30, 50])
assert_array_equal(sign, [-1, -1, 1])
kind = ("interval", [-np.inf, 20, 30], [50, 20, np.inf])
eq, ineq, val_eq, val_ineq, sign, fun_len = _parse_constraint(kind)
assert_array_equal(eq, [1])
assert_array_equal(val_eq, [20])
assert_array_equal(ineq, [2, 0])
assert_array_equal(val_ineq, [30, 50])
assert_array_equal(sign, [-1, 1])
class TestToCanonical(TestCase):
def test_empty_constraint(self):
x = [1, 2, 3]
canonical = empty_canonical_constraint(x, 3)
assert_array_equal(canonical.n_eq, 0)
import numpy as np
from scipy.special import gammaln
def logfactorial(n):
return gammaln(n + 1)
def regularized_log(vector):
"""
A function which is log(vector) where vector > 0, and zero otherwise.
:param vector:
:return:
"""
out = np.zeros_like(vector)
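# Hedged completion (assumption based on the docstring above): take the log only
# where the input is strictly positive and leave zeros elsewhere.
vector = np.asarray(vector)
positive = vector > 0
out[positive] = np.log(vector[positive])
return out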
import copy
import numpy as np
from Classes.Uncertainty import Uncertainty
from Classes.QComp import QComp
from Classes.MovingBedTests import MovingBedTests
from Classes.TransectData import TransectData
class QAData(object):
"""Evaluates and stores quality assurance characteristics and messages.
Attributes
----------
q_run_threshold_caution: int
Caution threshold for interpolated discharge for a run of invalid ensembles, in percent.
q_run_threshold_warning: int
Warning threshold for interpolated discharge for a run of invalid ensembles, in percent.
q_total_threshold_caution: int
Caution threshold for total interpolated discharge for invalid ensembles, in percent.
q_total_threshold_warning: int
Warning threshold for total interpolated discharge for invalid ensembles, in percent.
transects: dict
Dictionary of quality assurance checks for transects
system_tst: dict
Dictionary of quality assurance checks on the system test(s)
compass: dict
Dictionary of quality assurance checks on compass calibration and evaluations
temperature: dict
Dictionary of quality assurance checks on temperature comparisons and variation
movingbed: dict
Dictionary of quality assurance checks on moving-bed tests
user: dict
Dictionary of quality assurance checks on user input data
boat: dict
Dictionary of quality assurance checks on boat velocities
bt_vel: dict
Dictionary of quality assurance checks on bottom track velocities
gga_vel: dict
Dictionary of quality assurance checks on gga boat velocities
vtg_vel: dict
Dictionary of quality assurance checks on vtg boat velocities
w_vel: dict
Dictionary of quality assurance checks on water track velocities
extrapolation: dict
Dictionary of quality assurance checks on extrapolations
edges: dict
Dictionary of quality assurance checks on edges
"""
def __init__(self, meas, mat_struct=None, compute=True):
"""Checks the measurement for all quality assurance issues.
Parameters
----------
meas: Measurement
Object of class Measurement
"""
# Set default thresholds
self.q_run_threshold_caution = 3
self.q_run_threshold_warning = 5
self.q_total_threshold_caution = 10
self.q_total_threshold_warning = 25
# Initialize instance variables
self.transects = dict()
self.system_tst = dict()
self.compass = dict()
self.temperature = dict()
self.movingbed = dict()
self.user = dict()
self.depths = dict()
self.boat = dict()
self.bt_vel = dict()
self.gga_vel = dict()
self.vtg_vel = dict()
self.w_vel = dict()
self.extrapolation = dict()
self.edges = dict()
self.settings_dict = dict()
self.settings_dict['tab_compass'] = 'Default'
self.settings_dict['tab_tempsal'] = 'Default'
self.settings_dict['tab_mbt'] = 'Default'
self.settings_dict['tab_bt'] = 'Default'
self.settings_dict['tab_gps'] = 'Default'
self.settings_dict['tab_depth'] = 'Default'
self.settings_dict['tab_wt'] = 'Default'
self.settings_dict['tab_extrap'] = 'Default'
self.settings_dict['tab_edges'] = 'Default'
if compute:
# Apply QA checks
self.transects_qa(meas)
self.system_tst_qa(meas)
self.compass_qa(meas)
self.temperature_qa(meas)
self.moving_bed_qa(meas)
self.user_qa(meas)
self.depths_qa(meas)
self.boat_qa(meas)
self.water_qa(meas)
self.extrapolation_qa(meas)
self.edges_qa(meas)
self.check_bt_setting(meas)
self.check_wt_settings(meas)
self.check_depth_settings(meas)
self.check_gps_settings(meas)
self.check_edge_settings(meas)
self.check_extrap_settings(meas)
self.check_tempsal_settings(meas)
self.check_mbt_settings(meas)
self.check_compass_settings(meas)
else:
self.populate_from_qrev_mat(meas, mat_struct)
def populate_from_qrev_mat(self, meas, meas_struct):
"""Populates the object using data from previously saved QRev Matlab file.
Parameters
----------
meas: Measurement
Object of Measurement
meas_struct: mat_struct
Matlab data structure obtained from sio.loadmat
"""
# Generate a new QA object using the measurement data and the current QA code.
# When QA checks from the current QA are not available from old QRev files, these
# checks will be included to supplement the old QRev file data.
new_qa = QAData(meas)
if hasattr(meas_struct, 'qa'):
# Set default thresholds
self.q_run_threshold_caution = meas_struct.qa.qRunThresholdCaution
self.q_run_threshold_warning = meas_struct.qa.qRunThresholdWarning
self.q_total_threshold_caution = meas_struct.qa.qTotalThresholdCaution
self.q_total_threshold_warning = meas_struct.qa.qTotalThresholdWarning
# Initialize instance variables
self.transects = dict()
self.transects['duration'] = meas_struct.qa.transects.duration
self.transects['messages'] = self.make_list(meas_struct.qa.transects.messages)
self.transects['number'] = meas_struct.qa.transects.number
self.transects['recip'] = meas_struct.qa.transects.recip
self.transects['sign'] = meas_struct.qa.transects.sign
self.transects['status'] = meas_struct.qa.transects.status
self.transects['uncertainty'] = meas_struct.qa.transects.uncertainty
self.system_tst = dict()
self.system_tst['messages'] = self.make_list(meas_struct.qa.systemTest.messages)
self.system_tst['status'] = meas_struct.qa.systemTest.status
self.compass = dict()
self.compass['messages'] = self.make_list(meas_struct.qa.compass.messages)
self.compass['status'] = meas_struct.qa.compass.status
self.compass['status1'] = meas_struct.qa.compass.status1
self.compass['status2'] = meas_struct.qa.compass.status2
# If QA check not available, get check from new QA
if hasattr(meas_struct.qa.compass, 'magvar'):
self.compass['magvar'] = meas_struct.qa.compass.magvar
else:
self.compass['magvar'] = new_qa.compass['magvar']
# If QA check not available, get check from new QA
if hasattr(meas_struct.qa.compass, 'magvarIdx'):
self.compass['magvar_idx'] = self.make_array(meas_struct.qa.compass.magvarIdx)
else:
self.compass['magvar_idx'] = new_qa.compass['magvar_idx']
# Changed mag_error_idx from bool to int array in QRevPy
self.compass['mag_error_idx'] = new_qa.compass['mag_error_idx']
# If QA check not available, get check from new QA
if hasattr(meas_struct.qa.compass, 'pitchMeanWarningIdx'):
self.compass['pitch_mean_warning_idx'] = self.make_array(meas_struct.qa.compass.pitchMeanWarningIdx)
else:
self.compass['pitch_mean_warning_idx'] = new_qa.compass['pitch_mean_warning_idx']
# If QA check not available, get check from new QA
if hasattr(meas_struct.qa.compass, 'rollMeanWarningIdx'):
self.compass['roll_mean_warning_idx'] = self.make_array(meas_struct.qa.compass.rollMeanWarningIdx)
else:
self.compass['roll_mean_warning_idx'] = new_qa.compass['roll_mean_warning_idx']
# If QA check not available, get check from new QA
if hasattr(meas_struct.qa.compass, 'pitchMeanCautionIdx'):
self.compass['pitch_mean_caution_idx'] = self.make_array(meas_struct.qa.compass.pitchMeanCautionIdx)
else:
self.compass['pitch_mean_caution_idx'] = new_qa.compass['pitch_mean_caution_idx']
# If QA check not available, get check from new QA
if hasattr(meas_struct.qa.compass, 'rollMeanCautionIdx'):
self.compass['roll_mean_caution_idx'] = self.make_array(meas_struct.qa.compass.rollMeanCautionIdx)
else:
self.compass['roll_mean_caution_idx'] = new_qa.compass['roll_mean_caution_idx']
# If QA check not available, get check from new QA
if hasattr(meas_struct.qa.compass, 'pitchStdCautionIdx'):
self.compass['pitch_std_caution_idx'] = self.make_array(meas_struct.qa.compass.pitchStdCautionIdx)
else:
self.compass['pitch_std_caution_idx'] = new_qa.compass['pitch_std_caution_idx']
# If QA check not available, get check from new QA
if hasattr(meas_struct.qa.compass, 'rollStdCautionIdx'):
self.compass['roll_std_caution_idx'] = self.make_array(meas_struct.qa.compass.rollStdCautionIdx)
else:
self.compass['roll_std_caution_idx'] = new_qa.compass['roll_std_caution_idx']
self.temperature = dict()
self.temperature['messages'] = self.make_list(meas_struct.qa.temperature.messages)
self.temperature['status'] = meas_struct.qa.temperature.status
self.movingbed = dict()
self.movingbed['messages'] = self.make_list(meas_struct.qa.movingbed.messages)
self.movingbed['status'] = meas_struct.qa.movingbed.status
self.movingbed['code'] = meas_struct.qa.movingbed.code
self.user = dict()
self.user['messages'] = self.make_list(meas_struct.qa.user.messages)
self.user['sta_name'] = bool(meas_struct.qa.user.staName)
self.user['sta_number'] = bool(meas_struct.qa.user.staNumber)
self.user['status'] = meas_struct.qa.user.status
# If QA check not available, get check from new QA
self.depths = self.create_qa_dict(self, meas_struct.qa.depths)
if 'draft' not in self.depths:
self.depths['draft'] = new_qa.depths['draft']
if 'all_invalid' not in self.depths:
self.depths['all_invalid'] = new_qa.depths['all_invalid']
# If QA check not available, get check from new QA
self.bt_vel = self.create_qa_dict(self, meas_struct.qa.btVel, ndim=2)
if 'all_invalid' not in self.bt_vel:
self.bt_vel['all_invalid'] = new_qa.bt_vel['all_invalid']
# If QA check not available, get check from new QA
self.gga_vel = self.create_qa_dict(self, meas_struct.qa.ggaVel, ndim=2)
if 'all_invalid' not in self.gga_vel:
self.gga_vel['all_invalid'] = new_qa.gga_vel['all_invalid']
# If QA check not available, get check from new QA
self.vtg_vel = self.create_qa_dict(self, meas_struct.qa.vtgVel, ndim=2)
if 'all_invalid' not in self.vtg_vel:
self.vtg_vel['all_invalid'] = new_qa.vtg_vel['all_invalid']
# If QA check not available, get check from new QA
self.w_vel = self.create_qa_dict(self, meas_struct.qa.wVel, ndim=2)
if 'all_invalid' not in self.w_vel:
self.w_vel['all_invalid'] = new_qa.w_vel['all_invalid']
self.extrapolation = dict()
self.extrapolation['messages'] = self.make_list(meas_struct.qa.extrapolation.messages)
self.extrapolation['status'] = meas_struct.qa.extrapolation.status
self.edges = dict()
self.edges['messages'] = self.make_list(meas_struct.qa.edges.messages)
self.edges['status'] = meas_struct.qa.edges.status
self.edges['left_q'] = meas_struct.qa.edges.leftQ
self.edges['right_q'] = meas_struct.qa.edges.rightQ
self.edges['left_sign'] = meas_struct.qa.edges.leftSign
self.edges['right_sign'] = meas_struct.qa.edges.rightSign
self.edges['left_zero'] = meas_struct.qa.edges.leftzero
self.edges['right_zero'] = meas_struct.qa.edges.rightzero
self.edges['left_type'] = meas_struct.qa.edges.leftType
self.edges['right_type'] = meas_struct.qa.edges.rightType
# If QA check not available, get check from new QA
if hasattr(meas_struct.qa.edges, 'rightDistMovedIdx'):
self.edges['right_dist_moved_idx'] = self.make_array(meas_struct.qa.edges.rightDistMovedIdx)
else:
self.edges['right_dist_moved_idx'] = new_qa.edges['right_dist_moved_idx']
# If QA check not available, get check from new QA
if hasattr(meas_struct.qa.edges, 'leftDistMovedIdx'):
self.edges['left_dist_moved_idx'] = self.make_array(meas_struct.qa.edges.leftDistMovedIdx)
else:
self.edges['left_dist_moved_idx'] = new_qa.edges['left_dist_moved_idx']
# If QA check not available, get check from new QA
if hasattr(meas_struct.qa.edges, 'leftQIdx'):
self.edges['left_q_idx'] = self.make_array(meas_struct.qa.edges.leftQIdx)
else:
self.edges['left_q_idx'] = new_qa.edges['left_q_idx']
# If QA check not available, get check from new QA
if hasattr(meas_struct.qa.edges, 'rightQIdx'):
self.edges['right_q_idx'] = self.make_array(meas_struct.qa.edges.rightQIdx)
else:
self.edges['right_q_idx'] = new_qa.edges['right_q_idx']
# If QA check not available, get check from new QA
if hasattr(meas_struct.qa.edges, 'leftZeroIdx'):
self.edges['left_zero_idx'] = self.make_array(meas_struct.qa.edges.leftZeroIdx)
else:
self.edges['left_zero_idx'] = new_qa.edges['left_zero_idx']
# If QA check not available, get check from new QA
if hasattr(meas_struct.qa.edges, 'rightZeroIdx'):
self.edges['right_zero_idx'] = self.make_array(meas_struct.qa.edges.rightZeroIdx)
else:
self.edges['right_zero_idx'] = new_qa.edges['right_zero_idx']
# If QA check not available, get check from new QA
if hasattr(meas_struct.qa.edges, 'invalid_transect_left_idx'):
self.edges['invalid_transect_left_idx'] = \
self.make_array(meas_struct.qa.edges.invalid_transect_left_idx)
elif hasattr(meas_struct.qa.edges, 'invalidTransLeftIdx'):
self.edges['invalid_transect_left_idx'] = \
self.make_array(meas_struct.qa.edges.invalidTransLeftIdx)
else:
self.edges['invalid_transect_left_idx'] = new_qa.edges['invalid_transect_left_idx']
# If QA check not available, get check from new QA
if hasattr(meas_struct.qa.edges, 'invalid_transect_right_idx'):
self.edges['invalid_transect_right_idx'] = \
self.make_array(meas_struct.qa.edges.invalid_transect_right_idx)
elif hasattr(meas_struct.qa, 'invalidTransRightIdx'):
self.edges['invalid_transect_right_idx'] = \
self.make_array(meas_struct.qa.edges.invalidTransRightIdx)
else:
self.edges['invalid_transect_right_idx'] = new_qa.edges['invalid_transect_right_idx']
if hasattr(meas_struct.qa, 'settings_dict'):
self.settings_dict = dict()
self.settings_dict['tab_compass'] = meas_struct.qa.settings_dict.tab_compass
self.settings_dict['tab_tempsal'] = meas_struct.qa.settings_dict.tab_tempsal
self.settings_dict['tab_mbt'] = meas_struct.qa.settings_dict.tab_mbt
self.settings_dict['tab_bt'] = meas_struct.qa.settings_dict.tab_bt
self.settings_dict['tab_gps'] = meas_struct.qa.settings_dict.tab_gps
self.settings_dict['tab_depth'] = meas_struct.qa.settings_dict.tab_depth
self.settings_dict['tab_wt'] = meas_struct.qa.settings_dict.tab_wt
self.settings_dict['tab_extrap'] = meas_struct.qa.settings_dict.tab_extrap
self.settings_dict['tab_edges'] = meas_struct.qa.settings_dict.tab_edges
@staticmethod
def create_qa_dict(self, mat_data, ndim=1):
"""Creates the dictionary used to store QA checks associated with the percent of discharge estimated
by interpolation. This dictionary is used by BT, GPS, Depth, and WT.
Parameters
----------
self: QAData
Object of QAData
mat_data: mat_struct
Matlab data from QRev file
"""
# Initialize dictionary
qa_dict = dict()
# Populate dictionary from Matlab data
qa_dict['messages'] = QAData.make_list(mat_data.messages)
# allInvalid not available in older QRev data
if hasattr(mat_data, 'allInvalid'):
qa_dict['all_invalid'] = self.make_array(mat_data.allInvalid, 1).astype(bool)
qa_dict['q_max_run_caution'] = self.make_array(mat_data.qRunCaution, ndim).astype(bool)
qa_dict['q_max_run_warning'] = self.make_array(mat_data.qRunWarning, ndim).astype(bool)
qa_dict['q_total_caution'] = self.make_array(mat_data.qTotalCaution, ndim).astype(bool)
qa_dict['q_total_warning'] = self.make_array(mat_data.qTotalWarning, ndim).astype(bool)
qa_dict['status'] = mat_data.status
# q_max_run and q_total not available in older QRev data
try:
qa_dict['q_max_run'] = self.make_array(mat_data.qMaxRun, ndim)
qa_dict['q_total'] = self.make_array(mat_data.qTotal, ndim)
except AttributeError:
qa_dict['q_max_run'] = np.tile(np.nan, (len(mat_data.qRunCaution), 6))
qa_dict['q_total'] = np.tile(np.nan, (len(mat_data.qRunCaution), 6))
return qa_dict
@staticmethod
def make_array(num_in, ndim=1):
"""Ensures that num_in is an array and if not makes it an array.
num_in: any
Any value or array
"""
if type(num_in) is np.ndarray:
if len(num_in.shape) < 2 and ndim > 1:
num_in = np.reshape(num_in, (1, num_in.shape[0]))
return num_in
else:
return num_in
else:
return np.array([num_in])
@staticmethod
def make_list(array_in):
"""Converts a string or array to a list.
Parameters
----------
array_in: any
Data to be converted to list.
Returns
-------
list_out: list
List of array_in data
"""
list_out = []
# Convert string to list
if type(array_in) is str:
list_out = [array_in]
else:
# Empty array
if array_in.size == 0:
list_out = []
# Single message with integer codes at end
elif array_in.size == 3:
if type(array_in[1]) is int or len(array_in[1].strip()) == 1:
temp = array_in.tolist()
if len(temp) > 0:
internal_list = []
for item in temp:
internal_list.append(item)
list_out = [internal_list]
else:
list_out = array_in.tolist()
# Either multiple messages with or without integer codes
else:
list_out = array_in.tolist()
return list_out
def transects_qa(self, meas):
"""Apply quality checks to transects
Parameters
----------
meas: Measurement
Object of class Measurement
"""
# Assume good results
self.transects['status'] = 'good'
# Initialize keys
self.transects['messages'] = []
self.transects['recip'] = 0
self.transects['sign'] = 0
self.transects['duration'] = 0
self.transects['number'] = 0
self.transects['uncertainty'] = 0
# Initialize lists
checked = []
discharges = []
start_edge = []
# Populate lists
for n in range(len(meas.transects)):
checked.append(meas.transects[n].checked)
if meas.transects[n].checked:
discharges.append(meas.discharge[n])
start_edge.append(meas.transects[n].start_edge)
num_checked = np.nansum(np.asarray(checked))
# Check duration
total_duration = 0
if num_checked >= 1:
for transect in meas.transects:
if transect.checked:
total_duration += transect.date_time.transect_duration_sec
# Check duration against USGS policy
if total_duration < 720:
self.transects['status'] = 'caution'
self.transects['messages'].append(
['Transects: Duration of selected transects is less than 720 seconds;', 2, 0])
self.transects['duration'] = 1
# Check transects for missing ensembles
for transect in meas.transects:
if transect.checked:
# Determine number of missing ensembles
if transect.adcp.manufacturer == 'SonTek':
# Determine number of missing ensembles for SonTek data
idx_missing = np.where(transect.date_time.ens_duration_sec > 1.5)[0]
if len(idx_missing) > 0:
average_ensemble_duration = (np.nansum(transect.date_time.ens_duration_sec)
- np.nansum(transect.date_time.ens_duration_sec[idx_missing])) \
/ (len(transect.date_time.ens_duration_sec) - len(idx_missing))
num_missing = np.round(np.nansum(transect.date_time.ens_duration_sec[idx_missing])
/ average_ensemble_duration) - len(idx_missing)
else:
num_missing = 0
else:
# Determine number of lost ensembles for TRDI data
idx_missing = np.where(np.isnan(transect.date_time.ens_duration_sec) == True)[0]
num_missing = len(idx_missing) - 1
# Save caution message
if num_missing > 0:
self.transects['messages'].append(['Transects: ' + str(transect.file_name) + ' is missing '
+ str(int(num_missing)) + ' ensembles;', 2, 0])
self.transects['status'] = 'caution'
# Check number of transects checked
if num_checked == 0:
# No transects selected
self.transects['status'] = 'warning'
self.transects['messages'].append(['TRANSECTS: No transects selected;', 1, 0])
self.transects['number'] = 2
elif num_checked == 1:
# Only one transect selected
self.transects['status'] = 'caution'
self.transects['messages'].append(['Transects: Only one transect selected;', 2, 0])
self.transects['number'] = 2
else:
self.transects['number'] = num_checked
if num_checked == 2:
# Only 2 transects selected
cov, _ = Uncertainty.uncertainty_q_random(discharges, 'total')
# Check uncertainty
if cov > 2:
self.transects['status'] = 'caution'
self.transects['messages'].append(
['Transects: Uncertainty would be reduced by additional transects;', 2, 0])
# Check for consistent sign
q_positive = []
for q in discharges:
if q.total >= 0:
q_positive.append(True)
else:
q_positive.append(False)
if len(np.unique(q_positive)) > 1:
self.transects['status'] = 'warning'
self.transects['messages'].append(
['TRANSECTS: Sign of total Q is not consistent. One or more start banks may be incorrect;', 1, 0])
# Check for reciprocal transects
num_left = start_edge.count('Left')
num_right = start_edge.count('Right')
if not num_left == num_right:
self.transects['status'] = 'warning'
self.transects['messages'].append(['TRANSECTS: Transects selected are not reciprocal transects;', 1, 0])
# Check for zero discharge transects
q_zero = False
for q in discharges:
if q.total == 0:
q_zero = True
if q_zero:
self.transects['status'] = 'warning'
self.transects['messages'].append(['TRANSECTS: One or more transects have zero Q;', 1, 0])
def system_tst_qa(self, meas):
"""Apply QA checks to system test.
Parameters
----------
meas: Measurement
Object of class Measurement
"""
self.system_tst['messages'] = []
self.system_tst['status'] = 'good'
# Determine if a system test was recorded
if not meas.system_tst:
# No system test data recorded
self.system_tst['status'] = 'warning'
self.system_tst['messages'].append(['SYSTEM TEST: No system test;', 1, 3])
else:
pt3_fail = False
num_tests_with_failure = 0
for test in meas.system_tst:
if hasattr(test, 'result'):
# Check for presence of pt3 test
if 'pt3' in test.result and test.result['pt3'] is not None:
# Check hard_limit, high gain, wide bandwidth
if 'hard_limit' in test.result['pt3']:
if 'high_wide' in test.result['pt3']['hard_limit']:
corr_table = test.result['pt3']['hard_limit']['high_wide']['corr_table']
if len(corr_table) > 0:
# All lags past lag 2 should be less than 50% of lag 0
qa_threshold = corr_table[0, :] * 0.5
all_lag_check = np.greater(corr_table[3::, :], qa_threshold)
# Lag 7 should be less than 25% of lag 0
lag_7_check = np.greater(corr_table[7, :], corr_table[0, :] * 0.25)
# If either condition is met for any beam the test fails
if np.sum(np.sum(all_lag_check)) + np.sum(lag_7_check) > 1:
pt3_fail = True
if test.result['sysTest']['n_failed'] is not None and test.result['sysTest']['n_failed'] > 0:
num_tests_with_failure += 1
# pt3 test failure message
if pt3_fail:
self.system_tst['status'] = 'caution'
self.system_tst['messages'].append(
['System Test: One or more PT3 tests in the system test indicate potential EMI;', 2, 3])
# Check for failed tests
if num_tests_with_failure == len(meas.system_tst):
# All tests had a failure
self.system_tst['status'] = 'warning'
self.system_tst['messages'].append(
['SYSTEM TEST: All system test sets have at least one test that failed;', 1, 3])
elif num_tests_with_failure > 0:
self.system_tst['status'] = 'caution'
self.system_tst['messages'].append(
['System Test: One or more system test sets have at least one test that failed;', 2, 3])
def compass_qa(self, meas):
"""Apply QA checks to compass calibration and evaluation.
Parameters
----------
meas: Measurement
Object of class Measurement
"""
self.compass['messages'] = []
checked = []
for transect in meas.transects:
checked.append(transect.checked)
if np.any(checked):
heading = np.unique(meas.transects[checked.index(1)].sensors.heading_deg.internal.data)
else:
heading = np.array([0])
# Initialize variable as if ADCP has no compass
self.compass['status'] = 'inactive'
self.compass['status1'] = 'good'
self.compass['status2'] = 'good'
self.compass['magvar'] = 0
self.compass['magvar_idx'] = []
self.compass['mag_error_idx'] = []
self.compass['pitch_mean_warning_idx'] = []
self.compass['pitch_mean_caution_idx'] = []
self.compass['pitch_std_caution_idx'] = []
self.compass['roll_mean_warning_idx'] = []
self.compass['roll_mean_caution_idx'] = []
self.compass['roll_std_caution_idx'] = []
if len(heading) > 1 and np.any(np.not_equal(heading, 0)):
# ADCP has a compass
# A compass calibration is required if a loop test or GPS are used
# Check for loop test
loop = False
for test in meas.mb_tests:
if test.type == 'Loop':
loop = True
# Check for GPS data
gps = False
if meas.transects[checked.index(True)].boat_vel.gga_vel is not None or \
meas.transects[checked.index(True)].boat_vel.vtg_vel is not None:
gps = True
if gps or loop:
# Compass calibration is required
# Determine the ADCP manufacturer
if meas.transects[checked.index(True)].adcp.manufacturer == 'SonTek':
# SonTek ADCP
if len(meas.compass_cal) == 0:
# No compass calibration
self.compass['status1'] = 'warning'
self.compass['messages'].append(['COMPASS: No compass calibration;', 1, 4])
elif meas.compass_cal[-1].result['compass']['error'] == 'N/A':
# If the error cannot be decoded from the calibration assume the calibration is good
self.compass['status1'] = 'good'
else:
if meas.compass_cal[-1].result['compass']['error'] <= 0.2:
self.compass['status1'] = 'good'
else:
self.compass['status1'] = 'caution'
self.compass['messages'].append(['Compass: Calibration result > 0.2 deg;', 2, 4])
elif meas.transects[checked.index(True)].adcp.manufacturer == 'TRDI':
# TRDI ADCP
if len(meas.compass_cal) == 0:
# No compass calibration
if len(meas.compass_eval) == 0:
# No calibration or evaluation
self.compass['status1'] = 'warning'
self.compass['messages'].append(['COMPASS: No compass calibration or evaluation;', 1, 4])
else:
# No calibration but an evaluation was completed
self.compass['status1'] = 'caution'
self.compass['messages'].append(['Compass: No compass calibration;', 2, 4])
else:
# Compass was calibrated
if len(meas.compass_eval) == 0:
# No compass evaluation
self.compass['status1'] = 'caution'
self.compass['messages'].append(['Compass: No compass evaluation;', 2, 4])
else:
# Check results of evaluation
try:
if float(meas.compass_eval[-1].result['compass']['error']) <= 1:
self.compass['status1'] = 'good'
else:
self.compass['status1'] = 'caution'
self.compass['messages'].append(['Compass: Evaluation result > 1 deg;', 2, 4])
except ValueError:
self.compass['status1'] = 'good'
else:
# Compass not required
if len(meas.compass_cal) == 0 and len(meas.compass_eval) == 0:
# No compass calibration or evaluation
self.compass['status1'] = 'default'
else:
# Compass was calibrated and evaluated
self.compass['status1'] = 'good'
# Check for consistent magvar and pitch and roll mean and variation
magvar = []
align = []
mag_error_exceeded = []
pitch_mean = []
pitch_std = []
pitch_exceeded = []
roll_mean = []
roll_std = []
roll_exceeded = []
transect_idx = []
for n, transect in enumerate(meas.transects):
if transect.checked:
transect_idx.append(n)
heading_source_selected = getattr(
transect.sensors.heading_deg, transect.sensors.heading_deg.selected)
pitch_source_selected = getattr(transect.sensors.pitch_deg, transect.sensors.pitch_deg.selected)
roll_source_selected = getattr(transect.sensors.roll_deg, transect.sensors.roll_deg.selected)
magvar.append(transect.sensors.heading_deg.internal.mag_var_deg)
if transect.sensors.heading_deg.external is not None:
align.append(transect.sensors.heading_deg.external.align_correction_deg)
pitch_mean.append(np.nanmean(pitch_source_selected.data))
pitch_std.append(np.nanstd(pitch_source_selected.data, ddof=1))
roll_mean.append(np.nanmean(roll_source_selected.data))
roll_std.append(np.nanstd(roll_source_selected.data, ddof=1))
# SonTek G3 compass provides pitch, roll, and magnetic error parameters that can be checked
if transect.adcp.manufacturer == 'SonTek':
if heading_source_selected.pitch_limit is not None:
# Work around a bug in SonTek data where pitch and roll come as n x 3; if so, use only the first column (n x 1)
if len(pitch_source_selected.data.shape) == 1:
pitch_data = pitch_source_selected.data
else:
pitch_data = pitch_source_selected.data[:, 0]
idx_max = np.where(pitch_data > heading_source_selected.pitch_limit[0])[0]
idx_min = np.where(pitch_data < heading_source_selected.pitch_limit[1])[0]
if len(idx_max) > 0 or len(idx_min) > 0:
pitch_exceeded.append(True)
else:
pitch_exceeded.append(False)
if heading_source_selected.roll_limit is not None:
if len(roll_source_selected.data.shape) == 1:
roll_data = roll_source_selected.data
else:
roll_data = roll_source_selected.data[:, 0]
idx_max = np.where(roll_data > heading_source_selected.roll_limit[0])[0]
idx_min = np.where(roll_data < heading_source_selected.roll_limit[1])[0]
if len(idx_max) > 0 or len(idx_min) > 0:
roll_exceeded.append(True)
else:
roll_exceeded.append(False)
if heading_source_selected.mag_error is not None:
idx_max = np.where(heading_source_selected.mag_error > 2)[0]
if len(idx_max) > 0:
mag_error_exceeded.append(n)
# Check magvar consistency
if len(np.unique(magvar)) > 1:
self.compass['status2'] = 'caution'
self.compass['messages'].append(
['Compass: Magnetic variation is not consistent among transects;', 2, 4])
self.compass['magvar'] = 1
# Check magvar consistency
if len(np.unique(align)) > 1:
self.compass['status2'] = 'caution'
self.compass['messages'].append(
['Compass: Heading offset is not consistent among transects;', 2, 4])
self.compass['align'] = 1
# Check that magvar was set if GPS data are available
if gps:
if 0 in magvar:
self.compass['status2'] = 'warning'
self.compass['messages'].append(
['COMPASS: Magnetic variation is 0 and GPS data are present;', 1, 4])
self.compass['magvar'] = 2
self.compass['magvar_idx'] = np.where(np.array(magvar) == 0)[0].tolist()
# Check pitch mean
if np.any(np.asarray(np.abs(pitch_mean)) > 8):
self.compass['status2'] = 'warning'
self.compass['messages'].append(['PITCH: One or more transects have a mean pitch > 8 deg;', 1, 4])
temp = np.where(np.abs(pitch_mean) > 8)[0]
if len(temp) > 0:
self.compass['pitch_mean_warning_idx'] = np.array(transect_idx)[temp]
else:
self.compass['pitch_mean_warning_idx'] = []
elif np.any(np.asarray(np.abs(pitch_mean)) > 4):
if self.compass['status2'] == 'good':
self.compass['status2'] = 'caution'
self.compass['messages'].append(['Pitch: One or more transects have a mean pitch > 4 deg;', 2, 4])
temp = np.where(np.abs(pitch_mean) > 4)[0]
if len(temp) > 0:
self.compass['pitch_mean_caution_idx'] = np.array(transect_idx)[temp]
else:
self.compass['pitch_mean_caution_idx'] = []
# Check roll mean
if np.any(np.asarray(np.abs(roll_mean)) > 8):
self.compass['status2'] = 'warning'
self.compass['messages'].append(['ROLL: One or more transects have a mean roll > 8 deg;', 1, 4])
temp = np.where(np.abs(roll_mean) > 8)[0]
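# Hedged completion (assumption): the excerpt cuts off here; by analogy with the
# pitch checks above, store the transect indices that triggered the roll warning.
if len(temp) > 0:
    self.compass['roll_mean_warning_idx'] = np.array(transect_idx)[temp]
else:
    self.compass['roll_mean_warning_idx'] = []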
from astropy.io import fits
from os import path
import os
import numpy as np
import shutil
import autoarray as aa
fits_path = path.join(
"{}".format(path.dirname(path.realpath(__file__))), "files", "array_1d"
)
def create_fits(fits_path):
if path.exists(fits_path):
shutil.rmtree(fits_path)
os.makedirs(fits_path)
hdu_list = fits.HDUList()
hdu_list.append(fits.ImageHDU(np.ones(3)))
hdu_list[0].header.set("BITPIX", -64, "")
hdu_list.writeto(path.join(fits_path, "3_ones.fits"))
hdu_list = fits.HDUList()
hdu_list.append(fits.ImageHDU(np.ones(4)))
hdu_list[0].header.set("BITPIX", -64, "")
hdu_list.writeto(path.join(fits_path, "4_ones.fits"))
def clean_fits(fits_path):
if path.exists(fits_path):
shutil.rmtree(fits_path)
class TestAPI:
def test__manual__makes_array_1d_with_pixel_scale(self):
array_1d = aa.Array1D.manual_slim(array=[1.0, 2.0, 3.0, 4.0], pixel_scales=1.0)
assert type(array_1d) == aa.Array1D
assert (array_1d.native == np.array([1.0, 2.0, 3.0, 4.0])).all()
assert (array_1d.slim == np.array([1.0, 2.0, 3.0, 4.0])).all()
assert (array_1d.grid_radial == np.array(([0.0, 1.0, 2.0, 3.0]))).all()
assert array_1d.pixel_scale == 1.0
assert array_1d.pixel_scales == (1.0,)
assert array_1d.origin == (0.0,)
array_1d = aa.Array1D.manual_native(
array=[1.0, 2.0, 3.0, 4.0], pixel_scales=1.0
)
assert type(array_1d) == aa.Array1D
assert (array_1d.native == np.array([1.0, 2.0, 3.0, 4.0])).all()
assert (array_1d.slim == np.array([1.0, 2.0, 3.0, 4.0])).all()
assert (array_1d.grid_radial == np.array(([0.0, 1.0, 2.0, 3.0]))).all()
assert array_1d.pixel_scale == 1.0
assert array_1d.pixel_scales == (1.0,)
assert array_1d.origin == (0.0,)
def test__manual_mask__makes_array_1d_using_input_mask(self):
mask = aa.Mask1D.manual(
mask=[True, False, False, True, False, False], pixel_scales=1.0, sub_size=1
)
array_1d = aa.Array1D.manual_mask(
array=[100.0, 1.0, 2.0, 100.0, 3.0, 4.0], mask=mask
)
assert type(array_1d) == aa.Array1D
assert (array_1d.native == np.array([0.0, 1.0, 2.0, 0.0, 3.0, 4.0])).all()
assert (array_1d.slim == np.array([1.0, 2.0, 3.0, 4.0])).all()
assert array_1d.pixel_scale == 1.0
assert array_1d.pixel_scales == (1.0,)
assert array_1d.origin == (0.0,)
def test__full__makes_array_1d_with_pixel_scale__filled_with_input_value(self):
array_1d = aa.Array1D.full(fill_value=1.0, shape_native=4, pixel_scales=1.0)
assert type(array_1d) == aa.Array1D
assert (array_1d.native == np.array([1.0, 1.0, 1.0, 1.0])).all()
assert (array_1d.slim == np.array([1.0, 1.0, 1.0, 1.0])).all()
assert array_1d.pixel_scale == 1.0
assert array_1d.pixel_scales == (1.0,)
assert array_1d.origin == (0.0,)
array_1d = aa.Array1D.full(
fill_value=2.0, shape_native=3, pixel_scales=3.0, sub_size=2, origin=(4.0,)
)
assert type(array_1d) == aa.Array1D
print(array_1d.native)
assert (array_1d.native == np.array([2.0, 2.0, 2.0, 2.0, 2.0, 2.0])).all()
assert (array_1d.slim == np.array([2.0, 2.0, 2.0, 2.0, 2.0, 2.0])).all()
assert array_1d.pixel_scale == 3.0
assert array_1d.pixel_scales == (3.0,)
assert array_1d.origin == (4.0,)
def test__ones_zeros__makes_array_1d_with_pixel_scale__filled_with_input_value(
self
):
array_1d = aa.Array1D.ones(
shape_native=3, pixel_scales=3.0, sub_size=2, origin=(4.0,)
)
assert type(array_1d) == aa.Array1D
assert (array_1d.native == np.array([1.0, 1.0, 1.0, 1.0, 1.0, 1.0])).all()
assert (array_1d.slim == np.array([1.0, 1.0, 1.0, 1.0, 1.0, 1.0])).all()
assert array_1d.pixel_scale == 3.0
assert array_1d.pixel_scales == (3.0,)
assert array_1d.origin == (4.0,)
array_1d = aa.Array1D.zeros(
shape_native=3, pixel_scales=3.0, sub_size=2, origin=(4.0,)
)
assert type(array_1d) == aa.Array1D
assert (array_1d.native == np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0])).all()
assert (array_1d.slim == np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0])).all()
assert array_1d.pixel_scale == 3.0
assert array_1d.pixel_scales == (3.0,)
assert array_1d.origin == (4.0,)
def test__from_fits__makes_array_without_other_inputs(self):
create_fits(fits_path=fits_path)
arr = aa.Array1D.from_fits(
file_path=path.join(fits_path, "3_ones.fits"), hdu=0, pixel_scales=1.0
)
assert type(arr) == aa.Array1D
assert (arr.native == np.ones((3,))).all()
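# Hedged continuation (assumption): the excerpt is truncated; by analogy with the
# other tests in this class, the loaded array should also expose the slim values
# and the pixel scale, and the temporary fits files are then removed.
assert (arr.slim == np.ones((3,))).all()
assert arr.pixel_scale == 1.0
clean_fits(fits_path=fits_path)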
""" Code example from Complexity and Computation, a book about
exploring complexity science with Python. Available free from
http://greenteapress.com/complexity
Copyright 2016 <NAME>
MIT License: http://opensource.org/licenses/MIT
"""
from __future__ import print_function, division
import sys
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import animation
from scipy.signal import convolve2d
"""
For animation to work in the notebook, you might have to install
ffmpeg. On Ubuntu and Linux Mint, the following should work.
sudo add-apt-repository ppa:mc3man/trusty-media
sudo apt-get update
sudo apt-get install ffmpeg
"""
class Cell2D:
"""Implements Conway's Game of Life."""
def __init__(self, n, m=None):
"""Initializes the attributes.
n: number of rows
m: number of columns
"""
m = n if m is None else m
self.array = np.zeros((n, m), np.uint8)
def add_cells(self, row, col, *strings):
"""Adds cells at the given location.
row: top row index
col: left col index
strings: list of strings of 0s and 1s
"""
for i, s in enumerate(strings):
self.array[row+i, col:col+len(s)] = np.array([int(b) for b in s])
def step(self):
"""Executes one time step."""
pass
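# Hedged sketch (assumption, not part of the original class): the class docstring
# mentions Conway's Game of Life, but step() is left empty above. step_life is a
# hypothetical helper showing how the rule could be implemented with the
# convolve2d import at the top of this file.
def step_life(self):
    """Illustrative Game-of-Life update: count neighbours by 2D convolution."""
    kernel = np.array([[1, 1, 1],
                       [1, 0, 1],
                       [1, 1, 1]])
    neighbours = convolve2d(self.array, kernel, mode='same', boundary='wrap')
    alive = (self.array == 1) & ((neighbours == 2) | (neighbours == 3))
    born = (self.array == 0) & (neighbours == 3)
    self.array = (alive | born).astype(np.uint8)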
class Cell2DViewer:
"""Generates an animated view of an array image."""
cmap = plt.get_cmap('Greens')
options = dict(interpolation='nearest', alpha=0.8,
vmin=0, vmax=1, origin='upper')
def __init__(self, viewee):
self.viewee = viewee
self.im = None
self.hlines = None
self.vlines = None
# TODO: should this really take iters?
def step(self, iters=1):
"""Advances the viewee the given number of steps."""
for i in range(iters):
self.viewee.step()
def draw(self, grid=False):
"""Draws the array and any other elements.
grid: boolean, whether to draw grid lines
"""
self.draw_array(self.viewee.array)
if grid:
self.draw_grid()
def draw_array(self, array=None, cmap=None, **kwds):
"""Draws the cells."""
# Note: we have to make a copy because some implementations
# of step perform updates in place.
if array is None:
array = self.viewee.array
a = array.copy()
cmap = self.cmap if cmap is None else cmap
n, m = a.shape
plt.axis([0, m, 0, n])
plt.xticks([])
plt.yticks([])
options = self.options.copy()
options['extent'] = [0, m, 0, n]
options.update(kwds)
self.im = plt.imshow(a, cmap, **options)
def draw_grid(self):
"""Draws the grid."""
a = self.viewee.array
n, m = a.shape
lw = 2 if m < 7 else 1
options = dict(color='white', linewidth=lw)
# the shift is a hack to get the grid to line up with the cells
shift = 0.005 * n
rows = np.arange(n) + shift
self.hlines = plt.hlines(rows, 0, m, **options)
cols = np.arange(m) + shift
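# Hedged completion (assumption): mirror the horizontal grid lines above with the
# vertical ones.
self.vlines = plt.vlines(cols, 0, n, **options)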
"""
Prepare data for Part-GPNN model.
Need:
Node feature at different scales
Edge feature for valid edges
Adjacency matrix GT (parse graph GT)
Edge weight (corresponds to node level)
Edge label GT
"""
import os
import json
import pickle
import warnings
from collections import defaultdict
import numpy as np
import skimage.io
import cv2
import feature_model
import metadata
import torch
import torch.autograd
import torchvision.models
import vsrl_utils as vu
part_ids = {'Right Shoulder': [2],
'Left Shoulder': [5],
'Knee Right': [10],
'Knee Left': [13],
'Ankle Right': [11],
'Ankle Left': [14],
'Elbow Left': [6],
'Elbow Right': [3],
'Hand Left': [7],
'Hand Right': [4],
'Head': [0],
'Hip': [8],
'Upper Body': [2,5,6,3,7,4,0,8],
'Lower Body': [10,13,11,14,8],
'Left Arm': [5,6,7],
'Right Arm': [2,3,4],
'Left Leg': [8,10,11],
'Right Leg': [8,13,14],
'Full Body': [2,5,10,13,11,14,6,3,7,4,0,8],
}
__PART_WEIGHT_L1 = 0.1 # hand
__PART_WEIGHT_L2 = 0.3 # arm
__PART_WEIGHT_L3 = 0.5 # upper body
__PART_WEIGHT_L4 = 1.0 # human
part_weights = {'Right Shoulder': __PART_WEIGHT_L1,
'Left Shoulder': __PART_WEIGHT_L1,
'Knee Right': __PART_WEIGHT_L1,
'Knee Left': __PART_WEIGHT_L1,
'Ankle Right': __PART_WEIGHT_L1,
'Ankle Left': __PART_WEIGHT_L1,
'Elbow Left': __PART_WEIGHT_L1,
'Elbow Right': __PART_WEIGHT_L1,
'Hand Left': __PART_WEIGHT_L1,
'Hand Right': __PART_WEIGHT_L1,
'Head': __PART_WEIGHT_L1,
'Hip': __PART_WEIGHT_L1,
'Upper Body': __PART_WEIGHT_L3,
'Lower Body': __PART_WEIGHT_L3,
'Left Arm': __PART_WEIGHT_L2,
'Right Arm': __PART_WEIGHT_L2,
'Left Leg': __PART_WEIGHT_L2,
'Right Leg': __PART_WEIGHT_L2,
'Full Body': __PART_WEIGHT_L4}
part_names = list(part_ids.keys())
part_graph = {'Right Shoulder': [],
'Left Shoulder': [],
'Knee Right': [],
'Knee Left': [],
'Ankle Right': [],
'Ankle Left': [],
'Elbow Left': [],
'Elbow Right': [],
'Hand Left': [],
'Hand Right': [],
'Head': [],
'Hip': [],
'Upper Body': ['Head', 'Hip', 'Left Arm', 'Right Arm'],
'Lower Body': ['Hip', 'Left Leg', 'Right Leg'],
'Left Arm': ['Left Shoulder', 'Elbow Left', 'Hand Left'],
'Right Arm': ['Right Shoulder', 'Elbow Right', 'Hand Right'],
'Left Leg': ['Hip', 'Knee Left', 'Ankle Left'],
'Right Leg': ['Hip', 'Knee Right', 'Ankle Right'],
'Full Body': ['Upper Body', 'Lower Body']
}
def get_intersection(box1, box2):
return np.hstack((np.maximum(box1[:2], box2[:2]), np.minimum(box1[2:], box2[2:])))
def compute_area(box):
side1 = box[2]-box[0]
side2 = box[3]-box[1]
if side1 > 0 and side2 > 0:
return side1 * side2
else:
return 0.0
def compute_iou(box1, box2):
intersection_area = compute_area(get_intersection(box1, box2))
iou = intersection_area / (compute_area(box1) + compute_area(box2) - intersection_area)
return iou
def get_node_index(bbox, det_boxes, index_list):
bbox = np.array(bbox, dtype=np.float32)
max_iou = 0.5 # Use 0.5 as a threshold for evaluation
max_iou_index = -1
for i_node in index_list:
# check bbox overlap
iou = compute_iou(bbox, det_boxes[i_node])
if iou > max_iou:
max_iou = iou
max_iou_index = i_node
return max_iou_index
def combine_box(box1, box2):
return np.hstack((np.minimum(box1[:2], box2[:2]), np.maximum(box1[2:], box2[2:])))
mean = np.array([0.485, 0.456, 0.406])
std = np.array([0.229, 0.224, 0.225])
def get_box(_box, human_boxes_all, used_human):
max_iou = 0
best_box = None
best_i = None
for i, box in enumerate(human_boxes_all):
if i in used_human:
continue
iou = compute_iou(_box, box)
if iou > max_iou:
max_iou = iou
best_box = box
best_i = i
return best_i, best_box
def img_to_torch(img):
"""
input: H x W x C img iterables with range 0-255
output: C x H x W img tensor with range 0-1, normalized
"""
img = np.array(img) / 255.
img = (img - mean) / std
if len(img.shape) == 3:
img = np.expand_dims(img.transpose([2,0,1]), axis=0)
elif len(img.shape) == 4:
img = img.transpose([0,3,1,2])
elif len(img.shape) == 5:
img = img.transpose([0,1,4,2,3])
img = torch.autograd.Variable(torch.Tensor(img)).cuda()
return img
meta_dir = os.path.join(os.path.dirname(__file__), '../../../data/vcoco_features')
img_dir = '/home/tengyu/dataset/mscoco/images'
checkpoint_dir = '/home/tengyu/github/Part-GPNN/data/model_resnet_noisy/finetune_resnet'
vcoco_root = '/home/tengyu/dataset/v-coco/data'
save_data_path = '/home/tengyu/github/Part-GPNN/data/feature_resnet_tengyu2'
os.makedirs(save_data_path, exist_ok=True)
feature_network = feature_model.Resnet152(num_classes=len(metadata.action_classes))
feature_network.cuda()
best_model_file = os.path.join(checkpoint_dir, 'model_best.pth')
checkpoint = torch.load(best_model_file)
for k in list(checkpoint['state_dict'].keys()):
if k[:7] == 'module.':
checkpoint['state_dict'][k[7:]] = checkpoint['state_dict'][k]
del checkpoint['state_dict'][k]
feature_network.load_state_dict(checkpoint['state_dict'])
transform = torchvision.transforms.Compose([
torchvision.transforms.ToTensor(),
torchvision.transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225]),
])
input_h, input_w = 224, 224
part_eye = np.eye(21)
obj_eye = np.eye(81)
vcoco_mapping = {'train': 'train', 'test': 'val', 'val': 'train'}
for imageset in ['train', 'test', 'val']:
coco = vu.load_coco(vcoco_root)
vcoco_all = vu.load_vcoco('vcoco_{}'.format(imageset), vcoco_root)
for x in vcoco_all:
x = vu.attach_gt_boxes(x, coco)
image_ids = vcoco_all[0]['image_id'][:, 0].astype(int).tolist()
for i_image, image_id in enumerate(image_ids):
filename = coco.loadImgs(ids=[image_id])[0]['file_name']
d = filename.split('_')[1][:-4]
print('%d/%d: %s'%(i_image, len(image_ids), filename))
# if os.path.exists(os.path.join(save_data_path, filename + '.data')):
# continue
try:
openpose = json.load(open(os.path.join(os.path.dirname(__file__), '../../../data/openpose/%s2014openpose/%s_keypoints.json'%(vcoco_mapping[imageset], filename[:-4]))))
except:
warnings.warn('OpenPose missing ' + os.path.join(os.path.dirname(__file__), '../../../data/openpose/%s2014openpose/%s_keypoints.json'%(vcoco_mapping[imageset], filename[:-4])))
continue
try:
image_meta = pickle.load(open(os.path.join(meta_dir, filename + '.p'), 'rb'), encoding='latin1')
except:
warnings.warn('Meta missing ' + filename)
continue
try:
image = skimage.io.imread(os.path.join(img_dir, '%s2014'%d, filename))
except:
warnings.warn('Image missing ' + filename)
continue
img_w = image.shape[0]
img_h = image.shape[1]
if len(image.shape) == 2:
image = np.tile(np.expand_dims(image, axis=-1), [1, 1, 3])
obj_boxes_all = image_meta['boxes'][image_meta['human_num']:]
obj_classes_all = image_meta['classes'][image_meta['human_num']:]
human_boxes_all = image_meta['boxes'][:image_meta['human_num']]
part_human_ids = []
part_classes = []
part_boxes = []
human_boxes = []
used_human = []
# human_boxes contains parts at different levels
for human_id, human in enumerate(openpose['people']):
keypoints = np.array(human['pose_keypoints_2d'])
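# Hedged completion (assumption): OpenPose stores pose_keypoints_2d as a flat
# [x, y, confidence, ...] list, so it is typically reshaped to one row per joint
# before the per-part boxes are assembled.
keypoints = keypoints.reshape((-1, 3))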
#!/usr/bin/env python
# coding=utf-8
"""
Script to sample uncertain user parameters
"""
from __future__ import division
import random as rd
import numpy as np
from scipy.stats import nakagami
def main():
nb_samples = 100000
import matplotlib.pyplot as plt
# Get samples of set temperatures within building
list_set_temp = calc_set_temp_samples(nb_samples=nb_samples)
print('List of set temperatures in degree Celsius:')
print(list_set_temp)
print()
fig = plt.figure()
# the histogram of the data
plt.hist(list_set_temp, bins='auto')
plt.xlabel('Set temperatures in degree Celsius')
plt.ylabel('Number of temperatures')
plt.show()
plt.close()
# Create constant user air exchange rates
list_usr_airx = calc_user_air_ex_rates(nb_samples)
print('List of user air exchange rates in 1/h:')
print(list_usr_airx)
print()
fig = plt.figure()
# the histogram of the data
plt.hist(list_usr_airx, bins='auto')
plt.xlabel('User air exchange rates in 1/h')
plt.ylabel('Number of values')
plt.show()
plt.close()
method = 'destatis'
# method = 'equal'
# Sample number of occupants in apartments:
list_occ_in_app = calc_sampling_occ_per_app(nb_samples=nb_samples,
method=method)
fig = plt.figure()
# the histogram of the data
plt.hist(list_occ_in_app, 5)
plt.xlabel('Number of occupants per apartment')
plt.ylabel('Number of values')
plt.show()
plt.close()
# Annual electric demand sampling per apartment (3 persons, SFH)
list_el_dem = calc_sampling_el_demand_per_apartment(nb_samples=nb_samples,
nb_persons=3,
type='sfh')
fig = plt.figure()
# the histogram of the data
plt.hist(list_el_dem, bins='auto')
plt.xlabel('Number of electric energy demands in kWh')
plt.ylabel('Number of values')
plt.title('Electric energy demand for\napartment with '
'3 occupants')
plt.show()
plt.close()
list_el_dem_2 = []
for nb_occ in list_occ_in_app:
sample_el = \
calc_sampling_el_demand_per_apartment(nb_samples=1,
nb_persons=nb_occ,
type='sfh')[0]
list_el_dem_2.append(sample_el)
fig = plt.figure()
# the histogram of the data
plt.hist(list_el_dem_2, bins='auto')
plt.xlabel('Number of electric energy demands in kWh')
plt.ylabel('Number of values')
plt.title('Electric energy demand for\napartment with '
'different number of occupants')
plt.show()
plt.close()
# list_dhw = calc_sampling_dhw_per_person(nb_samples=nb_samples)
#
# fig = plt.figure()
# # the histogram of the data
# plt.hist(list_dhw, bins='auto')
# plt.xlabel('Hot water volumes per person and day in liters')
# plt.ylabel('Number of values')
# plt.show()
# plt.close()
nb_persons = 5
list_dhw_vol_per_app = \
calc_sampling_dhw_per_apartment(nb_samples=nb_samples,
nb_persons=nb_persons)
fig = plt.figure()
# the histogram of the data
plt.hist(list_dhw_vol_per_app, bins='auto')
plt.xlabel('Hot water volumes per apartment and day in liters')
plt.ylabel('Number of values')
plt.title('Hot water volumes per person and day for ' + str(nb_persons)
+ ' person apartment')
plt.show()
plt.close()
list_dhw_per_app_2 = []
for nb_occ in list_occ_in_app:
sample_dhw = calc_sampling_dhw_per_apartment(nb_samples=1,
nb_persons=nb_occ)[0]
list_dhw_per_app_2.append(sample_dhw)
fig = plt.figure()
# the histogram of the data
plt.hist(list_dhw_per_app_2, bins='auto')
plt.xlabel('Hot water volumes per apartment and day in liters')
plt.ylabel('Number of values')
plt.title('Hot water volumes per person and day for\napartment with '
'different number of occupants')
plt.show()
plt.close()
# # Create environment
# # ####################################################################
#
# # Create extended environment of pycity_calc
# year = 2010
# timestep = 3600 # Timestep in seconds
# location = (51.529086, 6.944689) # (latitude, longitute) of Bottrop
# altitude = 55 # Altitude of Bottrop
#
# # Generate timer object
# timer = time.TimerExtended(timestep=timestep, year=year)
#
# # Generate weather object
# weather = Weather.Weather(timer, useTRY=True, location=location,
# altitude=altitude)
#
# # Generate market object
# market = mark.Market()
#
# # Generate co2 emissions object
# co2em = co2.Emissions(year=year)
#
# # Generate environment
# environment = env.EnvironmentExtended(timer, weather, prices=market,
# location=location, co2em=co2em)
#
# # # Create occupancy profile
# # #####################################################################
#
# num_occ = 3
#
# print('Calculate occupancy.\n')
# # Generate occupancy profile
# occupancy_obj = occ.Occupancy(environment, number_occupants=num_occ)
#
# print('Finished occupancy calculation.\n')
# # Generate user air exchange rate profiles
# # #####################################################################
# list_air_ex_profiles = \
# calc_user_air_ex_profiles_factors(nb_samples=
# nb_samples,
# occ_profile=occupancy_obj.occupancy,
# temp_profile=
# environment.weather.tAmbient,
# random_gauss=True)
#
# list_av_air_ex_rates = []
#
# for profile in list_air_ex_profiles:
# plt.plot(profile, alpha=0.5)
#
# av_rate = np.mean(profile)
#
# print('Average air exchange rate in 1/h:')
# print(av_rate)
#
# list_av_air_ex_rates.append(av_rate)
#
# plt.xlabel('Time in hours')
# plt.ylabel('User air exchange rate in 1/h')
# plt.show()
# plt.close()
#
# fig2 = plt.figure()
# # the histogram of the data
# plt.hist(list_av_air_ex_rates, 50)
# plt.xlabel('Average user air exchange rate in 1/h')
# plt.ylabel('Number of air exchange rates')
# plt.show()
# plt.close()
def calc_set_temp_samples(nb_samples, mean=20, sdev=2.5):
"""
Calculate array of indoor set temperature values from gaussian
distribution.
Parameters
----------
nb_samples : int
Number of samples
mean : float, optional
Mean temperature value in degree Celsius for gaussian distribution
(default: 20)
sdev : float, optional
Standard deviation in degree Celsius for gaussian distribution
(default: 2.5)
Returns
-------
array_set_temp : np.array (of floats)
Numpy array of indoor set temperatures in degree Celsius
"""
array_set_temp = np.random.normal(loc=mean, scale=sdev, size=nb_samples)
return array_set_temp
def calc_user_air_ex_rates(nb_samples, min_value=0, max_value=1.2,
pdf='nakagami'):
"""
Calculate array of user air exchange rate samples
Parameters
----------
nb_samples : int
Number of samples
min_value : float, optional
Minimum user air exchange rate (default: 0)
max_value : float, optional
Maximum user air exchange rate (default: 1.2)
dist : str, optional
Probability density function to choose samples (default: 'nakagami')
Options:
- 'equal' : Equal distribution between min_value and max_value
- 'triangle' : Triangular distribution
- 'nakagami' : Nakagami distribution
Returns
-------
array_usr_inf : np.array (of floats)
Numpy array holding user infiltration rates in 1/h
"""
assert pdf in ['equal', 'triangle', 'nakagami'], \
'Unknown value for pdf input.'
# list_usr_inf = []
array_usr_inf = np.zeros(nb_samples)
if pdf == 'equal':
min_value *= 1000
max_value *= 1000
for i in range(nb_samples):
array_usr_inf[i] = rd.randint(min_value, max_value)
for i in range(len(array_usr_inf)):
array_usr_inf[i] /= 1000
elif pdf == 'triangle':
mode = min_value + (max_value - min_value) * 0.2
for i in range(nb_samples):
val = np.random.triangular(left=min_value,
right=max_value,
mode=mode)
array_usr_inf[i] = val
elif pdf == 'nakagami':
array_usr_inf = nakagami.rvs(0.6, scale=0.4, size=nb_samples)
return array_usr_inf
# Led to problems within monte-carlo simulation, as extrem air exchange
# rates can lead to unrealistic thermal peak loads within space heating
# profiles
# def calc_user_air_ex_profiles_factors(nb_samples, occ_profile, temp_profile,
# random_gauss=True):
# """
# Calculate set of user air exchange rate profiles. Uses stochastic
# air exchange rate user profile generation, based on user_air_exchange.py.
# Moreover, random rescaling, based on gaussion distribution, can be
# activated
#
# Parameters
# ----------
# nb_samples : int
# Number of samples
# occ_profile : array (of ints)
# Occupancy profile per timestep
# temp_profile : array (of floats)
# Outdoor temperature profile in degree Celsius per timestep
# random_gauss : bool, optional
# Defines, if resulting profile should randomly be rescaled with
# gaussian distribution rescaling factor (default: True)
#
# Returns
# -------
# list_air_ex_profiles : list (of arrays)
# List of air exchange profiles
# """
#
# list_air_ex_profiles = []
#
# for i in range(nb_samples):
#
# air_exch = usair.gen_user_air_ex_rate(occ_profile=occ_profile,
# temp_profile=temp_profile,
# b_type='res',
# inf_rate=None)
#
# if random_gauss:
# rescale_factor = np.random.normal(loc=1, scale=0.25)
# if rescale_factor < 0:
# rescale_factor = 0
# air_exch *= rescale_factor
#
# list_air_ex_profiles.append(air_exch)
#
# return list_air_ex_profiles
def calc_sampling_occ_per_app(nb_samples, method='destatis',
min_occ=1, max_occ=5):
"""
    Calculate array of samples for the number of occupants per apartment
Parameters
----------
nb_samples : int
Number of samples
method : str, optional
Method to calculate occupants per apartment samples
(default: 'destatis')
Options:
- 'equal' : Select samples between min_occ and max_occ from equal
distribution
- 'destatis' : Select samples with random numbers from Destatis
statistics from 2015
min_occ : int, optional
Minimal possible number of occupants per apartment (default: 1)
Only relevant for method == 'equal'
max_occ : int, optional
Maximal possible number of occupants per apartment (default: 5)
Only relevant for method == 'equal'
Returns
-------
array_nb_occ : np.array (of ints)
Numpy array holding number of occupants per apartment
Reference
---------
Statistisches Bundesamt (Destatis) (2017): Bevoelkerung in Deutschland.
    Available online at
    https://www.destatis.de/DE/ZahlenFakten/Indikatoren/LangeReihen/
    Bevoelkerung/lrbev05.html;jsessionid=4AACC10D2225591EC88C40EDEFB5EDAC.cae2,
    last accessed 2017-04-05.
"""
assert method in ['equal', 'destatis']
# list_nb_occ = []
    array_nb_occ = np.zeros(nb_samples)
# %%
import matplotlib.pyplot as plt
import numpy as np
import sklearn
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader
from model.inceptionv4 import inceptionv4
from model.mobilenetv2 import mobilenetv2
from model.resnet import resnet18
from model.shufflenetv2 import shufflenetv2
from model.vgg import vgg9_bn
from s3_dataset import PlantDataSet, PlantDataSetB
# %%
def get_acc(net, device, data_loader):
'''
    Compute the classification accuracy of net on the given data loader.
'''
correct = 0
total = 0
with torch.no_grad():
net.eval()
for data in data_loader:
images, labels = data
images = images.float().to(device)
labels = labels.long().to(device)
outputs = net(images)
_, predicted = torch.max(outputs.data, 1)
total += labels.size(0)
correct += (predicted == labels).sum().item()
return correct / total
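# Hedged usage sketch (not part of the original script): evaluate a single
# checkpoint with get_acc; device and data_loader_test are defined further
# below, and the weight path is one of the placeholders from Save_path.
# net = resnet18()
# net.load_state_dict(torch.load('../model_save/plant_disease2/resnet18.pth'))
# net.to(device)
# print('test_acc: {:.5f}'.format(get_acc(net, device, data_loader_test)))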
def get_pre(net, device, data_loader):
'''
    Return predictions over the whole test set, together with the labels.
'''
label_all = []
pre_all = []
with torch.no_grad():
net.eval()
for data in data_loader:
images, labels = data
images = images.float().to(device)
labels = labels.long().to(device)
outputs = net(images)
_, predicted = torch.max(outputs.data, 1)
label_all.extend(labels.data.cpu().numpy())
pre_all.extend(predicted.data.cpu().numpy())
return pre_all, label_all
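# Hedged usage sketch (not part of the original script): get_pre returns plain
# Python lists, so they are converted to numpy arrays before computing metrics,
# exactly as done further below for the confusion matrix.
# pre, label = get_pre(net, device, data_loader_test)
# pre, label = np.array(pre), np.array(label)
# print('Sample-wise error rate: {:.5f}'.format(np.mean(pre != label)))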
# %%
Func = [vgg9_bn, resnet18, shufflenetv2, mobilenetv2, inceptionv4]
Save_path = [
'../model_save/plant_disease2/vgg.pth',
'../model_save/plant_disease2/resnet18.pth',
'../model_save/plant_disease2/shufflenetv2.pth',
'../model_save/plant_disease2/mobilenetv2.pth',
'../model_save/plant_disease2/inceptionv4.pth'
]
device = torch.device('cuda:1' if torch.cuda.is_available() else 'cpu')
# data_loader_val = DataLoader(PlantDataSetB(flag='val'),
# batch_size=64,
# shuffle=False)
# data_loader_test = DataLoader(PlantDataSetB(flag='test'),
# batch_size=64,
# shuffle=False)
data_loader_val = DataLoader(PlantDataSet(flag='val'),
batch_size=64,
shuffle=False)
data_loader_test = DataLoader(PlantDataSet(flag='test'),
batch_size=64,
shuffle=False)
print('Domain A dataset: verification')
for Index in range(1):
    # Load the model and its weights
net = Func[Index]()
path_saved_model = Save_path[Index]
net.load_state_dict(torch.load(path_saved_model))
net.to(device)
val_acc = get_acc(net, device, data_loader_val)
test_acc = get_acc(net, device, data_loader_test)
print('{:d}: val_acc:{:.5f}, test_acc:{:.5f}'.format(
Index, val_acc, test_acc))
# %%
# Compute the confusion matrix of each model on the two test sets
device = torch.device('cuda:1' if torch.cuda.is_available() else 'cpu')
Func = [vgg9_bn, resnet18, shufflenetv2, mobilenetv2, inceptionv4]
Save_path = [
'../model_save/plant_disease2/vgg.pth',
'../model_save/plant_disease2/resnet18.pth',
'../model_save/plant_disease2/shufflenetv2.pth',
'../model_save/plant_disease2/mobilenetv2.pth',
'../model_save/plant_disease2/inceptionv4.pth'
]
data_test_a = DataLoader(PlantDataSet(flag='test'),
batch_size=64,
shuffle=False)
data_test_b = DataLoader(PlantDataSetB(flag='test'),
batch_size=64,
shuffle=False)
Index = 1
# Load the model and its weights
net = Func[Index]()
path_saved_model = Save_path[Index]
net.load_state_dict(torch.load(path_saved_model))
net.to(device)
pre, label = get_pre(net, device, data_test_b)
pre, label = np.array(pre), np.array(label)
# %%
import matplotlib.pyplot as plt
from sklearn.metrics import accuracy_score  # accuracy
from sklearn.metrics import confusion_matrix  # confusion matrix
print('Prediction accuracy: {:.9f}'.format(accuracy_score(label, pre)))
# Inspect the confusion matrix
domain_A_class = {
'Apple___Apple_scab': 0,
'Apple___Black_rot': 1,
'Apple___Cedar_apple_rust': 2,
'Apple___healthy': 3,
'Blueberry___healthy': 4,
'Cherry_(including_sour)___Powdery_mildew': 5,
'Cherry_(including_sour)___healthy': 6,
'Corn_(maize)___Cercospora_leaf_spot Gray_leaf_spot': 7,
'Corn_(maize)___Common_rust_': 8,
'Corn_(maize)___Northern_Leaf_Blight': 9,
'Corn_(maize)___healthy': 10,
'Grape___Black_rot': 11,
'Grape___Esca_(Black_Measles)': 12,
'Grape___Leaf_blight_(Isariopsis_Leaf_Spot)':13,
'Grape___healthy':14,
'Orange___Haunglongbing_(Citrus_greening)':15,
'Peach___Bacterial_spot':16,
'Peach___healthy':17,
'Pepper,_bell___Bacterial_spot':18,
'Pepper,_bell___healthy':19,
'Potato___Early_blight':20,
'Potato___Late_blight':21,
'Potato___healthy':22,
'Raspberry___healthy':23,
'Soybean___healthy':24,
'Squash___Powdery_mildew':25,
'Strawberry___Leaf_scorch':26,
'Strawberry___healthy':27,
'Tomato___Bacterial_spot':28,
'Tomato___Early_blight':29,
'Tomato___Late_blight':30,
'Tomato___Leaf_Mold':31,
'Tomato___Septoria_leaf_spot':32,
'Tomato___Spider_mites Two-spotted_spider_mite':33,
'Tomato___Target_Spot':34,
'Tomato___Tomato_Yellow_Leaf_Curl_Virus':35,
'Tomato___Tomato_mosaic_virus':36,
'Tomato___healthy':37}
c_matrix = confusion_matrix(label, pre, labels=list(range(38)))
# %% Keep this code
def plot_Matrix(cm, classes, title=None, cmap=plt.cm.Blues):
    plt.rc('font', family='Times New Roman', size='8')  # set font family and size
    # Normalize each row
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
print("Normalized confusion matrix")
    str_cm = cm.astype(str).tolist()
for row in str_cm:
print('\t'.join(row))
    # Cells below 1% are set to 0 so they do not show up in the colour map
for i in range(cm.shape[0]):
for j in range(cm.shape[1]):
if int(cm[i, j]*100 + 0.5) == 0:
cm[i, j]=0
fig, ax = plt.subplots()
im = ax.imshow(cm, interpolation='nearest', cmap=cmap)
    # ax.figure.colorbar(im, ax=ax)  # colorbar on the side
ax.set(xticks=np.arange(cm.shape[1]),
yticks=np.arange(cm.shape[0]),
xticklabels=classes, yticklabels=classes,
title=title,
ylabel='Actual',
xlabel='Predicted')
    # Draw minor grid lines to emulate a border around every cell
ax.set_xticks(np.arange(cm.shape[1]+1)-.5, minor=True)
    ax.set_yticks(np.arange(cm.shape[0]+1)-.5, minor=True)
from __future__ import division, unicode_literals, print_function, absolute_import
import re
import json
import logging
from six import string_types
from dateutil import parser
from io import StringIO
try:
import cPickle # Python 2.7
except:
import _pickle as cPickle
import numpy as np
import traitlets as tl
# Optional dependencies
from lazy_import import lazy_module
bs4 = lazy_module("bs4")
import podpac
from podpac.core.utils import _get_from_url, cached_property
from podpac.data import DataSource
from podpac.compositor import TileCompositorRaw
from podpac.interpolators import InterpolationMixin
_logger = logging.getLogger(__name__)
def _convert_str_to_vals(properties):
IGNORE_KEYS = ["sitenumber"]
for k, v in properties.items():
if not isinstance(v, string_types) or k in IGNORE_KEYS:
continue
try:
if "," in v:
properties[k] = tuple([float(vv) for vv in v.split(",")])
else:
properties[k] = float(v)
except ValueError:
try:
properties[k] = np.datetime64(v)
except ValueError:
pass
return properties
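# Minimal sketch (assumption, not part of the original module) of what
# _convert_str_to_vals does to the scraped station properties: numeric strings
# become floats, comma-separated strings become tuples, date strings become
# numpy datetimes, and keys in IGNORE_KEYS are left untouched.
# props = {"sitenumber": "010", "location": "34.9, -106.9", "elevation": "1600"}
# props = _convert_str_to_vals(props)
# # props["location"] -> (34.9, -106.9), props["elevation"] -> 1600.0,
# # props["sitenumber"] -> still the string "010"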
class COSMOSStation(DataSource):
_repr_keys = ["label", "network", "location"]
url = tl.Unicode("http://cosmos.hwr.arizona.edu/Probes/StationDat/")
station_data = tl.Dict().tag(attr=True)
@cached_property
def raw_data(self):
_logger.info("Downloading station data from {}".format(self.station_data_url))
r = _get_from_url(self.station_data_url)
if r is None:
raise ConnectionError(
"COSMOS data cannot be retrieved. Is the site {} down?".format(self.station_calibration_url)
)
return r.text
@cached_property
def data_columns(self):
return self.raw_data.split("\n", 1)[0].split(" ")
@property
def site_number(self):
return str(self.station_data["sitenumber"])
@property
def station_data_url(self):
return self.url + self.site_number + "/smcounts.txt"
@property
def station_calibration_url(self):
return self.url + self.site_number + "/calibrationInfo.php"
@property
def station_properties_url(self):
return self.url + self.site_number + "/index.php"
def get_data(self, coordinates, coordinates_index):
data = np.loadtxt(StringIO(self.raw_data), skiprows=1, usecols=self.data_columns.index("SOILM"))[
coordinates_index[0]
]
data[data > 100] = np.nan
data[data < 0] = np.nan
data /= 100.0 # Make it fractional
return self.create_output_array(coordinates, data=data.reshape(coordinates.shape))
def get_coordinates(self):
lat_lon = self.station_data["location"]
time = np.atleast_2d(
np.loadtxt(
StringIO(self.raw_data),
skiprows=1,
usecols=[self.data_columns.index("YYYY-MM-DD"), self.data_columns.index("HH:MM")],
dtype=str,
)
)
if time.size == 0:
time = np.datetime64("NaT")
else:
            time = np.array([t[0] + "T" + t[1] for t in time], np.datetime64)
import numpy as np
import os
import re
import requests
import sys
import time
from netCDF4 import Dataset
import pandas as pd
from bs4 import BeautifulSoup
from tqdm import tqdm
# setup constants used to access the data from the different M2M interfaces
BASE_URL = 'https://ooinet.oceanobservatories.org/api/m2m/' # base M2M URL
SENSOR_URL = '12576/sensor/inv/' # Sensor Information
# setup access credentials
AUTH = ['OOIAPI-853A3LA6QI3L62', '<KEY>']
def M2M_Call(uframe_dataset_name, start_date, end_date):
options = '?beginDT=' + start_date + '&endDT=' + end_date + '&format=application/netcdf'
r = requests.get(BASE_URL + SENSOR_URL + uframe_dataset_name + options, auth=(AUTH[0], AUTH[1]))
if r.status_code == requests.codes.ok:
data = r.json()
else:
return None
# wait until the request is completed
print('Waiting for OOINet to process and prepare data request, this may take up to 20 minutes')
url = [url for url in data['allURLs'] if re.match(r'.*async_results.*', url)][0]
check_complete = url + '/status.txt'
with tqdm(total=400, desc='Waiting') as bar:
for i in range(400):
            r = requests.get(check_complete)
            bar.update(1)
            elapsed = (i * 3) / 60
            if r.status_code == requests.codes.ok:
                bar.n = 400
                bar.last_print_n = 400
                bar.refresh()
                print('\nrequest completed in %f minutes.' % elapsed)
                break
            else:
                time.sleep(3)
return data
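# Hedged usage sketch (not part of the original script): request one month of
# telemetered METBK data; the reference designator and the date strings are
# placeholders, and the request may take several minutes to complete.
# request = M2M_Call('CE02SHSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument',
#                    '2019-01-01T00:00:00.000Z', '2019-02-01T00:00:00.000Z')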
def M2M_Files(data, tag=''):
"""
Use a regex tag combined with the results of the M2M data request to collect the data from the THREDDS catalog.
Collected data is gathered into an xarray dataset for further processing.
:param data: JSON object returned from M2M data request with details on where the data is to be found for download
:param tag: regex tag to use in discriminating the data files, so we only collect the correct ones
:return: the collected data as an xarray dataset
"""
# Create a list of the files from the request above using a simple regex as a tag to discriminate the files
url = [url for url in data['allURLs'] if re.match(r'.*thredds.*', url)][0]
files = list_files(url, tag)
return files
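# Hedged example (not part of the original script): restrict the collected
# catalog entries to the instrument's own NetCDF files; the regex tag is an
# assumption and depends on the data stream that was requested.
# nc_files = M2M_Files(request, tag='.*METBK.*\\.nc$')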
def list_files(url, tag=''):
"""
Function to create a list of the NetCDF data files in the THREDDS catalog created by a request to the M2M system.
:param url: URL to user's THREDDS catalog specific to a data request
:param tag: regex pattern used to distinguish files of interest
:return: list of files in the catalog with the URL path set relative to the catalog
"""
page = requests.get(url).text
soup = BeautifulSoup(page, 'html.parser')
pattern = re.compile(tag)
return [node.get('href') for node in soup.find_all('a', text=pattern)]
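# Minimal sketch (assumption, not part of the original script): list_files can
# also be called directly on a THREDDS catalog URL, e.g. to keep only files
# from a particular deployment; both the URL and the tag are placeholders.
# files = list_files(url, tag='.*deployment0003.*\\.nc$')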
def M2M_Data(nclist,variables):
thredds = 'https://opendap.oceanobservatories.org/thredds/dodsC/ooi/'
#nclist is going to contain more than one url eventually
for jj in range(len(nclist)):
url=nclist[jj]
url=url[25:]
dap_url = thredds + url + '#fillmismatch'
openFile = Dataset(dap_url,'r')
for ii in range(len(variables)):
dum = openFile.variables[variables[ii].name]
variables[ii].data = np.append(variables[ii].data, dum[:].data)
tmp = variables[0].data/60/60/24
time_converted = pd.to_datetime(tmp, unit='D', origin=pd.Timestamp('1900-01-01'))
return variables, time_converted
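# Hedged end-to-end sketch (not part of the original script), assuming request
# and var_list were built with the helpers above and with M2M_URLs below:
# nc_files = M2M_Files(request, tag='.*METBK.*\\.nc$')
# variables, time_converted = M2M_Data(nc_files, var_list)
# print(variables[1])        # name, units and data size of the first data variable
# print(time_converted[:5])  # first five timestamps as pandas datetimes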
class var(object):
def __init__(self):
"""A Class that generically holds data with a variable name
and the units as attributes"""
self.name = ''
self.data = np.array([])
self.units = ''
def __repr__(self):
return_str = "name: " + self.name + '\n'
return_str += "units: " + self.units + '\n'
return_str += "data: size: " + str(self.data.shape)
return return_str
class structtype(object):
def __init__(self):
""" A class that imitates a Matlab structure type
"""
self._data = []
def __getitem__(self, index):
"""implement index behavior in the struct"""
if index == len(self._data):
self._data.append(var())
return self._data[index]
def __len__(self):
return len(self._data)
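# Minimal sketch (assumption, not part of the original script) of how var and
# structtype cooperate: indexing a structtype one position past its current
# length appends a fresh var, mimicking Matlab-style struct array growth.
# var_list = structtype()
# var_list[0].name = 'time'
# var_list[0].units = 'seconds since 1900-01-01'
# var_list[0].data = np.array([])
# print(len(var_list), var_list[0])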
def M2M_URLs(platform_name,node,instrument_class,method):
var_list = structtype()
#MOPAK
if platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/SBD17/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD11/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD11/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/SBD17/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD11/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD11/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSPM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSPM/SBS01/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#METBK
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m'
var_list[10].units = 'W/m'
var_list[11].units = 'mm'
var_list[12].units = 'W/m'
var_list[13].units = 'W/m'
var_list[14].units = 'W/m'
var_list[15].units = 'W/m'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m'
var_list[10].units = 'W/m'
var_list[11].units = 'mm'
var_list[12].units = 'W/m'
var_list[13].units = 'W/m'
var_list[14].units = 'W/m'
var_list[15].units = 'W/m'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m'
var_list[10].units = 'W/m'
var_list[11].units = 'mm'
var_list[12].units = 'W/m'
var_list[13].units = 'W/m'
var_list[14].units = 'W/m'
var_list[15].units = 'W/m'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m'
var_list[10].units = 'W/m'
var_list[11].units = 'mm'
var_list[12].units = 'W/m'
var_list[13].units = 'W/m'
var_list[14].units = 'W/m'
var_list[15].units = 'W/m'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
#FLORT
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/SBD17/06-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/SBD17/06-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID27/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID27/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID27/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID27/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSPM/WFP01/04-FLORTK000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
#FDCHP
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'FDCHP' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD12/08-FDCHPA000/telemetered/fdchp_a_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#DOSTA
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID27/04-DOSTAD000/telemetered/dosta_abcdjm_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID27/04-DOSTAD000/telemetered/dosta_abcdjm_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID27/04-DOSTAD000/telemetered/dosta_abcdjm_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID27/04-DOSTAD000/telemetered/dosta_abcdjm_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD37/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD37/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD37/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD37/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSPM/WFP01/02-DOFSTK000/telemetered/dofst_k_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dofst_k_oxygen_l2'
var_list[2].name = 'dofst_k_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'Hz'
var_list[3].units = 'dbar'
#ADCP
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID26/01-ADCPTA000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID26/01-ADCPTC000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID26/01-ADCPTA000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID26/01-ADCPTC000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD35/04-ADCPTM000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD35/04-ADCPTM000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD35/04-ADCPTC000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD35/04-ADCPSJ000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
#ZPLSC
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD37/07-ZPLSCC000/telemetered/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD37/07-ZPLSCC000/telemetered/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD37/07-ZPLSCC000/telemetered/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD37/07-ZPLSCC000/telemetered/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD37/07-ZPLSCC000/recovered_host/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD37/07-ZPLSCC000/recovered_host/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD37/07-ZPLSCC000/recovered_host/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD37/07-ZPLSCC000/recovered_host/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#WAVSS
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_statistics'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_statistics'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_statistics'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_statistics'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
#VELPT
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/SBD17/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD11/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD11/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/SBD17/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD11/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD11/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID26/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID26/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID26/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID26/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
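#All VELPT branches above (BUOY and NSIF on every mooring) expose the same nine raw
#parameters. Heading, pitch, and roll are reported in deci-degrees, temperature in
#0.01 degC, and pressure in thousandths of a dbar, i.e. scaled instrument values
#rather than SI engineering units.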
#PCO2W - seawater pCO2 sensor
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD35/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD35/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD35/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD35/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
#PHSEN - seawater pH sensor
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID26/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID26/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID26/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID26/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD35/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD35/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD35/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD35/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
#SPKIR - downwelling spectral irradiance
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID26/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID26/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID26/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID26/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
#PRESF - seafloor pressure (tide measurements)
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD35/02-PRESFA000/telemetered/presf_abc_dcl_tide_measurement'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD35/02-PRESFA000/telemetered/presf_abc_dcl_tide_measurement'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD35/02-PRESFB000/telemetered/presf_abc_dcl_tide_measurement'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD35/02-PRESFC000/telemetered/presf_abc_dcl_tide_measurement'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
#CTDBP - pumped CTD (temperature, salinity, density, pressure, conductivity)
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD37/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/SBD17/06-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD37/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/SBD17/06-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID27/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID27/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID27/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID27/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD37/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD37/03-CTDBPE000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
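#Every CTDBP branch above returns the same six parameters (time, temperature,
#practical salinity, density, pressure, conductivity); only the mooring, node,
#and port-instrument portion of the dataset path changes between branches.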
#VEL3D - 3-D single-point turbulent velocity meter (seafloor MFN frames)
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD35/01-VEL3DD000/telemetered/vel3d_cd_dcl_velocity_data'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD35/01-VEL3DD000/telemetered/vel3d_cd_dcl_velocity_data'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD35/01-VEL3DD000/telemetered/vel3d_cd_dcl_velocity_data'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD35/01-VEL3DD000/telemetered/vel3d_cd_dcl_velocity_data'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
#VEL3DK - 3-D velocity meter on the CE09OSPM wire-following profiler
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSPM/WFP01/01-VEL3DK000/telemetered/vel3d_k_wfp_stc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_k_eastward_velocity'
var_list[2].name = 'vel3d_k_northward_velocity'
var_list[3].name = 'vel3d_k_upward_velocity'
var_list[4].name = 'vel3d_k_heading'
var_list[5].name = 'vel3d_k_pitch'
var_list[6].name = 'vel3d_k_roll'
var_list[7].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'ddegrees'
var_list[5].units = 'ddegrees'
var_list[6].units = 'ddegrees'
var_list[7].units = 'dbar'
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSPM/WFP01/03-CTDPFK000/telemetered/ctdpf_ckl_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'ctdpf_ckl_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdpf_ckl_seawater_pressure'
var_list[5].name = 'ctdpf_ckl_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
#PCO2A - air-sea pCO2 and CO2 flux
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD12/04-PCO2AA000/telemetered/pco2a_a_dcl_instrument_water'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD12/04-PCO2AA000/telemetered/pco2a_a_dcl_instrument_water'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD12/04-PCO2AA000/telemetered/pco2a_a_dcl_instrument_water'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD12/04-PCO2AA000/telemetered/pco2a_a_dcl_instrument_water'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
#PARAD - photosynthetically available radiation (profiler)
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSPM/WFP01/05-PARADK000/telemetered/parad_k__stc_imodem_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_k_par'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
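#The OPTAA branches that follow populate only the time variable in var_list; the
#multi-wavelength absorption/attenuation products carried by the optaa streams are
#presumably handled elsewhere and are not enumerated here.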
#OPTAA - optical absorption/attenuation spectrophotometer
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID27/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID27/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID27/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID27/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD37/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD37/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD37/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD37/01-OPTAAC000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#NUTNR - nitrate (SUNA)
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/07-NUTNRB000/telemetered/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID26/07-NUTNRB000/telemetered/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID26/07-NUTNRB000/telemetered/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/07-NUTNRB000/telemetered/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID26/07-NUTNRB000/telemetered/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID26/07-NUTNRB000/telemetered/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
##Recovered-host data streams
#MOPAK - 3-axis motion package
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/SBD17/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD11/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD11/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/SBD17/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD11/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD11/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSPM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSPM/SBS01/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#METBK - bulk meteorology package
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD11/06-METBKA000/recovered_host/metbk_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m'
var_list[10].units = 'W/m'
var_list[11].units = 'mm'
var_list[12].units = 'W/m'
var_list[13].units = 'W/m'
var_list[14].units = 'W/m'
var_list[15].units = 'W/m'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD11/06-METBKA000/recovered_host/metbk_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m'
var_list[10].units = 'W/m'
var_list[11].units = 'mm'
var_list[12].units = 'W/m'
var_list[13].units = 'W/m'
var_list[14].units = 'W/m'
var_list[15].units = 'W/m'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD11/06-METBKA000/recovered_host/metbk_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m'
var_list[10].units = 'W/m'
var_list[11].units = 'mm'
var_list[12].units = 'W/m'
var_list[13].units = 'W/m'
var_list[14].units = 'W/m'
var_list[15].units = 'W/m'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD11/06-METBKA000/recovered_host/metbk_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m'
var_list[10].units = 'W/m'
var_list[11].units = 'mm'
var_list[12].units = 'W/m'
var_list[13].units = 'W/m'
var_list[14].units = 'W/m'
var_list[15].units = 'W/m'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
#FLORT
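# FLORT: three-wavelength fluorometer (seawater scattering, chlorophyll-a, CDOM, volume scattering, optical backscatter)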
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/SBD17/06-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/SBD17/06-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID27/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID27/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID27/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID27/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
#FDCHP
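# FDCHP: direct-covariance air-sea flux package (only the time record is mapped for this recovered_host stream)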
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'FDCHP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD12/08-FDCHPA000/recovered_host/fdchp_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#DOSTA
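# DOSTA: dissolved oxygen optode (oxygen concentration and optode temperature)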
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/03-DOSTAD000/recovered_host/dosta_abcdjm_ctdbp_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID27/04-DOSTAD000/recovered_host/dosta_abcdjm_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID27/04-DOSTAD000/recovered_host/dosta_abcdjm_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/03-DOSTAD000/recovered_host/dosta_abcdjm_ctdbp_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID27/04-DOSTAD000/recovered_host/dosta_abcdjm_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID27/04-DOSTAD000/recovered_host/dosta_abcdjm_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD37/03-DOSTAD000/recovered_host/dosta_abcdjm_ctdbp_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD37/03-DOSTAD000/recovered_host/dosta_abcdjm_ctdbp_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD37/03-DOSTAD000/recovered_host/dosta_abcdjm_ctdbp_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD37/03-DOSTAD000/recovered_host/dosta_abcdjm_ctdbp_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
#ADCP
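# ADCP: acoustic Doppler current profiler; earth-referenced velocity profiles plus heading, pitch, and roll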
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID26/01-ADCPTA000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID26/01-ADCPTC000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID26/01-ADCPTA000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID26/01-ADCPTC000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD35/04-ADCPTM000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD35/04-ADCPTM000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD35/04-ADCPTC000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD35/04-ADCPSJ000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
#WAVSS
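# WAVSS: surface wave statistics (wave heights, periods, mean direction, and spread)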
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_statistics_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_statistics_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_statistics_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_statistics_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
#VELPT
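# VELPT: single-point velocity meter (velocities, heading/pitch/roll, temperature, pressure)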
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/SBD17/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD11/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD11/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredHost':
#uframe_dataset_name = 'CE06ISSM/RID16/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
uframe_dataset_name = 'CE06ISSM/RID16/04-VELPTA000/recovered_host/velpt_ab_instrument_recovered'
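            # note: the dcl-parsed stream above is commented out; this entry uses the plain velpt_ab_instrument_recovered stream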
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD11/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD11/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID26/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID26/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID26/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID26/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
#PCO2W
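# PCO2W: seawater pCO2 (thermistor temperature and pCO2)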
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'PCO2W' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/05-PCO2WB000/recovered_host/pco2w_abc_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD35/05-PCO2WB000/recovered_host/pco2w_abc_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'PCO2W' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/05-PCO2WB000/recovered_host/pco2w_abc_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD35/05-PCO2WB000/recovered_host/pco2w_abc_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD35/05-PCO2WB000/recovered_host/pco2w_abc_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD35/05-PCO2WB000/recovered_host/pco2w_abc_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
#PHSEN
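# PHSEN: seawater pH (thermistor temperature and pH)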
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID26/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID26/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID26/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID26/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD35/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD35/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD35/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD35/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
#SPKIR
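# SPKIR: multispectral downwelling irradiance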
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/08-SPKIRB000/recovered_host/spkir_abj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID26/08-SPKIRB000/recovered_host/spkir_abj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID26/08-SPKIRB000/recovered_host/spkir_abj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/08-SPKIRB000/recovered_host/spkir_abj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID26/08-SPKIRB000/recovered_host/spkir_abj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID26/08-SPKIRB000/recovered_host/spkir_abj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
#PRESF
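# PRESF: seafloor pressure (absolute pressure and seawater temperature)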
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD35/02-PRESFA000/recovered_host/presf_abc_dcl_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD35/02-PRESFA000/recovered_host/presf_abc_dcl_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD35/02-PRESFB000/recovered_host/presf_abc_dcl_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD35/02-PRESFC000/recovered_host/presf_abc_dcl_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
#CTDBP - pumped CTD
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD37/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/SBD17/06-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD37/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/SBD17/06-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID27/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID27/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID27/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID27/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD37/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD37/03-CTDBPE000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
#VEL3D - 3-D single point velocity meter
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD35/01-VEL3DD000/recovered_host/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD35/01-VEL3DD000/recovered_host/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD35/01-VEL3DD000/recovered_host/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD35/01-VEL3DD000/recovered_host/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
#PCO2A - air-sea pCO2
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD12/04-PCO2AA000/recovered_host/pco2a_a_dcl_instrument_water_recovered'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD12/04-PCO2AA000/recovered_host/pco2a_a_dcl_instrument_water_recovered'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD12/04-PCO2AA000/recovered_host/pco2a_a_dcl_instrument_water_recovered'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD12/04-PCO2AA000/recovered_host/pco2a_a_dcl_instrument_water_recovered'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
#OPTAA - absorption spectrophotometer
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID27/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID27/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID27/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID27/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD37/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD37/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD37/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD37/01-OPTAAC000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#NUTNR - nitrate
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/07-NUTNRB000/recovered_host/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID26/07-NUTNRB000/recovered_host/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID26/07-NUTNRB000/recovered_host/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/07-NUTNRB000/recovered_host/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID26/07-NUTNRB000/recovered_host/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID26/07-NUTNRB000/recovered_host/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
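#CTD - recovered instrument and profiler streams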
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/RID16/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD37/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/SBD17/06-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/RID16/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD37/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/SBD17/06-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE02SHSM/RID27/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/RID27/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSSM/RID27/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/RID27/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/MFD37/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/MFD37/03-CTDBPE000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'RecoveredWFP':
uframe_dataset_name = 'CE09OSPM/WFP01/03-CTDPFK000/recovered_wfp/ctdpf_ckl_wfp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdpf_ckl_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdpf_ckl_seawater_pressure'
var_list[5].name = 'ctdpf_ckl_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
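#ADCP - recovered instrument streams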
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE02SHSM/RID26/01-ADCPTA000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSSM/RID26/01-ADCPTC000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/RID26/01-ADCPTA000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/RID26/01-ADCPTC000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD35/04-ADCPTM000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD35/04-ADCPTM000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/MFD35/04-ADCPTC000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/MFD35/04-ADCPSJ000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
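#ZPLSC - recovered instrument streams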
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD37/07-ZPLSCC000/recovered_inst/zplsc_echogram_data'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD37/07-ZPLSCC000/recovered_inst/zplsc_echogram_data'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/MFD37/07-ZPLSCC000/recovered_inst/zplsc_echogram_data'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/MFD37/07-ZPLSCC000/recovered_inst/zplsc_echogram_data'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
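#VELPT - recovered instrument streams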
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/SBD17/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE02SHSM/SBD11/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSSM/SBD11/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/SBD17/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/SBD11/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/SBD11/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/RID16/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE02SHSM/RID26/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSSM/RID26/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/RID16/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/RID26/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/RID26/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
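#VEL3D - recovered profiler and instrument streams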
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'VEL3D' and method == 'RecoveredWFP':
uframe_dataset_name = 'CE09OSPM/WFP01/01-VEL3DK000/recovered_wfp/vel3d_k_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_k_eastward_velocity'
var_list[2].name = 'vel3d_k_northward_velocity'
var_list[3].name = 'vel3d_k_upward_velocity'
var_list[4].name = 'vel3d_k_heading'
var_list[5].name = 'vel3d_k_pitch'
var_list[6].name = 'vel3d_k_roll'
var_list[7].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'ddegrees'
var_list[5].units = 'ddegrees'
var_list[6].units = 'ddegrees'
var_list[7].units = 'dbar'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD35/01-VEL3DD000/recovered_inst/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD35/01-VEL3DD000/recovered_inst/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/MFD35/01-VEL3DD000/recovered_inst/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/MFD35/01-VEL3DD000/recovered_inst/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD35/02-PRESFA000/recovered_inst/presf_abc_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'presf_tide_pressure'
var_list[2].name = 'presf_tide_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD35/02-PRESFA000/recovered_inst/presf_abc_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'presf_tide_pressure'
var_list[2].name = 'presf_tide_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/MFD35/02-PRESFB000/recovered_inst/presf_abc_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'presf_tide_pressure'
var_list[2].name = 'presf_tide_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/MFD35/02-PRESFC000/recovered_inst/presf_abc_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'presf_tide_pressure'
var_list[2].name = 'presf_tide_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/RID16/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE02SHSM/RID26/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSSM/RID26/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/RID16/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/RID26/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/RID26/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD35/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD35/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/MFD35/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/MFD35/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'PCO2W' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/RID16/05-PCO2WB000/recovered_inst/pco2w_abc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD35/05-PCO2WB000/recovered_inst/pco2w_abc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'PCO2W' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/RID16/05-PCO2WB000/recovered_inst/pco2w_abc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD35/05-PCO2WB000/recovered_inst/pco2w_abc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/MFD35/05-PCO2WB000/recovered_inst/pco2w_abc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/MFD35/05-PCO2WB000/recovered_inst/pco2w_abc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'RecoveredWFP':
uframe_dataset_name = 'CE09OSPM/WFP01/05-PARADK000/recovered_wfp/parad_k__stc_imodem_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_k_par'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/RID16/07-NUTNRB000/recovered_inst/suna_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredInst':
uframe_dataset_name = 'CE02SHSM/RID26/07-NUTNRB000/recovered_inst/suna_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSSM/RID26/07-NUTNRB000/recovered_inst/suna_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/RID16/07-NUTNRB000/recovered_inst/suna_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/RID26/07-NUTNRB000/recovered_inst/suna_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/RID26/07-NUTNRB000/recovered_inst/suna_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'FDCHP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE02SHSM/SBD12/08-FDCHPA000/recovered_inst/fdchp_a_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'FLORT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/SBD17/06-FLORTD000/recovered_inst/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'FLORT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/SBD17/06-FLORTD000/recovered_inst/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'RecoveredWFP':
uframe_dataset_name = 'CE09OSPM/WFP01/04-FLORTK000/recovered_wfp/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'RecoveredWFP':
uframe_dataset_name = 'CE09OSPM/WFP01/02-DOFSTK000/recovered_wfp/dofst_k_wfp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dofst_k_oxygen_l2'
var_list[2].name = 'dofst_k_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'Hz'
var_list[3].units = 'dbar'
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/RID16/03-DOSTAD000/recovered_inst/dosta_abcdjm_ctdbp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'ctd_tc_oxygen'
var_list[3].name = 'ctdbp_seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/RID16/03-DOSTAD000/recovered_inst/dosta_abcdjm_ctdbp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'ctd_tc_oxygen'
var_list[3].name = 'ctdbp_seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD37/03-DOSTAD000/recovered_inst/dosta_abcdjm_ctdbp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'ctd_tc_oxygen'
var_list[3].name = 'ctdbp_seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD37/03-DOSTAD000/recovered_inst/dosta_abcdjm_ctdbp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'ctd_tc_oxygen'
var_list[3].name = 'ctdbp_seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/MFD37/03-DOSTAD000/recovered_inst/dosta_abcdjm_ctdbp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'ctd_tc_oxygen'
var_list[3].name = 'ctdbp_seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/MFD37/03-DOSTAD000/recovered_inst/dosta_abcdjm_ctdbp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'ctd_tc_oxygen'
var_list[3].name = 'ctdbp_seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'WAVSS_Stats' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD35/04-ADCPTM000/recovered_inst/adcpt_m_instrument_log9_recovered'
var_list[0].name = 'time'
var_list[1].name = 'significant_wave_height'
var_list[2].name = 'peak_wave_period'
var_list[3].name = 'peak_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'seconds'
var_list[3].units = 'degrees'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'WAVSS_Stats' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD35/04-ADCPTM000/recovered_inst/adcpt_m_instrument_log9_recovered'
var_list[0].name = 'time'
var_list[1].name = 'significant_wave_height'
var_list[2].name = 'peak_wave_period'
var_list[3].name = 'peak_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'seconds'
var_list[3].units = 'degrees'
elif platform_name == 'CE02SHBP' and node == 'BEP' and instrument_class == 'CTD' and method == 'Streamed':
uframe_dataset_name = 'CE02SHBP/LJ01D/06-CTDBPN106/streamed/ctdbp_no_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_no_seawater_pressure'
var_list[5].name = 'ctdbp_no_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE04OSBP' and node == 'BEP' and instrument_class == 'CTD' and method == 'Streamed':
uframe_dataset_name = 'CE04OSBP/LJ01C/06-CTDBPO108/streamed/ctdbp_no_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_no_seawater_pressure'
var_list[5].name = 'ctdbp_no_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE02SHBP' and node == 'BEP' and instrument_class == 'DOSTA' and method == 'Streamed':
uframe_dataset_name = 'CE02SHBP/LJ01D/06-CTDBPN106/streamed/ctdbp_no_sample'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'ctd_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE04OSBP' and node == 'BEP' and instrument_class == 'DOSTA' and method == 'Streamed':
uframe_dataset_name = 'CE04OSBP/LJ01C/06-CTDBPO108/streamed/ctdbp_no_sample'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'ctd_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE02SHBP' and node == 'BEP' and instrument_class == 'PHSEN' and method == 'Streamed':
uframe_dataset_name = 'CE02SHBP/LJ01D/10-PHSEND103/streamed/phsen_data_record'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE04OSBP' and node == 'BEP' and instrument_class == 'PHSEN' and method == 'Streamed':
uframe_dataset_name = 'CE04OSBP/LJ01C/10-PHSEND107/streamed/phsen_data_record'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE02SHBP' and node == 'BEP' and instrument_class == 'PCO2W' and method == 'Streamed':
uframe_dataset_name = 'CE02SHBP/LJ01D/09-PCO2WB103/streamed/pco2w_b_sami_data_record'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE04OSBP' and node == 'BEP' and instrument_class == 'PCO2W' and method == 'Streamed':
uframe_dataset_name = 'CE04OSBP/LJ01C/09-PCO2WB104/streamed/pco2w_b_sami_data_record'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE02SHBP' and node == 'BEP' and instrument_class == 'ADCP' and method == 'Streamed':
uframe_dataset_name = 'CE02SHBP/LJ01D/05-ADCPTB104/streamed/adcp_velocity_beam'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE04OSBP' and node == 'BEP' and instrument_class == 'ADCP' and method == 'Streamed':
uframe_dataset_name = 'CE04OSBP/LJ01C/05-ADCPSI103/streamed/adcp_velocity_beam'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE02SHBP' and node == 'BEP' and instrument_class == 'VEL3D' and method == 'Streamed':
uframe_dataset_name = 'CE02SHBP/LJ01D/07-VEL3DC108/streamed/vel3d_cd_velocity_data'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE04OSBP' and node == 'BEP' and instrument_class == 'VEL3D' and method == 'Streamed':
uframe_dataset_name = 'CE04OSBP/LJ01C/07-VEL3DC107/streamed/vel3d_cd_velocity_data'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE02SHBP' and node == 'BEP' and instrument_class == 'OPTAA' and method == 'Streamed':
uframe_dataset_name = 'CE02SHBP/LJ01D/08-OPTAAD106/streamed/optaa_sample'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE04OSBP' and node == 'BEP' and instrument_class == 'OPTAA' and method == 'Streamed':
uframe_dataset_name = 'CE04OSBP/LJ01C/08-OPTAAC104/streamed/optaa_sample'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#CSPP Data below
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSP/SP001/08-FLORTJ000/telemetered/flort_dj_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE01ISSP/SP001/08-FLORTJ000/recovered_cspp/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSP/SP001/08-FLORTJ000/telemetered/flort_dj_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE06ISSP/SP001/08-FLORTJ000/recovered_cspp/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSP/SP001/02-DOSTAJ000/telemetered/dosta_abcdjm_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[4].name = 'optode_temperature'
var_list[5].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'umol/L'
var_list[4].units = 'degC'
var_list[5].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE01ISSP/SP001/02-DOSTAJ000/recovered_cspp/dosta_abcdjm_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[4].name = 'optode_temperature'
var_list[5].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'umol/L'
var_list[4].units = 'degC'
var_list[5].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSP/SP001/02-DOSTAJ000/telemetered/dosta_abcdjm_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[4].name = 'optode_temperature'
var_list[5].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'umol/L'
var_list[4].units = 'degC'
var_list[5].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE06ISSP/SP001/02-DOSTAJ000/recovered_cspp/dosta_abcdjm_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[4].name = 'optode_temperature'
var_list[5].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = | np.array([]) | numpy.array |
import numpy as np
import scipy.interpolate as interpolate
import matplotlib.pyplot as plt
import clusterbuster.mathut as math
"""
Start with e.g. InterpolateRadio2D(psiFile = '../Analysis_MUSIC2/Hoeft_radio/mach_psi_tablefine(10,3).txt', inter=(10,6))
"""
# from http://stackoverflow.com/questions/5328128/scipy-interpolation-of-large-matrix
def my_interp(X, Y, Z, x, y, spn=3):
xs,ys = map(np.array,(x,y))
z = np.zeros(xs.shape)
for i,(x,y) in enumerate(zip(xs,ys)):
# get the indices of the nearest x,y
xi = np.argmin(np.abs(X[0,:]-x))
yi = np.argmin(np.abs(Y[:,0]-y))
xlo = max(xi-spn, 0)
ylo = max(yi-spn, 0)
xhi = min(xi+spn, X[0,:].size)
yhi = min(yi+spn, Y[:,0].size)
# make slices of X,Y,Z that are only a few items wide
nX = X[xlo:xhi, ylo:yhi]
nY = Y[xlo:xhi, ylo:yhi]
nZ = Z[xlo:xhi, ylo:yhi]
intp = interpolate.interp2d(nX, nY, nZ)
z[i] = intp(x,y)[0]
return z
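# Minimal usage sketch for my_interp (illustrative grid and query points, not part
# of the original pipeline); only runs when this file is executed directly:
if __name__ == "__main__":
    _Xd, _Yd = np.meshgrid(np.linspace(0.0, 1.0, 50), np.linspace(0.0, 1.0, 40))
    _Zd = np.sin(_Xd) * np.cos(_Yd)
    # evaluate the field at two query points via local interp2d patches
    print(my_interp(_Xd, _Yd, _Zd, [0.31, 0.62], [0.12, 0.57]))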
# from here on: done by myself
def LoadFile_psi(psiFile):
""" Just gives the Mach number and Temperature values """
#=== FILE A ===#
    # read the first line, split it, and convert each string to a float, e.g. float('1.31E+01'), or for a list: map(float, ['3.76E+00', '1.31E+01', '1.14E+01'])
with open(psiFile, 'r') as f:
first_line = f.readline()
psi_x = first_line.split()[2:] # Splits into list without first two elements
psi_x = np.asarray( [float(i) for i in psi_x ] ) # Converts strings to floats # Converts strings to floats
psi_y = np.loadtxt(psiFile,skiprows=0)[:,0]
return psi_x, psi_y
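# Expected psiFile layout (inferred from the parsing above): the first line holds
# two header tokens followed by the temperature grid values, and each later row
# starts with a Mach number followed by the psi table entries.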
def InterpolateRadio2D(psiFile='../Analysis_MUSIC2/Hoeft_radio/mach_psi_table.txt', machFile='../Analysis_MUSIC2/Hoeft_radio/q_mach_machr_table.txt', saveplot='../Analysis_MUSIC2/Hoeft_radio/interpolated', psiFileNew = False, machFileNew = False, inter=(10,3)):
    # Currently the Mach number is interpolated in a logarithmic space, which is much sparser at low Mach numbers than anticipated.
    # I suspect a double-exponential function for Mach (for both the efficiency dependency and the step size).
    # Note that the original grid given in 'Hoeft_radio/mach_psi_table.txt' is (quite) regular in log-loglog space, which makes it very simple to invoke an interpolation function!
    # Irregular data points would make it necessary to use functions like scipy.interpolate.griddata(points, values, (grid_x, grid_y), method='cubic')
plot_old = False
plot_new = False
plot_PhD = True
##==== psiFile for psi factor; machfile for mach-numbers conversion factors
H_mach = np.loadtxt(machFile,skiprows=0)
    H_psi = np.loadtxt(psiFile,skiprows=0)[:,1::] # you won't get the temperature values here ... read them separately
psi_x,psi_y = LoadFile_psi(psiFile)
    psi_x = np.log10( psi_x )            # convert to log10 space
    psi_y = np.log10(np.log10( psi_y ))  # convert to log10(log10) space
X, Y = np.meshgrid(psi_x, psi_y)
Z = np.log10(H_psi)
#interp_spline = interpolate.interp2d(x, y, Z) #, kind='cubic'
interp_spline = interpolate.RectBivariateSpline(psi_y, psi_x, Z) #, bbox=[None, None, None, None], kx=3, ky=3, s=0
xnew = np.arange(psi_x[0], psi_x[-1], (psi_x[-1]-psi_x[0])/(len(psi_x)*inter[0]) ) #np.arange(-4, 2, 4e-2) #
ynew = np.arange(psi_y[0], psi_y[-1], (psi_y[-1]-psi_y[0])/(len(psi_y)*inter[1]) ) #np.arange(0.2, 3, 2e-2) #
Znew = interp_spline(ynew, xnew )
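    # Note: the spline was built as f(psi_y, psi_x), so it is evaluated with the
    # y coordinates first; Znew therefore has shape (len(ynew), len(xnew)).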
keV2K = 1.16e7 # Translates keV to Kelvin
if plot_old:
plt.plot( np.arange(0, len(psi_x), 1 ), psi_x )
plt.plot( np.arange(0, len(psi_y), 1 ), psi_y )
plt.savefig(saveplot + '_linearity.png')
fig = plt.figure()
ax1 = plt.subplot(121)
ax1.pcolor( np.log10(keV2K) + psi_x, psi_y, Z)
ax1.set_title("Sparsely sampled function")
ax1.set_xlim([3.1, 9])
ax1.set_ylim([psi_y[0], 0.5])
ax1.set_xlabel('$\\mathrm{log_{10}(T)\\,[K]}$ ')
ax1.set_ylabel('$\\mathrm{log_{10}(log_{10}(M))\\,[]}$')
ax2 = plt.subplot(122)
im2 = ax2.pcolor( np.log10(keV2K) + xnew, ynew, Znew)
ax2.set_title("Interpolated function")
ax2.set_xlim([3.1, 9])
ax2.set_ylim([psi_y[0], 0.5])
ax2.set_xlabel('$\\mathrm{log_{10}(T)\\,[K]}$ ')
ax2.set_yticklabels([])
mach = [1.5,2.2,3.0,10.0]
c = [plt.cm.rainbow( (np.log10(np.log10(m))-ax1.get_ylim()[0])/abs(ax1.get_ylim()[1]-ax1.get_ylim()[0]) ) for m in mach]
for ii,m in enumerate(mach):
ax1.plot( [ax1.get_xlim()[0], ax1.get_xlim()[1]] , [np.log10( | np.log10(m) | numpy.log10 |
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import unittest
import numpy as np
import paddle.fluid as fluid
import paddle.fluid.core as core
from paddle.fluid import Program, program_guard
from op_test import OpTest
from test_anchor_generator_op import anchor_generator_in_python
from test_generate_proposal_labels_op import _generate_groundtruth
from test_generate_proposal_labels_op import _bbox_overlaps, _box_to_delta
def rpn_target_assign(anchor_by_gt_overlap,
rpn_batch_size_per_im,
rpn_positive_overlap,
rpn_negative_overlap,
rpn_fg_fraction,
use_random=True):
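    # Standard Faster R-CNN RPN target assignment, summarising the steps below:
    #   1. match every anchor to the ground-truth box it overlaps most;
    #   2. label as foreground (1) the anchors that are the best match for some
    #      gt box, plus anchors whose best overlap >= rpn_positive_overlap;
    #   3. keep at most rpn_fg_fraction * rpn_batch_size_per_im foreground
    #      anchors (randomly subsampled when use_random), reset the rest to -1;
    #   4. anchors whose best overlap < rpn_negative_overlap become background
    #      candidates.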
anchor_to_gt_argmax = anchor_by_gt_overlap.argmax(axis=1)
anchor_to_gt_max = anchor_by_gt_overlap[
np.arange(anchor_by_gt_overlap.shape[0]), anchor_to_gt_argmax]
gt_to_anchor_argmax = anchor_by_gt_overlap.argmax(axis=0)
gt_to_anchor_max = anchor_by_gt_overlap[
gt_to_anchor_argmax,
np.arange(anchor_by_gt_overlap.shape[1])]
anchors_with_max_overlap = np.where(
anchor_by_gt_overlap == gt_to_anchor_max)[0]
labels = np.ones((anchor_by_gt_overlap.shape[0], ), dtype=np.int32) * -1
labels[anchors_with_max_overlap] = 1
labels[anchor_to_gt_max >= rpn_positive_overlap] = 1
num_fg = int(rpn_fg_fraction * rpn_batch_size_per_im)
fg_inds = np.where(labels == 1)[0]
if len(fg_inds) > num_fg and use_random:
disable_inds = np.random.choice(fg_inds,
size=(len(fg_inds) - num_fg),
replace=False)
else:
disable_inds = fg_inds[num_fg:]
labels[disable_inds] = -1
fg_inds = np.where(labels == 1)[0]
bbox_inside_weight = np.zeros((len(fg_inds), 4), dtype=np.float32)
num_bg = rpn_batch_size_per_im - np.sum(labels == 1)
bg_inds = | np.where(anchor_to_gt_max < rpn_negative_overlap) | numpy.where |
import numpy as np
import matplotlib.pyplot as plt
import sklearn.feature_selection as fs
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler
import math
def main():
two = np.genfromtxt('data/two.csv')
two = StandardScaler().fit_transform(two)
pca = PCA()
pca.fit(two)
base_corr = np.corrcoef(two.transpose())
print(
"Q1a)\nVariance weights:\nÂ1: {}\nÂ2: {}".format(pca.explained_variance_ratio_[0],
pca.explained_variance_ratio_[1]))
    pcaed = pca.transform(two)
pcaed_corr = np.corrcoef(pcaed.transpose())
print("Q1b)\nrotated by {}°".format(np.degrees(math.acos(pca.components_[0, 0]))))
print("Q1c)\nOg:\n{}\nPCA:\n{}".format(base_corr, pcaed_corr))
reduced = pcaed[:, 0]
zs = np.zeros((len(reduced)))
x = np.array([reduced, zs]).transpose()
red_inverse = pca.inverse_transform(x)
plt.scatter(two[:, 0], two[:, 1])
plt.scatter(pcaed[:, 0], pcaed[:, 1])
plt.scatter(red_inverse[:, 0], red_inverse[:, 1])
plt.show()
print("Q2a)\nBinäre werden zu 0 und 1")
zoo = np.genfromtxt("data/zoo_german.csv", delimiter=",",
skip_header=1,
usecols=list(range(1, 13)) + list(range(15, 18)),
dtype=bool,
encoding="iso-8859-1")
print("Q2b)\nLädt csv mit utf8 statt iso88591 mit Komma Deleimiter, "
"nutz nur zeilen 1-12,15-16, skipt header zeile und setzt datentypen zu boolean")
selector = fs.VarianceThreshold()
selector.fit_transform(zoo)
t = np.sort(selector.variances_)[2]
selector = fs.VarianceThreshold(threshold=t)
selected = selector.fit_transform(zoo)
print("Q2c)\nOg shape: {}\nSelectedShape: {}".format(zoo.shape, selected.shape))
holes = | np.genfromtxt('data/two_missing.csv', delimiter=",") | numpy.genfromtxt |
from unittest import TestCase
import logging
import numpy as np
import numpy.linalg as la
from scipy.linalg import cho_factor, cho_solve
from ssmtoybox.bq.bqmtran import GaussianProcessTransform
from ssmtoybox.mtran import MonteCarloTransform, UnscentedTransform
from ssmtoybox.ssmod import ReentryVehicle2DTransition
from ssmtoybox.utils import GaussRV
logging.basicConfig(level=logging.DEBUG)
def sym(a):
return 0.5 * (a + a.T)
def cho_inv(a):
n = a.shape[0]
return cho_solve(cho_factor(a), np.eye(n))
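# cho_inv inverts a symmetric positive-definite matrix by Cholesky-solving
# against the identity, which is cheaper and numerically safer than a general
# matrix inverse.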
class MultTest(TestCase):
def test_dot_matvec_matmat(self):
# Does numpy.dot use different subroutines for matrix/vector and matrix/matrix multiplication?
# dot internals
n, e = 300, 150
A = 10 * np.random.randn(n, n)
B = 50 * np.random.randn(e, n)
A = sym(A) # symmetrize A
b = B[0, :]
c = b.dot(A)
C = B.dot(A)
self.assertTrue(np.all(c == C[0, :]),
"MAX DIFF: {:.4e}".format(np.abs(c - C[0, :]).max()))
def test_einsum_dot(self):
# einsum and dot give different results?
dim_in, dim_out = 2, 1
ker_par_mo = np.hstack((np.ones((dim_out, 1)), 1 * np.ones((dim_out, dim_in))))
tf_mo = GaussianProcessTransform(dim_in, dim_out, ker_par_mo, point_str='sr')
iK, Q = tf_mo.model.iK, tf_mo.model.Q
C1 = iK.dot(Q).dot(iK)
C2 = np.einsum('ab, bc, cd', iK, Q, iK)
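        # 'ab, bc, cd' contracts over the shared indices b and c, i.e. the chained
        # matrix product iK @ Q @ iK, so C1 and C2 should agree up to rounding.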
self.assertTrue(np.allclose(C1, C2), "MAX DIFF: {:.4e}".format(np.abs(C1 - C2).max()))
def test_cho_dot_ein(self):
# attempt to compute the transformed covariance using cholesky decomposition
# integrand
# input moments
mean_in = np.array([6500.4, 349.14, 1.8093, 6.7967, 0.6932])
cov_in = np.diag([1e-6, 1e-6, 1e-6, 1e-6, 1])
f = ReentryVehicle2DTransition(GaussRV(5, mean_in, cov_in), GaussRV(3)).dyn_eval
dim_in, dim_out = ReentryVehicle2DTransition.dim_state, 1
# transform
ker_par_mo = np.hstack((np.ones((dim_out, 1)), 25 * np.ones((dim_out, dim_in))))
tf_so = GaussianProcessTransform(dim_in, dim_out, ker_par_mo, point_str='sr')
# Monte-Carlo for ground truth
# tf_ut = UnscentedTransform(dim_in)
# tf_ut.apply(f, mean_in, cov_in, np.atleast_1d(1), None)
tf_mc = MonteCarloTransform(dim_in, 1000)
mean_mc, cov_mc, ccov_mc = tf_mc.apply(f, mean_in, cov_in, | np.atleast_1d(1) | numpy.atleast_1d |
# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# Copyright (c) 2021. by <NAME>, UC3M. +
# All rights reserved. This file is part of the Shi-VAE, and is released under the +
# "MIT License Agreement". Please see the LICENSE file that should have been included +
# as part of this package. +
# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
import matplotlib
import numpy as np
import torch
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import roc_auc_score
from tqdm import tqdm
from lib import utils, plot
from lib.loss import compute_avg_error
from models.train import BaseTrainer, update_loss_dict
matplotlib.use("Pdf")
shivae_losses = ['n_elbo', 'nll_loss', 'nll_real', 'nll_pos', 'nll_bin', 'nll_cat', 'kld', 'kld_q_z', 'kld_q_s']
class LinearTempScheduler:
r"""
Linear Temperature Scheduler.
    tau = max( temp_end, temp_init - slope * epoch )
"""
def __init__(self, init_temp=3, end_temp=0.01, annealing_epochs=10):
r"""
Args:
init_temp (int): Initial Temperature.
end_temp (int): Last Temperature.
annealing_epochs (int): Number of annealing epochs.
"""
self.temp_init = init_temp
self.temp_end = end_temp
self.annealing_epochs = annealing_epochs
self.slope = (init_temp - end_temp) / annealing_epochs
def update_temp(self, epoch):
r"""
Update temperature
Args:
epoch (int): Current epoch.
Returns:
            Updated temperature.
"""
tau = max(self.temp_init - self.slope * epoch, self.temp_end)
print('Updated temperature: {}'.format(tau))
return tau
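    # Illustrative values for the default arguments (init_temp=3, end_temp=0.01,
    # annealing_epochs=10): slope = 0.299, so update_temp returns 3.0, 2.701,
    # 2.402, ... for epochs 0, 1, 2, ... and clamps at 0.01 from epoch 10 onwards.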
class ExpTempScheduler:
r"""
Exponential Temperature Scheduler.
tau = max( temp_end, exp(-alpha * t) )
"""
def __init__(self, end_temp, annealing_epochs=20):
r"""
Args:
end_temp (int): Last temperature.
annealing_epochs (int): Number of annealing epochs.
"""
self.temp_end = end_temp
self.annealing_epochs = annealing_epochs
self.alpha = - | np.log(end_temp) | numpy.log |
import numpy as np
import torch
import torch.nn as nn
import cv2
from HPEDA.FSANetDA import FSANet
import datasets
# import matplotlib.pyplot as plt
def predict(model, rgb, pose, batch_size=4, verbose=False):
N = len(rgb)
bs = batch_size
predictions = []
testSetPose = []
for i in range(N // bs):
x = rgb[(i) * bs:(i + 1) * bs, :, :, :]
# Compute results
true_y = pose[(i) * bs:(i + 1) * bs, :]
x1 = x.transpose(0, -1, 1, 2)
x2 = torch.from_numpy(x1).float().div(255)
x3, _ = model(x2.cuda(), alpha=0.1)
pred_y = x3.detach().cpu().numpy()
# print(true_y.shape, pred_y.shape)
predictions.append(pred_y)
testSetPose.append(true_y)
p_data = np.concatenate(predictions, axis=0)
y_data = np.concatenate(testSetPose, axis=0)
print(p_data.shape, y_data.shape)
return p_data, y_data
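# predict() runs the model over rgb in fixed-size mini-batches and returns the
# stacked pose predictions alongside the matching ground truth; note that the
# last N % batch_size samples are dropped by the integer-division loop above.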
def load_data_npz(npz_path):
d = np.load(npz_path)
return d["image"], d["pose"]
def convertBGR2RGB(imgArray):
rgbArray = []
for img in imgArray:
rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
rgbArray.append(rgb)
return np.asarray(rgbArray)
_IMAGE_SIZE = 64
stage_num = [3, 3, 3]
lambda_d = 1
num_classes = 3
num_capsule = 3
dim_capsule = 16
routings = 2
num_primcaps = 7 * 3
m_dim = 5
S_set = [num_capsule, dim_capsule, routings, num_primcaps, m_dim]
_TEST_DB_AFLW = "AFLW2000"
_TEST_DB_BIWI1 = "BIWI_noTrack"
_TEST_DB_BIWI2 = "BIWI_Test"
_TEST_DB_POINTING = "Pointing"
_TEST_DB_SASE = "SASE"
_TEST_DB_SYNTHETIC = "SYNTHETIC"
# test_db_list = [_TEST_DB_AFLW, _TEST_DB_BIWI1, _TEST_DB_POINTING, _TEST_DB_SASE] # _TEST_DB_AFLW, , _TEST_DB_BIWI2
test_db_list = [_TEST_DB_SYNTHETIC] # [_TEST_DB_BIWI1]
# get device GPU or CPU
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# ---UNet with modified loss (surface normal) ------#
model = FSANet(S_set).cuda()
# get multiple GPU
if torch.cuda.device_count() > 1:
print("Let's use", torch.cuda.device_count(), "GPUs!")
# dim = 0 [30, xxx] -> [10, ...], [10, ...], [10, ...] on 3 GPUs
model = nn.DataParallel(model)
model.to(device)
# ----- Get the optimum epoch ----- #
if True:
epT = 0
MAET = 10.0
for ep in range(0, 40):
modelPath = 'models/BIWIRaw_11-25-2020_21-14-54-n1484-e40-bs8-lr0.0001/weights.epoch{0}_model.pth'. \
format(str(ep))
# print(modelPath)
model.load_state_dict(torch.load(modelPath))
model.eval()
if True:
for test_db_name in test_db_list:
if test_db_name == _TEST_DB_AFLW:
image, pose = load_data_npz('../Data/RealNPZ/AFLW2000.npz')
elif test_db_name == _TEST_DB_BIWI1:
image = np.load('/mnt/fastssd/Shubhajit_Stuff/HPECode/Data/BIWI/testImg.npy')
pose = np.load('/mnt/fastssd/Shubhajit_Stuff/HPECode/Data/BIWI/testPose.npy')
# image, pose = load_data_npz('../Data/RealNPZ/BIWI_noTrack.npz')
elif test_db_name == _TEST_DB_BIWI2:
image, pose = load_data_npz('../Data/RealNPZ/BIWI_test.npz')
elif test_db_name == _TEST_DB_SASE:
image = np.load('../Data/RealNPZ/SASERgbData.npy')
pose = np.load('../Data/RealNPZ/SASEPoseData.npy')
elif test_db_name == _TEST_DB_POINTING:
image = np.load('../Data/RealNPZ/PointingRgbData.npy')
pose = np.load('../Data/RealNPZ/PointingPoseData.npy')
elif test_db_name == _TEST_DB_SYNTHETIC:
image = np.load('/mnt/fastssd/Shubhajit_Stuff/HPECode/Data/SynData/rgbData.npy')[0:12000]
pose = np.load('/mnt/fastssd/Shubhajit_Stuff/HPECode/Data/SynData/poseData.npy')[0:12000]
# image = convertBGR2RGB(image)
x_data = []
y_data = []
for i in range(0, pose.shape[0]):
temp_pose = pose[i, :]
# if (np.max(temp_pose[0]) <= 60.0 and np.min(temp_pose[0]) >= -60.0) and \
# (np.max(temp_pose[1]) <= 50.0 and np.min(temp_pose[1]) >= -50.0) and \
# (np.max(temp_pose[2]) <= 40.0 and np.min(temp_pose[2]) >= -40.0):
if np.max(temp_pose) <= 90.0 and np.min(temp_pose) >= -90.0:
x_data.append(image[i, :, :, :])
y_data.append(pose[i, :])
x_data = np.array(x_data)
y_data = np.array(y_data)
p_data, y_data = predict(model, x_data, y_data, batch_size=64)
# p_data[:, 2] = -p_data[:, 2]
pose_matrix = np.mean(np.abs(p_data - y_data), axis=0)
MAE = np.mean(pose_matrix)
yaw = pose_matrix[0]
pitch = pose_matrix[1]
roll = pose_matrix[2]
print('\n--------------------------------------------------------------------------------')
print(test_db_name, ep,
' : MAE = %3.3f, [yaw,pitch,roll] = [%3.3f, %3.3f, %3.3f]' % (MAE, yaw, pitch, roll))
print('--------------------------------------------------------------------------------')
if MAE < MAET:
epT = ep
MAET = MAE
result = (MAE, yaw, pitch, roll)
print('BIWI: ', epT, ' : MAE = %3.3f, [yaw,pitch,roll] = [%3.3f, %3.3f, %3.3f]' % result)
# 59
# ----- Result for optimum epoch ----- #
if False:
modelPath = r'../models/MySynth_09-05-2020_18-50-28-n8858-e90-bs8-lr0.0001/weights.epoch72_model.pth'
# modelPath = r'models/MySynthTrans_10-13-2020_03-10-10-n168-e50-bs8-lr0.0001/weights.epoch39_model.pth'
# print(modelPath)
model.load_state_dict(torch.load(modelPath))
model.eval()
if True:
for test_db_name in test_db_list:
if test_db_name == _TEST_DB_AFLW:
image, pose = load_data_npz('../Data/RealNPZ/AFLW2000.npz')
elif test_db_name == _TEST_DB_BIWI1:
image = np.load('../../Data/RealNPZ/BIWI/testImg.npy')
pose = np.load('../../Data/RealNPZ/BIWI/testPose.npy')
# image, pose = load_data_npz('../Data/RealNPZ/BIWI_noTrack.npz')
elif test_db_name == _TEST_DB_BIWI2:
image, pose = load_data_npz('../Data/RealNPZ/BIWI_test.npz')
elif test_db_name == _TEST_DB_SASE:
image = np.load('../Data/RealNPZ/SASERgbData.npy')
pose = np.load('../Data/RealNPZ/SASEPoseData.npy')
# image = np.delete(image,[1394, 1398, 1403],axis=0)
# pose = np.delete(pose,[1394, 1398, 1403],axis=0)
image = convertBGR2RGB(image)
x_data = []
y_data = []
for i in range(0, pose.shape[0]):
temp_pose = pose[i, :]
# if ((np.max(temp_pose[0]) <= 90.0 and np.min(temp_pose[0]) >= -90.0) and \
# (np.max(temp_pose[1]) <= 10.0 and np.min(temp_pose[1]) >= -10.0) and \
# (np.max(temp_pose[2]) <= 10.0 and np.min(temp_pose[2]) >= -10.0)):
if np.max(temp_pose) <= 90.0 and np.min(temp_pose) >= -90.0:
x_data.append(image[i, :, :, :])
y_data.append(pose[i, :])
x_data = np.array(x_data)
y_data = np.array(y_data)
p_data, y_data = predict(model, x_data, y_data, batch_size=8)
# l = 1
#
# for y, p in zip(y_data, p_data):
# print(l, y, p)
# l = l+1
pose_matrix = np.mean(np.abs(p_data - y_data), axis=0)
MAE = np.mean(pose_matrix)
yaw = pose_matrix[0]
pitch = pose_matrix[1]
roll = pose_matrix[2]
print('\n--------------------------------------------------------------------------------')
print(test_db_name,
' : MAE = %3.3f, [yaw,pitch,roll] = [%3.3f, %3.3f, %3.3f]' % (MAE, yaw, pitch, roll))
print('--------------------------------------------------------------------------------')
if False:
modelPath = r'models/MySynth_09-05-2020_18-50-28-n8858-e90-bs8-lr0.0001/weights.epoch72_model.pth'
data_dir = '/mnt/fastssd/Shubhajit_stuff/DA-Code/HeadPoseCode/Data/BIWI/FRData/'
filename_path = '/mnt/fastssd/Shubhajit_stuff/DA-Code/HeadPoseCode/Data/BIWI/FRData/data.txt'
batch_size = 8
model.load_state_dict(torch.load(modelPath))
model.eval()
pose_dataset = datasets.Pose_BIWI_Raw(data_dir, filename_path=filename_path)
test_loader = torch.utils.data.DataLoader(dataset=pose_dataset,
batch_size=batch_size,
shuffle=True,
num_workers=2)
predictions = []
testSetPose = []
for i, (images, cont_labels) in enumerate(test_loader):
# images = Variable(images).cuda()
# label_angles = Variable(cont_labels[:, :3]).cuda(non_blocking=True)
images = images.cuda()
label_angles = cont_labels[:, :3].cuda()
# Predict
angles = model(images)
pred_y = angles.detach().cpu().numpy()
true_y = cont_labels.detach().cpu().numpy()
predictions.append(pred_y)
testSetPose.append(true_y)
p_data = np.concatenate(predictions, axis=0)
y_data = np.concatenate(testSetPose, axis=0)
pose_matrix = np.mean(np.abs(p_data - y_data), axis=0)
MAE = np.mean(pose_matrix)
yaw = pose_matrix[0]
pitch = pose_matrix[1]
roll = pose_matrix[2]
print('\n--------------------------------------------------------------------------------')
print('BIWI',
' : MAE = %3.3f, [yaw,pitch,roll] = [%3.3f, %3.3f, %3.3f]' % (MAE, yaw, pitch, roll))
print('--------------------------------------------------------------------------------')
if False:
image = np.load('../../Data/RealNPZ/BIWI/testImg.npy')
pose = np.load('../../Data/RealNPZ/BIWI/testPose.npy')
x_data = []
y_data = []
for i in range(0, pose.shape[0]):
temp_pose = pose[i, :]
# if (np.max(temp_pose[0]) <= 60.0 and np.min(temp_pose[0]) >= -60.0) and \
# (np.max(temp_pose[1]) <= 50.0 and np.min(temp_pose[1]) >= -50.0) and \
# (np.max(temp_pose[2]) <= 40.0 and np.min(temp_pose[2]) >= -40.0):
if np.max(temp_pose) <= 90.0 and np.min(temp_pose) >= -90.0:
x_data.append(image[i, :, :, :])
y_data.append(pose[i, :])
x_data = np.array(x_data)
import numpy as np
def init_spin_state_2d(nsize=16):
"""Initialize spin state"""
return 2*np.random.randint(2, size=(nsize, nsize)) - 1
def mcmh_algorithm(state, beta=1, J=1):
"""Apply one Monte Carlo Metropolis-Hastings sweep; J is the spin coupling constant"""
# Get input dimensions
height, width = state.shape
energy = 0
for i in range(height):
for j in range(width):
# Periodic neighbors
up, down, left, right = (
(i - 1) % height, (i + 1) % height,
(j - 1) % width, (j + 1) % width
)
# Spin interaction energies
e_spin_init = J*(
state[i, j]*state[up, j]
+ state[i, j]*state[down, j]
+ state[i, j]*state[i, left]
+ state[i, j]*state[i, right]
)
e_spin_flip = J*(
-state[i, j]*state[up, j]
- state[i, j]*state[down, j]
- state[i, j]*state[i, left]
- state[i, j]*state[i, right]
)
delta_e = e_spin_flip - e_spin_init
# Metropolis update: always accept a flip that lowers the energy,
# otherwise accept it with probability exp(-beta * delta_e);
# accumulate the energy of whichever configuration is kept
if delta_e <= 0:
    state[i, j] = -state[i, j]
    energy += e_spin_flip
elif np.random.rand() < np.exp(-beta*delta_e):
    state[i, j] = -state[i, j]
    energy += e_spin_flip
else:
    energy += e_spin_init
return state, energy/state.size, np.sum(state)
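# --- Illustrative usage sketch (not part of the original script) ---
# Drive the sweep above on a small lattice at a fixed inverse temperature and
# read back the energy per site and the net magnetisation (sum of spins).
# The number of sweeps and the beta value are arbitrary choices for the demo.
if __name__ == '__main__':
    np.random.seed(0)
    spins = init_spin_state_2d(nsize=16)
    for _ in range(50):
        spins, energy_per_site, magnetisation = mcmh_algorithm(spins, beta=0.6, J=1)
    print('energy/site = {:.3f}, net magnetisation = {:.0f}'.format(
        energy_per_site, magnetisation))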
#!/usr/bin/env python
'''
Test script to compare the performances of
the generalized poisson llh with the other
miminization metrics available in pisa
'''
from __future__ import absolute_import, print_function, division
__author__ = "<NAME> (<EMAIL>)"
#
# Standard python imports
#
import os
import pickle
from collections import OrderedDict
import copy
import numpy as np
#
# Font stuff
#
import matplotlib as mpl
mpl.use('agg')
from matplotlib import rcParams
# pyplot and PdfPages are used by the plotting helpers further down
from matplotlib import pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
FONTSIZE=20
rcParams['font.family'] = 'serif'
rcParams['font.size'] = 20
mpl.rc('text', usetex=True)
#mpl.rcParams['text.latex.preamble']=[r"\usepackage{amsmath}"]
#
# pisa tools and objects
#
from pisa.core.binning import OneDimBinning, MultiDimBinning
from pisa.core.map import Map, MapSet
from pisa.core.distribution_maker import DistributionMaker
from pisa.utils.config_parser import parse_pipeline_config
from pisa.core.param import Param, ParamSet
from pisa.analysis.analysis import Analysis
# debug tools
from pisa.utils.log import logging
from pisa.utils.profiler import line_profile
from pisa.utils.log import set_verbosity, Levels
#set_verbosity(Levels.TRACE)
##################################################################################
STANDARD_CONFIG = os.environ['PISA'] + \
'/pisa/stages/data/super_simple_pipeline.cfg'
TRUE_MU = 20.
TRUE_SIGMA = 3.1
NBINS = 31
#
# Define formatting properties for all metrics
#
import seaborn as sns
COLORS = sns.color_palette("muted", 8)
LIKELIHOOD_FORMATTING = OrderedDict()
LIKELIHOOD_FORMATTING['llh'] = {'label':r'Poisson llh',
'marker':'s',
'color': COLORS[0]}
LIKELIHOOD_FORMATTING['mcllh_eff'] = {'label':r'Effective llh',
'color': COLORS[1]}
LIKELIHOOD_FORMATTING['mcllh_mean'] = {'label':r'Mean llh',
'color': COLORS[2],
'linestyle': '--'}
LIKELIHOOD_FORMATTING['generalized_poisson_llh'] = {'label':r'Generalized llh',
'color': COLORS[7]}
LIKELIHOOD_FORMATTING['mod_chi2'] = {'label':r'Mod. $\chi^{2}$',
'color': COLORS[3]}
################################################################################
class ToyMCllhParam:
'''
Class defining the parameters of the Toy MC
'''
def __init__(self):
self.n_data = 0. # Number of data points to bin
self.signal_fraction = 1. # fraction of those points that will constitute the signal
self.true_mu = TRUE_MU # True mean of the signal
self.true_sigma = TRUE_SIGMA # True width of the signal
self.nbackground_low = 0. # lowest value the background can take
self.nbackground_high = 40. # highest value the background can take
self.stats_factor = 1. # Statistical factor for the MC
#
# Binning
#
self.binning = None
@property
def nsig(self):
'''
number of data points that are part of the signal
'''
return int(self.n_data*self.signal_fraction)
@property
def nbkg(self):
'''
number of data points that are part of the background
'''
return self.n_data-self.nsig
@property
def nbins(self):
'''
number of bins in the binning
'''
assert self.binning is not None, 'ERROR: specify a binning first'
return self.binning.tot_num_bins
def create_pseudo_data(toymc_params, seed=None):
'''
Create pseudo data consisting of a gaussian peak
on top of a uniform background
'''
if seed is not None:
np.random.seed(seed)
binning = toymc_params.binning
#
# Gaussian signal peak
#
signal = np.random.normal(
loc=toymc_params.mu, scale=toymc_params.sigma, size=toymc_params.nsig)
#
# Uniform background
#
background = np.random.uniform(
high=toymc_params.nbackground_high, low=toymc_params.nbackground_low, size=toymc_params.nbkg)
total_data = np.concatenate([signal, background])
counts_data, _ = np.histogram(total_data, bins=binning.bin_edges[0].magnitude)
# Convert data histogram into a pisa map
data_map = Map(name='total', binning=binning, hist=counts_data)
# Set the errors as the sqrt of the counts
data_map.set_errors(error_hist=np.sqrt(counts_data))
data_as_mapset = MapSet([data_map])
return data_as_mapset
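# For reference, the expected bin counts behind create_pseudo_data can be
# written down analytically: nsig events from the Gaussian peak plus nbkg
# events spread uniformly over [nbackground_low, nbackground_high]. The helper
# below is an illustrative sketch in plain numpy/scipy (it bypasses the pisa
# Map machinery) and is not called anywhere else in this script.
def expected_bin_counts(toymc_params, bin_edges):
    from scipy.stats import norm
    edges = np.asarray(bin_edges, dtype=float)
    # Signal: integral of the Gaussian over each bin
    cdf = norm.cdf(edges, loc=toymc_params.true_mu, scale=toymc_params.true_sigma)
    signal = toymc_params.nsig * np.diff(cdf)
    # Background: uniform density over [low, high], restricted to each bin
    low, high = toymc_params.nbackground_low, toymc_params.nbackground_high
    overlap = np.clip(edges[1:], low, high) - np.clip(edges[:-1], low, high)
    background = toymc_params.nbkg * overlap / (high - low)
    return signal + background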
def create_mc_template(toymc_params, config_file=None, seed=None, keep_same_weight=True):
'''
Create MC template out of a pisa pipeline
'''
if seed is not None:
np.random.seed(seed)
Config = parse_pipeline_config(config_file)
# Change binning
Config[('data','pi_simple_signal')]['output_specs'] = toymc_params.binning
Config[('likelihood','pi_generalized_llh_params')]['output_specs'] = toymc_params.binning
# If keep_same_weight is True, turn off the mean adjust and pseudo weight of pi_generalized_llh
if keep_same_weight:
Config[('likelihood','pi_generalized_llh_params')]['with_mean_adjust'] = False
Config[('likelihood','pi_generalized_llh_params')]['with_pseudo_weight'] = False
else:
Config[('likelihood','pi_generalized_llh_params')]['with_mean_adjust'] = True
Config[('likelihood','pi_generalized_llh_params')]['with_pseudo_weight'] = True
new_n_events_data = Param(
name='n_events_data', value=toymc_params.n_data, prior=None, range=None, is_fixed=True)
new_sig_frac = Param(name='signal_fraction', value=toymc_params.signal_fraction,
prior=None, range=None, is_fixed=True)
new_stats_factor = Param(
name='stats_factor', value=toymc_params.stats_factor, prior=None, range=None, is_fixed=True)
# These should match the values of the config file, but we override them just in case we need to change these later
new_mu = Param(name='mu', value=toymc_params.mu,
prior=None, range=[0, 100], is_fixed=False)
new_sigma = Param(name='sigma', value=toymc_params.sigma,
prior=None, range=None, is_fixed=True)
Config[('data', 'pi_simple_signal')]['params'].update(p=ParamSet(
[new_n_events_data, new_sig_frac, new_stats_factor, new_mu, new_sigma]))
MCtemplate = DistributionMaker(Config)
return MCtemplate
##################################################################################
def run_llh_scans(metrics=[], mc_params=None, config_file=None, data_mapset=None, mc_seed=None, results=None):
'''
Perform Likelihood scans fover a range of injected mu values
metrics: list of strings (names of the likelihood to run)
mc_template: DistributionMaker
data: MapSet
'''
assert isinstance(results, (dict, OrderedDict)
), 'ERROR: results must be a dict'
assert 'toymc_params' in results.keys(), 'ERROR: missing toymc_params'
for metric in metrics:
if metric not in results.keys():
results[metric] = OrderedDict()
results[metric]['llh_scan'] = OrderedDict()
#
# Create the mc template
#
mc_template = create_mc_template(mc_params, config_file=config_file, seed=mc_seed)
#
# Collect the llh value at the Truth
#
for metric in metrics:
print(metric)
mc_template.params['mu'].value = mc_params.true_mu
new_MC = mc_template.get_outputs(return_sum=True, force_standard_output=False)
if metric == 'generalized_poisson_llh':
llhval = data_mapset.maps[0].metric_total(new_MC, metric=metric, metric_kwargs={
'empty_bins': mc_template.empty_bin_indices})
logging.trace('empty_bins: %s', mc_template.empty_bin_indices)
else:
new_MC = new_MC['old_sum']
llhval = data_mapset.metric_total(new_MC, metric=metric)
results[metric]['llh_scan']['llh_at_truth'] = llhval
results[metric]['llh_scan']['tested_mu'] = np.linspace(10., 30., 50)
results[metric]['llh_scan']['scan_values'] = []
#
# Scan llh values around the true signal peak value
#
for tested_mu in results[metric]['llh_scan']['tested_mu']:
#
# Recompute the MC template with a new value of the mu parameter
#
mc_template.params['mu'].value = tested_mu
new_MC = mc_template.get_outputs(return_sum=True, force_standard_output=False)
if metric == 'generalized_poisson_llh':
llhval = data_mapset.maps[0].metric_total(new_MC, metric=metric, metric_kwargs={
'empty_bins': mc_template.empty_bin_indices})
else:
new_MC = new_MC['old_sum']
llhval = data_mapset.metric_total(new_MC, metric=metric)
results[metric]['llh_scan']['scan_values'].append(llhval)
return results
def plot_llh_scans(metrics=[], results=None, interactive=False, output_pdf=None, prefix='', save_individual_fig=False):
'''
Plot Likelihood scans
'''
fig, ax = plt.subplots(figsize=(7, 7))
n = 0
for llh_name in metrics:
llhvals = np.asarray(results[llh_name]['llh_scan']['scan_values'])
tested_mu = results[llh_name]['llh_scan']['tested_mu']
if 'chi2' in llh_name:
TS = llhvals-np.amin(llhvals)
else:
TS = -2*(llhvals-np.amax(llhvals))
ax.plot(tested_mu, TS, **LIKELIHOOD_FORMATTING[llh_name])
n += 1
ax.set_xlabel(r'injected $\mu$')
ax.set_ylabel(r'Test Statistic(-2$\ln[L_{\mu}/L_{o}]$ or $\chi^{2}$)')
ax.set_ylim([-10., 500])
ax.plot([15.,25.],[0.,0.],'k')
ax.set_title('MC factor = {}'.format(results['toymc_params'].stats_factor))
#ax.set_title('Likelihood scans over mu')
ax.legend()
fig.tight_layout()
if interactive:
plt.show()
if save_individual_fig:
plt.savefig(prefix+'plot_llh_scan.png')
if output_pdf is None:
return fig
else:
output_pdf.savefig(fig)
plt.close('all')
del fig
return 1
###################################################################################################
#@line_profile
def run_coverage_test(n_trials=100,
toymc_params=None,
mc_seed = None,
config_file=None,
metrics=None,
results=None,
output_stem='coverage_test'):
'''
Perform coverage and bias tests.
We create n_trials pseudo-datasets, fit them
with each metric at various levels of statistics,
and save the resulting llh values and fitted
parameters into a file.
n_trials: int (number of pseudo-experiments to run)
toymc_params: ToyMCllhParam object (describes the parameters
of the experiment, like signal_fraction and
stats_factor)
The pseudo-infinite-statistics MC template used as a reference
is built internally from toymc_params with stats_factor = 100.
'''
import time
assert isinstance(results, (dict, OrderedDict)), 'ERROR: results must be a dict'
assert isinstance(metrics, list), 'ERROR: must specify metrics as a list'
assert 'toymc_params' in results.keys(), 'ERROR: missing toymc_params'
results['toymc_params'] = toymc_params
for metric in metrics:
if metric not in results.keys():
results[metric] = OrderedDict()
results[metric]['coverage'] = []
#
# minimizer settings to pass into the pisa analysis class
#
minimizer_settings = {"method": {"value": "l-bfgs-b", # "SLSQP",
"desc": "The string to pass to scipy.optimize.minimize so it knows what to use"
},
"options": {"value": {"disp": 0,
"ftol": 1.0e-6,
"eps": 1.0e-6,
"maxiter": 100
},
"desc": {"disp": "Set to True to print convergence messages",
"ftol": "Precision goal for the value of f in the stopping criterion",
"eps": "Step size used for numerical approximation of the jacobian.",
"maxiter": "Maximum number of iteration"
}
}
}
#
# Create the mc template
#
mc_template = create_mc_template(toymc_params, config_file=config_file, seed=mc_seed)
#
# Create a pseudo-infinite statistics template
#
infinite_toymc_params = copy.deepcopy(toymc_params)
infinite_toymc_params.stats_factor = 100.
mc_template_pseudo_infinite = create_mc_template(infinite_toymc_params, config_file=config_file, seed=mc_seed)
#
# Start pseudo trials
#
for metric in metrics:
filename = output_stem+'_pseudo_exp_llh_%s.pckl' % metric
if os.path.isfile(filename):
results[metric]['coverage'] = pickle.load(open(filename,'rb'))
else:
logging.debug('minimizing: %s', metric)
to = time.time()
trial_i = 0
failed_fits = 0
while trial_i < n_trials and failed_fits<2*n_trials:
experiment_result = {}
#
# Create a pseudo-dataset
#
data_trial = create_pseudo_data(toymc_params=toymc_params, seed=None)
#
# Compute the truth llh value of this pseudo experiment
# truth - if the truth comes from infinite stats MC
#
if metric == 'generalized_poisson_llh':
mc = mc_template_pseudo_infinite.get_outputs(return_sum=False, force_standard_output=False)[0]
llhval_true = data_trial.maps[0].metric_total(mc,
metric=metric,
metric_kwargs={
'empty_bins': mc_template_pseudo_infinite.empty_bin_indices})
else:
mc = mc_template_pseudo_infinite.get_outputs(return_sum=True)
llhval_true = data_trial.metric_total(mc, metric=metric)
experiment_result['llh_infinite_stats'] = llhval_true
#
# truth if the truth comes from low stats MC
#
if metric == 'generalized_poisson_llh':
mc = mc_template.get_outputs(return_sum=False,
force_standard_output=False)[0]
llhval = data_trial.maps[0].metric_total(mc,
metric=metric,
metric_kwargs={
'empty_bins': mc_template.empty_bin_indices})
else:
mc = mc_template.get_outputs(return_sum=True)
llhval = data_trial.metric_total(mc,
metric=metric)
experiment_result['llh_lowstats'] = llhval
# #
# # minimized llh (high stats)
# #
# logging.debug('\nhigh stats fit:\n')
# ana = Analysis()
# result_pseudo_truth, _ = ana.fit_hypo(data_trial,
# mc_infinite_stats,
# metric=metric,
# minimizer_settings=minimizer_settings,
# hypo_param_selections=None,
# check_octant=False,
# fit_octants_separately=False,
# )
# #except:
# # logging.trace('Failed Fit')
# # failed_fits += 1
# # continue
# experiment_result['infinite_stats_opt'] = {'metric_val': result_pseudo_truth['metric_val'],
# 'best_fit_param': result_pseudo_truth['params']['mu']}
#
# minimized llh (low stats)
#
logging.debug('\nlow stats fit:\n')
ana = Analysis()
try:
result_lowstats, _ = ana.fit_hypo(data_trial,
mc_template,
metric=metric,
minimizer_settings=minimizer_settings,
hypo_param_selections=None,
check_octant=False,
fit_octants_separately=False,
)
except:
logging.debug('Failed Fit')
failed_fits += 1
continue
experiment_result['lowstats_opt'] = {'metric_val': result_lowstats['metric_val'],
'best_fit_param': result_lowstats['params']['mu']}
results[metric]['coverage'].append(experiment_result)
trial_i += 1
if trial_i==0:
raise Exception('ERROR: no fit managed to converge after {} attempts'.format(failed_fits))
t1 = time.time()
logging.debug("Time for ", n_trials, " minimizations: ", t1-to, " s")
logging.debug("Saving to file...")
pickle.dump(results[metric]['coverage'], open(filename, 'wb'))
logging.debug("Saved.")
return results
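# Illustrative summary helper (a sketch, not used by the plotting code below):
# under Wilks' theorem, with a single fitted parameter the test statistic
# TS = -2*(llh_fit - llh_truth) should follow a chi2 distribution with one
# degree of freedom, so the fraction of pseudo-experiments with TS below the
# 68% chi2 quantile is a quick coverage check on the stored trials.
def fraction_below_chi2_quantile(coverage_entries, cl=0.68):
    from scipy.stats import chi2
    ts_values = []
    for entry in coverage_entries:
        ts = -2.0 * (entry['lowstats_opt']['metric_val'] - entry['llh_lowstats'])
        ts_values.append(abs(ts))
    return float(np.mean(np.asarray(ts_values) <= chi2.ppf(cl, df=1)))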
def plot_coverage_test(output_pdf=None,
results=None,
metrics=None,
stats_factor=None,
output_stem=None,
n_trials=None,
prefix='',
save_individual_fig=False,
outname='test_coverage.pdf'):
'''
plot the results of the coverage test
'''
from utils.plotting.standard_modules import Figure
assert isinstance(metrics, list), 'ERROR: must specify metrics as a list'
from scipy.stats import chi2
if output_pdf is None:
output_pdf = PdfPages(outname)
coverage_fig = Figure(figsize=(7, 7))
#
# produce an example chi2 distribution with d.o.f =1
#
# This will help us compare ts distribution directly
sample_chi2_distrib = np.random.chisquare(size=n_trials, df=1)
for llh_name in metrics:
logging.trace('plotting %s'%llh_name)
container_ts_truth_high = []
container_ts_truth_low = []
container_ts_lowstat = []
container_ts_highstat = []
llh_bias = []
param_bias = []
val_truth = TRUE_MU
container_val_lowstat = []
container_val_highstat = []
# Retrieve data from the coverage test
indata = results[llh_name]['coverage']
if len(indata) < 1:
    print('No successful fits for metric: {}. Skipping.'.format(llh_name))
    continue
for pseudo_exp in indata:
val_low = pseudo_exp['lowstats_opt']['best_fit_param'].value.m
llh_optimized_low = pseudo_exp['lowstats_opt']['metric_val']
llh_truth_low = pseudo_exp['llh_lowstats']
llh_truth_high = pseudo_exp['llh_infinite_stats']
#
# check that all elements of the comparison are finite
#
good_trial = np.isfinite(val_low)
good_trial *= np.isfinite(llh_optimized_low)
good_trial *= np.isfinite(llh_truth_low)
good_trial *= np.isfinite(llh_truth_high)
if good_trial:
container_val_lowstat.append(val_low)
container_ts_truth_high.append(llh_truth_high)
container_ts_truth_low.append(llh_truth_low)
ts_low = -2*(llh_optimized_low-llh_truth_low)
# We take the absolute value here because we want to know how far
# we are from the truth, and we can optimize to llh values above and below the truth
container_ts_lowstat.append(np.abs(ts_low))
param_bias.append((val_low-val_truth)/val_truth)
else:
continue
#
# First plot: TS distribution
#
fig_ts_distrib = Figure(figsize=(7,7), title=LIKELIHOOD_FORMATTING[llh_name]['label'])
ts_binning = np.linspace(0, 25, 31)
c,ts_edges = np.histogram(sample_chi2_distrib, bins=ts_binning)
ts_x = ts_edges[:-1]+0.5*(ts_edges[1:]-ts_edges[:-1])
fig_ts_distrib.get_ax().errorbar(ts_x, c, yerr=np.sqrt(c))
import re
import sys
from io import StringIO
import numpy as np
import scipy.sparse as sp
from scipy import linalg
from sklearn.decomposition import NMF, MiniBatchNMF
from sklearn.decomposition import non_negative_factorization
from sklearn.decomposition import _nmf as nmf # For testing internals
from scipy.sparse import csc_matrix
import pytest
from sklearn.utils._testing import assert_array_equal
from sklearn.utils._testing import assert_array_almost_equal
from sklearn.utils._testing import assert_almost_equal
from sklearn.utils._testing import assert_allclose
from sklearn.utils._testing import ignore_warnings
from sklearn.utils.extmath import squared_norm
from sklearn.base import clone
from sklearn.exceptions import ConvergenceWarning
@pytest.mark.parametrize(
["Estimator", "solver"],
[[NMF, {"solver": "cd"}], [NMF, {"solver": "mu"}], [MiniBatchNMF, {}]],
)
def test_convergence_warning(Estimator, solver):
convergence_warning = (
"Maximum number of iterations 1 reached. Increase it to improve convergence."
)
A = np.ones((2, 2))
with pytest.warns(ConvergenceWarning, match=convergence_warning):
Estimator(max_iter=1, **solver).fit(A)
def test_initialize_nn_output():
# Test that initialization does not return negative values
rng = np.random.mtrand.RandomState(42)
data = np.abs(rng.randn(10, 10))
for init in ("random", "nndsvd", "nndsvda", "nndsvdar"):
W, H = nmf._initialize_nmf(data, 10, init=init, random_state=0)
assert not ((W < 0).any() or (H < 0).any())
@pytest.mark.filterwarnings(
r"ignore:The multiplicative update \('mu'\) solver cannot update zeros present in"
r" the initialization"
)
def test_parameter_checking():
A = np.ones((2, 2))
name = "spam"
with ignore_warnings(category=FutureWarning):
# TODO remove in 1.2
msg = "Invalid regularization parameter: got 'spam' instead of one of"
with pytest.raises(ValueError, match=msg):
NMF(regularization=name).fit(A)
msg = "Invalid beta_loss parameter: solver 'cd' does not handle beta_loss = 1.0"
with pytest.raises(ValueError, match=msg):
NMF(solver="cd", beta_loss=1.0).fit(A)
msg = "Negative values in data passed to"
with pytest.raises(ValueError, match=msg):
NMF().fit(-A)
clf = NMF(2, tol=0.1).fit(A)
with pytest.raises(ValueError, match=msg):
clf.transform(-A)
with pytest.raises(ValueError, match=msg):
nmf._initialize_nmf(-A, 2, "nndsvd")
for init in ["nndsvd", "nndsvda", "nndsvdar"]:
msg = re.escape(
"init = '{}' can only be used when "
"n_components <= min(n_samples, n_features)".format(init)
)
with pytest.raises(ValueError, match=msg):
NMF(3, init=init).fit(A)
with pytest.raises(ValueError, match=msg):
MiniBatchNMF(3, init=init).fit(A)
with pytest.raises(ValueError, match=msg):
nmf._initialize_nmf(A, 3, init)
@pytest.mark.parametrize(
"param, match",
[
({"n_components": 0}, "Number of components must be a positive integer"),
({"max_iter": -1}, "Maximum number of iterations must be a positive integer"),
({"tol": -1}, "Tolerance for stopping criteria must be positive"),
({"init": "wrong"}, "Invalid init parameter"),
({"beta_loss": "wrong"}, "Invalid beta_loss parameter"),
],
)
@pytest.mark.parametrize("Estimator", [NMF, MiniBatchNMF])
def test_nmf_common_wrong_params(Estimator, param, match):
# Check that appropriate errors are raised for invalid values of parameters common
# to NMF and MiniBatchNMF.
A = np.ones((2, 2))
with pytest.raises(ValueError, match=match):
Estimator(**param).fit(A)
@pytest.mark.parametrize(
"param, match",
[
({"solver": "wrong"}, "Invalid solver parameter"),
],
)
def test_nmf_wrong_params(param, match):
# Check that appropriate errors are raised for invalid values specific to NMF
# parameters
A = np.ones((2, 2))
with pytest.raises(ValueError, match=match):
NMF(**param).fit(A)
@pytest.mark.parametrize(
"param, match",
[
({"batch_size": 0}, "batch_size must be a positive integer"),
],
)
def test_minibatch_nmf_wrong_params(param, match):
# Check that appropriate errors are raised for invalid values specific to
# MiniBatchNMF parameters
A = np.ones((2, 2))
with pytest.raises(ValueError, match=match):
MiniBatchNMF(**param).fit(A)
def test_initialize_close():
# Test NNDSVD error
# Test that _initialize_nmf error is less than the standard deviation of
# the entries in the matrix.
rng = np.random.mtrand.RandomState(42)
A = np.abs(rng.randn(10, 10))
W, H = nmf._initialize_nmf(A, 10, init="nndsvd")
error = linalg.norm(np.dot(W, H) - A)
sdev = linalg.norm(A - A.mean())
assert error <= sdev
def test_initialize_variants():
# Test NNDSVD variants correctness
# Test that the variants 'nndsvda' and 'nndsvdar' differ from basic
# 'nndsvd' only where the basic version has zeros.
rng = np.random.mtrand.RandomState(42)
data = np.abs(rng.randn(10, 10))
W0, H0 = nmf._initialize_nmf(data, 10, init="nndsvd")
Wa, Ha = nmf._initialize_nmf(data, 10, init="nndsvda")
War, Har = nmf._initialize_nmf(data, 10, init="nndsvdar", random_state=0)
for ref, evl in ((W0, Wa), (W0, War), (H0, Ha), (H0, Har)):
assert_almost_equal(evl[ref != 0], ref[ref != 0])
# ignore UserWarning raised when both solver='mu' and init='nndsvd'
@ignore_warnings(category=UserWarning)
@pytest.mark.parametrize(
["Estimator", "solver"],
[[NMF, {"solver": "cd"}], [NMF, {"solver": "mu"}], [MiniBatchNMF, {}]],
)
@pytest.mark.parametrize("init", (None, "nndsvd", "nndsvda", "nndsvdar", "random"))
@pytest.mark.parametrize("alpha_W", (0.0, 1.0))
@pytest.mark.parametrize("alpha_H", (0.0, 1.0, "same"))
def test_nmf_fit_nn_output(Estimator, solver, init, alpha_W, alpha_H):
# Test that the decomposition does not contain negative values
A = np.c_[5.0 - np.arange(1, 6), 5.0 + np.arange(1, 6)]
model = Estimator(
n_components=2,
init=init,
alpha_W=alpha_W,
alpha_H=alpha_H,
random_state=0,
**solver,
)
transf = model.fit_transform(A)
assert not ((model.components_ < 0).any() or (transf < 0).any())
@pytest.mark.parametrize(
["Estimator", "solver"],
[[NMF, {"solver": "cd"}], [NMF, {"solver": "mu"}], [MiniBatchNMF, {}]],
)
def test_nmf_fit_close(Estimator, solver):
rng = np.random.mtrand.RandomState(42)
# Test that the fit is not too far away
pnmf = Estimator(
5,
init="nndsvdar",
random_state=0,
max_iter=600,
**solver,
)
X = np.abs(rng.randn(6, 5))
assert pnmf.fit(X).reconstruction_err_ < 0.1
def test_nmf_true_reconstruction():
# Test that the fit is not too far away from an exact solution
# (by construction)
n_samples = 15
n_features = 10
n_components = 5
beta_loss = 1
batch_size = 3
max_iter = 1000
rng = np.random.mtrand.RandomState(42)
W_true = np.zeros([n_samples, n_components])
W_array = np.abs(rng.randn(n_samples))
for j in range(n_components):
W_true[j % n_samples, j] = W_array[j % n_samples]
H_true = np.zeros([n_components, n_features])
H_array = np.abs(rng.randn(n_components))
for j in range(n_features):
H_true[j % n_components, j] = H_array[j % n_components]
X = np.dot(W_true, H_true)
model = NMF(
n_components=n_components,
solver="mu",
beta_loss=beta_loss,
max_iter=max_iter,
random_state=0,
)
transf = model.fit_transform(X)
X_calc = np.dot(transf, model.components_)
assert model.reconstruction_err_ < 0.1
assert_allclose(X, X_calc)
mbmodel = MiniBatchNMF(
n_components=n_components,
beta_loss=beta_loss,
batch_size=batch_size,
random_state=0,
max_iter=max_iter,
)
transf = mbmodel.fit_transform(X)
X_calc = np.dot(transf, mbmodel.components_)
assert mbmodel.reconstruction_err_ < 0.1
assert_allclose(X, X_calc, atol=1)
@pytest.mark.parametrize("solver", ["cd", "mu"])
def test_nmf_transform(solver):
# Test that fit_transform is equivalent to fit.transform for NMF
# Test that NMF.transform returns close values
rng = np.random.mtrand.RandomState(42)
A = np.abs(rng.randn(6, 5))
m = NMF(
solver=solver,
n_components=3,
init="random",
random_state=0,
tol=1e-6,
)
ft = m.fit_transform(A)
t = m.transform(A)
assert_allclose(ft, t, atol=1e-1)
def test_minibatch_nmf_transform():
# Test that fit_transform is equivalent to fit.transform for MiniBatchNMF
# Only guaranteed with fresh restarts
rng = np.random.mtrand.RandomState(42)
A = np.abs(rng.randn(6, 5))
m = MiniBatchNMF(
n_components=3,
random_state=0,
tol=1e-3,
fresh_restarts=True,
)
ft = m.fit_transform(A)
t = m.transform(A)
assert_allclose(ft, t)
@pytest.mark.parametrize(
["Estimator", "solver"],
[[NMF, {"solver": "cd"}], [NMF, {"solver": "mu"}], [MiniBatchNMF, {}]],
)
def test_nmf_transform_custom_init(Estimator, solver):
# Smoke test that checks if NMF.transform works with custom initialization
random_state = np.random.RandomState(0)
A = np.abs(random_state.randn(6, 5))
n_components = 4
avg = np.sqrt(A.mean() / n_components)
H_init = np.abs(avg * random_state.randn(n_components, 5))
W_init = np.abs(avg * random_state.randn(6, n_components))
m = Estimator(
n_components=n_components, init="custom", random_state=0, tol=1e-3, **solver
)
m.fit_transform(A, W=W_init, H=H_init)
m.transform(A)
@pytest.mark.parametrize("solver", ("cd", "mu"))
def test_nmf_inverse_transform(solver):
# Test that NMF.inverse_transform returns close values
random_state = np.random.RandomState(0)
A = np.abs(random_state.randn(6, 4))
m = NMF(
solver=solver,
n_components=4,
init="random",
random_state=0,
max_iter=1000,
)
ft = m.fit_transform(A)
A_new = m.inverse_transform(ft)
assert_array_almost_equal(A, A_new, decimal=2)
def test_mbnmf_inverse_transform():
# Test that MiniBatchNMF.transform followed by MiniBatchNMF.inverse_transform
# is close to the identity
rng = np.random.RandomState(0)
A = np.abs(rng.randn(6, 4))
nmf = MiniBatchNMF(
random_state=rng,
max_iter=500,
init="nndsvdar",
fresh_restarts=True,
)
ft = nmf.fit_transform(A)
A_new = nmf.inverse_transform(ft)
assert_allclose(A, A_new, rtol=1e-3, atol=1e-2)
@pytest.mark.parametrize("Estimator", [NMF, MiniBatchNMF])
def test_n_components_greater_n_features(Estimator):
# Smoke test for the case of more components than features.
rng = np.random.mtrand.RandomState(42)
A = np.abs(rng.randn(30, 10))
Estimator(n_components=15, random_state=0, tol=1e-2).fit(A)
@pytest.mark.parametrize(
["Estimator", "solver"],
[[NMF, {"solver": "cd"}], [NMF, {"solver": "mu"}], [MiniBatchNMF, {}]],
)
@pytest.mark.parametrize("alpha_W", (0.0, 1.0))
@pytest.mark.parametrize("alpha_H", (0.0, 1.0, "same"))
def test_nmf_sparse_input(Estimator, solver, alpha_W, alpha_H):
# Test that sparse matrices are accepted as input
from scipy.sparse import csc_matrix
rng = np.random.mtrand.RandomState(42)
A = np.abs(rng.randn(10, 10))
A[:, 2 * np.arange(5)] = 0
A_sparse = csc_matrix(A)
est1 = Estimator(
n_components=5,
init="random",
alpha_W=alpha_W,
alpha_H=alpha_H,
random_state=0,
tol=0,
max_iter=100,
**solver,
)
est2 = clone(est1)
W1 = est1.fit_transform(A)
W2 = est2.fit_transform(A_sparse)
H1 = est1.components_
H2 = est2.components_
assert_allclose(W1, W2)
assert_allclose(H1, H2)
@pytest.mark.parametrize(
["Estimator", "solver"],
[[NMF, {"solver": "cd"}], [NMF, {"solver": "mu"}], [MiniBatchNMF, {}]],
)
def test_nmf_sparse_transform(Estimator, solver):
# Test that transform works on sparse data. Issue #2124
rng = np.random.mtrand.RandomState(42)
A = np.abs(rng.randn(3, 2))
A[1, 1] = 0
A = csc_matrix(A)
model = Estimator(random_state=0, n_components=2, max_iter=400, **solver)
A_fit_tr = model.fit_transform(A)
A_tr = model.transform(A)
assert_allclose(A_fit_tr, A_tr, atol=1e-1)
@pytest.mark.parametrize("init", ["random", "nndsvd"])
@pytest.mark.parametrize("solver", ("cd", "mu"))
@pytest.mark.parametrize("alpha_W", (0.0, 1.0))
@pytest.mark.parametrize("alpha_H", (0.0, 1.0, "same"))
def test_non_negative_factorization_consistency(init, solver, alpha_W, alpha_H):
# Test that the function is called in the same way, either directly
# or through the NMF class
max_iter = 500
rng = np.random.mtrand.RandomState(42)
A = np.abs(rng.randn(10, 10))
A[:, 2 * np.arange(5)] = 0
W_nmf, H, _ = non_negative_factorization(
A,
init=init,
solver=solver,
max_iter=max_iter,
alpha_W=alpha_W,
alpha_H=alpha_H,
random_state=1,
tol=1e-2,
)
W_nmf_2, H, _ = non_negative_factorization(
A,
H=H,
update_H=False,
init=init,
solver=solver,
max_iter=max_iter,
alpha_W=alpha_W,
alpha_H=alpha_H,
random_state=1,
tol=1e-2,
)
model_class = NMF(
init=init,
solver=solver,
max_iter=max_iter,
alpha_W=alpha_W,
alpha_H=alpha_H,
random_state=1,
tol=1e-2,
)
W_cls = model_class.fit_transform(A)
W_cls_2 = model_class.transform(A)
assert_allclose(W_nmf, W_cls)
assert_allclose(W_nmf_2, W_cls_2)
def test_non_negative_factorization_checking():
A = np.ones((2, 2))
# Test parameters checking is public function
nnmf = non_negative_factorization
msg = re.escape(
"Number of components must be a positive integer; got (n_components=1.5)"
)
with pytest.raises(ValueError, match=msg):
nnmf(A, A, A, 1.5, init="random")
msg = re.escape(
"Number of components must be a positive integer; got (n_components='2')"
)
with pytest.raises(ValueError, match=msg):
nnmf(A, A, A, "2", init="random")
msg = re.escape("Negative values in data passed to NMF (input H)")
with pytest.raises(ValueError, match=msg):
nnmf(A, A, -A, 2, init="custom")
msg = re.escape("Negative values in data passed to NMF (input W)")
with pytest.raises(ValueError, match=msg):
nnmf(A, -A, A, 2, init="custom")
msg = re.escape("Array passed to NMF (input H) is full of zeros")
with pytest.raises(ValueError, match=msg):
nnmf(A, A, 0 * A, 2, init="custom")
with ignore_warnings(category=FutureWarning):
# TODO remove in 1.2
msg = "Invalid regularization parameter: got 'spam' instead of one of"
with pytest.raises(ValueError, match=msg):
nnmf(A, A, 0 * A, 2, init="custom", regularization="spam")
def _beta_divergence_dense(X, W, H, beta):
"""Compute the beta-divergence of X and W.H for dense array only.
Used as a reference for testing nmf._beta_divergence.
"""
WH = np.dot(W, H)
if beta == 2:
return squared_norm(X - WH) / 2
WH_Xnonzero = WH[X != 0]
X_nonzero = X[X != 0]
np.maximum(WH_Xnonzero, 1e-9, out=WH_Xnonzero)
if beta == 1:
res = np.sum(X_nonzero * np.log(X_nonzero / WH_Xnonzero))
res += WH.sum() - X.sum()
elif beta == 0:
div = X_nonzero / WH_Xnonzero
res = np.sum(div) - X.size - np.sum(np.log(div))
else:
res = (X_nonzero**beta).sum()
res += (beta - 1) * (WH**beta).sum()
res -= beta * (X_nonzero * (WH_Xnonzero ** (beta - 1))).sum()
res /= beta * (beta - 1)
return res
def test_beta_divergence():
# Compare _beta_divergence with the reference _beta_divergence_dense
n_samples = 20
n_features = 10
n_components = 5
beta_losses = [0.0, 0.5, 1.0, 1.5, 2.0, 3.0]
# initialization
rng = np.random.mtrand.RandomState(42)
X = rng.randn(n_samples, n_features)
np.clip(X, 0, None, out=X)
X_csr = sp.csr_matrix(X)
W, H = nmf._initialize_nmf(X, n_components, init="random", random_state=42)
for beta in beta_losses:
ref = _beta_divergence_dense(X, W, H, beta)
loss = nmf._beta_divergence(X, W, H, beta)
loss_csr = nmf._beta_divergence(X_csr, W, H, beta)
assert_almost_equal(ref, loss, decimal=7)
assert_almost_equal(ref, loss_csr, decimal=7)
def test_special_sparse_dot():
# Test the function that computes np.dot(W, H), only where X is non zero.
n_samples = 10
n_features = 5
n_components = 3
rng = np.random.mtrand.RandomState(42)
X = rng.randn(n_samples, n_features)
np.clip(X, 0, None, out=X)
X_csr = sp.csr_matrix(X)
W = np.abs(rng.randn(n_samples, n_components))
H = np.abs(rng.randn(n_components, n_features))
WH_safe = nmf._special_sparse_dot(W, H, X_csr)
WH = nmf._special_sparse_dot(W, H, X)
# test that both results have same values, in X_csr nonzero elements
ii, jj = X_csr.nonzero()
WH_safe_data = np.asarray(WH_safe[ii, jj]).ravel()
assert_array_almost_equal(WH_safe_data, WH[ii, jj], decimal=10)
# test that WH_safe and X_csr have the same sparse structure
assert_array_equal(WH_safe.indices, X_csr.indices)
assert_array_equal(WH_safe.indptr, X_csr.indptr)
assert_array_equal(WH_safe.shape, X_csr.shape)
@ignore_warnings(category=ConvergenceWarning)
def test_nmf_multiplicative_update_sparse():
# Compare sparse and dense input in multiplicative update NMF
# Also test continuity of the results with respect to beta_loss parameter
n_samples = 20
n_features = 10
n_components = 5
alpha = 0.1
l1_ratio = 0.5
n_iter = 20
# initialization
rng = np.random.mtrand.RandomState(1337)
X = rng.randn(n_samples, n_features)
X = np.abs(X)
X_csr = sp.csr_matrix(X)
W0, H0 = nmf._initialize_nmf(X, n_components, init="random", random_state=42)
for beta_loss in (-1.2, 0, 0.2, 1.0, 2.0, 2.5):
# Reference with dense array X
W, H = W0.copy(), H0.copy()
W1, H1, _ = non_negative_factorization(
X,
W,
H,
n_components,
init="custom",
update_H=True,
solver="mu",
beta_loss=beta_loss,
max_iter=n_iter,
alpha_W=alpha,
l1_ratio=l1_ratio,
random_state=42,
)
# Compare with sparse X
W, H = W0.copy(), H0.copy()
W2, H2, _ = non_negative_factorization(
X_csr,
W,
H,
n_components,
init="custom",
update_H=True,
solver="mu",
beta_loss=beta_loss,
max_iter=n_iter,
alpha_W=alpha,
l1_ratio=l1_ratio,
random_state=42,
)
assert_allclose(W1, W2, atol=1e-7)
assert_allclose(H1, H2, atol=1e-7)
# Compare with almost same beta_loss, since some values have a specific
# behavior, but the results should be continuous w.r.t beta_loss
beta_loss -= 1.0e-5
W, H = W0.copy(), H0.copy()
W3, H3, _ = non_negative_factorization(
X_csr,
W,
H,
n_components,
init="custom",
update_H=True,
solver="mu",
beta_loss=beta_loss,
max_iter=n_iter,
alpha_W=alpha,
l1_ratio=l1_ratio,
random_state=42,
)
assert_allclose(W1, W3, atol=1e-4)
assert_allclose(H1, H3, atol=1e-4)
def test_nmf_negative_beta_loss():
# Test that an error is raised if beta_loss < 0 and X contains zeros.
# Test that the output has not NaN values when the input contains zeros.
n_samples = 6
n_features = 5
n_components = 3
rng = np.random.mtrand.RandomState(42)
X = rng.randn(n_samples, n_features)
np.clip(X, 0, None, out=X)
X_csr = sp.csr_matrix(X)
def _assert_nmf_no_nan(X, beta_loss):
W, H, _ = non_negative_factorization(
X,
init="random",
n_components=n_components,
solver="mu",
beta_loss=beta_loss,
random_state=0,
max_iter=1000,
)
assert not np.any(np.isnan(W))
assert not np.any(np.isnan(H))
msg = "When beta_loss <= 0 and X contains zeros, the solver may diverge."
for beta_loss in (-0.6, 0.0):
with pytest.raises(ValueError, match=msg):
_assert_nmf_no_nan(X, beta_loss)
_assert_nmf_no_nan(X + 1e-9, beta_loss)
for beta_loss in (0.2, 1.0, 1.2, 2.0, 2.5):
_assert_nmf_no_nan(X, beta_loss)
_assert_nmf_no_nan(X_csr, beta_loss)
@pytest.mark.parametrize("beta_loss", [-0.5, 0.0])
def test_minibatch_nmf_negative_beta_loss(beta_loss):
"""Check that an error is raised if beta_loss < 0 and X contains zeros."""
rng = np.random.RandomState(0)
X = rng.normal(size=(6, 5))
X[X < 0] = 0
nmf = MiniBatchNMF(beta_loss=beta_loss, random_state=0)
msg = "When beta_loss <= 0 and X contains zeros, the solver may diverge."
with pytest.raises(ValueError, match=msg):
nmf.fit(X)
@pytest.mark.parametrize(
["Estimator", "solver"],
[[NMF, {"solver": "cd"}], [NMF, {"solver": "mu"}], [MiniBatchNMF, {}]],
)
def test_nmf_regularization(Estimator, solver):
# Test the effect of L1 and L2 regularizations
n_samples = 6
n_features = 5
n_components = 3
rng = np.random.mtrand.RandomState(42)
X = np.abs(rng.randn(n_samples, n_features))
# L1 regularization should increase the number of zeros
l1_ratio = 1.0
regul = Estimator(
n_components=n_components,
alpha_W=0.5,
l1_ratio=l1_ratio,
random_state=42,
**solver,
)
model = Estimator(
n_components=n_components,
alpha_W=0.0,
l1_ratio=l1_ratio,
random_state=42,
**solver,
)
W_regul = regul.fit_transform(X)
W_model = model.fit_transform(X)
H_regul = regul.components_
H_model = model.components_
eps = np.finfo(np.float64).eps
W_regul_n_zeros = W_regul[W_regul <= eps].size
W_model_n_zeros = W_model[W_model <= eps].size
H_regul_n_zeros = H_regul[H_regul <= eps].size
H_model_n_zeros = H_model[H_model <= eps].size
assert W_regul_n_zeros > W_model_n_zeros
assert H_regul_n_zeros > H_model_n_zeros
# L2 regularization should decrease the sum of the squared norm
# of the matrices W and H
l1_ratio = 0.0
regul = Estimator(
n_components=n_components,
alpha_W=0.5,
l1_ratio=l1_ratio,
random_state=42,
**solver,
)
model = Estimator(
n_components=n_components,
alpha_W=0.0,
l1_ratio=l1_ratio,
random_state=42,
**solver,
)
W_regul = regul.fit_transform(X)
W_model = model.fit_transform(X)
H_regul = regul.components_
H_model = model.components_
assert (linalg.norm(W_model)) ** 2.0 + (linalg.norm(H_model)) ** 2.0 > (
linalg.norm(W_regul)
) ** 2.0 + (linalg.norm(H_regul)) ** 2.0
@ignore_warnings(category=ConvergenceWarning)
@pytest.mark.parametrize("solver", ("cd", "mu"))
def test_nmf_decreasing(solver):
# test that the objective function is decreasing at each iteration
n_samples = 20
n_features = 15
n_components = 10
alpha = 0.1
l1_ratio = 0.5
tol = 0.0
# initialization
rng = np.random.mtrand.RandomState(42)
X = rng.randn(n_samples, n_features)
np.abs(X, X)
W0, H0 = nmf._initialize_nmf(X, n_components, init="random", random_state=42)
for beta_loss in (-1.2, 0, 0.2, 1.0, 2.0, 2.5):
if solver != "mu" and beta_loss != 2:
# not implemented
continue
W, H = W0.copy(), H0.copy()
previous_loss = None
for _ in range(30):
# one more iteration starting from the previous results
W, H, _ = non_negative_factorization(
X,
W,
H,
beta_loss=beta_loss,
init="custom",
n_components=n_components,
max_iter=1,
alpha_W=alpha,
solver=solver,
tol=tol,
l1_ratio=l1_ratio,
verbose=0,
random_state=0,
update_H=True,
)
loss = (
nmf._beta_divergence(X, W, H, beta_loss)
+ alpha * l1_ratio * n_features * W.sum()
+ alpha * l1_ratio * n_samples * H.sum()
+ alpha * (1 - l1_ratio) * n_features * (W**2).sum()
+ alpha * (1 - l1_ratio) * n_samples * (H**2).sum()
)
if previous_loss is not None:
assert previous_loss > loss
previous_loss = loss
def test_nmf_underflow():
# Regression test for an underflow issue in _beta_divergence
rng = np.random.RandomState(0)
n_samples, n_features, n_components = 10, 2, 2
X = np.abs(rng.randn(n_samples, n_features)) * 10
W = np.abs(rng.randn(n_samples, n_components)) * 10
H = np.abs(rng.randn(n_components, n_features))
X[0, 0] = 0
ref = nmf._beta_divergence(X, W, H, beta=1.0)
X[0, 0] = 1e-323
res = nmf._beta_divergence(X, W, H, beta=1.0)
assert_almost_equal(res, ref)
@pytest.mark.parametrize(
"dtype_in, dtype_out",
[
(np.float32, np.float32),
(np.float64, np.float64),
(np.int32, np.float64),
(np.int64, np.float64),
],
)
@pytest.mark.parametrize(
["Estimator", "solver"],
[[NMF, {"solver": "cd"}], [NMF, {"solver": "mu"}], [MiniBatchNMF, {}]],
)
def test_nmf_dtype_match(Estimator, solver, dtype_in, dtype_out):
# Check that NMF preserves dtype (float32 and float64)
X = np.random.RandomState(0).randn(20, 15).astype(dtype_in, copy=False)
np.abs(X, out=X)
nmf = Estimator(alpha_W=1.0, alpha_H=1.0, tol=1e-2, random_state=0, **solver)
assert nmf.fit(X).transform(X).dtype == dtype_out
assert nmf.fit_transform(X).dtype == dtype_out
assert nmf.components_.dtype == dtype_out
@pytest.mark.parametrize(
["Estimator", "solver"],
[[NMF, {"solver": "cd"}], [NMF, {"solver": "mu"}], [MiniBatchNMF, {}]],
)
def test_nmf_float32_float64_consistency(Estimator, solver):
# Check that the result of NMF is the same between float32 and float64
X = np.random.RandomState(0).randn(50, 7)
np.abs(X, out=X)
import sys
import warnings
import itertools
import platform
import pytest
from decimal import Decimal
import numpy as np
from numpy.core import umath
from numpy.random import rand, randint, randn
from numpy.testing import (
assert_, assert_equal, assert_raises, assert_raises_regex,
assert_array_equal, assert_almost_equal, assert_array_almost_equal,
assert_warns, HAS_REFCOUNT
)
class TestResize(object):
def test_copies(self):
A = np.array([[1, 2], [3, 4]])
Ar1 = np.array([[1, 2, 3, 4], [1, 2, 3, 4]])
assert_equal(np.resize(A, (2, 4)), Ar1)
Ar2 = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
assert_equal(np.resize(A, (4, 2)), Ar2)
Ar3 = np.array([[1, 2, 3], [4, 1, 2], [3, 4, 1], [2, 3, 4]])
assert_equal(np.resize(A, (4, 3)), Ar3)
def test_zeroresize(self):
A = np.array([[1, 2], [3, 4]])
Ar = np.resize(A, (0,))
assert_array_equal(Ar, np.array([]))
assert_equal(A.dtype, Ar.dtype)
Ar = np.resize(A, (0, 2))
assert_equal(Ar.shape, (0, 2))
Ar = np.resize(A, (2, 0))
assert_equal(Ar.shape, (2, 0))
def test_reshape_from_zero(self):
# See also gh-6740
A = np.zeros(0, dtype=[('a', np.float32)])
Ar = np.resize(A, (2, 1))
assert_array_equal(Ar, np.zeros((2, 1), Ar.dtype))
assert_equal(A.dtype, Ar.dtype)
class TestNonarrayArgs(object):
# check that non-array arguments to functions wrap them in arrays
def test_choose(self):
choices = [[0, 1, 2],
[3, 4, 5],
[5, 6, 7]]
tgt = [5, 1, 5]
a = [2, 0, 1]
out = np.choose(a, choices)
assert_equal(out, tgt)
def test_clip(self):
arr = [-1, 5, 2, 3, 10, -4, -9]
out = np.clip(arr, 2, 7)
tgt = [2, 5, 2, 3, 7, 2, 2]
assert_equal(out, tgt)
def test_compress(self):
arr = [[0, 1, 2, 3, 4],
[5, 6, 7, 8, 9]]
tgt = [[5, 6, 7, 8, 9]]
out = np.compress([0, 1], arr, axis=0)
assert_equal(out, tgt)
def test_count_nonzero(self):
arr = [[0, 1, 7, 0, 0],
[3, 0, 0, 2, 19]]
tgt = np.array([2, 3])
out = np.count_nonzero(arr, axis=1)
assert_equal(out, tgt)
def test_cumproduct(self):
A = [[1, 2, 3], [4, 5, 6]]
assert_(np.all(np.cumproduct(A) == np.array([1, 2, 6, 24, 120, 720])))
def test_diagonal(self):
a = [[0, 1, 2, 3],
[4, 5, 6, 7],
[8, 9, 10, 11]]
out = np.diagonal(a)
tgt = [0, 5, 10]
assert_equal(out, tgt)
def test_mean(self):
A = [[1, 2, 3], [4, 5, 6]]
assert_(np.mean(A) == 3.5)
assert_(np.all(np.mean(A, 0) == np.array([2.5, 3.5, 4.5])))
assert_(np.all(np.mean(A, 1) == np.array([2., 5.])))
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings('always', '', RuntimeWarning)
assert_(np.isnan(np.mean([])))
assert_(w[0].category is RuntimeWarning)
def test_ptp(self):
a = [3, 4, 5, 10, -3, -5, 6.0]
assert_equal(np.ptp(a, axis=0), 15.0)
def test_prod(self):
arr = [[1, 2, 3, 4],
[5, 6, 7, 9],
[10, 3, 4, 5]]
tgt = [24, 1890, 600]
assert_equal(np.prod(arr, axis=-1), tgt)
def test_ravel(self):
a = [[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]]
tgt = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]
assert_equal(np.ravel(a), tgt)
def test_repeat(self):
a = [1, 2, 3]
tgt = [1, 1, 2, 2, 3, 3]
out = np.repeat(a, 2)
assert_equal(out, tgt)
def test_reshape(self):
arr = [[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]]
tgt = [[1, 2, 3, 4, 5, 6], [7, 8, 9, 10, 11, 12]]
assert_equal(np.reshape(arr, (2, 6)), tgt)
def test_round(self):
arr = [1.56, 72.54, 6.35, 3.25]
tgt = [1.6, 72.5, 6.4, 3.2]
assert_equal(np.around(arr, decimals=1), tgt)
def test_searchsorted(self):
arr = [-8, -5, -1, 3, 6, 10]
out = np.searchsorted(arr, 0)
assert_equal(out, 3)
def test_size(self):
A = [[1, 2, 3], [4, 5, 6]]
assert_(np.size(A) == 6)
assert_(np.size(A, 0) == 2)
assert_(np.size(A, 1) == 3)
def test_squeeze(self):
A = [[[1, 1, 1], [2, 2, 2], [3, 3, 3]]]
assert_equal(np.squeeze(A).shape, (3, 3))
assert_equal(np.squeeze(np.zeros((1, 3, 1))).shape, (3,))
assert_equal(np.squeeze(np.zeros((1, 3, 1)), axis=0).shape, (3, 1))
assert_equal(np.squeeze(np.zeros((1, 3, 1)), axis=-1).shape, (1, 3))
assert_equal(np.squeeze(np.zeros((1, 3, 1)), axis=2).shape, (1, 3))
assert_equal(np.squeeze([np.zeros((3, 1))]).shape, (3,))
assert_equal(np.squeeze([np.zeros((3, 1))], axis=0).shape, (3, 1))
assert_equal(np.squeeze([np.zeros((3, 1))], axis=2).shape, (1, 3))
assert_equal(np.squeeze([np.zeros((3, 1))], axis=-1).shape, (1, 3))
def test_std(self):
A = [[1, 2, 3], [4, 5, 6]]
assert_almost_equal(np.std(A), 1.707825127659933)
assert_almost_equal(np.std(A, 0), np.array([1.5, 1.5, 1.5]))
assert_almost_equal(np.std(A, 1), np.array([0.81649658, 0.81649658]))
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings('always', '', RuntimeWarning)
assert_(np.isnan(np.std([])))
assert_(w[0].category is RuntimeWarning)
def test_swapaxes(self):
tgt = [[[0, 4], [2, 6]], [[1, 5], [3, 7]]]
a = [[[0, 1], [2, 3]], [[4, 5], [6, 7]]]
out = np.swapaxes(a, 0, 2)
assert_equal(out, tgt)
def test_sum(self):
m = [[1, 2, 3],
[4, 5, 6],
[7, 8, 9]]
tgt = [[6], [15], [24]]
out = np.sum(m, axis=1, keepdims=True)
assert_equal(tgt, out)
def test_take(self):
tgt = [2, 3, 5]
indices = [1, 2, 4]
a = [1, 2, 3, 4, 5]
out = np.take(a, indices)
assert_equal(out, tgt)
def test_trace(self):
c = [[1, 2], [3, 4], [5, 6]]
assert_equal(np.trace(c), 5)
def test_transpose(self):
arr = [[1, 2], [3, 4], [5, 6]]
tgt = [[1, 3, 5], [2, 4, 6]]
assert_equal(np.transpose(arr, (1, 0)), tgt)
def test_var(self):
A = [[1, 2, 3], [4, 5, 6]]
assert_almost_equal(np.var(A), 2.9166666666666665)
assert_almost_equal(np.var(A, 0), np.array([2.25, 2.25, 2.25]))
assert_almost_equal(np.var(A, 1), np.array([0.66666667, 0.66666667]))
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings('always', '', RuntimeWarning)
assert_(np.isnan(np.var([])))
assert_(w[0].category is RuntimeWarning)
B = np.array([None, 0])
B[0] = 1j
assert_almost_equal(np.var(B), 0.25)
class TestIsscalar(object):
def test_isscalar(self):
assert_(np.isscalar(3.1))
assert_(np.isscalar(np.int16(12345)))
assert_(np.isscalar(False))
assert_(np.isscalar('numpy'))
assert_(not np.isscalar([3.1]))
assert_(not np.isscalar(None))
# PEP 3141
from fractions import Fraction
assert_(np.isscalar(Fraction(5, 17)))
from numbers import Number
assert_(np.isscalar(Number()))
class TestBoolScalar(object):
def test_logical(self):
f = np.False_
t = np.True_
s = "xyz"
assert_((t and s) is s)
assert_((f and s) is f)
def test_bitwise_or(self):
f = np.False_
t = np.True_
assert_((t | t) is t)
assert_((f | t) is t)
assert_((t | f) is t)
assert_((f | f) is f)
def test_bitwise_and(self):
f = np.False_
t = np.True_
assert_((t & t) is t)
assert_((f & t) is f)
assert_((t & f) is f)
assert_((f & f) is f)
def test_bitwise_xor(self):
f = np.False_
t = np.True_
assert_((t ^ t) is f)
assert_((f ^ t) is t)
assert_((t ^ f) is t)
assert_((f ^ f) is f)
class TestBoolArray(object):
def setup(self):
# offset for simd tests
self.t = np.array([True] * 41, dtype=bool)[1::]
self.f = np.array([False] * 41, dtype=bool)[1::]
self.o = np.array([False] * 42, dtype=bool)[2::]
self.nm = self.f.copy()
self.im = self.t.copy()
self.nm[3] = True
self.nm[-2] = True
self.im[3] = False
self.im[-2] = False
def test_all_any(self):
assert_(self.t.all())
assert_(self.t.any())
assert_(not self.f.all())
assert_(not self.f.any())
assert_(self.nm.any())
assert_(self.im.any())
assert_(not self.nm.all())
assert_(not self.im.all())
# check bad element in all positions
for i in range(256 - 7):
d = np.array([False] * 256, dtype=bool)[7::]
d[i] = True
assert_(np.any(d))
e = np.array([True] * 256, dtype=bool)[7::]
e[i] = False
assert_(not np.all(e))
assert_array_equal(e, ~d)
# big array test for blocked libc loops
for i in list(range(9, 6000, 507)) + [7764, 90021, -10]:
d = np.array([False] * 100043, dtype=bool)
d[i] = True
assert_(np.any(d), msg="%r" % i)
e = np.array([True] * 100043, dtype=bool)
e[i] = False
assert_(not np.all(e), msg="%r" % i)
def test_logical_not_abs(self):
assert_array_equal(~self.t, self.f)
assert_array_equal(np.abs(~self.t), self.f)
assert_array_equal(np.abs(~self.f), self.t)
assert_array_equal(np.abs(self.f), self.f)
assert_array_equal(~np.abs(self.f), self.t)
assert_array_equal(~np.abs(self.t), self.f)
assert_array_equal(np.abs(~self.nm), self.im)
np.logical_not(self.t, out=self.o)
assert_array_equal(self.o, self.f)
np.abs(self.t, out=self.o)
assert_array_equal(self.o, self.t)
def test_logical_and_or_xor(self):
assert_array_equal(self.t | self.t, self.t)
assert_array_equal(self.f | self.f, self.f)
assert_array_equal(self.t | self.f, self.t)
assert_array_equal(self.f | self.t, self.t)
np.logical_or(self.t, self.t, out=self.o)
assert_array_equal(self.o, self.t)
assert_array_equal(self.t & self.t, self.t)
assert_array_equal(self.f & self.f, self.f)
assert_array_equal(self.t & self.f, self.f)
assert_array_equal(self.f & self.t, self.f)
np.logical_and(self.t, self.t, out=self.o)
assert_array_equal(self.o, self.t)
assert_array_equal(self.t ^ self.t, self.f)
assert_array_equal(self.f ^ self.f, self.f)
assert_array_equal(self.t ^ self.f, self.t)
assert_array_equal(self.f ^ self.t, self.t)
np.logical_xor(self.t, self.t, out=self.o)
assert_array_equal(self.o, self.f)
assert_array_equal(self.nm & self.t, self.nm)
assert_array_equal(self.im & self.f, False)
assert_array_equal(self.nm & True, self.nm)
assert_array_equal(self.im & False, self.f)
assert_array_equal(self.nm | self.t, self.t)
assert_array_equal(self.im | self.f, self.im)
assert_array_equal(self.nm | True, self.t)
assert_array_equal(self.im | False, self.im)
assert_array_equal(self.nm ^ self.t, self.im)
assert_array_equal(self.im ^ self.f, self.im)
assert_array_equal(self.nm ^ True, self.im)
assert_array_equal(self.im ^ False, self.im)
class TestBoolCmp(object):
def setup(self):
self.f = np.ones(256, dtype=np.float32)
self.ef = np.ones(self.f.size, dtype=bool)
self.d = np.ones(128, dtype=np.float64)
self.ed = np.ones(self.d.size, dtype=bool)
# generate values for all permutation of 256bit simd vectors
s = 0
for i in range(32):
self.f[s:s+8] = [i & 2**x for x in range(8)]
self.ef[s:s+8] = [(i & 2**x) != 0 for x in range(8)]
s += 8
s = 0
for i in range(16):
self.d[s:s+4] = [i & 2**x for x in range(4)]
self.ed[s:s+4] = [(i & 2**x) != 0 for x in range(4)]
s += 4
self.nf = self.f.copy()
self.nd = self.d.copy()
self.nf[self.ef] = np.nan
self.nd[self.ed] = np.nan
self.inff = self.f.copy()
self.infd = self.d.copy()
self.inff[::3][self.ef[::3]] = np.inf
self.infd[::3][self.ed[::3]] = np.inf
self.inff[1::3][self.ef[1::3]] = -np.inf
self.infd[1::3][self.ed[1::3]] = -np.inf
self.inff[2::3][self.ef[2::3]] = np.nan
self.infd[2::3][self.ed[2::3]] = np.nan
self.efnonan = self.ef.copy()
self.efnonan[2::3] = False
self.ednonan = self.ed.copy()
self.ednonan[2::3] = False
self.signf = self.f.copy()
self.signd = self.d.copy()
self.signf[self.ef] *= -1.
self.signd[self.ed] *= -1.
self.signf[1::6][self.ef[1::6]] = -np.inf
self.signd[1::6][self.ed[1::6]] = -np.inf
self.signf[3::6][self.ef[3::6]] = -np.nan
self.signd[3::6][self.ed[3::6]] = -np.nan
self.signf[4::6][self.ef[4::6]] = -0.
self.signd[4::6][self.ed[4::6]] = -0.
def test_float(self):
# offset for alignment test
for i in range(4):
assert_array_equal(self.f[i:] > 0, self.ef[i:])
assert_array_equal(self.f[i:] - 1 >= 0, self.ef[i:])
assert_array_equal(self.f[i:] == 0, ~self.ef[i:])
assert_array_equal(-self.f[i:] < 0, self.ef[i:])
assert_array_equal(-self.f[i:] + 1 <= 0, self.ef[i:])
r = self.f[i:] != 0
assert_array_equal(r, self.ef[i:])
r2 = self.f[i:] != np.zeros_like(self.f[i:])
r3 = 0 != self.f[i:]
assert_array_equal(r, r2)
assert_array_equal(r, r3)
# check bool == 0x1
assert_array_equal(r.view(np.int8), r.astype(np.int8))
assert_array_equal(r2.view(np.int8), r2.astype(np.int8))
assert_array_equal(r3.view(np.int8), r3.astype(np.int8))
# isnan on amd64 takes the same code path
assert_array_equal(np.isnan(self.nf[i:]), self.ef[i:])
assert_array_equal(np.isfinite(self.nf[i:]), ~self.ef[i:])
assert_array_equal(np.isfinite(self.inff[i:]), ~self.ef[i:])
assert_array_equal(np.isinf(self.inff[i:]), self.efnonan[i:])
assert_array_equal(np.signbit(self.signf[i:]), self.ef[i:])
def test_double(self):
# offset for alignment test
for i in range(2):
assert_array_equal(self.d[i:] > 0, self.ed[i:])
assert_array_equal(self.d[i:] - 1 >= 0, self.ed[i:])
assert_array_equal(self.d[i:] == 0, ~self.ed[i:])
assert_array_equal(-self.d[i:] < 0, self.ed[i:])
assert_array_equal(-self.d[i:] + 1 <= 0, self.ed[i:])
r = self.d[i:] != 0
assert_array_equal(r, self.ed[i:])
r2 = self.d[i:] != np.zeros_like(self.d[i:])
r3 = 0 != self.d[i:]
assert_array_equal(r, r2)
assert_array_equal(r, r3)
# check bool == 0x1
assert_array_equal(r.view(np.int8), r.astype(np.int8))
assert_array_equal(r2.view(np.int8), r2.astype(np.int8))
assert_array_equal(r3.view(np.int8), r3.astype(np.int8))
# isnan on amd64 takes the same code path
assert_array_equal(np.isnan(self.nd[i:]), self.ed[i:])
assert_array_equal(np.isfinite(self.nd[i:]), ~self.ed[i:])
assert_array_equal(np.isfinite(self.infd[i:]), ~self.ed[i:])
assert_array_equal(np.isinf(self.infd[i:]), self.ednonan[i:])
assert_array_equal(np.signbit(self.signd[i:]), self.ed[i:])
class TestSeterr(object):
def test_default(self):
err = np.geterr()
assert_equal(err,
dict(divide='warn',
invalid='warn',
over='warn',
under='ignore')
)
def test_set(self):
with np.errstate():
err = np.seterr()
old = np.seterr(divide='print')
assert_(err == old)
new = np.seterr()
assert_(new['divide'] == 'print')
np.seterr(over='raise')
assert_(np.geterr()['over'] == 'raise')
assert_(new['divide'] == 'print')
np.seterr(**old)
assert_(np.geterr() == old)
@pytest.mark.skipif(platform.machine() == "armv5tel", reason="See gh-413.")
def test_divide_err(self):
with np.errstate(divide='raise'):
with assert_raises(FloatingPointError):
np.array([1.]) / np.array([0.])
np.seterr(divide='ignore')
np.array([1.]) / np.array([0.])
def test_errobj(self):
olderrobj = np.geterrobj()
self.called = 0
try:
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
with np.errstate(divide='warn'):
np.seterrobj([20000, 1, None])
np.array([1.]) / np.array([0.])
assert_equal(len(w), 1)
def log_err(*args):
self.called += 1
extobj_err = args
assert_(len(extobj_err) == 2)
assert_("divide" in extobj_err[0])
with np.errstate(divide='ignore'):
np.seterrobj([20000, 3, log_err])
np.array([1.]) / np.array([0.])
assert_equal(self.called, 1)
np.seterrobj(olderrobj)
with np.errstate(divide='ignore'):
np.divide(1., 0., extobj=[20000, 3, log_err])
assert_equal(self.called, 2)
finally:
np.seterrobj(olderrobj)
del self.called
def test_errobj_noerrmask(self):
# errmask = 0 has a special code path for the default
olderrobj = np.geterrobj()
try:
# set errobj to something non default
np.seterrobj([umath.UFUNC_BUFSIZE_DEFAULT,
umath.ERR_DEFAULT + 1, None])
# call a ufunc
np.isnan(np.array([6]))
# same with the default, lots of times to get rid of possible
# pre-existing stack in the code
for i in range(10000):
np.seterrobj([umath.UFUNC_BUFSIZE_DEFAULT, umath.ERR_DEFAULT,
None])
np.isnan(np.array([6]))
finally:
np.seterrobj(olderrobj)
class TestFloatExceptions(object):
def assert_raises_fpe(self, fpeerr, flop, x, y):
ftype = type(x)
try:
flop(x, y)
assert_(False,
"Type %s did not raise fpe error '%s'." % (ftype, fpeerr))
except FloatingPointError as exc:
assert_(str(exc).find(fpeerr) >= 0,
"Type %s raised wrong fpe error '%s'." % (ftype, exc))
def assert_op_raises_fpe(self, fpeerr, flop, sc1, sc2):
# Check that fpe exception is raised.
#
# Given a floating operation `flop` and two scalar values, check that
# the operation raises the floating point exception specified by
# `fpeerr`. Tests all variants with 0-d array scalars as well.
self.assert_raises_fpe(fpeerr, flop, sc1, sc2)
self.assert_raises_fpe(fpeerr, flop, sc1[()], sc2)
self.assert_raises_fpe(fpeerr, flop, sc1, sc2[()])
self.assert_raises_fpe(fpeerr, flop, sc1[()], sc2[()])
def test_floating_exceptions(self):
# Test basic arithmetic function errors
with np.errstate(all='raise'):
# Test for all real and complex float types
for typecode in np.typecodes['AllFloat']:
ftype = np.obj2sctype(typecode)
if np.dtype(ftype).kind == 'f':
# Get some extreme values for the type
fi = np.finfo(ftype)
ft_tiny = fi.tiny
ft_max = fi.max
ft_eps = fi.eps
underflow = 'underflow'
divbyzero = 'divide by zero'
else:
# 'c', complex, corresponding real dtype
rtype = type(ftype(0).real)
fi = np.finfo(rtype)
ft_tiny = ftype(fi.tiny)
ft_max = ftype(fi.max)
ft_eps = ftype(fi.eps)
# The complex types raise different exceptions
underflow = ''
divbyzero = ''
overflow = 'overflow'
invalid = 'invalid'
self.assert_raises_fpe(underflow,
lambda a, b: a/b, ft_tiny, ft_max)
self.assert_raises_fpe(underflow,
lambda a, b: a*b, ft_tiny, ft_tiny)
self.assert_raises_fpe(overflow,
lambda a, b: a*b, ft_max, ftype(2))
self.assert_raises_fpe(overflow,
lambda a, b: a/b, ft_max, ftype(0.5))
self.assert_raises_fpe(overflow,
lambda a, b: a+b, ft_max, ft_max*ft_eps)
self.assert_raises_fpe(overflow,
lambda a, b: a-b, -ft_max, ft_max*ft_eps)
self.assert_raises_fpe(overflow,
np.power, ftype(2), ftype(2**fi.nexp))
self.assert_raises_fpe(divbyzero,
lambda a, b: a/b, ftype(1), ftype(0))
self.assert_raises_fpe(invalid,
lambda a, b: a/b, ftype(np.inf), ftype(np.inf))
self.assert_raises_fpe(invalid,
lambda a, b: a/b, ftype(0), ftype(0))
self.assert_raises_fpe(invalid,
lambda a, b: a-b, ftype(np.inf), ftype(np.inf))
self.assert_raises_fpe(invalid,
lambda a, b: a+b, ftype(np.inf), ftype(-np.inf))
self.assert_raises_fpe(invalid,
lambda a, b: a*b, ftype(0), ftype(np.inf))
def test_warnings(self):
# test warning code path
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
with np.errstate(all="warn"):
np.divide(1, 0.)
assert_equal(len(w), 1)
assert_("divide by zero" in str(w[0].message))
np.array(1e300) * np.array(1e300)
assert_equal(len(w), 2)
assert_("overflow" in str(w[-1].message))
np.array(np.inf) - np.array(np.inf)
assert_equal(len(w), 3)
assert_("invalid value" in str(w[-1].message))
np.array(1e-300) * np.array(1e-300)
assert_equal(len(w), 4)
assert_("underflow" in str(w[-1].message))
class TestTypes(object):
def check_promotion_cases(self, promote_func):
# tests that the scalars get coerced correctly.
b = np.bool_(0)
i8, i16, i32, i64 = np.int8(0), np.int16(0), np.int32(0), np.int64(0)
u8, u16, u32, u64 = np.uint8(0), np.uint16(0), np.uint32(0), np.uint64(0)
f32, f64, fld = np.float32(0), np.float64(0), np.longdouble(0)
c64, c128, cld = np.complex64(0), np.complex128(0), np.clongdouble(0)
# coercion within the same kind
assert_equal(promote_func(i8, i16), np.dtype(np.int16))
assert_equal(promote_func(i32, i8), np.dtype(np.int32))
assert_equal(promote_func(i16, i64), np.dtype(np.int64))
assert_equal(promote_func(u8, u32), np.dtype(np.uint32))
assert_equal(promote_func(f32, f64), np.dtype(np.float64))
assert_equal(promote_func(fld, f32), np.dtype(np.longdouble))
assert_equal(promote_func(f64, fld), np.dtype(np.longdouble))
assert_equal(promote_func(c128, c64), np.dtype(np.complex128))
assert_equal(promote_func(cld, c128), np.dtype(np.clongdouble))
assert_equal(promote_func(c64, fld), np.dtype(np.clongdouble))
# coercion between kinds
assert_equal(promote_func(b, i32), np.dtype(np.int32))
assert_equal(promote_func(b, u8), np.dtype(np.uint8))
assert_equal(promote_func(i8, u8), np.dtype(np.int16))
assert_equal(promote_func(u8, i32), np.dtype(np.int32))
assert_equal(promote_func(i64, u32), np.dtype(np.int64))
assert_equal(promote_func(u64, i32), np.dtype(np.float64))
assert_equal(promote_func(i32, f32), np.dtype(np.float64))
assert_equal(promote_func(i64, f32), np.dtype(np.float64))
assert_equal(promote_func(f32, i16), np.dtype(np.float32))
assert_equal(promote_func(f32, u32), np.dtype(np.float64))
assert_equal(promote_func(f32, c64), np.dtype(np.complex64))
assert_equal(promote_func(c128, f32), np.dtype(np.complex128))
assert_equal(promote_func(cld, f64), np.dtype(np.clongdouble))
# coercion between scalars and 1-D arrays
assert_equal(promote_func(np.array([b]), i8), np.dtype(np.int8))
assert_equal(promote_func(np.array([b]), u8), np.dtype(np.uint8))
assert_equal(promote_func(np.array([b]), i32), np.dtype(np.int32))
assert_equal(promote_func(np.array([b]), u32), np.dtype(np.uint32))
assert_equal(promote_func(np.array([i8]), i64), np.dtype(np.int8))
assert_equal(promote_func(u64, np.array([i32])), np.dtype(np.int32))
assert_equal(promote_func(i64, np.array([u32])), np.dtype(np.uint32))
assert_equal(promote_func(np.int32(-1), np.array([u64])),
np.dtype(np.float64))
assert_equal(promote_func(f64, np.array([f32])), np.dtype(np.float32))
assert_equal(promote_func(fld, np.array([f32])), np.dtype(np.float32))
assert_equal(promote_func(np.array([f64]), fld), np.dtype(np.float64))
assert_equal(promote_func(fld, np.array([c64])),
np.dtype(np.complex64))
assert_equal(promote_func(c64, np.array([f64])),
np.dtype(np.complex128))
assert_equal(promote_func(np.complex64(3j), np.array([f64])),
np.dtype(np.complex128))
# coercion between scalars and 1-D arrays, where
# the scalar has greater kind than the array
assert_equal(promote_func(np.array([b]), f64), np.dtype(np.float64))
assert_equal(promote_func(np.array([b]), i64), np.dtype(np.int64))
assert_equal(promote_func(np.array([b]), u64), np.dtype(np.uint64))
assert_equal(promote_func(np.array([i8]), f64), np.dtype(np.float64))
assert_equal(promote_func(np.array([u16]), f64), np.dtype(np.float64))
# uint and int are treated as the same "kind" for
# the purposes of array-scalar promotion.
assert_equal(promote_func(np.array([u16]), i32), np.dtype(np.uint16))
# float and complex are treated as the same "kind" for
# the purposes of array-scalar promotion, so that you can do
# (0j + float32array) to get a complex64 array instead of
# a complex128 array.
assert_equal(promote_func(np.array([f32]), c128),
np.dtype(np.complex64))
def test_coercion(self):
def res_type(a, b):
return np.add(a, b).dtype
self.check_promotion_cases(res_type)
# Use-case: float/complex scalar * bool/int8 array
# shouldn't narrow the float/complex type
for a in [np.array([True, False]), np.array([-3, 12], dtype=np.int8)]:
b = 1.234 * a
assert_equal(b.dtype, np.dtype('f8'), "array type %s" % a.dtype)
b = np.longdouble(1.234) * a
assert_equal(b.dtype, np.dtype(np.longdouble),
"array type %s" % a.dtype)
b = np.float64(1.234) * a
assert_equal(b.dtype, np.dtype('f8'), "array type %s" % a.dtype)
b = np.float32(1.234) * a
assert_equal(b.dtype, np.dtype('f4'), "array type %s" % a.dtype)
b = np.float16(1.234) * a
assert_equal(b.dtype, np.dtype('f2'), "array type %s" % a.dtype)
b = 1.234j * a
assert_equal(b.dtype, np.dtype('c16'), "array type %s" % a.dtype)
b = np.clongdouble(1.234j) * a
assert_equal(b.dtype, np.dtype(np.clongdouble),
"array type %s" % a.dtype)
b = np.complex128(1.234j) * a
assert_equal(b.dtype, np.dtype('c16'), "array type %s" % a.dtype)
b = np.complex64(1.234j) * a
assert_equal(b.dtype, np.dtype('c8'), "array type %s" % a.dtype)
# The following use-case is problematic, and to resolve its
# tricky side-effects requires more changes.
#
# Use-case: (1-t)*a, where 't' is a boolean array and 'a' is
# a float32, shouldn't promote to float64
#
# a = np.array([1.0, 1.5], dtype=np.float32)
# t = np.array([True, False])
# b = t*a
# assert_equal(b, [1.0, 0.0])
# assert_equal(b.dtype, np.dtype('f4'))
# b = (1-t)*a
# assert_equal(b, [0.0, 1.5])
# assert_equal(b.dtype, np.dtype('f4'))
#
# Probably ~t (bitwise negation) is more proper to use here,
# but this is arguably less intuitive to understand at a glance, and
# would fail if 't' is actually an integer array instead of boolean:
#
# b = (~t)*a
# assert_equal(b, [0.0, 1.5])
# assert_equal(b.dtype, np.dtype('f4'))
def test_result_type(self):
self.check_promotion_cases(np.result_type)
assert_(np.result_type(None) == np.dtype(None))
def test_promote_types_endian(self):
# promote_types should always return native-endian types
assert_equal(np.promote_types('<i8', '<i8'), np.dtype('i8'))
assert_equal(np.promote_types('>i8', '>i8'), np.dtype('i8'))
assert_equal(np.promote_types('>i8', '>U16'), np.dtype('U21'))
assert_equal(np.promote_types('<i8', '<U16'), np.dtype('U21'))
assert_equal(np.promote_types('>U16', '>i8'), np.dtype('U21'))
assert_equal(np.promote_types('<U16', '<i8'), np.dtype('U21'))
assert_equal(np.promote_types('<S5', '<U8'), np.dtype('U8'))
assert_equal(np.promote_types('>S5', '>U8'), np.dtype('U8'))
assert_equal(np.promote_types('<U8', '<S5'), np.dtype('U8'))
assert_equal(np.promote_types('>U8', '>S5'), np.dtype('U8'))
assert_equal(np.promote_types('<U5', '<U8'), np.dtype('U8'))
assert_equal(np.promote_types('>U8', '>U5'), np.dtype('U8'))
assert_equal(np.promote_types('<M8', '<M8'), np.dtype('M8'))
assert_equal(np.promote_types('>M8', '>M8'), np.dtype('M8'))
assert_equal(np.promote_types('<m8', '<m8'), np.dtype('m8'))
assert_equal(np.promote_types('>m8', '>m8'), np.dtype('m8'))
def test_promote_types_strings(self):
assert_equal(np.promote_types('bool', 'S'), np.dtype('S5'))
assert_equal(np.promote_types('b', 'S'), np.dtype('S4'))
assert_equal(np.promote_types('u1', 'S'), np.dtype('S3'))
assert_equal(np.promote_types('u2', 'S'), np.dtype('S5'))
assert_equal(np.promote_types('u4', 'S'), np.dtype('S10'))
assert_equal(np.promote_types('u8', 'S'), np.dtype('S20'))
assert_equal(np.promote_types('i1', 'S'), np.dtype('S4'))
assert_equal(np.promote_types('i2', 'S'), np.dtype('S6'))
assert_equal(np.promote_types('i4', 'S'), np.dtype('S11'))
assert_equal(np.promote_types('i8', 'S'), np.dtype('S21'))
assert_equal(np.promote_types('bool', 'U'), np.dtype('U5'))
assert_equal(np.promote_types('b', 'U'), np.dtype('U4'))
assert_equal(np.promote_types('u1', 'U'), np.dtype('U3'))
assert_equal(np.promote_types('u2', 'U'), np.dtype('U5'))
assert_equal(np.promote_types('u4', 'U'), np.dtype('U10'))
assert_equal(np.promote_types('u8', 'U'), np.dtype('U20'))
assert_equal(np.promote_types('i1', 'U'), np.dtype('U4'))
assert_equal(np.promote_types('i2', 'U'), np.dtype('U6'))
assert_equal(np.promote_types('i4', 'U'), np.dtype('U11'))
assert_equal(np.promote_types('i8', 'U'), np.dtype('U21'))
assert_equal(np.promote_types('bool', 'S1'), np.dtype('S5'))
assert_equal(np.promote_types('bool', 'S30'), np.dtype('S30'))
assert_equal(np.promote_types('b', 'S1'), np.dtype('S4'))
assert_equal(np.promote_types('b', 'S30'), np.dtype('S30'))
assert_equal(np.promote_types('u1', 'S1'), np.dtype('S3'))
assert_equal(np.promote_types('u1', 'S30'), np.dtype('S30'))
assert_equal(np.promote_types('u2', 'S1'), np.dtype('S5'))
assert_equal(np.promote_types('u2', 'S30'), np.dtype('S30'))
assert_equal(np.promote_types('u4', 'S1'), np.dtype('S10'))
assert_equal(np.promote_types('u4', 'S30'), np.dtype('S30'))
assert_equal(np.promote_types('u8', 'S1'), np.dtype('S20'))
assert_equal(np.promote_types('u8', 'S30'), np.dtype('S30'))
def test_can_cast(self):
assert_(np.can_cast(np.int32, np.int64))
assert_(np.can_cast(np.float64, complex))
assert_(not np.can_cast(complex, float))
assert_(np.can_cast('i8', 'f8'))
assert_(not np.can_cast('i8', 'f4'))
assert_(np.can_cast('i4', 'S11'))
assert_(np.can_cast('i8', 'i8', 'no'))
assert_(not np.can_cast('<i8', '>i8', 'no'))
assert_(np.can_cast('<i8', '>i8', 'equiv'))
assert_(not np.can_cast('<i4', '>i8', 'equiv'))
assert_(np.can_cast('<i4', '>i8', 'safe'))
assert_(not np.can_cast('<i8', '>i4', 'safe'))
assert_(np.can_cast('<i8', '>i4', 'same_kind'))
assert_(not np.can_cast('<i8', '>u4', 'same_kind'))
assert_(np.can_cast('<i8', '>u4', 'unsafe'))
assert_(np.can_cast('bool', 'S5'))
assert_(not np.can_cast('bool', 'S4'))
assert_(np.can_cast('b', 'S4'))
assert_(not np.can_cast('b', 'S3'))
assert_(np.can_cast('u1', 'S3'))
assert_(not np.can_cast('u1', 'S2'))
assert_(np.can_cast('u2', 'S5'))
assert_(not np.can_cast('u2', 'S4'))
assert_(np.can_cast('u4', 'S10'))
assert_(not np.can_cast('u4', 'S9'))
assert_(np.can_cast('u8', 'S20'))
assert_(not np.can_cast('u8', 'S19'))
assert_(np.can_cast('i1', 'S4'))
assert_(not np.can_cast('i1', 'S3'))
assert_(np.can_cast('i2', 'S6'))
assert_(not np.can_cast('i2', 'S5'))
assert_(np.can_cast('i4', 'S11'))
assert_(not np.can_cast('i4', 'S10'))
assert_(np.can_cast('i8', 'S21'))
assert_(not np.can_cast('i8', 'S20'))
assert_(np.can_cast('bool', 'S5'))
assert_(not np.can_cast('bool', 'S4'))
assert_(np.can_cast('b', 'U4'))
assert_(not np.can_cast('b', 'U3'))
assert_(np.can_cast('u1', 'U3'))
assert_(not np.can_cast('u1', 'U2'))
assert_(np.can_cast('u2', 'U5'))
assert_(not np.can_cast('u2', 'U4'))
assert_(np.can_cast('u4', 'U10'))
assert_(not np.can_cast('u4', 'U9'))
assert_(np.can_cast('u8', 'U20'))
assert_(not np.can_cast('u8', 'U19'))
assert_(np.can_cast('i1', 'U4'))
assert_(not np.can_cast('i1', 'U3'))
assert_(np.can_cast('i2', 'U6'))
assert_(not np.can_cast('i2', 'U5'))
assert_(np.can_cast('i4', 'U11'))
assert_(not np.can_cast('i4', 'U10'))
assert_(np.can_cast('i8', 'U21'))
assert_(not np.can_cast('i8', 'U20'))
assert_raises(TypeError, np.can_cast, 'i4', None)
assert_raises(TypeError, np.can_cast, None, 'i4')
# Also test keyword arguments
assert_(np.can_cast(from_=np.int32, to=np.int64))
def test_can_cast_simple_to_structured(self):
# Non-structured can only be cast to structured in 'unsafe' mode.
assert_(not np.can_cast('i4', 'i4,i4'))
assert_(not np.can_cast('i4', 'i4,i2'))
assert_(np.can_cast('i4', 'i4,i4', casting='unsafe'))
assert_(np.can_cast('i4', 'i4,i2', casting='unsafe'))
# Even if there is just a single field which is OK.
assert_(not np.can_cast('i2', [('f1', 'i4')]))
assert_(not np.can_cast('i2', [('f1', 'i4')], casting='same_kind'))
assert_(np.can_cast('i2', [('f1', 'i4')], casting='unsafe'))
# It should be the same for recursive structured or subarrays.
assert_(not np.can_cast('i2', [('f1', 'i4,i4')]))
assert_(np.can_cast('i2', [('f1', 'i4,i4')], casting='unsafe'))
assert_(not np.can_cast('i2', [('f1', '(2,3)i4')]))
assert_(np.can_cast('i2', [('f1', '(2,3)i4')], casting='unsafe'))
def test_can_cast_structured_to_simple(self):
# Need unsafe casting for structured to simple.
assert_(not np.can_cast([('f1', 'i4')], 'i4'))
assert_(np.can_cast([('f1', 'i4')], 'i4', casting='unsafe'))
assert_(np.can_cast([('f1', 'i4')], 'i2', casting='unsafe'))
# Since it is unclear what is being cast, multiple fields to
# single should not work even for unsafe casting.
assert_(not np.can_cast('i4,i4', 'i4', casting='unsafe'))
# But a single field inside a single field is OK.
assert_(not np.can_cast([('f1', [('x', 'i4')])], 'i4'))
assert_(np.can_cast([('f1', [('x', 'i4')])], 'i4', casting='unsafe'))
# And a subarray is fine too - it will just take the first element
# (arguably not very consistently; might also take the first field).
assert_(not np.can_cast([('f0', '(3,)i4')], 'i4'))
assert_(np.can_cast([('f0', '(3,)i4')], 'i4', casting='unsafe'))
# But a structured subarray with multiple fields should fail.
assert_(not np.can_cast([('f0', ('i4,i4'), (2,))], 'i4',
casting='unsafe'))
def test_can_cast_values(self):
# gh-5917
for dt in np.sctypes['int'] + np.sctypes['uint']:
ii = np.iinfo(dt)
assert_(np.can_cast(ii.min, dt))
assert_(np.can_cast(ii.max, dt))
assert_(not np.can_cast(ii.min - 1, dt))
assert_(not np.can_cast(ii.max + 1, dt))
for dt in np.sctypes['float']:
fi = np.finfo(dt)
assert_(np.can_cast(fi.min, dt))
assert_(np.can_cast(fi.max, dt))
# Custom exception class to test exception propagation in fromiter
class NIterError(Exception):
pass
class TestFromiter(object):
def makegen(self):
for x in range(24):
yield x**2
def test_types(self):
ai32 = np.fromiter(self.makegen(), np.int32)
ai64 = np.fromiter(self.makegen(), np.int64)
af = np.fromiter(self.makegen(), float)
assert_(ai32.dtype == np.dtype(np.int32))
assert_(ai64.dtype == np.dtype(np.int64))
assert_(af.dtype == np.dtype(float))
def test_lengths(self):
expected = np.array(list(self.makegen()))
a = np.fromiter(self.makegen(), int)
a20 = np.fromiter(self.makegen(), int, 20)
assert_(len(a) == len(expected))
assert_(len(a20) == 20)
assert_raises(ValueError, np.fromiter,
self.makegen(), int, len(expected) + 10)
def test_values(self):
expected = np.array(list(self.makegen()))
a = np.fromiter(self.makegen(), int)
a20 = np.fromiter(self.makegen(), int, 20)
assert_(np.alltrue(a == expected, axis=0))
assert_(np.alltrue(a20 == expected[:20], axis=0))
def load_data(self, n, eindex):
# Utility method for the issue 2592 tests.
# Raise an exception at the desired index in the iterator.
for e in range(n):
if e == eindex:
raise NIterError('error at index %s' % eindex)
yield e
def test_2592(self):
# Test iteration exceptions are correctly raised.
count, eindex = 10, 5
assert_raises(NIterError, np.fromiter,
self.load_data(count, eindex), dtype=int, count=count)
def test_2592_edge(self):
# Test iter. exceptions, edge case (exception at end of iterator).
count = 10
eindex = count-1
assert_raises(NIterError, np.fromiter,
self.load_data(count, eindex), dtype=int, count=count)
class TestNonzero(object):
def test_nonzero_trivial(self):
assert_equal(np.count_nonzero(np.array([])), 0)
assert_equal(np.count_nonzero(np.array([], dtype='?')), 0)
assert_equal(np.nonzero(np.array([])), ([],))
assert_equal(np.count_nonzero(np.array([0])), 0)
assert_equal(np.count_nonzero(np.array([0], dtype='?')), 0)
assert_equal(np.nonzero(np.array([0])), ([],))
assert_equal(np.count_nonzero(np.array([1])), 1)
assert_equal(np.count_nonzero(np.array([1], dtype='?')), 1)
assert_equal(np.nonzero(np.array([1])), ([0],))
def test_nonzero_zerod(self):
assert_equal(np.count_nonzero(np.array(0)), 0)
assert_equal(np.count_nonzero(np.array(0, dtype='?')), 0)
with assert_warns(DeprecationWarning):
assert_equal(np.nonzero(np.array(0)), ([],))
assert_equal(np.count_nonzero(np.array(1)), 1)
assert_equal(np.count_nonzero(np.array(1, dtype='?')), 1)
with assert_warns(DeprecationWarning):
assert_equal(np.nonzero(np.array(1)), ([0],))
def test_nonzero_onedim(self):
x = np.array([1, 0, 2, -1, 0, 0, 8])
assert_equal(np.count_nonzero(x), 4)
assert_equal(np.count_nonzero(x), 4)
assert_equal(np.nonzero(x), ([0, 2, 3, 6],))
x = np.array([(1, 2), (0, 0), (1, 1), (-1, 3), (0, 7)],
dtype=[('a', 'i4'), ('b', 'i2')])
assert_equal(np.count_nonzero(x['a']), 3)
assert_equal(np.count_nonzero(x['b']), 4)
assert_equal(np.nonzero(x['a']), ([0, 2, 3],))
assert_equal(np.nonzero(x['b']), ([0, 2, 3, 4],))
def test_nonzero_twodim(self):
x = np.array([[0, 1, 0], [2, 0, 3]])
assert_equal(np.count_nonzero(x), 3)
assert_equal(np.nonzero(x), ([0, 1, 1], [1, 0, 2]))
x = np.eye(3)
assert_equal(np.count_nonzero(x), 3)
assert_equal(np.nonzero(x), ([0, 1, 2], [0, 1, 2]))
x = np.array([[(0, 1), (0, 0), (1, 11)],
[(1, 1), (1, 0), (0, 0)],
[(0, 0), (1, 5), (0, 1)]], dtype=[('a', 'f4'), ('b', 'u1')])
assert_equal(np.count_nonzero(x['a']), 4)
assert_equal(np.count_nonzero(x['b']), 5)
assert_equal(np.nonzero(x['a']), ([0, 1, 1, 2], [2, 0, 1, 1]))
assert_equal(np.nonzero(x['b']), ([0, 0, 1, 2, 2], [0, 2, 0, 1, 2]))
assert_(not x['a'].T.flags.aligned)
assert_equal(np.count_nonzero(x['a'].T), 4)
assert_equal(np.count_nonzero(x['b'].T), 5)
assert_equal(np.nonzero(x['a'].T), ([0, 1, 1, 2], [1, 1, 2, 0]))
assert_equal(np.nonzero(x['b'].T), ([0, 0, 1, 2, 2], [0, 1, 2, 0, 2]))
def test_sparse(self):
# test special sparse condition boolean code path
for i in range(20):
c = np.zeros(200, dtype=bool)
c[i::20] = True
assert_equal(np.nonzero(c)[0], np.arange(i, 200 + i, 20))
c = np.zeros(400, dtype=bool)
c[10 + i:20 + i] = True
c[20 + i*2] = True
assert_equal(np.nonzero(c)[0],
np.concatenate((np.arange(10 + i, 20 + i), [20 + i*2])))
def test_return_type(self):
class C(np.ndarray):
pass
for view in (C, np.ndarray):
for nd in range(1, 4):
shape = tuple(range(2, 2+nd))
x = np.arange(np.prod(shape)).reshape(shape).view(view)
for nzx in (np.nonzero(x), x.nonzero()):
for nzx_i in nzx:
assert_(type(nzx_i) is np.ndarray)
assert_(nzx_i.flags.writeable)
def test_count_nonzero_axis(self):
# Basic check of functionality
m = np.array([[0, 1, 7, 0, 0], [3, 0, 0, 2, 19]])
expected = np.array([1, 1, 1, 1, 1])
assert_equal(np.count_nonzero(m, axis=0), expected)
expected = np.array([2, 3])
assert_equal(np.count_nonzero(m, axis=1), expected)
assert_raises(ValueError, np.count_nonzero, m, axis=(1, 1))
assert_raises(TypeError, np.count_nonzero, m, axis='foo')
assert_raises(np.AxisError, np.count_nonzero, m, axis=3)
assert_raises(TypeError, np.count_nonzero,
m, axis=np.array([[1], [2]]))
def test_count_nonzero_axis_all_dtypes(self):
# More thorough test that the axis argument is respected
# for all dtypes and responds correctly when presented with
# either integer or tuple arguments for axis
msg = "Mismatch for dtype: %s"
def assert_equal_w_dt(a, b, err_msg):
assert_equal(a.dtype, b.dtype, err_msg=err_msg)
assert_equal(a, b, err_msg=err_msg)
for dt in np.typecodes['All']:
err_msg = msg % (np.dtype(dt).name,)
if dt != 'V':
if dt != 'M':
m = np.zeros((3, 3), dtype=dt)
n = np.ones(1, dtype=dt)
m[0, 0] = n[0]
m[1, 0] = n[0]
else: # np.zeros doesn't work for np.datetime64
m = np.array(['1970-01-01'] * 9)
m = m.reshape((3, 3))
m[0, 0] = '1970-01-12'
m[1, 0] = '1970-01-12'
m = m.astype(dt)
expected = np.array([2, 0, 0], dtype=np.intp)
assert_equal_w_dt(np.count_nonzero(m, axis=0),
expected, err_msg=err_msg)
expected = np.array([1, 1, 0], dtype=np.intp)
assert_equal_w_dt(np.count_nonzero(m, axis=1),
expected, err_msg=err_msg)
expected = np.array(2)
assert_equal(np.count_nonzero(m, axis=(0, 1)),
expected, err_msg=err_msg)
assert_equal(np.count_nonzero(m, axis=None),
expected, err_msg=err_msg)
assert_equal(np.count_nonzero(m),
expected, err_msg=err_msg)
if dt == 'V':
# There are no 'nonzero' objects for np.void, so the testing
# setup is slightly different for this dtype
m = np.array([np.void(1)] * 6).reshape((2, 3))
expected = np.array([0, 0, 0], dtype=np.intp)
assert_equal_w_dt(np.count_nonzero(m, axis=0),
expected, err_msg=err_msg)
expected = np.array([0, 0], dtype=np.intp)
assert_equal_w_dt(np.count_nonzero(m, axis=1),
expected, err_msg=err_msg)
expected = np.array(0)
assert_equal(np.count_nonzero(m, axis=(0, 1)),
expected, err_msg=err_msg)
assert_equal(np.count_nonzero(m, axis=None),
expected, err_msg=err_msg)
assert_equal(np.count_nonzero(m),
expected, err_msg=err_msg)
def test_count_nonzero_axis_consistent(self):
# Check that the axis behaviour for valid axes in
# non-special cases is consistent (and therefore
# correct) by checking it against an integer array
# that is then casted to the generic object dtype
from itertools import combinations, permutations
axis = (0, 1, 2, 3)
size = (5, 5, 5, 5)
msg = "Mismatch for axis: %s"
rng = np.random.RandomState(1234)
m = rng.randint(-100, 100, size=size)
n = m.astype(object)
for length in range(len(axis)):
for combo in combinations(axis, length):
for perm in permutations(combo):
assert_equal(
np.count_nonzero(m, axis=perm),
np.count_nonzero(n, axis=perm),
err_msg=msg % (perm,))
def test_countnonzero_axis_empty(self):
a = np.array([[0, 0, 1], [1, 0, 1]])
assert_equal(np.count_nonzero(a, axis=()), a.astype(bool))
def test_array_method(self):
# Tests that the array method
# call to nonzero works
m = np.array([[1, 0, 0], [4, 0, 6]])
tgt = [[0, 1, 1], [0, 0, 2]]
assert_equal(m.nonzero(), tgt)
def test_nonzero_invalid_object(self):
# gh-9295
a = np.array([np.array([1, 2]), 3])
assert_raises(ValueError, np.nonzero, a)
class BoolErrors:
def __bool__(self):
raise ValueError("Not allowed")
def __nonzero__(self):
raise ValueError("Not allowed")
assert_raises(ValueError, np.nonzero, np.array([BoolErrors()]))
def test_nonzero_sideeffect_safety(self):
# gh-13631
class FalseThenTrue:
_val = False
def __bool__(self):
try:
return self._val
finally:
self._val = True
class TrueThenFalse:
_val = True
def __bool__(self):
try:
return self._val
finally:
self._val = False
# result grows on the second pass
a = np.array([True, FalseThenTrue()])
assert_raises(RuntimeError, np.nonzero, a)
a = np.array([[True], [FalseThenTrue()]])
assert_raises(RuntimeError, np.nonzero, a)
# result shrinks on the second pass
a = np.array([False, TrueThenFalse()])
assert_raises(RuntimeError, np.nonzero, a)
a = np.array([[False], [TrueThenFalse()]])
assert_raises(RuntimeError, np.nonzero, a)
def test_nonzero_exception_safe(self):
# gh-13930
class ThrowsAfter:
def __init__(self, iters):
self.iters_left = iters
def __bool__(self):
if self.iters_left == 0:
raise ValueError("called `iters` times")
self.iters_left -= 1
return True
"""
Test that a ValueError is raised instead of a SystemError
If the __bool__ function is called after the error state is set,
Python (cpython) will raise a SystemError.
"""
# assert that an exception in first pass is handled correctly
a = np.array([ThrowsAfter(5)]*10)
assert_raises(ValueError, np.nonzero, a)
# raise exception in second pass for 1-dimensional loop
a = np.array([ThrowsAfter(15)]*10)
assert_raises(ValueError, np.nonzero, a)
# raise exception in second pass for n-dimensional loop
a = np.array([[ThrowsAfter(15)]]*10)
assert_raises(ValueError, np.nonzero, a)
class TestIndex(object):
def test_boolean(self):
a = rand(3, 5, 8)
V = rand(5, 8)
g1 = randint(0, 5, size=15)
g2 = randint(0, 8, size=15)
V[g1, g2] = -V[g1, g2]
assert_((np.array([a[0][V > 0], a[1][V > 0], a[2][V > 0]]) == a[:, V > 0]).all())
def test_boolean_edgecase(self):
a = np.array([], dtype='int32')
b = np.array([], dtype='bool')
c = a[b]
assert_equal(c, [])
assert_equal(c.dtype, np.dtype('int32'))
class TestBinaryRepr(object):
def test_zero(self):
assert_equal(np.binary_repr(0), '0')
def test_positive(self):
assert_equal(np.binary_repr(10), '1010')
assert_equal(np.binary_repr(12522),
'11000011101010')
assert_equal(np.binary_repr(10736848),
'101000111101010011010000')
def test_negative(self):
assert_equal(np.binary_repr(-1), '-1')
assert_equal(np.binary_repr(-10), '-1010')
assert_equal(np.binary_repr(-12522),
'-11000011101010')
assert_equal(np.binary_repr(-10736848),
'-101000111101010011010000')
def test_sufficient_width(self):
assert_equal(np.binary_repr(0, width=5), '00000')
assert_equal(np.binary_repr(10, width=7), '0001010')
assert_equal(np.binary_repr(-5, width=7), '1111011')
def test_neg_width_boundaries(self):
# see gh-8670
# Ensure that the example in the issue does not
# break before proceeding to a more thorough test.
assert_equal(np.binary_repr(-128, width=8), '10000000')
for width in range(1, 11):
num = -2**(width - 1)
exp = '1' + (width - 1) * '0'
assert_equal(np.binary_repr(num, width=width), exp)
def test_large_neg_int64(self):
# See gh-14289.
assert_equal(np.binary_repr(np.int64(-2**62), width=64),
'11' + '0'*62)
class TestBaseRepr(object):
def test_base3(self):
assert_equal(np.base_repr(3**5, 3), '100000')
def test_positive(self):
assert_equal(np.base_repr(12, 10), '12')
assert_equal(np.base_repr(12, 10, 4), '000012')
assert_equal(np.base_repr(12, 4), '30')
assert_equal(np.base_repr(3731624803700888, 36), '10QR0ROFCEW')
def test_negative(self):
assert_equal(np.base_repr(-12, 10), '-12')
assert_equal(np.base_repr(-12, 10, 4), '-000012')
assert_equal(np.base_repr(-12, 4), '-30')
def test_base_range(self):
with assert_raises(ValueError):
np.base_repr(1, 1)
with assert_raises(ValueError):
np.base_repr(1, 37)
class TestArrayComparisons(object):
def test_array_equal(self):
res = np.array_equal(np.array([1, 2]), np.array([1, 2]))
assert_(res)
assert_(type(res) is bool)
res = np.array_equal(np.array([1, 2]), np.array([1, 2, 3]))
assert_(not res)
assert_(type(res) is bool)
res = np.array_equal(np.array([1, 2]), np.array([3, 4]))
assert_(not res)
assert_(type(res) is bool)
res = np.array_equal(np.array([1, 2]), np.array([1, 3]))
assert_(not res)
assert_(type(res) is bool)
res = np.array_equal(np.array(['a'], dtype='S1'), np.array(['a'], dtype='S1'))
assert_(res)
assert_(type(res) is bool)
res = np.array_equal(np.array([('a', 1)], dtype='S1,u4'),
np.array([('a', 1)], dtype='S1,u4'))
assert_(res)
assert_(type(res) is bool)
def test_none_compares_elementwise(self):
a = np.array([None, 1, None], dtype=object)
assert_equal(a == None, [True, False, True])
assert_equal(a != None, [False, True, False])
a = np.ones(3)
assert_equal(a == None, [False, False, False])
assert_equal(a != None, [True, True, True])
def test_array_equiv(self):
res = np.array_equiv(np.array([1, 2]), np.array([1, 2]))
assert_(res)
assert_(type(res) is bool)
res = np.array_equiv(np.array([1, 2]), np.array([1, 2, 3]))
assert_(not res)
assert_(type(res) is bool)
res = np.array_equiv(np.array([1, 2]), np.array([3, 4]))
assert_(not res)
assert_(type(res) is bool)
res = np.array_equiv(np.array([1, 2]), np.array([1, 3]))
assert_(not res)
assert_(type(res) is bool)
res = np.array_equiv(np.array([1, 1]), np.array([1]))
assert_(res)
assert_(type(res) is bool)
res = np.array_equiv(np.array([1, 1]), np.array([[1], [1]]))
assert_(res)
assert_(type(res) is bool)
res = np.array_equiv(np.array([1, 2]), np.array([2]))
assert_(not res)
assert_(type(res) is bool)
res = np.array_equiv(np.array([1, 2]), np.array([[1], [2]]))
assert_(not res)
assert_(type(res) is bool)
res = np.array_equiv(np.array([1, 2]), np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]))
assert_(not res)
assert_(type(res) is bool)
def assert_array_strict_equal(x, y):
assert_array_equal(x, y)
# Check flags, 32 bit arches typically don't provide 16 byte alignment
if ((x.dtype.alignment <= 8 or
np.intp().dtype.itemsize != 4) and
sys.platform != 'win32'):
assert_(x.flags == y.flags)
else:
assert_(x.flags.owndata == y.flags.owndata)
assert_(x.flags.writeable == y.flags.writeable)
assert_(x.flags.c_contiguous == y.flags.c_contiguous)
assert_(x.flags.f_contiguous == y.flags.f_contiguous)
assert_(x.flags.writebackifcopy == y.flags.writebackifcopy)
# check endianness
assert_(x.dtype.isnative == y.dtype.isnative)
class TestClip(object):
def setup(self):
self.nr = 5
self.nc = 3
def fastclip(self, a, m, M, out=None, casting=None):
if out is None:
if casting is None:
return a.clip(m, M)
else:
return a.clip(m, M, casting=casting)
else:
if casting is None:
return a.clip(m, M, out)
else:
return a.clip(m, M, out, casting=casting)
def clip(self, a, m, M, out=None):
# use slow-clip
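        # np.less/np.greater build an integer selector per element: 0 keeps a,
        # 1 (a < m) substitutes m, 2 (a > M) substitutes M; choose() then gathers
        # element-wise from (a, m, M), giving a reference clip without the fast path.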
selector = np.less(a, m) + 2*np.greater(a, M)
return selector.choose((a, m, M), out=out)
# Handy functions
def _generate_data(self, n, m):
return randn(n, m)
def _generate_data_complex(self, n, m):
return randn(n, m) + 1.j * rand(n, m)
def _generate_flt_data(self, n, m):
return (randn(n, m)).astype(np.float32)
def _neg_byteorder(self, a):
a = np.asarray(a)
if sys.byteorder == 'little':
a = a.astype(a.dtype.newbyteorder('>'))
else:
a = a.astype(a.dtype.newbyteorder('<'))
return a
def _generate_non_native_data(self, n, m):
data = randn(n, m)
data = self._neg_byteorder(data)
assert_(not data.dtype.isnative)
return data
def _generate_int_data(self, n, m):
return (10 * rand(n, m)).astype(np.int64)
def _generate_int32_data(self, n, m):
return (10 * rand(n, m)).astype(np.int32)
# Now the real test cases
@pytest.mark.parametrize("dtype", '?bhilqpBHILQPefdgFDGO')
def test_ones_pathological(self, dtype):
# for preservation of behavior described in
# gh-12519; amin > amax behavior may still change
# in the future
arr = np.ones(10, dtype=dtype)
expected = np.zeros(10, dtype=dtype)
actual = np.clip(arr, 1, 0)
if dtype == 'O':
assert actual.tolist() == expected.tolist()
else:
assert_equal(actual, expected)
def test_simple_double(self):
# Test native double input with scalar min/max.
a = self._generate_data(self.nr, self.nc)
m = 0.1
M = 0.6
ac = self.fastclip(a, m, M)
act = self.clip(a, m, M)
assert_array_strict_equal(ac, act)
def test_simple_int(self):
# Test native int input with scalar min/max.
a = self._generate_int_data(self.nr, self.nc)
a = a.astype(int)
m = -2
M = 4
ac = self.fastclip(a, m, M)
act = self.clip(a, m, M)
assert_array_strict_equal(ac, act)
def test_array_double(self):
# Test native double input with array min/max.
a = self._generate_data(self.nr, self.nc)
m = np.zeros(a.shape)
M = m + 0.5
ac = self.fastclip(a, m, M)
act = self.clip(a, m, M)
assert_array_strict_equal(ac, act)
def test_simple_nonnative(self):
# Test non native double input with scalar min/max.
# Test native double input with non native double scalar min/max.
a = self._generate_non_native_data(self.nr, self.nc)
m = -0.5
M = 0.6
ac = self.fastclip(a, m, M)
act = self.clip(a, m, M)
assert_array_equal(ac, act)
# Test native double input with non native double scalar min/max.
a = self._generate_data(self.nr, self.nc)
m = -0.5
M = self._neg_byteorder(0.6)
assert_(not M.dtype.isnative)
ac = self.fastclip(a, m, M)
act = self.clip(a, m, M)
assert_array_equal(ac, act)
def test_simple_complex(self):
# Test native complex input with native double scalar min/max.
# Test native input with complex double scalar min/max.
a = 3 * self._generate_data_complex(self.nr, self.nc)
m = -0.5
M = 1.
ac = self.fastclip(a, m, M)
act = self.clip(a, m, M)
assert_array_strict_equal(ac, act)
# Test native input with complex double scalar min/max.
a = 3 * self._generate_data(self.nr, self.nc)
m = -0.5 + 1.j
M = 1. + 2.j
ac = self.fastclip(a, m, M)
act = self.clip(a, m, M)
assert_array_strict_equal(ac, act)
def test_clip_complex(self):
# Address Issue gh-5354 for clipping complex arrays
# Test native complex input without explicit min/max
# ie, either min=None or max=None
a = np.ones(10, dtype=complex)
m = a.min()
M = a.max()
am = self.fastclip(a, m, None)
aM = self.fastclip(a, None, M)
assert_array_strict_equal(am, a)
assert_array_strict_equal(aM, a)
def test_clip_non_contig(self):
# Test clip for non contiguous native input and native scalar min/max.
a = self._generate_data(self.nr * 2, self.nc * 3)
a = a[::2, ::3]
assert_(not a.flags['F_CONTIGUOUS'])
assert_(not a.flags['C_CONTIGUOUS'])
ac = self.fastclip(a, -1.6, 1.7)
act = self.clip(a, -1.6, 1.7)
assert_array_strict_equal(ac, act)
def test_simple_out(self):
# Test native double input with scalar min/max.
a = self._generate_data(self.nr, self.nc)
m = -0.5
M = 0.6
ac = np.zeros(a.shape)
act = np.zeros(a.shape)
self.fastclip(a, m, M, ac)
self.clip(a, m, M, act)
assert_array_strict_equal(ac, act)
@pytest.mark.parametrize("casting", [None, "unsafe"])
def test_simple_int32_inout(self, casting):
# Test native int32 input with double min/max and int32 out.
a = self._generate_int32_data(self.nr, self.nc)
m = np.float64(0)
M = np.float64(2)
ac = np.zeros(a.shape, dtype=np.int32)
act = ac.copy()
if casting is None:
with assert_warns(DeprecationWarning):
# NumPy 1.17.0, 2018-02-24 - casting is unsafe
self.fastclip(a, m, M, ac, casting=casting)
else:
# explicitly passing "unsafe" will silence warning
self.fastclip(a, m, M, ac, casting=casting)
self.clip(a, m, M, act)
assert_array_strict_equal(ac, act)
def test_simple_int64_out(self):
# Test native int32 input with int32 scalar min/max and int64 out.
a = self._generate_int32_data(self.nr, self.nc)
m = np.int32(-1)
M = np.int32(1)
ac = np.zeros(a.shape, dtype=np.int64)
act = ac.copy()
self.fastclip(a, m, M, ac)
self.clip(a, m, M, act)
assert_array_strict_equal(ac, act)
def test_simple_int64_inout(self):
# Test native int32 input with double array min/max and int32 out.
a = self._generate_int32_data(self.nr, self.nc)
m = np.zeros(a.shape, np.float64)
M = np.float64(1)
ac = np.zeros(a.shape, dtype=np.int32)
act = ac.copy()
with assert_warns(DeprecationWarning):
# NumPy 1.17.0, 2018-02-24 - casting is unsafe
self.fastclip(a, m, M, ac)
self.clip(a, m, M, act)
assert_array_strict_equal(ac, act)
def test_simple_int32_out(self):
# Test native double input with scalar min/max and int out.
a = self._generate_data(self.nr, self.nc)
m = -1.0
M = 2.0
ac = np.zeros(a.shape, dtype=np.int32)
act = ac.copy()
with assert_warns(DeprecationWarning):
# NumPy 1.17.0, 2018-02-24 - casting is unsafe
self.fastclip(a, m, M, ac)
self.clip(a, m, M, act)
assert_array_strict_equal(ac, act)
def test_simple_inplace_01(self):
# Test native double input with array min/max in-place.
a = self._generate_data(self.nr, self.nc)
ac = a.copy()
m = np.zeros(a.shape)
M = 1.0
self.fastclip(a, m, M, a)
self.clip(a, m, M, ac)
assert_array_strict_equal(a, ac)
def test_simple_inplace_02(self):
# Test native double input with scalar min/max in-place.
a = self._generate_data(self.nr, self.nc)
ac = a.copy()
m = -0.5
M = 0.6
self.fastclip(a, m, M, a)
self.clip(ac, m, M, ac)
assert_array_strict_equal(a, ac)
def test_noncontig_inplace(self):
# Test non contiguous double input with double scalar min/max in-place.
a = self._generate_data(self.nr * 2, self.nc * 3)
a = a[::2, ::3]
assert_(not a.flags['F_CONTIGUOUS'])
assert_(not a.flags['C_CONTIGUOUS'])
ac = a.copy()
m = -0.5
M = 0.6
self.fastclip(a, m, M, a)
self.clip(ac, m, M, ac)
assert_array_equal(a, ac)
def test_type_cast_01(self):
# Test native double input with scalar min/max.
a = self._generate_data(self.nr, self.nc)
m = -0.5
M = 0.6
ac = self.fastclip(a, m, M)
act = self.clip(a, m, M)
assert_array_strict_equal(ac, act)
def test_type_cast_02(self):
# Test native int32 input with int32 scalar min/max.
a = self._generate_int_data(self.nr, self.nc)
a = a.astype(np.int32)
m = -2
M = 4
ac = self.fastclip(a, m, M)
act = self.clip(a, m, M)
assert_array_strict_equal(ac, act)
def test_type_cast_03(self):
# Test native int32 input with float64 scalar min/max.
a = self._generate_int32_data(self.nr, self.nc)
m = -2
M = 4
ac = self.fastclip(a, np.float64(m), np.float64(M))
act = self.clip(a, np.float64(m), np.float64(M))
assert_array_strict_equal(ac, act)
def test_type_cast_04(self):
# Test native int32 input with float32 scalar min/max.
a = self._generate_int32_data(self.nr, self.nc)
m = np.float32(-2)
M = np.float32(4)
act = self.fastclip(a, m, M)
ac = self.clip(a, m, M)
assert_array_strict_equal(ac, act)
def test_type_cast_05(self):
# Test native int32 with double arrays min/max.
a = self._generate_int_data(self.nr, self.nc)
m = -0.5
M = 1.
ac = self.fastclip(a, m * np.zeros(a.shape), M)
act = self.clip(a, m * np.zeros(a.shape), M)
assert_array_strict_equal(ac, act)
def test_type_cast_06(self):
# Test native with NON native scalar min/max.
a = self._generate_data(self.nr, self.nc)
m = 0.5
m_s = self._neg_byteorder(m)
M = 1.
act = self.clip(a, m_s, M)
ac = self.fastclip(a, m_s, M)
assert_array_strict_equal(ac, act)
def test_type_cast_07(self):
# Test NON native with native array min/max.
a = self._generate_data(self.nr, self.nc)
m = -0.5 * np.ones(a.shape)
M = 1.
a_s = self._neg_byteorder(a)
assert_(not a_s.dtype.isnative)
act = a_s.clip(m, M)
ac = self.fastclip(a_s, m, M)
assert_array_strict_equal(ac, act)
def test_type_cast_08(self):
# Test NON native with native scalar min/max.
a = self._generate_data(self.nr, self.nc)
m = -0.5
M = 1.
a_s = self._neg_byteorder(a)
assert_(not a_s.dtype.isnative)
ac = self.fastclip(a_s, m, M)
act = a_s.clip(m, M)
assert_array_strict_equal(ac, act)
def test_type_cast_09(self):
# Test native with NON native array min/max.
a = self._generate_data(self.nr, self.nc)
m = -0.5 * np.ones(a.shape)
M = 1.
m_s = self._neg_byteorder(m)
assert_(not m_s.dtype.isnative)
ac = self.fastclip(a, m_s, M)
act = self.clip(a, m_s, M)
assert_array_strict_equal(ac, act)
def test_type_cast_10(self):
# Test native int32 with float min/max and float out for output argument.
a = self._generate_int_data(self.nr, self.nc)
b = np.zeros(a.shape, dtype=np.float32)
m = np.float32(-0.5)
M = np.float32(1)
act = self.clip(a, m, M, out=b)
ac = self.fastclip(a, m, M, out=b)
assert_array_strict_equal(ac, act)
def test_type_cast_11(self):
# Test non native with native scalar, min/max, out non native
a = self._generate_non_native_data(self.nr, self.nc)
b = a.copy()
b = b.astype(b.dtype.newbyteorder('>'))
bt = b.copy()
m = -0.5
M = 1.
self.fastclip(a, m, M, out=b)
self.clip(a, m, M, out=bt)
assert_array_strict_equal(b, bt)
def test_type_cast_12(self):
# Test native int32 input and min/max and float out
a = self._generate_int_data(self.nr, self.nc)
b = np.zeros(a.shape, dtype=np.float32)
m = np.int32(0)
M = np.int32(1)
act = self.clip(a, m, M, out=b)
ac = self.fastclip(a, m, M, out=b)
assert_array_strict_equal(ac, act)
def test_clip_with_out_simple(self):
# Test native double input with scalar min/max
a = self._generate_data(self.nr, self.nc)
m = -0.5
M = 0.6
ac = np.zeros(a.shape)
act = np.zeros(a.shape)
self.fastclip(a, m, M, ac)
self.clip(a, m, M, act)
assert_array_strict_equal(ac, act)
def test_clip_with_out_simple2(self):
# Test native int32 input with double min/max and int32 out
a = self._generate_int32_data(self.nr, self.nc)
m = np.float64(0)
M = np.float64(2)
        ac = np.zeros(a.shape, dtype=np.int32)
from sgin_utils import classification_metric, sgin_experiment, linear_experiment
import torch
import pandas as pd
import numpy as np
from sklearn.preprocessing import StandardScaler
import random
import datetime
import time
import re
import argparse
# %% Parse the Argument
parser = argparse.ArgumentParser(description='Run ASD Classification Experiment.')
parser.add_argument('--models', metavar='N', type=str, nargs='+', required=True,
help='the model that will be trained in this experiment. It can contain SGIN, Lasso, or group_lasso')
args = parser.parse_args()
args.models = [_.lower() for _ in args.models] # convert to lowercase
# %%
infname = "data_prepare/dummy_et_asd_classification.csv"
normalize_features = True
df = pd.read_csv(infname)
# The label column, "isASD", contains bool values
df.isASD = df.isASD.astype(int)
features = df.drop(["isASD"], axis=1)
assert(features.shape[1] == 9647)
# each stimulus is a group. Total 109 groups
stimulus_types = [re.findall(r"^[a-z]+_\d+", _)[0] for _ in features.keys()]
unique_type = np.unique(stimulus_types).tolist()
open("rst/et_group_names.txt", "w").write("\n".join(unique_type))
feature_groups = [unique_type.index(_) for _ in stimulus_types]
feature_groups = np.array(feature_groups)
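# Illustrative example (hypothetical column name): "video_3_fixation_count"
# matches the prefix "video_3", so all columns of that stimulus share one group index.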
indices = list(df.index)
random.seed("ASD")
random.shuffle(indices)
cross_validation_ids = np.array_split(np.array(indices), 10)
random.seed(time.time()) # reset the seed
# Save Cross Validation Split
f = open("rst/asd_cv_split.txt", "w")
for cv_ in cross_validation_ids:
f.write("\t".join([str(_) for _ in cv_]) + "\n")
f.close()
# Save group definition to file
f = open("rst/et_group_definition.txt", "w")
f.write("\t".join([str(_ + 1) for _ in feature_groups]) + "\n")
f.close()
# %%
def run_asd_experiment(method, lambda_term, **kwargs):
print("begin method:", method, "with \\lambda", lambda_term, datetime.datetime.now(), "#" * 20)
method = method.lower()
all_real = []
all_pred = []
all_prob = []
sparse_features_counts = []
sparse_groups_counts = []
sparse_groups_all = []
if "num_epochs" in kwargs:
num_epochs = kwargs["num_epochs"]
else:
num_epochs = 5
# we will use cross entropy
layers = [3000, 500, 2]
criterion = torch.nn.CrossEntropyLoss()
for cv_id, val_indices in enumerate(cross_validation_ids):
num_val = len(val_indices)
train_features = features.drop(val_indices).values
train_labels = df.isASD.drop(val_indices).values.reshape(-1)
        val_features = features.loc[val_indices].values.reshape(num_val, -1)
val_labels = np.array(df.isASD[val_indices]).reshape(-1)
# Normalize Features
if normalize_features:
scaler = StandardScaler().fit(train_features)
train_features = scaler.transform(train_features)
val_features = scaler.transform(val_features)
print("CV:", cv_id, "Shape Verification:", str(datetime.datetime.now()))
if method == "lasso" or method == "linear regression" or \
method == "logistic regression" or method == "group lasso":
val_rst, test_rst, n_sparse_features, n_sparse_groups, sparse_groups =\
linear_experiment(train_features, train_labels,
val_features, val_labels,
None, None, # nothing for testing
feature_groups,
lambda_term=lambda_term, model_to_use=method)
if method == "sgin" or method == "sgin_sgd" or method == "nn" or method == "theory":
if method == "sgin":
opt_method = "sbcgd"
lam = lambda_term
elif method == "sgin_sgd":
opt_method = "sgd"
lam = lambda_term
elif method == "nn":
opt_method = "sgd"
lam = 0 # ignore and override the lambda for standard NN method
elif method == "theory":
opt_method = "theory"
lam = lambda_term
val_rst, test_rst, n_sparse_features, n_sparse_groups, sparse_groups =\
sgin_experiment(
train_features, train_labels,
val_features, val_labels,
None, None, # no testing set. We use cross validation here
feature_groups, cv_id=cv_id, criterion=criterion,
optmizer_method=opt_method, lam=lam, layers=layers,
num_epochs=num_epochs, train_batch_size=100,
verbose=False)
real, pred, prob = val_rst
all_real += real
all_pred += pred
all_prob += prob
sparse_features_counts.append(n_sparse_features)
sparse_groups_counts.append(n_sparse_groups)
sparse_groups_all.append(sparse_groups)
classification_metric(all_real, all_pred, all_prob)
print("Final Sparsity %d features from %d groups in this Cross Validation:" % (n_sparse_features, n_sparse_groups))
print("#" * 10, "SUMMARY for", method)
print("avg sparse features: %.2f; avg sparse groups: %.2f" % (np.mean(sparse_features_counts),
np.mean(sparse_groups_counts)))
acc, f1, auc, cm, precision, recall, sensitivity, specificity, _ = classification_metric(all_real, all_pred, all_prob)
# Cache Result
of = open("rst/et_asd_classification_rst.tsv", "a") # with > 9000 features
rst = [method, lambda_term,
np.mean(sparse_features_counts), np.mean(sparse_groups_counts),
np.std(sparse_features_counts), np.std(sparse_groups_counts),
acc, f1, auc, cm, precision, recall, sensitivity, specificity, kwargs, sparse_groups_all]
rst = [str(_) for _ in rst]
of.write("\t".join(rst).replace("\n", " ") + "\n")
of.close()
print("#" * 200)
# %%
if __name__ == "__main__":
# SGIN
lam1 = np.random.uniform(1e-5, 1e-3, size=[20])
lam2 = np.random.uniform(1e-6, 1e-4, size=[10])
lam3 = np.random.uniform(1e-8, 1e-4, size=[10])
nn_lambdas = np.concatenate([lam1, lam2, lam3])
random.shuffle(nn_lambdas)
if "sgin" in args.models:
for lam in nn_lambdas:
run_asd_experiment("SGIN", lambda_term=lam,)
# Theory
if "theory" in args.models:
for lam in nn_lambdas:
run_asd_experiment("theory", lambda_term=lam,)
# NN
if "nn" in args.models:
run_asd_experiment("nn", lambda_term=0)
# SGIN SGD
if "sgd" in args.models:
for lam in nn_lambdas:
run_asd_experiment("SGIN_sgd", lambda_term=lam)
lambdas1 = np.random.uniform(0.01, 0.1, size=[10])
lambdas2 = np.random.uniform(0., 1, size=[10])
lambdas3 = np.random.uniform(0.001, 0.01, size=[10])
lambdas4 = np.logspace(np.log(0.02), np.log(0.000001), 10, base=np.exp(1))
lambdas5 = np.logspace(np.log(1), np.log(0.02), 10, base=np.exp(1))
    lambdas6 = np.logspace(np.log(10000), np.log(10), 10, base=np.exp(1))
from __future__ import print_function, division, absolute_import
import numpy as np
from collections import OrderedDict
# ==================== Predefined datasets information ==================== #
nist15_cluster_lang = OrderedDict([
['ara', ['ara-arz', 'ara-acm', 'ara-apc', 'ara-ary', 'ara-arb']],
['zho', ['zho-yue', 'zho-cmn', 'zho-cdo', 'zho-wuu']],
['eng', ['eng-gbr', 'eng-usg', 'eng-sas']],
['fre', ['fre-waf', 'fre-hat']],
['qsl', ['qsl-pol', 'qsl-rus']],
['spa', ['spa-car', 'spa-eur', 'spa-lac', 'por-brz']]
])
nist15_lang_list = np.asarray([
# Egyptian, Iraqi, Levantine, Maghrebi, Modern Standard
'ara-arz', 'ara-acm', 'ara-apc', 'ara-ary', 'ara-arb',
# Cantonese, Mandarin, Min Dong, Wu
'zho-yue', 'zho-cmn', 'zho-cdo', 'zho-wuu',
# British, American, South Asian (Indian)
'eng-gbr', 'eng-usg', 'eng-sas',
# West african, Haitian
'fre-waf', 'fre-hat',
# Polish, Russian
'qsl-pol', 'qsl-rus',
# Caribbean, European, Latin American, Brazilian
'spa-car', 'spa-eur', 'spa-lac', 'por-brz'])
def nist15_label(label):
'''
Return
------
lang_id : int
idx in the list of 20 language, None if not found
cluster_id : int
idx in the list of 6 clusters, None if not found
within_cluster_id : int
idx in the list of each clusters, None if not found
'''
label = label.replace('spa-brz', 'por-brz')
rval = [None, None, None]
# lang_id
if label not in nist15_lang_list:
raise ValueError('Cannot found label:%s' % label)
rval[0] = np.argmax(label == nist15_lang_list)
# cluster_id
    for c, x in enumerate(nist15_cluster_lang.items()):
j = x[1]
if label in j:
rval[1] = c
rval[2] = j.index(label)
return rval
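# Illustrative check of the convention above: 'zho-cmn' is entry 6 of
# nist15_lang_list, sits in cluster 1 ('zho') and is member 1 within it,
# so nist15_label('zho-cmn') -> [6, 1, 1].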
# ==================== Timit ==================== #
timit_61 = ['aa', 'ae', 'ah', 'ao', 'aw', 'ax', 'ax-h', 'axr', 'ay',
'b', 'bcl', 'ch', 'd', 'dcl', 'dh', 'dx', 'eh', 'el', 'em', 'en',
'eng', 'epi', 'er', 'ey', 'f', 'g', 'gcl', 'h#', 'hh', 'hv', 'ih',
'ix', 'iy', 'jh', 'k', 'kcl', 'l', 'm', 'n', 'ng', 'nx', 'ow',
'oy', 'p', 'pau', 'pcl', 'q', 'r', 's', 'sh', 't', 'tcl', 'th',
'uh', 'uw', 'ux', 'v', 'w', 'y', 'z', 'zh']
timit_39 = ['aa', 'ae', 'ah', 'aw', 'ay', 'b', 'ch', 'd',
'dh', 'dx', 'eh', 'er', 'ey', 'f', 'g', 'hh', 'ih', 'iy', 'jh', 'k',
'l', 'm', 'n', 'ng', 'ow', 'oy', 'p', 'r', 's', 'sh', 'sil', 't',
'th', 'uh', 'uw', 'v', 'w', 'y', 'z']
timit_map = {'ao': 'aa', 'ax': 'ah', 'ax-h': 'ah', 'axr': 'er',
'hv': 'hh', 'ix': 'ih', 'el': 'l', 'em': 'm',
'en': 'n', 'nx': 'n',
'eng': 'ng', 'zh': 'sh', 'ux': 'uw',
'pcl': 'sil', 'tcl': 'sil', 'kcl': 'sil', 'bcl': 'sil',
'dcl': 'sil', 'gcl': 'sil', 'h#': 'sil', 'pau': 'sil', 'epi': 'sil'}
def timit_phonemes(phn, map39=False, blank=False):
''' Included blank '''
if type(phn) not in (list, tuple, np.ndarray):
phn = [phn]
    if map39:
        timit = timit_39
        # bind the module-level mapping to a distinct local name; re-assigning
        # `timit_map` inside the function would shadow the global and raise
        # UnboundLocalError before it could be read
        phone_map = timit_map
        l = 39
    else:
        timit = timit_61
        phone_map = {}
        l = 61
    # ====== return phonemes ====== #
    rphn = []
    for p in phn:
        if p not in phone_map and p not in timit:
            if blank: rphn.append(l)
        else:
            rphn.append(timit.index(phone_map[p]) if p in phone_map else timit.index(p))
return rphn
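# Illustrative: with map39=True the folded set is used, e.g. 'ao' maps to 'aa',
# so timit_phonemes('ao', map39=True) -> [0]; symbols outside the inventory are
# appended as the blank index (39) only when blank=True.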
# ==================== Speech Signal Processing ==================== #
def read(f, pcm = False):
'''
Return
------
waveform (ndarray), sample rate (int)
'''
if pcm or (isinstance(f, str) and 'pcm' in f):
return np.memmap(f, dtype=np.int16, mode='r')
from soundfile import read
return read(f)
def preprocess(signal, add_noise=False):
if len(signal.shape) > 1:
signal = signal.ravel()
signal = signal[signal != 0]
signal = signal.astype(np.float32)
if add_noise:
        signal = signal + 1e-13 * np.random.randn(*signal.shape)
return signal
def logmel(signal, fs, n_filters=40, n_ceps=13,
win=0.025, shift=0.01,
delta1=True, delta2=True, energy=False,
normalize=True, clean=True):
import sidekit
if len(signal.shape) > 1:
signal = signal.ravel()
# 1. Some const.
# n_filters = 40 # The number of mel filter bands
f_min = 0. # The minimal frequency of the filter bank
f_max = fs / 2
# overlap = nwin - int(shift * fs)
# 2. preprocess.
if clean:
signal = preprocess(signal)
# 3. logmel.
logmel = sidekit.frontend.features.mfcc(signal,
lowfreq=f_min, maxfreq=f_max,
nlinfilt=0, nlogfilt=n_filters,
fs=fs, nceps=n_ceps, midfreq=1000,
nwin=win, shift=shift,
get_spec=False, get_mspec=True)
logenergy = logmel[1]
logmel = logmel[3].astype(np.float32)
# 4. delta.
tmp = [logmel]
if delta1 or delta2:
d1 = sidekit.frontend.features.compute_delta(logmel,
win=3, method='filter')
d2 = sidekit.frontend.features.compute_delta(d1,
win=3, method='filter')
if delta1: tmp.append(d1)
if delta2: tmp.append(d2)
logmel = np.concatenate(tmp, 1)
if energy:
logmel = np.concatenate((logmel, logenergy.reshape(-1, 1)), axis=1)
# 5. VAD and normalize.
nwin = int(fs * win)
idx = sidekit.frontend.vad.vad_snr(signal, 30, fs=fs, shift=shift, nwin=nwin)
# if not returnVAD:
# logmel = logmel[idx, :]
# Normalize
if normalize:
mean = np.mean(logmel, axis = 0)
var = np.var(logmel, axis = 0)
logmel = (logmel - mean) / np.sqrt(var)
# return
return logmel, idx
def mfcc(signal, fs, n_ceps, n_filters=40,
win=0.025, shift=0.01,
delta1=True, delta2=True, energy=False,
normalize=True, clean=True):
import sidekit
# 1. Const.
f_min = 0. # The minimal frequency of the filter bank
f_max = fs / 2
# 2. Speech.
if clean:
signal = preprocess(signal)
#####################################
# 3. mfcc.
# MFCC
mfcc = sidekit.frontend.features.mfcc(signal,
lowfreq=f_min, maxfreq=f_max,
nlinfilt=0, nlogfilt=n_filters,
fs=fs, nceps=n_ceps, midfreq=1000,
nwin=win, shift=shift,
get_spec=False, get_mspec=False)
logenergy = mfcc[1]
mfcc = mfcc[0].astype(np.float32)
# 4. Add more information.
tmp = [mfcc]
if delta1 or delta2:
d1 = sidekit.frontend.features.compute_delta(mfcc,
win=3, method='filter')
d2 = sidekit.frontend.features.compute_delta(d1,
win=3, method='filter')
if delta1: tmp.append(d1)
if delta2: tmp.append(d2)
mfcc = np.concatenate(tmp, 1)
if energy:
mfcc = np.concatenate((mfcc, logenergy.reshape(-1, 1)), axis=1)
# 5. Vad and normalize.
# VAD
nwin = int(fs * win)
idx = sidekit.frontend.vad.vad_snr(signal, 30, fs=fs, shift=shift, nwin=nwin)
# if not returnVAD:
# mfcc = mfcc[idx, :]
# Normalize
if normalize:
mean = np.mean(mfcc, axis = 0)
var = np.var(mfcc, axis = 0)
mfcc = (mfcc - mean) / np.sqrt(var)
# return
return mfcc, idx
def spectrogram(signal, fs, n_ceps=13, n_filters=40,
win=0.025, shift=0.01,
delta1=True, delta2=True, energy=False,
normalize=False, clean=True):
import sidekit
# 1. Const.
f_min = 0. # The minimal frequency of the filter bank
f_max = fs / 2
# 2. Speech.
if clean:
signal = preprocess(signal)
# 3. mfcc.
# MFCC
spt = sidekit.frontend.features.mfcc(signal,
lowfreq=f_min, maxfreq=f_max,
nlinfilt=0, nlogfilt=n_filters,
fs=fs, nceps=n_ceps, midfreq=1000,
nwin=win, shift=shift,
get_spec=True, get_mspec=False)
logenergy = spt[1]
spt = spt[2].astype(np.float32)
# 4. Add more information.
tmp = [spt]
if delta1 or delta2:
d1 = sidekit.frontend.features.compute_delta(spt,
win=3, method='filter')
d2 = sidekit.frontend.features.compute_delta(d1,
win=3, method='filter')
if delta1: tmp.append(d1)
if delta2: tmp.append(d2)
spt = np.concatenate(tmp, 1)
if energy:
spt = np.concatenate((spt, logenergy.reshape(-1, 1)), axis=1)
# 5. Vad and normalize.
# VAD
nwin = int(fs * win)
idx = sidekit.frontend.vad.vad_snr(signal, 30,
fs=fs, shift=shift, nwin=nwin)
# if not returnVAD:
# spt = spt[idx, :]
# Normalize
if normalize:
mean = np.mean(spt, axis = 0)
var = np.var(spt, axis = 0)
        spt = (spt - mean) / np.sqrt(var)
    # return
    return spt, idx
#!/usr/bin/env python
# coding: utf-8
#
# # LMC 3D structure Final Version with Systematics
#
# np.random.choice([Roger,Hector, Alfred,Luis,Angel,Xavi])
# In[ ]:
#######################
#### Load packages ####
#######################
from scipy import stats
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable
from matplotlib.colors import LogNorm
# import warnings
import sys
import numpy as np
import pandas as pd
import time
from sklearn.mixture import GaussianMixture
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import ConstantKernel
from scipy.optimize import curve_fit
from scipy.stats import gaussian_kde
from scipy.interpolate import Rbf
from scipy.stats import multivariate_normal
from scipy.linalg import pinv
def rbf(X,y,k):
idx = np.random.randint(np.size(X,axis = 0),size = k)
centroids = X[idx,:]
xcross = np.dot(X,X.T)
xnorms = np.repeat(np.diag(np.dot(X,X.T)).reshape(1,-1),np.size(X,axis=0),axis=0)
sigma = np.median(xnorms-2.*xcross+xnorms.T)
n = X.shape[0]
values = []
for x in X:
for c in centroids:
values.append(np.exp(-np.sum((x-c)**2.)/sigma))
phiX = np.reshape(values,(n,k))
psinv = pinv(np.dot(phiX.T,phiX))
w = np.dot(psinv,np.dot(phiX.T,y))
return w,centroids,sigma
def rbf_predict(Xhat,w,centroids,sigma):
n = Xhat.shape[0]
k = centroids.shape[0]
values = []
for x in Xhat:
for c in centroids:
values.append(np.exp(-np.sum((x-c)**2.)/sigma))
phi_Xhat = np.reshape(values,(n,k))
return np.dot(phi_Xhat,w)
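# Quick sanity check for the RBF helpers (sketch): fit on noisy 1-D data with
# k randomly chosen centroids and report the training mean squared error.
def _demo_rbf():
    rng = np.random.RandomState(0)
    X = rng.uniform(-3., 3., size=(200, 1))
    y = np.sin(X[:, 0]) + 0.1 * rng.randn(200)
    w, centroids, sigma = rbf(X, y, k=25)
    y_hat = rbf_predict(X, w, centroids, sigma)
    return np.mean((y - y_hat) ** 2)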
def proper2geo_fn(xyz,distCenterLMC,alphaCenterLMC,deltaCenterLMC,
posAngleLMC,inclAngleLMC):
# Transform samples of location coordinates in the proper frame of the LMC
# to the rectangular heliocentric frame
#
# References:
# <NAME> & Cioni (2001)
# Weinberg and Nikolaev (2001)
#
# Parameters:
# -xyz A tensor of shape=(N, 3) containing N samples in the
# proper LMC frame
# -N No of samples
# -distCenterLMC Distance to the LMC centre (kpc)
# -alphaCenterLMC RA of the LMC centre (rad)
# -deltaCenterLMC Dec of the LMC centre (rad)
# -posAngleLMC Position angle of the LON measured w.r.t. the North (rad)
# -inclAngleLMC Inclination angle (rad)
#
# Return: A tensor of shape=(N, 3) containing N samples of rectangular
# coordinates in the heliocentric frame
# Affine transformation from local LMC frame to heliocentric frame
s11 = np.sin(alphaCenterLMC)
s12 = -np.cos(alphaCenterLMC) * np.sin(deltaCenterLMC)
s13 = -np.cos(alphaCenterLMC) * np.cos(deltaCenterLMC)
s21 = -np.cos(alphaCenterLMC)
s22 = -np.sin(alphaCenterLMC) * np.sin(deltaCenterLMC)
s23 = -np.sin(alphaCenterLMC) * np.cos(deltaCenterLMC)
s31 = np.zeros([])
s32 = np.cos(deltaCenterLMC)
s33 = -np.sin(deltaCenterLMC)
matrix = np.stack((s11,s12,s13,s21,s22,s23,s31,s32,s33),
axis=-1) # pyformat: disable
output_shape = np.concatenate((
np.shape(np.zeros(4))[:-1], (3, 3)), axis=-1)
OXYZ2 = np.reshape(matrix, output_shape.astype(int))
LMC_center = np.stack(
[
distCenterLMC *
np.cos(deltaCenterLMC) *
np.cos(alphaCenterLMC),
distCenterLMC *
np.cos(deltaCenterLMC) *
np.sin(alphaCenterLMC),
distCenterLMC *
np.sin(deltaCenterLMC)
], axis=0)
#print("LMC_center",LMC_center)
# Linear transformation from proper to local LMC frame
s11 = np.cos(posAngleLMC)
s12 = -np.sin(posAngleLMC) * np.cos(inclAngleLMC)
s13 = -np.sin(posAngleLMC) * np.sin(inclAngleLMC)
s21 = np.sin(posAngleLMC)
s22 = np.cos(posAngleLMC) * np.cos(inclAngleLMC)
s23 = np.cos(posAngleLMC) * np.sin(inclAngleLMC)
s31 = np.zeros([])
s32 = -np.sin(inclAngleLMC)
s33 = np.cos(inclAngleLMC)
matrix2 = np.stack((s11,s12,s13,s21,s22,s23,s31,s32,s33),
axis=-1) # pyformat: disable
output_shape = np.concatenate((
np.shape(np.zeros(4))[:-1], (3, 3)), axis=-1)
OXYZ5 = np.reshape(matrix2, output_shape.astype(int))
#mat1=xyz.dot(OXYZ5)
mat1=OXYZ5.dot(xyz.T).T
#print("mat1",mat1.shape)
#print(OXYZ2.shape)
#output0n=mat1.dot(OXYZ2) + np.array(LMC_center)
output0n=OXYZ2.dot(mat1.T).T + np.array(LMC_center)
#print("output0n",output0n)
#mat1 = np.matmul(OXYZ5,xyz) + np.zeros(3)
#mat2 = np.matmul(OXYZ2,mat1) + LMC_center
return output0n
def disk_fn(n, scaleHeight, scaleLength, psiAngle, ellFactor):
# Generate samples of location coordinates of the LMC disk in a proper
# reference frame and transform them to a proper LMC reference frame
# References:
# Mancini et al. (2004)
#
# Parameters:
# -N No of samples
# -scaleHeight Disk scale height (kpc)
# -scaleLength Disk scale length (kpc)
# -ellFactor Disk ellipticity factor. For a circular disk set = 1
# -psiAngle Disk minor axis position angle measured w.r.t. LON (rad)
# For a circular disk set = 0
#
# Return: A tensor of shape=(n, 3) containing N samples of the
# star locations in a local LMC reference frame
s11 = np.cos(psiAngle)
s12 = -np.sin(psiAngle)
s13 = np.zeros([])
s21 = np.sin(psiAngle)
s22 = np.cos(psiAngle)
s23 = np.zeros([])
s31 = np.zeros([])
s32 = np.zeros([])
s33 = np.ones([])
matrix = np.stack((s11,s12,s13,s21,s22,s23,s31,s32,s33),
axis=-1) # pyformat: disable
output_shape = np.concatenate((
np.shape(np.zeros(4))[:-1], (3, 3)), axis=-1)
OXYZ6 = np.reshape(matrix, output_shape.astype(int))
#S3_ = tf.linalg.LinearOperatorFullMatrix(OXYZ6)
#S3_ALO = tfb.AffineLinearOperator(shift=tf.zeros(3), scale=S3_)
#r = tfd.Gamma(concentration=2, rate=1./scaleLength).sample(n )
r = np.random.gamma(shape = 2,scale = scaleLength, size = n)
theta = np.random.uniform(low=0., high=2.*np.pi, size = n)
x = ellFactor * r * np.cos(theta)
y = r * np.sin(theta)
z = np.random.laplace(loc=0., scale=scaleHeight, size=n )
#chain = tfb.Chain([ S3_ALO ])
#output1n=np.stack([x,y,z],axis=1).dot(OXYZ6)
    output1n=OXYZ6.dot(np.stack([x,y,z],axis=1).T).T  # not obvious why, but this orientation comes out correctly
#print("mat11",output1n.shape)
#mat1 = np.matmul(OXYZ6,np.stack([x,y,z],axis=1)) + np.zeros(3)
return output1n
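# Illustrative chain (sketch): draw disk stars in the proper LMC frame and map
# them to heliocentric rectangular coordinates.  The geometric parameters below
# are round placeholder values, not fitted ones.
def _demo_lmc_disk(n=1000):
    xyz = disk_fn(n, scaleHeight=0.3, scaleLength=1.5, psiAngle=0., ellFactor=1.)
    xyz_helio = proper2geo_fn(xyz,
                              distCenterLMC=50.,               # kpc
                              alphaCenterLMC=np.radians(80.),
                              deltaCenterLMC=np.radians(-69.),
                              posAngleLMC=np.radians(120.),
                              inclAngleLMC=np.radians(34.))
    return xyz_helio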
def geo2plx_fn( x ):
# Transform rectangular heliocentric coordinates to (ra,dec,parallax) coordinates
x0 = x[..., 0]
x1 = x[..., 1]
x2 = x[..., 2]
y0 = np.array([])
#for element in range(len(x1)):
# if x1[element] > 0.:
# y0 = np.append(y0,np.dot(180./np.pi,np.arctan2(x1[element],x0[element])))
# else:
# y0 = np.append(y0,np.dot(180./np.pi,np.arctan2(x1[element],x0[element]))+360.)
#y0 = np.where(x1>0,np.dot(180.0/np.pi,np.arctan2(x1,x0)),np.dot(180.0/np.pi,np.arctan2(x1,x0))+360.)
    y0 = np.where(x1 > 0, 180.0/np.pi*np.arctan2(x1, x0), 180.0/np.pi*np.arctan2(x1, x0) + 360.)
"""Definitions for common filters."""
import numpy as np
import numpy.random as rnd
import numpy.linalg as la
from scipy.linalg import expm
import scipy.integrate as s_integrate
import scipy.stats as stats
import abc
from warnings import warn
from copy import deepcopy
import matplotlib.pyplot as plt
import gncpy.math as gmath
import gncpy.plotting as pltUtil
import gncpy.distributions as gdistrib
import gncpy.dynamics as gdyn
import gncpy.errors as gerr
class BayesFilter(metaclass=abc.ABCMeta):
"""Generic base class for Bayesian Filters such as a Kalman Filter.
This defines the required functions and provides their recommended function
signature for inherited classes.
Attributes
----------
use_cholesky_inverse : bool
Flag indicating if a cholesky decomposition should be performed before
taking the inverse. This can improve numerical stability but may also
increase runtime. The default is True.
"""
def __init__(self, use_cholesky_inverse=True, **kwargs):
self.use_cholesky_inverse = use_cholesky_inverse
super().__init__(**kwargs)
@abc.abstractmethod
def predict(self, timestep, *args, **kwargs):
"""Generic method for the filters prediction step.
This must be overridden in the inherited class. It is recommended to
keep the same structure/order for the arguments to allow for
standardized implementation of wrapper code.
"""
pass
@abc.abstractmethod
def correct(self, timestep, meas, *args, **kwargs):
"""Generic method for the filters correction step.
This must be overridden in the inherited class. It is recommended to
keep the same structure/order for the arguments to allow for
standardized implementation of wrapper code.
"""
pass
@abc.abstractmethod
def set_state_model(self, **kwargs):
"""Generic method for tsetting the state model.
This must be overridden in the inherited class. The signature for this
is arbitrary.
"""
pass
@abc.abstractmethod
def set_measurement_model(self, **kwargs):
"""Generic method for tsetting the measurement model.
This must be overridden in the inherited class. The signature for this
is arbitrary.
"""
pass
class KalmanFilter(BayesFilter):
"""Implementation of a discrete time Kalman Filter.
Notes
-----
This is loosely based on :cite:`Crassidis2011_OptimalEstimationofDynamicSystems`
Attributes
----------
cov : N x N numpy array
Covariance matrix
meas_noise : Nm x Nm numpy array
Measurement noise matrix
proc_noise : N x N numpy array
Process noise matrix
dt : float, optional
Time difference between simulation steps.
"""
def __init__(self, cov=np.array([[]]), meas_noise=np.array([[]]), dt=None,
**kwargs):
self.cov = cov
self.meas_noise = meas_noise
self.proc_noise = np.array([[]])
self.dt = dt
self._dyn_obj = None
self._state_mat = np.array([[]])
self._input_mat = np.array([[]])
self._get_state_mat = None
self._get_input_mat = None
self._meas_mat = np.array([[]])
self._meas_fnc = None
super().__init__(**kwargs)
def set_state_model(self, state_mat=None, input_mat=None, cont_time=False,
state_mat_fun=None, input_mat_fun=None, dyn_obj=None):
r"""Sets the state model equation for the filter.
If the continuous time model is used then a `dt` must be provided, see
the note for algorithm details. Alternatively, if the system is time
varying then functions can be specified to return the matrices at each
time step.
Note
-----
This can use a continuous or discrete model. The continuous model will
be automatically discretized so standard matrix equations can be used.
If the discrete model is used it is assumed to have the form
.. math::
x_{k+1} = F x_k + G u_k
If the continuous model is used it is assumed to have the form
.. math::
\dot{x} = A x + B u
and is discretized according to
.. math::
expm\left[\begin{bmatrix}
A & B\\
0 & 0
\end{bmatrix}dt\right]=\begin{bmatrix}
F & G\\
0 & I
\end{bmatrix}
Parameters
----------
state_mat : N x N numpy array, optional
State matrix, continuous or discrete case. The default is None.
input_mat : N x Nu numpy array, optional
            Input matrix, continuous or discrete case. The default is None.
cont_time : bool, optional
            Flag indicating if the continuous model is provided. The default
is False.
state_mat_fun : callable, optional
Function that returns the `state_mat`, must take timestep and
`*args`. The default is None.
input_mat_fun : callable, optional
Function that returns the `input_mat`, must take timestep, and
`*args`. The default is None.
dyn_obj : :class:`gncpy.dynamics.LinearDynamicsBase`, optional
Sets the dynamics according to the class. The default is None.
Raises
------
RuntimeError
If the improper combination of input arguments are specified.
Returns
-------
None.
"""
have_obj = dyn_obj is not None
have_mats = state_mat is not None
have_funs = state_mat_fun is not None
if have_obj:
self._dyn_obj = dyn_obj
elif have_mats and not cont_time:
self._state_mat = state_mat
self._input_mat = input_mat
elif have_mats:
if self.dt is None:
msg = 'dt must be specified when using continuous time model'
raise RuntimeError(msg)
n_cols = state_mat.shape[1] + input_mat.shape[1]
big_mat = np.vstack((np.hstack((state_mat, input_mat)),
np.zeros((input_mat.shape[1], n_cols))))
res = expm(big_mat * self.dt)
r_s = 0
r_e = state_mat.shape[0]
c_s = 0
c_e = state_mat.shape[1]
self._state_mat = res[r_s:r_e, c_s:c_e]
c_s = c_e
c_e = res.shape[1]
self._input_mat = res[r_s:r_e, c_s:c_e]
elif have_funs:
self._get_state_mat = state_mat_fun
self._get_input_mat = input_mat_fun
else:
raise RuntimeError('Invalid combination of inputs')
def set_measurement_model(self, meas_mat=None, meas_fun=None):
r"""Sets the measurement model for the filter.
This can either set the constant measurement matrix, or the matrix can
be time varying.
Notes
-----
This assumes a measurement model of the form
.. math::
\tilde{y}_{k+1} = H_{k+1} x_{k+1}^-
where :math:`H_{k+1}` can be constant over time.
Parameters
----------
meas_mat : Nm x N numpy array, optional
Measurement matrix that transforms the state to estimated
measurements. The default is None.
meas_fun : callable, optional
Function that returns the matrix for transforming the state to
estimated measurements. Must take timestep, and `*args` as
arguments. The default is None.
Raises
------
RuntimeError
            Raised if no arguments are specified.
Returns
-------
None.
"""
if meas_mat is not None:
self._meas_mat = meas_mat
elif meas_fun is not None:
self._meas_fnc = meas_fun
else:
raise RuntimeError('Invalid combination of inputs')
def _predict_next_state(self, timestep, cur_state, cur_input, state_mat_args,
input_mat_args):
if self._dyn_obj is not None:
next_state = self._dyn_obj.propagate_state(timestep, cur_state,
u=cur_input,
state_args=state_mat_args,
ctrl_args=input_mat_args)
state_mat = self._dyn_obj.get_state_mat(timestep, *state_mat_args)
else:
if self._get_state_mat is not None:
state_mat = self._get_state_mat(timestep, *state_mat_args)
elif self._state_mat is not None:
state_mat = self._state_mat
else:
raise RuntimeError('State model not set')
if self._get_input_mat is not None:
input_mat = self._get_input_mat(timestep, *input_mat_args)
elif self._input_mat is not None:
input_mat = self._input_mat
else:
input_mat = None
next_state = state_mat @ cur_state
if input_mat is not None and cur_input is not None:
next_state += input_mat @ cur_input
return next_state, state_mat
def predict(self, timestep, cur_state, cur_input=None, state_mat_args=(),
input_mat_args=()):
"""Implements a discrete time prediction step for a Kalman Filter.
Parameters
----------
timestep : float
Current timestep.
cur_state : N x 1 numpy array
Current state.
cur_input : N x Nu numpy array, optional
Current input. The default is None.
state_mat_args : tuple, optional
keyword arguments for the get state matrix function if one has
been specified or the propagate state function if using a dynamic
object. The default is ().
input_mat_args : tuple, optional
keyword arguments for the get input matrix function if one has
been specified or the propagate state function if using a dynamic
object. The default is ().
Raises
------
RuntimeError
If the state model has not been set
Returns
-------
next_state : N x 1 numpy array
Next state.
"""
next_state, state_mat = self._predict_next_state(timestep, cur_state,
cur_input, state_mat_args,
input_mat_args)
self.cov = state_mat @ self.cov @ state_mat.T + self.proc_noise
self.cov = (self.cov + self.cov.T) * 0.5
return next_state
def _get_meas_mat(self, t, state, n_meas, meas_fun_args):
# time varying matrix
if self._meas_fnc is not None:
meas_mat = self._meas_fnc(t, *meas_fun_args)
else:
# constant matrix
meas_mat = self._meas_mat
return meas_mat
def _est_meas(self, timestep, cur_state, n_meas, meas_fun_args):
meas_mat = self._get_meas_mat(timestep, cur_state, n_meas,
meas_fun_args)
est_meas = meas_mat @ cur_state
return est_meas, meas_mat
def correct(self, timestep, meas, cur_state, meas_fun_args=()):
"""Implementss a discrete time correction step for a Kalman Filter.
Parameters
----------
timestep : float
Current timestep.
meas : Nm x 1 numpy array
Current measurement.
cur_state : N x 1 numpy array
Current state.
meas_fun_args : tuple, optional
Arguments for the measurement matrix function if one has
been specified. The default is ().
Returns
-------
next_state : N x 1 numpy array
The corrected state.
meas_fit_prob : float
Goodness of fit of the measurement based on the state and
covariance assuming Gaussian noise.
"""
est_meas, meas_mat = self._est_meas(timestep, cur_state, meas.size,
meas_fun_args)
# get the Kalman gain
cov_meas_T = self.cov @ meas_mat.T
inov_cov = meas_mat @ cov_meas_T + self.meas_noise
inov_cov = (inov_cov + inov_cov.T) * 0.5
if self.use_cholesky_inverse:
sqrt_inv_inov_cov = la.inv(la.cholesky(inov_cov))
inv_inov_cov = sqrt_inv_inov_cov.T @ sqrt_inv_inov_cov
else:
inv_inov_cov = la.inv(inov_cov)
kalman_gain = cov_meas_T @ inv_inov_cov
# update the state with measurement
inov = meas - est_meas
next_state = cur_state + kalman_gain @ inov
# update the covariance
n_states = cur_state.shape[0]
self.cov = (np.eye(n_states) - kalman_gain @ meas_mat) @ self.cov
        # calculate the measurement fit probability assuming Gaussian
meas_fit_prob = np.exp(-0.5 * (meas.size * np.log(2 * np.pi)
+ np.log(la.det(inov_cov))
+ inov.T @ inv_inov_cov @ inov))
meas_fit_prob = meas_fit_prob.item()
return (next_state, meas_fit_prob)
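# Minimal usage sketch: a 1-D constant-velocity tracker.  With cont_time=True
# the continuous model [[0, 1], [0, 0]] is discretized internally through the
# matrix exponential described in set_state_model.  Noise levels and the fake
# measurements are illustrative only.
def _demo_kalman_filter():
    dt = 0.1
    kf = KalmanFilter(cov=np.eye(2), meas_noise=np.array([[0.5]]), dt=dt)
    kf.proc_noise = 1e-3 * np.eye(2)
    kf.set_state_model(state_mat=np.array([[0., 1.], [0., 0.]]),
                       input_mat=np.zeros((2, 1)), cont_time=True)
    kf.set_measurement_model(meas_mat=np.array([[1., 0.]]))
    state = np.array([[0.], [1.]])
    for kk in range(50):
        t = kk * dt
        state = kf.predict(t, state)
        meas = np.array([[state[0, 0] + 0.1]])  # stand-in for a real sensor
        state, fit_prob = kf.correct(t, meas, state)
    return state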
class ExtendedKalmanFilter(KalmanFilter):
"""Implementation of a continuous-discrete time Extended Kalman Filter.
This is loosely based on :cite:`Crassidis2011_OptimalEstimationofDynamicSystems`
Attributes
----------
cont_cov : bool, optional
Flag indicating if a continuous model of the covariance matrix should
be used in the filter update step. The default is True.
integrator_type : string, optional
integrator type as defined by scipy's integrate.ode function. The
default is `dopri5`. Only used if a dynamic object is not specified.
integrator_params : dict, optional
additional parameters for the integrator. The default is {}. Only used
if a dynamic object is not specified.
"""
def __init__(self, cont_cov=True, dyn_obj=None, ode_lst=None, **kwargs):
self.cont_cov = cont_cov
self.integrator_type = 'dopri5'
self.integrator_params = {}
self._dyn_obj = None
self._ode_lst = None
if dyn_obj is not None or ode_lst is not None:
self.set_state_model(dyn_obj=dyn_obj, ode_lst=ode_lst)
self._integrator = None
super().__init__(**kwargs)
def set_state_model(self, dyn_obj=None, ode_lst=None):
r"""Sets the state model equations.
This allows for setting the differential equations directly
.. math::
\dot{x} = f(t, x, u)
or setting a :class:`gncpy.dynamics.NonlinearDynamicsBase` object. If
the object is specified then a local copy is created.
Parameters
----------
dyn_obj : :class:`gncpy.dynamics.NonlinearDynamicsBase`, optional
Sets the dynamics according to the class. The default is None.
ode_lst : list, optional
callable functions, 1 per ode/state. The callabale must have the
signature `f(t, x, *f_args)` just like scipy.integrate's ode
function. The default is None.
Raises
------
RuntimeError
If neither argument is specified.
Returns
-------
None.
"""
if dyn_obj is not None:
self._dyn_obj = deepcopy(dyn_obj)
elif ode_lst is not None and len(ode_lst) > 0:
self._ode_lst = ode_lst
else:
msg = 'Invalid state model specified. Check arguments'
raise RuntimeError(msg)
def _predict_next_state(self, timestep, cur_state, dyn_fun_params):
if self._dyn_obj is not None:
next_state = self._dyn_obj.propagate_state(timestep, cur_state,
state_args=dyn_fun_params)
state_mat = self._dyn_obj.get_state_mat(timestep, cur_state,
dyn_fun_params)
dt = self._dyn_obj.dt
elif self._ode_lst is not None:
next_state = np.nan * np.ones(cur_state.shape)
for ii, f in enumerate(self._ode_lst):
self._integrator = s_integrate.ode(f)
self._integrator.set_integrator(self.integrator_type,
**self.integrator_params)
self._integrator.set_initial_value(cur_state, timestep)
self._integrator.set_f_params(*dyn_fun_params)
next_time = timestep + self.dt
next_state[ii, 0] = self._integrator.integrate(next_time)
if not self._integrator.successful():
msg = 'Integration failed at time {}'.format(timestep)
raise RuntimeError(msg)
state_mat = gmath.get_state_jacobian(timestep, cur_state,
self._ode_lst, dyn_fun_params)
dt = self.dt
else:
raise RuntimeError('State model not set')
return next_state, state_mat, dt
def predict(self, timestep, cur_state, dyn_fun_params=()):
r"""Prediction step of the EKF.
This assumes continuous time dynamics and integrates the ode's to get
the next state.
.. math::
x_{k+1} = \int_t^{t+dt} f(t, x, \phi) dt
for arbitrary parameters :math:`\phi`
Parameters
----------
timestep : float
Current timestep.
cur_state : N x 1 numpy array
Current state.
dyn_fun_params : tuple, optional
Extra arguments to be passed to the dynamics function. The default
is ().
Raises
------
RuntimeError
Integration fails, or state model not set.
Returns
-------
next_state : N x 1 numpy array
The predicted state.
"""
next_state, state_mat, dt = self._predict_next_state(timestep,
cur_state,
dyn_fun_params)
if self.cont_cov:
def ode(t, x, n_states, F, proc_noise):
P = x.reshape((n_states, n_states))
P_dot = F @ P + P @ F.T + proc_noise
return P_dot.ravel()
integrator = s_integrate.ode(ode)
integrator.set_integrator(self.integrator_type,
**self.integrator_params)
integrator.set_initial_value(self.cov.flatten(), timestep)
integrator.set_f_params(cur_state.size, state_mat, self.proc_noise)
tmp = integrator.integrate(timestep + dt)
if not integrator.successful():
msg = 'Failed to integrate covariance at {}'.format(timestep)
raise RuntimeError(msg)
self.cov = tmp.reshape(self.cov.shape)
else:
self.cov = state_mat @ self.cov @ state_mat.T + self.proc_noise
return next_state
def _get_meas_mat(self, t, state, n_meas, meas_fun_args):
# non-linear mapping, potentially time varying
if self._meas_fnc is not None:
# calculate partial derivatives
meas_mat = np.zeros((n_meas, state.size))
for ii, h in enumerate(self._meas_fnc):
res = gmath.get_jacobian(state.copy(),
lambda _x, *_f_args: h(t, _x, *_f_args),
f_args=meas_fun_args)
meas_mat[[ii], :] = res.T
else:
# constant matrix
meas_mat = self._meas_mat
return meas_mat
def _est_meas(self, timestep, cur_state, n_meas, meas_fun_args):
meas_mat = self._get_meas_mat(timestep, cur_state, n_meas, meas_fun_args)
if self._meas_fnc is not None:
est_meas = np.nan * np.ones((n_meas, 1))
for ii, h in enumerate(self._meas_fnc):
est_meas[ii] = h(timestep, cur_state, *meas_fun_args)
else:
est_meas = meas_mat @ cur_state
return est_meas, meas_mat
def set_measurement_model(self, meas_mat=None, meas_fun_lst=None):
r"""Sets the measurement model for the filter.
This can either set the constant measurement matrix, or a set of
non-linear functions (potentially time varying) to map states to
measurements.
Notes
-----
The constant matrix assumes a measurement model of the form
.. math::
\tilde{y}_{k+1} = H x_{k+1}^-
and the non-linear case assumes
.. math::
\tilde{y}_{k+1} = h(t, x_{k+1}^-)
Parameters
----------
meas_mat : Nm x N numpy array, optional
Measurement matrix that transforms the state to estimated
measurements. The default is None.
meas_fun_lst : list, optional
Non-linear functions that return the expected measurement for the
given state. Each function must have the signature `h(t, x, *args)`.
The default is None.
Raises
------
RuntimeError
            Raised if no arguments are specified.
Returns
-------
None.
"""
super().set_measurement_model(meas_mat=meas_mat, meas_fun=meas_fun_lst)
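# Wiring sketch only (construction and model set-up, no predict/correct calls):
# each state derivative is a callable f(t, x, *args) and each measurement is a
# callable h(t, x, *args).  The pendulum dynamics and angle measurement below
# are illustrative choices, not part of this module.
def _demo_ekf_setup():
    ekf = ExtendedKalmanFilter(cont_cov=False, cov=0.1 * np.eye(2),
                               meas_noise=np.array([[0.01]]))
    ekf.dt = 0.01
    ekf.proc_noise = 1e-4 * np.eye(2)
    ode_lst = [lambda t, x: x[1],                    # d(theta)/dt = omega
               lambda t, x: -9.81 * np.sin(x[0])]    # d(omega)/dt
    ekf.set_state_model(ode_lst=ode_lst)
    ekf.set_measurement_model(meas_fun_lst=[lambda t, x: x[0]])  # observe theta
    return ekf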
class StudentsTFilter(KalmanFilter):
r"""Implementation of a Students T filter.
This is based on :cite:`Liu2018_AStudentsTMixtureProbabilityHypothesisDensityFilterforMultiTargetTrackingwithOutliers`
and :cite:`Roth2013_AStudentsTFilterforHeavyTailedProcessandMeasurementNoise`
and uses moment matching to limit the degree of freedom growth.
Notes
-----
This models the multi-variate Student's t-distribution as
.. math::
\begin{align}
                p(x) &= \frac{\Gamma(\frac{\nu + d}{2})}{\Gamma(\frac{\nu}{2})}
                \frac{1}{(\nu \pi)^{d/2}}
                \frac{1}{\sqrt{\vert \Sigma \vert}}\left( 1 +
                \frac{\Delta^2}{\nu}\right)^{-\frac{\nu + d}{2}} \\
\Delta^2 &= (x - m)^T \Sigma^{-1} (x - m)
\end{align}
or compactly as :math:`St(x; m,\Sigma, \nu) = p(x)` for scale matrix
:math:`\Sigma` and degree of freedom :math:`\nu`
Attributes
----------
scale : N x N numpy array, optional
Scaling matrix of the Students T distribution. The default is np.array([[]]).
dof : int , optional
Degree of freedom for the state distribution. The default is 3.
proc_noise_dof : int, optional
Degree of freedom for the process noise model. The default is 3.
meas_noise_dof : int, optional
Degree of freedom for the measurement noise model. The default is 3.
use_moment_matching : bool, optional
Flag indicating if moment matching is used to maintain the heavy tail
property as the filter propagates over time. The default is True.
"""
    def __init__(self, scale=np.array([[]]), dof=3, proc_noise_dof=3,
                 meas_noise_dof=3, use_moment_matching=True, **kwargs):
        # attribute defaults follow the class docstring above
        self.scale = scale
        self.dof = dof
        self.proc_noise_dof = proc_noise_dof
        self.meas_noise_dof = meas_noise_dof
        self.use_moment_matching = use_moment_matching
        super().__init__(**kwargs)
import numpy as np
import torch
import cv2
import math
def iou(tens1, tens2):
assert tens1.size() == tens2.size()
squeeze = False
if tens1.dim() == 2 and tens2.dim() == 2:
squeeze = True
tens1 = tens1.unsqueeze(0)
tens2 = tens2.unsqueeze(0)
assert tens1.dim() == 3
assert tens1.size(-1) == 4 and tens2.size(-1) == 4
maxs = torch.max(tens1[:,:,:2], tens2[:,:,:2])
mins = torch.min(tens1[:,:,2:], tens2[:,:,2:])
diff = torch.clamp(mins - maxs, min=0.0)
intersection = diff[:,:,0] * diff[:,:,1]
diff1 = torch.clamp(tens1[:,:,2:] - tens1[:,:,:2], min=0.0)
area1 = diff1[:,:,0] * diff1[:,:,1]
diff2 = torch.clamp(tens2[:,:,2:] - tens2[:,:,:2], min=0.0)
area2 = diff2[:,:,0] * diff2[:,:,1]
iou = intersection/(area1 + area2 - intersection)
if squeeze:
iou = iou.squeeze(0)
return iou
def bbox_iou(box1, box2, x1y1x2y2=True, GIoU=False, DIoU=False, CIoU=False):
"""
Returns the IoU of two bounding boxes
"""
if not x1y1x2y2:
# Transform from center and width to exact coordinates
b1_x1, b1_x2 = box1[:, 0] - box1[:, 2] / 2, box1[:, 0] + box1[:, 2] / 2
b1_y1, b1_y2 = box1[:, 1] - box1[:, 3] / 2, box1[:, 1] + box1[:, 3] / 2
b2_x1, b2_x2 = box2[:, 0] - box2[:, 2] / 2, box2[:, 0] + box2[:, 2] / 2
b2_y1, b2_y2 = box2[:, 1] - box2[:, 3] / 2, box2[:, 1] + box2[:, 3] / 2
else:
# Get the coordinates of bounding boxes
b1_x1, b1_y1, b1_x2, b1_y2 = box1[:, 0], box1[:, 1], box1[:, 2], box1[:, 3]
b2_x1, b2_y1, b2_x2, b2_y2 = box2[:, 0], box2[:, 1], box2[:, 2], box2[:, 3]
# get the corrdinates of the intersection rectangle
inter_rect_x1 = torch.max(b1_x1, b2_x1)
inter_rect_y1 = torch.max(b1_y1, b2_y1)
inter_rect_x2 = torch.min(b1_x2, b2_x2)
inter_rect_y2 = torch.min(b1_y2, b2_y2)
# Intersection area
inter_area = torch.clamp(inter_rect_x2 - inter_rect_x1 + 1, min=0) * torch.clamp(
inter_rect_y2 - inter_rect_y1 + 1, min=0
)
# Union Area
b1_area = (b1_x2 - b1_x1 + 1) * (b1_y2 - b1_y1 + 1)
b2_area = (b2_x2 - b2_x1 + 1) * (b2_y2 - b2_y1 + 1)
union = (b1_area + b2_area - inter_area + 1e-16)
iou = inter_area / union
return iou
def bbox_iou_v5(box1, box2, x1y1x2y2=True, GIoU=False, DIoU=False, CIoU=False):
# Returns the IoU of box1 to box2. box1 is 4, box2 is nx4
# box2 = box2.t()
# Get the coordinates of bounding boxes
if x1y1x2y2: # x1, y1, x2, y2 = box1
b1_x1, b1_y1, b1_x2, b1_y2 = box1[0], box1[1], box1[2], box1[3]
b2_x1, b2_y1, b2_x2, b2_y2 = box2[0], box2[1], box2[2], box2[3]
else: # transform from xywh to xyxy
b1_x1, b1_x2 = box1[0] - box1[2] / 2, box1[0] + box1[2] / 2
b1_y1, b1_y2 = box1[1] - box1[3] / 2, box1[1] + box1[3] / 2
b2_x1, b2_x2 = box2[0] - box2[2] / 2, box2[0] + box2[2] / 2
b2_y1, b2_y2 = box2[1] - box2[3] / 2, box2[1] + box2[3] / 2
# Intersection area
inter = (torch.min(b1_x2, b2_x2) - torch.max(b1_x1, b2_x1)).clamp(0) * \
(torch.min(b1_y2, b2_y2) - torch.max(b1_y1, b2_y1)).clamp(0)
# Union Area
w1, h1 = b1_x2 - b1_x1, b1_y2 - b1_y1
w2, h2 = b2_x2 - b2_x1, b2_y2 - b2_y1
union = (w1 * h1 + 1e-16) + w2 * h2 - inter
iou = inter / union # iou
if GIoU or DIoU or CIoU:
cw = torch.max(b1_x2, b2_x2) - torch.min(b1_x1, b2_x1) # convex (smallest enclosing box) width
ch = torch.max(b1_y2, b2_y2) - torch.min(b1_y1, b2_y1) # convex height
if GIoU: # Generalized IoU https://arxiv.org/pdf/1902.09630.pdf
c_area = cw * ch + 1e-16 # convex area
return iou - (c_area - union) / c_area # GIoU
if DIoU or CIoU: # Distance or Complete IoU https://arxiv.org/abs/1911.08287v1
# convex diagonal squared
c2 = cw ** 2 + ch ** 2 + 1e-16
# centerpoint distance squared
rho2 = ((b2_x1 + b2_x2) - (b1_x1 + b1_x2)) ** 2 / 4 + ((b2_y1 + b2_y2) - (b1_y1 + b1_y2)) ** 2 / 4
if DIoU:
return iou - rho2 / c2 # DIoU
elif CIoU: # https://github.com/Zzh-tju/DIoU-SSD-pytorch/blob/master/utils/box/box_utils.py#L47
v = (4 / math.pi ** 2) * torch.pow(torch.atan(w2 / h2) - torch.atan(w1 / h1), 2)
with torch.no_grad():
alpha = v / (1 - iou + v)
return iou - (rho2 / c2 + v * alpha) # CIoU
return iou
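# Worked example (sketch) for bbox_iou_v5 with two axis-aligned boxes in
# (x1, y1, x2, y2) form: intersection 5*5=25, union 175, so IoU ~= 0.143;
# the enclosing box has area 225, so GIoU ~= 0.143 - 50/225 ~= -0.079.
def _demo_bbox_iou_v5():
    box1 = torch.tensor([0., 0., 10., 10.])
    box2 = torch.tensor([5., 5., 15., 15.])
    return bbox_iou_v5(box1, box2), bbox_iou_v5(box1, box2, GIoU=True)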
def get_batch_statistics(outputs, targets, iou_threshold):
""" Compute true positives, predicted scores and predicted labels per sample """
batch_metrics = []
# TPs, confs, preds = [], [], []
for sample_i in range(len(outputs)):
if outputs[sample_i] is None:
continue
output = outputs[sample_i]
pred_boxes = output[:, :4]
pred_scores = output[:, 4]
pred_labels = output[:, -1]
true_positives = np.zeros(pred_boxes.shape[0])
annotations = targets[targets[:, 0] == sample_i][:, 1:]
target_labels = annotations[:, 0] if len(annotations) else []
if len(annotations):
detected_boxes = []
target_boxes = annotations[:, 1:]
for pred_i, (pred_box, pred_label) in enumerate(zip(pred_boxes, pred_labels)):
# If targets are found break
if len(detected_boxes) == len(annotations):
break
# Ignore if label is not one of the target labels
if pred_label not in target_labels:
continue
iou, box_index = bbox_iou(pred_box.unsqueeze(0), target_boxes).max(0)
if iou >= iou_threshold and box_index not in detected_boxes:
true_positives[pred_i] = 1
detected_boxes += [box_index]
batch_metrics.append([true_positives, pred_scores.cpu().data.numpy(), pred_labels.cpu().data.numpy()])
return batch_metrics
def mark_target(self, img, targets, index):
# img = cv2.UMat(img).get()
for target in targets:
target = target.numpy()
if target[0] == index:
box = target[2:]
cls_id = int(target[1])
color = self.colors[cls_id]
xmin, ymin, xmax, ymax = int(box[0]), int(box[1]), int(box[2]), int(box[3])
xmax += xmin
ymax += ymin
# img = np.array(img, dtype=np.uint8)
cv2.rectangle(img, (xmin, ymin), (xmax, ymax), color, 2)
text_size = cv2.getTextSize(self.classes[cls_id] , cv2.FONT_HERSHEY_PLAIN, 1, 1)[0]
cv2.rectangle(img, (xmin, ymin), (xmin + text_size[0] + 3, ymin + text_size[1] + 4), color, -1)
cv2.putText(
img, self.classes[cls_id],
(xmin, ymin + text_size[1] + 4), cv2.FONT_HERSHEY_PLAIN, 1,
(255, 255, 255), 1)
# print("Object: {}, Bounding box: ({},{}) ({},{})".format(classes[cls_id], xmin, xmax, ymin, ymax))
# cv2.imshow('win', img)
# cv2.waitKey()
return img
def mark_pred(self, pred_img, pred_boxes):
# pred_img = np.array(pred_img.permute(1, 2, 0).cpu()*255, dtype=np.uint8) # Re multiply
# for pred_boxes in suppress_output:
if type(None) == type(pred_boxes): return pred_img
for target in pred_boxes:
target = target.cpu().numpy() # (x1, y1, x2, y2, object_conf, class_score, class_pred)
box = target[:4]
cls_id = int(target[6])
color = self.colors[cls_id]
xmin, ymin, xmax, ymax = int(box[0]), int(box[1]), int(box[2]), int(box[3])
xmax += xmin
ymax += ymin
cv2.rectangle(pred_img, (xmin, ymin), (xmax, ymax), color, 2)
text_size = cv2.getTextSize(self.classes[cls_id] , cv2.FONT_HERSHEY_PLAIN, 1, 1)[0]
cv2.rectangle(pred_img, (xmin, ymin), (xmin + text_size[0] + 3, ymin + text_size[1] + 4), color, -1)
cv2.putText(
pred_img, self.classes[cls_id],
(xmin, ymin + text_size[1] + 4), cv2.FONT_HERSHEY_PLAIN, 1,
(255, 255, 255), 1)
# print("Object: {}, Bounding box: ({},{}) ({},{})".format(classes[cls_id], xmin, xmax, ymin, ymax))
# cv2.imshow('win', pred_img)
# cv2.waitKey()
return pred_img
def ap_per_class(tp, conf, pred_cls, target_cls):
""" Compute the average precision, given the recall and precision curves.
Source: https://github.com/rafaelpadilla/Object-Detection-Metrics.
# Arguments
tp: True positives (list).
conf: Objectness value from 0-1 (list).
pred_cls: Predicted object classes (list).
target_cls: True object classes (list).
# Returns
The average precision as computed in py-faster-rcnn.
"""
# Sort by objectness
i = np.argsort(-conf)
tp, conf, pred_cls = tp[i], conf[i], pred_cls[i]
# Find unique classes
unique_classes = np.unique(target_cls)
# Create Precision-Recall curve and compute AP for each class
ap, p, r = [], [], []
# for c in tqdm(unique_classes, desc="Computing AP"):
for c in unique_classes:
i = pred_cls == c
n_gt = (target_cls == c).sum() # Number of ground truth objects
n_p = i.sum() # Number of predicted objects
if n_p == 0 and n_gt == 0:
continue
elif n_p == 0 or n_gt == 0:
ap.append(0)
r.append(0)
p.append(0)
else:
# Accumulate FPs and TPs
fpc = (1 - tp[i]).cumsum()
tpc = (tp[i]).cumsum()
# Recall
recall_curve = tpc / (n_gt + 1e-16)
r.append(recall_curve[-1])
# Precision
precision_curve = tpc / (tpc + fpc)
p.append(precision_curve[-1])
# AP from recall-precision curve
ap.append(compute_ap(recall_curve, precision_curve))
# Compute F1 score (harmonic mean of precision and recall)
p, r, ap = np.array(p), np.array(r), np.array(ap)
f1 = 2 * p * r / (p + r + 1e-16)
return p, r, ap, f1, unique_classes.astype("int32")
def compute_ap(recall, precision):
""" Compute the average precision, given the recall and precision curves.
Code originally from https://github.com/rbgirshick/py-faster-rcnn.
# Arguments
recall: The recall curve (list).
precision: The precision curve (list).
# Returns
The average precision as computed in py-faster-rcnn.
"""
# correct AP calculation
# first append sentinel values at the end
mrec = np.concatenate(([0.0], recall, [1.0]))
mpre = np.concatenate(([0.0], precision, [0.0]))
# compute the precision envelope
for i in range(mpre.size - 1, 0, -1):
        mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i])
    # to calculate area under PR curve, look for points
    # where X axis (recall) changes value
    i = np.where(mrec[1:] != mrec[:-1])[0]
    # and sum (\Delta recall) * prec
    ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1])
    return ap
#!/usr/bin/env python
from argparse import ArgumentParser
from distributed import Client, Future
import numpy as np
import os
import sys
import time
def init_julia(re, im, n):
'''Initialize the complex domain.
Positional arguments:
re -- minimum and maximum real value as 2-tuple
im -- minimum and maximum imaginary value as 2-tuple
n -- number of real and imaginary points as 2-tuple
'''
re_vals, im_vals = np.meshgrid(
np.linspace(re[0], re[1], n[0]),
np.linspace(im[0], im[1], n[1])
)
domain = re_vals + im_vals*1j
return domain.flatten()
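# Small sanity check (sketch): a 3x3 grid over the default [-1.8, 1.8] square
# flattens to 9 complex points; the first one is -1.8 - 1.8j.
def _demo_init_julia():
    domain = init_julia((-1.8, 1.8), (-1.8, 1.8), (3, 3))
    assert domain.shape == (9,)
    return domain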
def init_pyx(dask_worker):
import pyximport
pyximport.install()
sys.path.insert(0, os.getcwd())
# sys.path.insert(0, '/scratch/leuven/301/vsc30140/julia_set/')
from julia_cython import julia_set
def init_omp_pyx(dask_worker):
import pyximport
pyximport.install()
sys.path.insert(0, os.getcwd())
# sys.path.insert(0, '/scratch/leuven/301/vsc30140/julia_set/')
from julia_cython_omp import julia_set
if __name__ == '__main__':
arg_parser = ArgumentParser(description='Compute julia set')
arg_parser.add_argument('--re_min', type=float, default=-1.8,
help='minimum real value')
arg_parser.add_argument('--re_max', type=float, default=1.8,
help='maximum real value')
arg_parser.add_argument('--im_min', type=float, default=-1.8,
help='minimum imaginary value')
arg_parser.add_argument('--im_max', type=float, default=1.8,
help='maximum imaginary value')
arg_parser.add_argument('--max_norm', type=float, default=2.0,
help='maximum complex norm for z')
arg_parser.add_argument('--n_re', type=int, default=100,
help='number of points on the real axis')
arg_parser.add_argument('--n_im', type=int, default=100,
help='number of points on the imaginary axis')
arg_parser.add_argument('--max_iters', type=int, default=300,
help='maximum number of iterations')
arg_parser.add_argument('--implementation', default='python',
choices=['python', 'cython', 'cython_omp'],
help='implementation to use')
arg_parser.add_argument('--partitions', type=int, default=100,
help='number of partitions for dask workers')
arg_parser.add_argument('--host', required=True,
help='hostname of the dask scheduler')
arg_parser.add_argument('--port', type=int, required=True,
help='port of the dask scheduler')
options = arg_parser.parse_args()
client = Client(f'{options.host}:{options.port:d}')
if options.implementation == 'python':
from julia_python import julia_set
elif options.implementation == 'cython':
from julia_cython import julia_set
client.register_worker_callbacks(init_pyx)
elif options.implementation == 'cython_omp':
from julia_cython_omp import julia_set
client.register_worker_callbacks(init_omp_pyx)
else:
msg = '{0} version not implemented\n'
sys.stderr.write(msg.format(options.implementation))
sys.exit(1)
domain = init_julia(
(options.re_min, options.re_max),
(options.im_min, options.im_max),
(options.n_re, options.n_im)
)
    domains = np.array_split(domain, options.partitions)
from collections.abc import Iterable
from typing import Union, List, Tuple
import numpy as np
from functools import partial
from scipy import ndimage as ndi
import cv2
import random
from .utils import clipBBoxes
from .base import AugBase
# -------------- channel aug_cuda --------------- #
class RGB2Gray(AugBase):
def __init__(self):
super().__init__()
self.always = True
@property
def canBackward(self):
return True
def _backward_params(self, result):
self._init_params(result)
self.params = True
def apply_to_img(self, result):
if self.isForwarding:
assert self.channels == 3 and self.dim == 2, f"{self.channels} {self.dim}"
result['img'] = cv2.cvtColor(np.moveaxis(result['img'], 0, -1), cv2.COLOR_RGB2GRAY)[None, ...]
result['img_shape'] = result['img'].shape
else:
assert self.channels == 1
result['img'] = np.repeat(result['img'], 3, axis=0).astype(np.uint8)
result['img_shape'] = result['img'].shape
return result
class Gray2RGB(AugBase):
def __init__(self):
super().__init__()
self.always = True
@property
def canBackward(self):
return True
def _backward_params(self, result):
self._init_params(result)
self.params = True
def apply_to_img(self, result):
if self.isForwarding:
assert self.channels == 1
result['img'] = np.repeat(result['img'], 3, axis=0).astype(np.uint8)
result['img_shape'] = result['img'].shape
else:
assert self.channels == 3 and self.dim == 2
result['img'] = result['img'][[0], ...]
result['img_shape'] = result['img'].shape
return result
class ChannelSelect(AugBase):
def __init__(self, index: (list, tuple, int)):
super().__init__()
self.always = True
if isinstance(index, (int, float)):
index = [int(index)]
self.index = index
assert isinstance(self.index, (list, tuple))
def __repr__(self):
repr_str = self.__class__.__name__
repr_str += '(channel_index={})'.format(self.index)
return repr_str
@property
def canBackward(self):
return True
def _forward_params(self, result):
self._init_params(result)
self.params = tuple([self.index, self.channels])
result[self.key_name] = self.params
def _backward_params(self, result):
self._init_params(result)
params = result.pop(self.key_name, None)
if params:
self.params = params
def apply_to_img(self, result):
if self.isForwarding:
index, _ = self.params
result['img'] = result['img'].take(indices=index, axis=0)
result['img_shape'] = result['img'].shape
else:
_, channels = self.params
            result['img'] = np.repeat(result['img'], channels, axis=0)
            result['img_shape'] = result['img'].shape
        return result
# -*- coding: utf-8 -*-
"""
This is the offline RANSAC calculation for hypergraph propagation.
To use:
under Hypergraph_Propagation_and_Community_Selection_for_Objects_Retrieval/
import pre_match
pre_match(0,100000,2)
This is the cpu version.
To use multiple threads for accelerating datasets with R1M distractors, several CPUs and a large amount of memory are needed.
You may want to change the (#get the pre matching pairs) and (#load the local descriptors) code based on your equipment.
We would be grateful if anyone offers a GPU version of the geometric-verification code.
Created on Sat Mar 13 09:57:28 2021
@author: <NAME>
"""
import numpy as np
import pickle
from utils.image_reranking import MatchFeatures
dataset='rparis6k'
#dataset='roxford5k'
with open('data/'+dataset[:-2]+'/gnd_'+dataset+'.pkl','rb') as f:
roxford=pickle.load(f)
# global features of the dataset
vecs=np.load('features/'+dataset[:-2]+'_np_delg_features/a_global_vecs.npy').T #(2048,6322)
qvecs=np.load('features/'+dataset[:-2]+'_np_delg_features/a_global_qvecs.npy').T #(2048,70)
qscores=np.dot(vecs.T,qvecs) #(4993,70)
qranks=np.argsort(-qscores,axis=0) #(4993, 70)
qscore_ranks=np.sort(-qscores,axis=0) #(4993,70)
scores=np.dot(vecs.T,vecs) #(4993,4993)
ranks=np.argsort(-scores,axis=0) #(4993,4993)
graph_dir='graph/delg/'+dataset[:-2]+'/' #'graph/delg/rparis/'
#get the pre matching pairs
def sp_pairs():
    all_pairs_200=set() # record every pair that may need RANSAC, i.e. each image with its top-200 global-retrieval neighbours
for i in range(vecs.shape[1]):
top200=list(ranks[:200,i])
for x in top200:
if 'imlist_'+str(x)+'_imlist_'+str(i) not in all_pairs_200:
all_pairs_200.add('imlist_'+str(i)+'_imlist_'+str(x))
all_pairs_200=list(all_pairs_200)
    np.save(graph_dir+'pre_pairs.npy', all_pairs_200)
# ============================ libraries =====================================
import numpy as np
### ========================== functions =====================================
def test_year_quarter_by_park(park):
if park == "Gonarezhou" or park == "mbe" or park == "mbe_gun" or park == "CRNP":
return 2018, 1
elif park == "AMWS":
return 2017, 4
elif park == "MFNP" or park == "QENP":
return 2015, None
elif park == "Mondulkiri":
return 2017, 4
elif park == "Dja":
return 2018, 1
elif park == "MPF":
return 2017, 1
elif park == "SWS":
return 2018, 1
else:
raise Exception("Park '{}' not implemented.".format(park))
# TODO: kai says this may not be used anymore
def selected_threshold_by_park(park):
if park == "QENP":
return np.arange(0, 8, 0.5)
elif park == "MFNP":
return np.arange(0, 8, 0.5)
elif park == "CRNP":
return np.arange(0, 1.2, 0.1)
elif park == "Gonarezhou":
return np.arange(0, 15, 1.5)
elif park == "AMWS":
return np.arange(0, 1.2, 0.05)
elif park == "mbe" or park == "mbe_gun":
return np.arange(0, 1.65, 0.15)
# elif park == "Mondulkiri":
# return np.arange(0, 12, 0.5)
elif park == "Dja":
return np.arange(0, 7, 0.5)
elif park == "MPF":
#return np.arange(0, 15, 1.5)
#return np.arange(0, 7, 0.5)
# return np.arange(0, 12, 0.5) # for full data
        return np.arange(0, 5, 0.3)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue May 12 18:28:54 2020
@author: Dr <NAME> (CIMAT-CONACYT, Mexico) jac at cimat.mx
Instantaneous reproduction numbers calculations.
Rts_P, Implementation of Cori et al (2013)
Rts_AR, new filtering version using an autoregressive linear model of Capistrán, Capella and Christen (2020):
https://arxiv.org/abs/2012.02168, 05DIC2021
01FEB2021: Some bugs were corrected to avoid errors when counts are too low and for prediction when g=1.
Go directly to __main__ for examples.
"""
import os
from datetime import date, timedelta
from pickle import load, dump
from numpy import arange, diff, loadtxt, zeros, flip, array, log, quantile, ones
from numpy import savetxt, linspace, exp, cumsum, where, append, sqrt
from numpy import sum as np_sum
from scipy.stats import erlang, gamma, nbinom, uniform, beta
from scipy.stats import t as t_student
from matplotlib.pyplot import subplots, rcParams, close
from matplotlib.dates import drange
from pytwalk import pytwalk
from plotfrozen import PlotFrozenDist
def Rts_P( data, tau=7, n=30, IP_dist=erlang( a=3, scale=8/3),\
Rt_pr_a=5, Rt_pr_b=5/5, q=[10,25,50,75,90]):
"""Calculate Rt as in:
<NAME>, <NAME>, <NAME>, <NAME>,
A New Framework and Software to Estimate Time-Varying Reproduction Numbers
During Epidemics, American Journal of Epidemiology,
Volume 178, Issue 9, 1 November 2013, Pages 1505–1512,
https://doi.org/10.1093/aje/kwt133
data: array with case incidence.
tau: Use a window tau (default 7) to calculate R_{t,\tau}'s.
n: calculate n R_{t,\tau}'s to the past n days (default 30).
IP_dist: 'frozen' infectiousness profile distribution,
default erlang( a=3, scale=8/3), chosen for covid19.
Only the cdf is needed, ie. IP_dist.cdf(i), to calculate w_s.
Rt_pr_a=5, Rt_pr_b=5/5, parameters for the gamma prior for R_t.
q=[10,25,50,75,90], quantiles to use to calulate in the post. dust for R_t.
If q ia a single integer, return a simulation of the Rts of size q, for each Rt
Returns: a (len(q), n) array with quantiles of the R_{t,\tau}'s.
"""
if isinstance( q, list): ## Return a list of quantiles
q = array(q)/100
rt = zeros(( len(q), n))
simulate = False
    else: ## If q is a single integer, return a simulation of the Rts of size q, for each Rt
if q == 2: # return a and b of post gamma
rt = zeros(( q, n))
else:
rt = zeros(( q, n))
simulate = True
m = len(data)
w = diff(IP_dist.cdf( arange( 0, m+1)))
w /= sum(w)
w = flip(w)
for t in range(max(m-n,0), m):
S1 = 0.0
S2 = 0.0
if sum(data[:t]) <= 10:# Only for more than 10 counts
continue
for k in range(tau):
I = data[:(t-k)] ## window of reports
S2 += data[(t-k)]
S1 += sum(I * w[(m-(t-k)):]) #\Gamma_k
#print( (Rt_pr_a+S2) * (1/(S1 + 1/Rt_pr_b)), (Rt_pr_a+S2), 1/(S1 + 1/Rt_pr_b))
if simulate:
if q == 2: #Return Rt_pr_a+S2, scale=1/(S1 + 1/Rt_pr_b)
rt[:,t-(m-n)] = Rt_pr_a+S2, 1/(S1 + 1/Rt_pr_b)
else:
rt[:,t-(m-n)] = gamma.rvs( Rt_pr_a+S2, scale=1/(S1 + 1/Rt_pr_b), size=q)
else:
rt[:,t-(m-n)] = gamma.ppf( q, Rt_pr_a+S2, scale=1/(S1 + 1/Rt_pr_b))
return rt
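# Minimal usage sketch with made-up daily incidence counts; any 1-D array of
# daily case counts works.  The result has one row per requested quantile.
def _demo_Rts_P():
    data = array([2, 3, 5, 8, 12, 18, 25, 33, 40, 52, 60, 71, 80, 95,
                  110, 120, 135, 150, 160, 175])
    rts = Rts_P(data, tau=7, n=14)
    return rts  # shape (5, 14): rows are the 10, 25, 50, 75, 90% quantiles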
def PlotRts_P( data_fnam, init_date, trim=0,\
tau=7, n=30, IP_dist=erlang( a=3, scale=8/3), Rt_pr_a=5, Rt_pr_b=5/5,\
q=[10,25,50,75,90], csv_fnam=None, color='blue', median_color='red', alpha=0.25, ax=None):
"""Makes a board with the Rt evolution for the past n days (n=30).
All parameters are passed to function Rts_P.
csv_fnam is an optional file name toi save the Rts info.
ax is an Axis hadle to for the plot, if None, it creates one and retruns it.
"""
if type(data_fnam) == str:
        data = loadtxt(data_fnam)
import copy
import numpy as np
import matplotlib.pyplot as plt
import scipy as sp
from scipy.special import logit,expit
import time
from numpy import random,linalg,corrcoef,ones,float32,float64,c_,exp,log
from numpy import zeros,mean,where,array,unique,equal
import torch
import torchvision
import torchvision.transforms as transforms
from mnist import MNIST
mndata = MNIST('/mnist/')
images, labels = mndata.load_training()
y=np.array(labels).astype(float32)
x=np.array(images).astype(float32)
# Imports
import torch
from itertools import count
from torch.autograd import Variable
from utils import *
import random
import numpy as np
USE_CUDA = torch.cuda.is_available()
dtype = torch.cuda.FloatTensor if torch.cuda.is_available() else torch.FloatTensor
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
def deep_Q_learning(env, optimizer_spec, exploration_params, replay_buffer_size=100000,
start_learning=50000, batch_size=128, gamma=0.99, target_update_freq=10000,
save_fig=True, save_model=False):
"""
Implementation of DQN learning procedure
:param env: gym environment
:param architecture: dict. with input_size, hidden_size and output_size (2-layer NN)
:param optimizer_spec: optimizer and its params
:param encode_type: how to encode state - one_hot or ???
:param exploration_params: dict. with final epsilon and num. of time steps until final epsilon
:param replay_buffer_size: size of replay memory
:param start_learning: num. iterations before start learning (filling the buffer)
:param batch_size: batch size for optimization steps
:param gamma: discount factor of MDP
:param target_update_freq: num. of iterations between target network update
:param save_fig: flag for saving plots
    :param save_model: flag for saving optimal weights of the net at the end of training session
Algorithm saves a trained network
"""
def select_epsilon_greedy_action(model, state, exploration_params, t):
"""
:param model: Q network
:param state: current state of env - in 3D image difference
:param exploration_params: final epsilon and num. timesteps until final epsilon
:param t: current timestep
:return: Algorithm returns an action chosen by an epsilon greedy policy
"""
# Compute current epsilon
fraction = min(1.0, float(t) /exploration_params["timesteps"])
epsilon = 1 + fraction * (exploration_params["final_eps"] - 1)
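        # Worked example of this linear schedule (illustrative numbers): with
        # final_eps=0.05 and timesteps=1e6, epsilon is 1.0 at t=0, 0.525 at
        # t=5e5, and stays clamped at 0.05 once t >= 1e6.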
num_actions = model.head.out_features # output size of Q network is as action space
sample = random.random()
if sample <= epsilon:
return random.randrange(num_actions), epsilon
else:
return int(model(Variable(state)).data.argmax()), epsilon
num_actions = env.action_space.n
# Initialize network and target network
Q = DQN(num_actions).to(device)
Q_target = DQN(num_actions).to(device)
# Construct optimizer
optimizer = optimizer_spec.constructor(Q.parameters(), **optimizer_spec.kwargs)
# Construct the replay buffer
replay_buffer = PriorReplayMemory(replay_buffer_size)
# Initialize episodic reward list
episodic_rewards = []
avg_episodic_rewards = []
stdev_episodic_rewards = []
best_avg_episodic_reward = -np.inf
acc_episodic_reward = 0.0
num_param_updates = 0
episodes_passed = 0
stopping_counter = 0
_ = env.reset()
current_screen = get_screen(env)
state = current_screen
for t in count():
# Stop if last average accumulated episodic reward over 10 episodes is above -150
if len(avg_episodic_rewards) > 0:
if avg_episodic_rewards[-1] > -115:
stopping_counter += 1
if stopping_counter >= 11:
if save_model:
torch.save(Q, 'stable_trained_Acrobot_model_v4')
break
else:
stopping_counter = 0
# Choose random action if not yet start learning
if t > start_learning:
action, eps_val = select_epsilon_greedy_action(Q, state, exploration_params, t)
else:
action = random.randrange(num_actions)
eps_val = 1.0
# Advance one step
_, reward, done, _ = env.step(action)
last_screen = current_screen
current_screen = get_screen(env)
next_state = current_screen - last_screen
# Construct priority for the current sample
# Q value for state-action pair that were taken
current_Q_value = Q(state)[0][action]
# Best Q value from next state - using Q_target as estimator
next_Q_value = Q_target(next_state).detach().max(1)[0]
# Compute estimated Q values (based on Q_target)
target_Q_value = reward + (gamma * next_Q_value)
# Compute Bellman error
bellman_error = target_Q_value - current_Q_value.squeeze()
# document accumulated reward
acc_episodic_reward = acc_episodic_reward + reward
# Save and insert transition to replay buffer
transition = Transition(state=state, action=action, reward=reward, next_state=next_state, done=int(done))
replay_buffer.insert(transition, np.abs(bellman_error.data))
# Resets the environment when reaching an episode boundary.
if done:
# Resets the environment when finishing an episode
_ = env.reset()
current_screen = get_screen(env)
next_state = current_screen
# Document statistics
episodic_rewards.append(acc_episodic_reward)
acc_episodic_reward = 0.0
episodes_passed += 1
# Compute average reward and variance (standard deviation)
if len(episodic_rewards) <= 10:
avg_episodic_rewards.append(np.mean(np.array(episodic_rewards)))
if len(episodic_rewards) >= 2:
stdev_episodic_rewards.append(np.std(np.array(episodic_rewards)))
else:
avg_episodic_rewards.append(np.mean(np.array(episodic_rewards[-10:])))
                stdev_episodic_rewards.append(np.std(np.array(episodic_rewards[-10:])))
from __future__ import print_function
import sys
import numpy as np
from numpy import ma
from numpy.lib.stride_tricks import as_strided
import warnings
import getopt
import os
python3 = sys.version_info[0] > 2
if python3:
# no unicode type in python 3, use bytes instead when testing
# for a string-like object
unicode = str
else:
range = xrange
try:
bytes
except NameError:
# no bytes type in python < 2.6
bytes = str
def _safecast(a,b):
# check to see if array a can be safely cast
# to array b. A little less picky than numpy.can_cast.
try:
is_safe = ((a == b) | (np.isnan(a) & np.isnan(b))).all()
#is_safe = np.allclose(a, b, equal_nan=True) # numpy 1.10.0
except:
try:
is_safe = (a == b).all() # string arrays.
except:
is_safe = False
return is_safe
def _sortbylist(A,B):
# sort one list (A) using the values from another list (B)
return [A[i] for i in sorted(range(len(A)), key=B.__getitem__)]
def _find_dim(grp, dimname):
# find Dimension instance given group and name.
# look in current group, and parents.
group = grp
dim = None
while 1:
try:
dim = group.dimensions[dimname]
break
except:
try:
group = group.parent
except:
raise ValueError("cannot find dimension %s in this group or parent groups" % dimname)
return dim
def _walk_grps(topgrp):
"""Iterate through all (sub-) groups of topgrp, similar to os.walktree.
"""
grps = topgrp.groups.values()
yield grps
for grp in topgrp.groups.values():
for children in _walk_grps(grp):
yield children
def _quantize(data,least_significant_digit):
"""
quantize data to improve compression. data is quantized using
around(scale*data)/scale, where scale is 2**bits, and bits is determined
from the least_significant_digit. For example, if
least_significant_digit=1, bits will be 4.
"""
precision = pow(10.,-least_significant_digit)
exp = np.log10(precision)
if exp < 0:
exp = int(np.floor(exp))
else:
exp = int(np.ceil(exp))
bits = np.ceil(np.log2(pow(10.,-exp)))
scale = pow(2.,bits)
datout = np.around(scale*data)/scale
if ma.isMA(datout):
datout.set_fill_value(data.fill_value)
return datout
else:
return datout
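# Worked example (sketch): least_significant_digit=1 gives precision 0.1,
# exp=-1, bits=ceil(log2(10))=4 and scale=16, so 1.23456 is stored as
# around(16*1.23456)/16 = 20/16 = 1.25.
def _demo_quantize():
    return _quantize(np.array([1.23456, 2.71828]), least_significant_digit=1)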
def _StartCountStride(elem, shape, dimensions=None, grp=None, datashape=None,\
put=False, use_get_vars = False):
"""Return start, count, stride and indices needed to store/extract data
into/from a netCDF variable.
This function is used to convert a slicing expression into a form that is
compatible with the nc_get_vars function. Specifically, it needs
to interpret integers, slices, Ellipses, and 1-d sequences of integers
and booleans.
Numpy uses "broadcasting indexing" to handle array-valued indices.
"Broadcasting indexing" (a.k.a "fancy indexing") treats all multi-valued
indices together to allow arbitrary points to be extracted. The index
arrays can be multidimensional, and more than one can be specified in a
slice, as long as they can be "broadcast" against each other.
This style of indexing can be very powerful, but it is very hard
to understand, explain, and implement (and can lead to hard to find bugs).
Most other python packages and array processing
languages (such as netcdf4-python, xray, biggus, matlab and fortran)
use "orthogonal indexing" which only allows for 1-d index arrays and
treats these arrays of indices independently along each dimension.
The implementation of "orthogonal indexing" used here requires that
index arrays be 1-d boolean or integer. If integer arrays are used,
the index values must be sorted and contain no duplicates.
In summary, slicing netcdf4-python variable objects with 1-d integer or
boolean arrays is allowed, but may give a different result than slicing a
numpy array.
Numpy also supports slicing an array with a boolean array of the same
shape. For example x[x>0] returns a 1-d array with all the positive values of x.
This is also not supported in netcdf4-python, if x.ndim > 1.
Orthogonal indexing can be used in to select netcdf variable slices
using the dimension variables. For example, you can use v[lat>60,lon<180]
to fetch the elements of v obeying conditions on latitude and longitude.
Allow for this sort of simple variable subsetting is the reason we decided to
deviate from numpy's slicing rules.
This function is used both by the __setitem__ and __getitem__ method of
the Variable class.
Parameters
----------
elem : tuple of integer, slice, ellipsis or 1-d boolean or integer
sequences used to slice the netCDF Variable (Variable[elem]).
shape : tuple containing the current shape of the netCDF variable.
dimensions : sequence
The name of the dimensions.
__setitem__.
grp : netCDF Group
The netCDF group to which the variable being set belongs to.
datashape : sequence
The shape of the data that is being stored. Only needed by __setitem__
put : True|False (default False). If called from __setitem__, put is True.
Returns
-------
start : ndarray (..., n)
A starting indices array of dimension n+1. The first n
dimensions identify different independent data chunks. The last dimension
can be read as the starting indices.
count : ndarray (..., n)
An array of dimension (n+1) storing the number of elements to get.
stride : ndarray (..., n)
An array of dimension (n+1) storing the steps between each datum.
indices : ndarray (..., n)
An array storing the indices describing the location of the
data chunk in the target/source array (__getitem__/__setitem__).
Notes:
netCDF data is accessed via the function:
nc_get_vars(grpid, varid, start, count, stride, data)
Assume that the variable has dimension n, then
start is a n-tuple that contains the indices at the beginning of data chunk.
count is a n-tuple that contains the number of elements to be accessed.
stride is a n-tuple that contains the step length between each element.
"""
# Adapted from pycdf (http://pysclint.sourceforge.net/pycdf)
# by <NAME>..
# Modified by <NAME> to handle efficiently fancy indexing with
# sequences of integers or booleans.
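    # Illustration of the orthogonal indexing described above (added note):
    # v[[0, 2], [1, 3]] returns the 2x2 block formed by rows 0 and 2 crossed
    # with columns 1 and 3, whereas numpy fancy indexing on an ndarray would
    # return only the two elements v[0, 1] and v[2, 3].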
nDims = len(shape)
if nDims == 0:
nDims = 1
shape = (1,)
# is there an unlimited dimension? (only defined for __setitem__)
if put:
hasunlim = False
unlimd={}
if dimensions:
for i in range(nDims):
dimname = dimensions[i]
# is this dimension unlimited?
# look in current group, and parents for dim.
dim = _find_dim(grp, dimname)
unlimd[dimname]=dim.isunlimited()
if unlimd[dimname]:
hasunlim = True
else:
hasunlim = False
# When a single array or (non-tuple) sequence of integers is given
# as a slice, assume it applies to the first dimension,
# and use ellipsis for remaining dimensions.
if np.iterable(elem):
if type(elem) == np.ndarray or (type(elem) != tuple and \
np.array([_is_int(e) for e in elem]).all()):
elem = [elem]
for n in range(len(elem)+1,nDims+1):
elem.append(slice(None,None,None))
else: # Convert single index to sequence
elem = [elem]
    # ensure there is at most 1 ellipsis
    # we cannot use elem.count(Ellipsis), since with fancy indexing it would
    # evaluate np.array() == Ellipsis, which raises ValueError: The truth value
    # of an array with more than one element is ambiguous. Use a.any() or a.all()
if sum(1 for e in elem if e is Ellipsis) > 1:
raise IndexError("At most one ellipsis allowed in a slicing expression")
# replace boolean arrays with sequences of integers.
newElem = []
IndexErrorMsg=\
"only integers, slices (`:`), ellipsis (`...`), and 1-d integer or boolean arrays are valid indices"
i=0
for e in elem:
# string-like object try to cast to int
# needs to be done first, since strings are iterable and
# hard to distinguish from something castable to an iterable numpy array.
if type(e) in [str,bytes,unicode]:
try:
e = int(e)
except:
raise IndexError(IndexErrorMsg)
ea = np.asarray(e)
# Raise error if multidimensional indexing is used.
if ea.ndim > 1:
raise IndexError("Index cannot be multidimensional")
# set unlim to True if dimension is unlimited and put==True
# (called from __setitem__)
if hasunlim and put and dimensions:
try:
dimname = dimensions[i]
unlim = unlimd[dimname]
except IndexError: # more slices than dimensions (issue 371)
unlim = False
else:
unlim = False
# convert boolean index to integer array.
if np.iterable(ea) and ea.dtype.kind =='b':
            # check that the boolean array is not too long
if not unlim and shape[i] != len(ea):
msg="""
Boolean array must have the same shape as the data along this dimension."""
raise IndexError(msg)
ea = np.flatnonzero(ea)
# an iterable (non-scalar) integer array.
if np.iterable(ea) and ea.dtype.kind == 'i':
# convert negative indices in 1d array to positive ones.
ea = np.where(ea < 0, ea + shape[i], ea)
if np.any(ea < 0):
raise IndexError("integer index out of range")
# if unlim, let integer index be longer than current dimension
# length.
if ea.shape != (0,):
elen = shape[i]
if unlim:
elen = max(ea.max()+1,elen)
if ea.max()+1 > elen:
msg="integer index exceeds dimension size"
raise IndexError(msg)
newElem.append(ea)
# integer scalar
elif ea.dtype.kind == 'i':
newElem.append(e)
# slice or ellipsis object
elif type(e) == slice or type(e) == type(Ellipsis):
if not use_get_vars and type(e) == slice and e.step not in [None,-1,1] and\
dimensions is not None and grp is not None:
# convert strided slice to integer sequence if possible
# (this will avoid nc_get_vars, which is slow - issue #680).
start = e.start if e.start is not None else 0
step = e.step
if e.stop is None and dimensions is not None and grp is not None:
stop = len(_find_dim(grp, dimensions[i]))
else:
stop = e.stop
if stop < 0:
stop = len(_find_dim(grp, dimensions[i])) + stop
try:
ee = np.arange(start,stop,e.step)
if len(ee) > 0:
e = ee
except:
pass
newElem.append(e)
else: # castable to a scalar int, otherwise invalid
try:
e = int(e)
newElem.append(e)
except:
raise IndexError(IndexErrorMsg)
if type(e)==type(Ellipsis):
i+=1+nDims-len(elem)
else:
i+=1
elem = newElem
# replace Ellipsis and integer arrays with slice objects, if possible.
newElem = []
for e in elem:
ea = np.asarray(e)
# Replace ellipsis with slices.
if type(e) == type(Ellipsis):
# The ellipsis stands for the missing dimensions.
newElem.extend((slice(None, None, None),) * (nDims - len(elem) + 1))
# Replace sequence of indices with slice object if possible.
elif np.iterable(e) and len(e) > 1:
start = e[0]
stop = e[-1]+1
step = e[1]-e[0]
try:
ee = range(start,stop,step)
except ValueError: # start, stop or step is not valid for a range
ee = False
if ee and len(e) == len(ee) and (e == np.arange(start,stop,step)).all():
# don't convert to slice unless abs(stride) == 1
# (nc_get_vars is very slow, issue #680)
if not use_get_vars and step not in [1,-1]:
newElem.append(e)
else:
newElem.append(slice(start,stop,step))
else:
newElem.append(e)
elif np.iterable(e) and len(e) == 1:
newElem.append(slice(e[0], e[0] + 1, 1))
else:
newElem.append(e)
elem = newElem
# If slice doesn't cover all dims, assume ellipsis for rest of dims.
if len(elem) < nDims:
for n in range(len(elem)+1,nDims+1):
elem.append(slice(None,None,None))
# make sure there are not too many dimensions in slice.
if len(elem) > nDims:
raise ValueError("slicing expression exceeds the number of dimensions of the variable")
# Compute the dimensions of the start, count, stride and indices arrays.
# The number of elements in the first n dimensions corresponds to the
# number of times the _get method will be called.
sdim = []
for i, e in enumerate(elem):
# at this stage e is a slice, a scalar integer, or a 1d integer array.
# integer array: _get call for each True value
if np.iterable(e):
            sdim.append(np.alen(e))
'''
script for generating the MTL and fiberassign on DR9SV imaging.
'''
import os
import glob
import h5py
import numpy as np
import numpy.lib.recfunctions as rfn
import fitsio
import healpy as hp
from astropy.table import Table
from pydl.pydlutils.spheregroup import spherematch
# -- desitarget --
from desitarget.targets import calc_priority, main_cmx_or_sv, set_obsconditions
from desitarget.sv1.sv1_targetmask import desi_mask, bgs_mask, mws_mask
# -- plotting --
import matplotlib as mpl
import matplotlib.pyplot as plt
if os.environ['NERSC_HOST'] != 'cori':
mpl.rcParams['text.usetex'] = True
mpl.rcParams['font.family'] = 'serif'
mpl.rcParams['axes.linewidth'] = 1.5
mpl.rcParams['axes.xmargin'] = 1
mpl.rcParams['xtick.labelsize'] = 'x-large'
mpl.rcParams['xtick.major.size'] = 5
mpl.rcParams['xtick.major.width'] = 1.5
mpl.rcParams['ytick.labelsize'] = 'x-large'
mpl.rcParams['ytick.major.size'] = 5
mpl.rcParams['ytick.major.width'] = 1.5
mpl.rcParams['legend.frameon'] = False
dir_dat = '/global/cscratch1/sd/chahah/feasibgs/survey_validation/'
dir_cfs = '/global/cfs/cdirs/desi/users/chahah/'
if not os.path.isdir(dir_dat):
dir_dat = '/Users/ChangHoon/data/feasiBGS/survey_validation/'
f_svfields = 'BGS_SV_30_3x_superset60_Apr2020v2.fits'
######################################################################
# constructing MTLs
######################################################################
def mtl_dr9sv(seed=0, clobber=False):
''' make MTL using DR9SV imaging
'''
np.random.seed(seed)
#########################################################################
# compile sv tiles
#########################################################################
# read SV tiles
sv = fitsio.read(os.path.join(dir_dat, f_svfields)) # new SV tiles
print('%i BGS SV tiles' % len(sv['RA']))
# get SV tiles *outside* of the DR9 SV imaging region
in_dr9 = _in_DR9_SVregion(sv['RA'], sv['DEC'])
print('%i tiles outside of DR9' % np.sum(~in_dr9))
#########################################################################
# compile targets and match to truth table and SN host
#########################################################################
# read targets from DR9SV and DR8 cut out
ftargets = [
'sv1-targets-dr9-hp-X.spec_truth.sn_host.fits',
'sv1-targets-dr8.sv_cutout.spec_truth.sn_host.fits'
]
ntargets = len(ftargets)
for i, _ftarget in enumerate(ftargets):
ftarget = os.path.join(dir_dat, 'sv.spec_truth', _ftarget)
if not os.path.isfile(ftarget) or clobber:
# read target files with truth tables
_f = os.path.join(dir_dat, 'sv.spec_truth',
_ftarget.replace('.spec_truth.sn_host.fits', '.spec_truth.fits'))
__f = os.path.join(dir_dat, 'sv.spec_truth',
_ftarget.replace('.spec_truth.sn_host.fits', '.fits'))
if not os.path.isfile(_f) or clobber:
print('... matching %s to truth table' % __f)
_target = fitsio.read(os.path.join(dir_dat, __f))
target = match2spectruth(_target)
fitsio.write(_f, target, clobber=True)
else:
target = fitsio.read(_f)
print('... matching %s to SN host' % _ftarget)
target = match2snhost(target)
fitsio.write(ftarget, target, clobber=True)
else:
print('... reading %s targets' % ftarget)
target = fitsio.read(ftarget)
# construct MTLs for set of targets
mtl = make_mtl(target, seed=seed)
fmtl = os.path.join(dir_dat, 'mtl',
'mtl.bgs.dr9sv.%iof%i.seed%i.fits' % (i+1, ntargets, seed))
mtl.write(fmtl, format='fits', overwrite=True)
return None
def make_mtl(targets, seed=None):
''' construct mtl given targets.
notes:
-----
* At the moment, highest priority is set for targets with spectroscopic
redshifts or are SN hosts.
'''
assert 'IN_SPECTRUTH' in targets.dtype.names
assert 'HAS_SN' in targets.dtype.names
np.random.seed(seed)
# determine whether the input targets are main survey, cmx or SV.
colnames, masks, survey = main_cmx_or_sv(targets)
# ADM set the first column to be the "desitarget" column
desi_target, desi_mask = colnames[0], masks[0]
n = len(targets)
# ADM if the input target columns were incorrectly called NUMOBS or PRIORITY
# ADM rename them to NUMOBS_INIT or PRIORITY_INIT.
for name in ['NUMOBS', 'PRIORITY']:
targets.dtype.names = [name+'_INIT' if col == name else col for col in targets.dtype.names]
# ADM if a redshift catalog was passed, order it to match the input targets
# ADM catalog on 'TARGETID'.
ztargets = Table()
ztargets['TARGETID'] = targets['TARGETID']
ztargets['NUMOBS'] = np.zeros(n, dtype=np.int32)
ztargets['Z'] = -1 * np.ones(n, dtype=np.float32)
ztargets['ZWARN'] = -1 * np.ones(n, dtype=np.int32)
# ADM if zcat wasn't passed, there is a one-to-one correspondence
# ADM between the targets and the zcat.
zmatcher = np.arange(n)
# ADM extract just the targets that match the input zcat.
targets_zmatcher = targets[zmatcher]
# ADM use passed value of NUMOBS_INIT instead of calling the memory-heavy calc_numobs.
# ztargets['NUMOBS_MORE'] = np.maximum(0, calc_numobs(ztargets) - ztargets['NUMOBS'])
ztargets['NUMOBS_MORE'] = np.maximum(0, targets_zmatcher['NUMOBS_INIT'] - ztargets['NUMOBS'])
# ADM need a minor hack to ensure BGS targets are observed once
# ADM (and only once) every time during the BRIGHT survey, regardless
# ADM of how often they've previously been observed. I've turned this
# ADM off for commissioning. Not sure if we'll keep it in general.
# ADM only if we're considering bright survey conditions.
ii = targets_zmatcher[desi_target] & desi_mask.BGS_ANY > 0
ztargets['NUMOBS_MORE'][ii] = 1
# ADM assign priorities, note that only things in the zcat can have changed priorities.
# ADM anything else will be assigned PRIORITY_INIT, below.
priority = calc_priority(targets_zmatcher, ztargets, 'BRIGHT')
# set subpriority in order to tune the SV target densities
# BGS target classes: BRIGHT, FAINT, EXTFAINT, FIBERMAG, LOWQ
# initial DR8 target density ---> desired density
# BRIGHT: 882.056980 ---> 540 = 63% 0.62 - 1
# FAINT: 746.769486 ---> 300 = 41% 0.41 - 1
# EXTFAINT: 623.470673 ---> 150 = 24% 0 - 1
# FIBERMAG: 207.534409 ---> 150 = 71% 0.66 - 1
# LOW Q: 55.400240 ---> 60 = 100% 0.76 - 1
# (depending on imaging LOWQ varies a lot! DES~50/deg2, DECALS~114/deg2, North~185/deg2)
# bgs bitmask
bitmask_bgs = targets['SV1_BGS_TARGET']
has_spec = targets['IN_SPECTRUTH'] # objects in spectroscopic truth table
has_sn = targets['HAS_SN']
# BGS objects with spectra or hosts SN
special = np.zeros(n).astype(bool) #(has_spec | has_sn)
bgs_special = special & (bitmask_bgs).astype(bool)
bgs_all = ~special & (bitmask_bgs).astype(bool)
bgs_bright = ~special & (bitmask_bgs & bgs_mask.mask('BGS_BRIGHT')).astype(bool)
bgs_faint = ~special & (bitmask_bgs & bgs_mask.mask('BGS_FAINT')).astype(bool)
bgs_extfaint = ~special & (bitmask_bgs & bgs_mask.mask('BGS_FAINT_EXT')).astype(bool) # extended faint
bgs_fibmag = ~special & (bitmask_bgs & bgs_mask.mask('BGS_FIBMAG')).astype(bool) # fiber magn limited
bgs_lowq = ~special & (bitmask_bgs & bgs_mask.mask('BGS_LOWQ')).astype(bool) # low quality
n_bgs = np.sum(bgs_special) + np.sum(bgs_all)
n_bgs_special = np.sum(bgs_special)
n_bgs_bright = np.sum(bgs_bright)
n_bgs_faint = np.sum(bgs_faint)
n_bgs_extfaint = np.sum(bgs_extfaint)
n_bgs_fibmag = np.sum(bgs_fibmag)
n_bgs_lowq = np.sum(bgs_lowq)
# target classes with spectra
n_bgs_sp, n_bgs_bright_sp, n_bgs_faint_sp, n_bgs_extfaint_sp, n_bgs_fibmag_sp, n_bgs_lowq_sp = \
bgs_targetclass(targets['SV1_BGS_TARGET'][special])
#f_special = 1. # keep 100%
#f_bright = 0.45 / n_bgs_bright
#f_faint = 0.25 / n_bgs_faint
#f_extfaint = 0.125 / n_bgs_extfaint
#f_fibmag = 0.125 / n_bgs_fibmag
#f_lowq = 0.05 / n_bgs_lowq
f_bright = 540. / (n_bgs_bright + n_bgs_bright_sp)
f_faint = 300. / (n_bgs_faint + n_bgs_faint_sp)
f_extfaint = 150. / (n_bgs_extfaint + n_bgs_extfaint_sp)
f_fibmag = 150. / (n_bgs_fibmag + n_bgs_fibmag_sp)
f_lowq = 60. / (n_bgs_lowq + n_bgs_lowq_sp)
f_ref = np.min([f_bright, f_faint, f_extfaint, f_fibmag, f_lowq])
r_special = 1.#(1. - f_ref / f_special)
r_bright = (1. - f_ref / f_bright)
r_faint = (1. - f_ref / f_faint)
r_extfaint = (1. - f_ref / f_extfaint)
r_fibmag = (1. - f_ref / f_fibmag)
r_lowq = (1. - f_ref / f_lowq)
subpriority = np.random.uniform(0., 1., n)
subpriority[bgs_special] = np.random.uniform(r_special, 1., np.sum(bgs_special))
subpriority[bgs_bright] = np.random.uniform(r_bright, 1., np.sum(bgs_bright))
subpriority[bgs_faint] = np.random.uniform(r_faint, 1., np.sum(bgs_faint))
subpriority[bgs_extfaint] = np.random.uniform(f_extfaint, 1, np.sum(bgs_extfaint))
subpriority[bgs_fibmag] = np.random.uniform(r_fibmag, 1, np.sum(bgs_fibmag))
subpriority[bgs_lowq] = np.random.uniform(r_lowq, 1, np.sum(bgs_lowq))
_sample = (bitmask_bgs).astype(bool) & (subpriority > 0.943)#np.random.uniform(0., 1., n))
_n_bgs, _n_bgs_bright, _n_bgs_faint, _n_bgs_extfaint, _n_bgs_fibmag, _n_bgs_lowq = \
bgs_targetclass(targets['SV1_BGS_TARGET'][_sample])
# set priority of all BGS targets equal
priority[bgs_all] = 2000
print('---------------------------------')
print('total n_bgs = %i' % n_bgs)
print('approx. target class fractions')
print(' orig frac exp. frac (target frac)')
print(' ------------------------------------')
#print(' BGS special %i %.3f' % (n_bgs_special, n_bgs_special/n_bgs))
#print(' BGS Bright %i %.3f (0.45)' % (n_bgs_bright, n_bgs_bright/n_bgs))
#print(' BGS Faint %i %.3f (0.25)' % (n_bgs_faint, n_bgs_faint/n_bgs))
#print(' BGS Ext.Faint %i %.3f (0.125)' % (n_bgs_extfaint, n_bgs_extfaint/n_bgs))
#print(' BGS Fib.Mag %i %.3f (0.125)' % (n_bgs_fibmag, n_bgs_fibmag/n_bgs))
#print(' BGS Low Q. %i %.3f (0.05)' % (n_bgs_lowq, n_bgs_lowq/n_bgs))
print(' BGS Bright %.3f %.3f (0.45)' % (n_bgs_bright/n_bgs, _n_bgs_bright/_n_bgs))
print(' BGS Faint %.3f %.3f (0.25)' % (n_bgs_faint/n_bgs, _n_bgs_faint/_n_bgs))
print(' BGS Ext.Faint %.3f %.3f (0.125)' % (n_bgs_extfaint/n_bgs, _n_bgs_extfaint/_n_bgs))
print(' BGS Fib.Mag %.3f %.3f (0.125)' % (n_bgs_fibmag/n_bgs, _n_bgs_fibmag/_n_bgs))
print(' BGS Low Q. %.3f %.3f (0.05)' % (n_bgs_lowq/n_bgs, _n_bgs_lowq/_n_bgs))
# If priority went to 0==DONOTOBSERVE or 1==OBS or 2==DONE, then NUMOBS_MORE should also be 0.
# ## mtl['NUMOBS_MORE'] = ztargets['NUMOBS_MORE']
#ii = (priority <= 2)
#log.info('{:d} of {:d} targets have priority zero, setting N_obs=0.'.format(np.sum(ii), n))
#ztargets['NUMOBS_MORE'][ii] = 0
# - Set the OBSCONDITIONS mask for each target bit.
obsconmask = set_obsconditions(targets)
# ADM set up the output mtl table.
mtl = Table(targets)
mtl.meta['EXTNAME'] = 'MTL'
# ADM any target that wasn't matched to the ZCAT should retain its
# ADM original (INIT) value of PRIORITY and NUMOBS.
mtl['NUMOBS_MORE'] = mtl['NUMOBS_INIT']
mtl['PRIORITY'] = mtl['PRIORITY_INIT']
# ADM now populate the new mtl columns with the updated information.
mtl['OBSCONDITIONS'] = obsconmask
mtl['PRIORITY'][zmatcher] = priority
mtl['SUBPRIORITY'][zmatcher] = subpriority
mtl['NUMOBS_MORE'][zmatcher] = ztargets['NUMOBS_MORE']
# Filtering can reset the fill_value, which is just wrong wrong wrong
# See https://github.com/astropy/astropy/issues/4707
# and https://github.com/astropy/astropy/issues/4708
mtl['NUMOBS_MORE'].fill_value = -1
return mtl
def match2spectruth(targets):
''' match target table to spectroscopic truth table
'''
assert 'BRICKID' in targets.dtype.names
assert 'BRICK_OBJID' in targets.dtype.names
isbgs = (targets['SV1_BGS_TARGET']).astype(bool)
targ_brickid = targets['BRICKID'][isbgs]
targ_objid = targets['BRICK_OBJID'][isbgs]
# read in spectroscopic truth table
spectruth = h5py.File(os.path.join(dir_dat, 'bgs_truth_table.hdf5'), 'r')
st_brickid = spectruth['BRICKID'][...]
st_objid = spectruth['OBJID'][...]
in_spectruth = np.zeros(targets.shape[0]).astype(bool)
gama_cataid = np.repeat(-999, targets.shape[0])
#in_spectruth.dtype.names = ['ID', 'IN_SPECTRUTH']
ii = np.arange(targets.shape[0])
indices, cataid = [], []
uniq_brickid = np.unique(targ_brickid)
for brickid in uniq_brickid:
in_targbrick = (targ_brickid == brickid)
in_specbrick = (st_brickid == brickid)
#in_spec = np.isin(targ_objid[in_targbrick], st_objid[in_specbrick])
_, in_spec, in_targ = np.intersect1d(targ_objid[in_targbrick], st_objid[in_specbrick],
return_indices=True)
if len(in_spec) > 0:
#print(len(in_spec))
#print(targets['RA'][isbgs][in_targbrick][in_spec] - spectruth['RA'][...][in_specbrick][in_targ])
#print(targets['DEC'][isbgs][in_targbrick][in_spec] - spectruth['DEC'][...][in_specbrick][in_targ])
#print(spectruth['GAMA_CATAID'][...][in_specbrick][in_targ])
indices.append(ii[isbgs][in_targbrick][in_spec])
cataid.append(spectruth['GAMA_CATAID'][...][in_specbrick][in_targ])
in_spectruth[np.concatenate(indices)] = True
gama_cataid[np.concatenate(indices)] = np.concatenate(cataid)
print('%i BGS SV targets have spectra' % np.sum(in_spectruth))
targets = rfn.append_fields(targets, ['IN_SPECTRUTH'], [in_spectruth])
targets = rfn.append_fields(targets, ['GAMA_CATAID'], [gama_cataid])
return targets
def match2snhost(targets):
''' match target table to supernovae hosts compiled by Segev
'''
assert 'BRICKID' in targets.dtype.names
assert 'BRICK_OBJID' in targets.dtype.names
isbgs = (targets['SV1_BGS_TARGET']).astype(bool)
targ_ra = targets['RA'][isbgs]
targ_dec = targets['DEC'][isbgs]
# read in supernovae hosts
snhost = fitsio.read(os.path.join(dir_dat, 'snhost_dr8_target.fits'))
sn_ra = snhost['RA']
sn_dec = snhost['DEC']
has_sn = np.zeros(targets.shape[0]).astype(bool)
# spherematch compiled hosts
m_targ, m_sn, d_match = spherematch(targ_ra, targ_dec, sn_ra, sn_dec, 0.000277778, maxmatch=1)
has_sn[m_targ] = True
print('%i BGS SV targets are supernova hosts' % np.sum(has_sn))
targets = rfn.append_fields(targets, ['HAS_SN'], [has_sn])
return targets
def _in_DR9_SVregion(ras, decs):
''' DR9 imaging SV region listed in
https://desi.lbl.gov/trac/wiki/TargetSelectionWG/SVFields_for_DR9
'''
sv_regions = {}
sv_regions['01_s82'] = [30.,40.,-7.,2.]
sv_regions['02_egs'] = [210.,220.,50.,55.]
sv_regions['03_gama09'] = [129.,141.,-2.,3.]
sv_regions['04_gama12'] = [175.,185.,-3.,2.]
sv_regions['05_gama15'] = [212.,222.,-2.,3.]
sv_regions['06_overlap'] = [135.,160.,30.,35.]
sv_regions['07_refnorth'] = [215.,230.,41.,46.]
sv_regions['08_ages'] = [215.,220.,30.,40.]
sv_regions['09_sagittarius'] = [200.,210.,5.,10.]
sv_regions['10_highebv_n'] = [140.,150.,65.,70.]
sv_regions['11_highebv_s'] = [240.,245.,20.,25.]
sv_regions['12_highstardens_n'] = [273.,283.,40.,45.]
sv_regions['13_highstardens_s'] = [260.,270.,15.,20.]
n_tiles = len(ras)
in_dr9 = np.zeros(n_tiles).astype(bool)
for i, ra, dec in zip(range(n_tiles), ras, decs):
for k in sv_regions.keys():
if ((ra >= sv_regions[k][0]) & (ra <= sv_regions[k][1]) &
(dec >= sv_regions[k][2]) & (dec <= sv_regions[k][3])):
in_dr9[i] = True
return in_dr9
def bgs_targetclass(bitmask_bgs):
n_bgs = np.float(np.sum(bitmask_bgs.astype(bool)))
n_bgs_bright = np.sum((bitmask_bgs & bgs_mask.mask('BGS_BRIGHT')).astype(bool))
n_bgs_faint = np.sum((bitmask_bgs & bgs_mask.mask('BGS_FAINT')).astype(bool))
n_bgs_extfaint = np.sum((bitmask_bgs & bgs_mask.mask('BGS_FAINT_EXT')).astype(bool)) # extended faint
n_bgs_fibmag = np.sum((bitmask_bgs & bgs_mask.mask('BGS_FIBMAG')).astype(bool)) # fiber magnitude limited
n_bgs_lowq = np.sum((bitmask_bgs & bgs_mask.mask('BGS_LOWQ')).astype(bool)) # low quality
return n_bgs, n_bgs_bright, n_bgs_faint, n_bgs_extfaint, n_bgs_fibmag, n_bgs_lowq
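# Example (added note): bgs_mask.mask('BGS_BRIGHT') returns the integer with
# the BGS_BRIGHT bit set (2**0 in the SV1 bitmask, as also used further below
# in check_fba), so the bitwise-and above keeps exactly the targets that carry
# that bit, regardless of any other target bits they may carry.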
def check_targets_dr9sv():
'''
'''
# read SV tiles
sv = fitsio.read(os.path.join(dir_dat, f_svfields)) # new SV tiles
print('%i BGS SV tiles' % len(sv['RA']))
ftargets = ['sv1-targets-dr9-hp-X.fits', 'sv1-targets-dr8.sv_cutout.fits']
ntargets = len(ftargets)
# plot confirming coverage
fig = plt.figure(figsize=(10,7))
sub = fig.add_subplot(111)
targs = []
for i, _ftarget in enumerate(ftargets):
ftarget = os.path.join(dir_dat, 'sv.spec_truth',
_ftarget.replace('.fits', '.spec_truth.sn_host.fits'))
targ = fitsio.read(ftarget)
sub.scatter(targ['RA'][::100], targ['DEC'][::100], c='k')
targs.append(targ)
for ra, dec in zip(sv['RA'], sv['DEC']):
circ = plt.Circle((ra, dec), 1.6275, fill=False, edgecolor='C1',
linewidth=3)
sub.add_artist(circ)
sub.set_xlabel('RA', fontsize=25)
sub.set_xlim(0., 360.)
sub.set_ylabel('DEC', fontsize=25)
sub.set_ylim(-40., 85)
fig.savefig(os.path.join(dir_dat, 'sv.spec_truth',
'check_dr9sv_targets.png'), bbox_inches='tight')
# plot confirming coverage tile by tile
fig = plt.figure(figsize=(20,12))
bkgd = fig.add_subplot(111, frameon=False)
for i, ra, dec in zip(range(len(sv['RA'])), sv['RA'], sv['DEC']):
sub = fig.add_subplot(6,10,i+1)
for targ in targs:
sub.scatter(targ['RA'][::100], targ['DEC'][::100], c='k', s=1)
circ = plt.Circle((ra, dec), 1.6275, fill=False, edgecolor='C1',
linewidth=3)
sub.add_artist(circ)
sub.set_xlim(ra - 2.5, ra + 2.5)
sub.set_ylim(dec - 2.5, dec + 2.5)
bkgd.tick_params(labelcolor='none', top=False, bottom=False, left=False, right=False)
bkgd.set_xlabel(r'RA', labelpad=7, fontsize=25)
bkgd.set_ylabel(r'DEC', labelpad=7, fontsize=25)
fig.savefig(os.path.join(dir_dat, 'sv.spec_truth',
'check_dr9sv_targets.tile_by_tile.png'), bbox_inches='tight')
return None
def check_mtl_dr9sv():
''' check the target fraction of the MTLs
'''
mtls = []
for fmtl in glob.glob(os.path.join(dir_dat, 'mtl', 'mtl*.fits')):
print('--- %s ---' % fmtl)
# read MTL
mtl = fitsio.read(fmtl)
assigned = mtl['SUBPRIORITY'] > 0.943
n_bgs, n_bgs_bright, n_bgs_faint, n_bgs_extfaint, n_bgs_fibmag, n_bgs_lowq = \
bgs_targetclass(mtl['SV1_BGS_TARGET'][assigned])
print('total n_bgs = %i' % n_bgs)
print(' nobj frac (expected frac)')
print(' ------------------------------------')
print(' BGS Bright %i %.3f (0.45)' % (n_bgs_bright, n_bgs_bright/n_bgs))
print(' BGS Faint %i %.3f (0.25)' % (n_bgs_faint, n_bgs_faint/n_bgs))
print(' BGS Ext.Faint %i %.3f (0.125)' % (n_bgs_extfaint, n_bgs_extfaint/n_bgs))
print(' BGS Fib.Mag %i %.3f (0.125)' % (n_bgs_fibmag, n_bgs_fibmag/n_bgs))
print(' BGS Low Q. %i %.3f (0.05)' % (n_bgs_lowq, n_bgs_lowq/n_bgs))
mtls.append(mtl)
# read SV tiles
sv = fitsio.read(os.path.join(dir_dat, f_svfields))
# plot confirming coverage
fig = plt.figure(figsize=(10,5))
sub = fig.add_subplot(111)
for mtl in mtls:
sub.scatter(mtl['RA'][::100], mtl['DEC'][::100], c='k', s=1)
for ra, dec in zip(sv['RA'], sv['DEC']):
circ = plt.Circle((ra, dec), 1.6275, fill=False, edgecolor='C1',
linewidth=3)
sub.add_artist(circ)
sub.set_xlabel('RA', fontsize=25)
sub.set_xlim(0., 360.)
sub.set_ylabel('DEC', fontsize=25)
sub.set_ylim(-40., 85)
fig.savefig(os.path.join(dir_dat, 'mtl', 'mtl_dr9sv_check.png'),
bbox_inches='tight')
# plot confirming coverage tile by tile
fig = plt.figure(figsize=(20,12))
bkgd = fig.add_subplot(111, frameon=False)
for i, ra, dec in zip(range(len(sv['RA'])), sv['RA'], sv['DEC']):
sub = fig.add_subplot(6,10,i+1)
for mtl in mtls:
sub.scatter(mtl['RA'][::100], mtl['DEC'][::100], c='k', s=1)
circ = plt.Circle((ra, dec), 1.6275, fill=False, edgecolor='C1',
linewidth=3)
sub.add_artist(circ)
sub.set_xlim(ra - 2.5, ra + 2.5)
sub.set_ylim(dec - 2.5, dec + 2.5)
bkgd.tick_params(labelcolor='none', top=False, bottom=False, left=False, right=False)
bkgd.set_xlabel(r'RA', labelpad=7, fontsize=25)
bkgd.set_ylabel(r'DEC', labelpad=7, fontsize=25)
fig.savefig(os.path.join(dir_dat, 'mtl', 'mtl_dr9sv_check.tile_by_tile.png'),
bbox_inches='tight')
return None
def _dr8_target_cutouts():
''' combine dr8 target files for SV tiles that are outside of the dr9sv
region.
* April 9, 2020: Turns out some of the BGS SV fields are chopped up!
'''
# read SV tiles
sv = fitsio.read(os.path.join(dir_dat, f_svfields)) # new SV tiles
print('%i BGS SV tiles' % len(sv['RA']))
# get SV tiles *outside* of the DR9 SV imaging region
in_dr9 = _in_DR9_SVregion(sv['RA'], sv['DEC'])
print('%i tiles outside of DR9' % np.sum(~in_dr9))
# for tiles outside of DR9SV read all dr8 healpix that sufficiently covers
# the tiles
ras, decs = [], []
for ra, dec in zip(sv['RA'][~in_dr9], sv['DEC'][~in_dr9]):
corners_ra = [ra - 2., ra + 2., ra + 2., ra - 2.]
corners_dec = [dec + 2., dec + 2., dec - 2., dec - 2.]
ras += corners_ra
decs += corners_dec
phi = np.deg2rad(ras)
theta = 0.5 * np.pi - np.deg2rad(decs)
ipixs = np.unique(hp.ang2pix(2, theta, phi, nest=True))
print(' reading in healpixels', ipixs)
targs = []
for i, ipix in enumerate(ipixs):
fpix = os.path.join(dir_dat, 'sv.spec_truth',
'sv1-targets-dr8-hp-%i.spec_truth.sn_host.fits' % ipix)
if not os.path.isfile(fpix):
ftarg = os.path.join(dir_dat, 'sv1-targets-dr8-hp-%i.fits' % ipix)
ftrue = fpix.replace('.spec_truth.sn_host.fits', '.spec_truth.fits')
if not os.path.isfile(ftrue):
# read target files with truth tables
print('... matching %s to truth table' % ftarg)
_targ = fitsio.read(ftarg)
targ = match2spectruth(_targ)
fitsio.write(ftrue, targ)
else:
targ = fitsio.read(ftrue)
print('... matching %s to SN host' % os.path.basename(ftrue))
targ = match2snhost(targ)
fitsio.write(fpix, targ)
else:
print('reading ... %s' % fpix)
targ = fitsio.read(fpix)
near_tile = np.zeros(len(targ)).astype(bool)
for ra, dec in zip(sv['RA'][~in_dr9], sv['DEC'][~in_dr9]):
near_tile |= ((targ['RA'] > ra - 2.) & (targ['RA'] < ra + 2.) &
(targ['DEC'] > dec - 2.) & (targ['DEC'] < dec + 2.))
assert np.sum(near_tile) > 0
if i == 0:
targs = targ[near_tile]
else:
targs = np.concatenate([targs, targ[near_tile]])
fitsio.write(os.path.join(dir_dat, 'sv.spec_truth',
'sv1-targets-dr8.sv_cutout.spec_truth.sn_host.fits'), targs,
clobber=True)
# plot confirming coverage
fig = plt.figure(figsize=(10,7))
sub = fig.add_subplot(111)
sub.scatter(targs['RA'], targs['DEC'], c='k')
for ra, dec in zip(sv['RA'][~in_dr9], sv['DEC'][~in_dr9]):
circ = plt.Circle((ra, dec), 1.6275, fill=False, edgecolor='C1',
linewidth=3)
sub.add_artist(circ)
sub.set_xlabel('RA', fontsize=25)
sub.set_xlim(0., 360.)
sub.set_ylabel('DEC', fontsize=25)
sub.set_ylim(-40., 85)
fig.savefig(os.path.join(dir_dat, 'sv.spec_truth',
'sv1-targets-dr8.sv_cutout.png'), bbox_inches='tight')
return None
def _dr8_skies_cutouts():
''' compiled skies file for BGS SV tiles outside of the dr9sv regions
'''
sv = fitsio.read(os.path.join(dir_dat, f_svfields)) # new SV tiles
in_dr9 = _in_DR9_SVregion(sv['RA'], sv['DEC'])
print("%i tiles outside of DR9SV" % (np.sum(~in_dr9)))
# DR8 sky files
fskies = glob.glob('/global/cfs/cdirs/desi/target/catalogs/dr8/0.37.0/skies/*fits')
dr8_skies = []
for fsky in fskies:
print('... reading %s' % fsky)
sky = fitsio.read(fsky)
keep = np.zeros(len(sky['RA'])).astype(bool)
for tile_ra, tile_dec in zip(sv['RA'][~in_dr9], sv['DEC'][~in_dr9]):
keep = keep | (np.sqrt((sky['RA'] - tile_ra)**2 + (sky['DEC'] - tile_dec)**2) < 2.)
dr8_skies.append(sky[keep])
dr8_skies = np.concatenate(dr8_skies)
    # only keep unique TARGETID entries
_, uniq = np.unique(dr8_skies['TARGETID'], return_index=True)
fitsio.write(os.path.join(dir_dat, 'mtl', 'dr8_skies_cutout.fits'),
dr8_skies[uniq], clobber=True)
return None
######################################################################
# fiberassign
######################################################################
def run_fiberassign(sky_supp=False):
''' generate script for running fiberassign (for posterity) and run it
'''
assert os.environ['NERSC_HOST'] == 'cori'
if not sky_supp:
dir_out = '/global/cfs/cdirs/desi/users/chahah/fba_dr9sv.spec_truth.Apr2020'
else:
dir_out = '/global/cfs/cdirs/desi/users/chahah/fba_dr9sv.spec_truth.Apr2020.sky_supp'
dir_mtl = '/global/cfs/cdirs/desi/users/chahah/mtl_apr2020'
fmtls = glob.glob(os.path.join(dir_mtl, 'mtl*fits'))
fskies = glob.glob(os.path.join('/global/cfs/cdirs/desi/target/catalogs/dr9sv/0.37.0/skies/',
'skies*.fits'))
fskies += [os.path.join(dir_mtl, 'dr8_skies_cutout.fits')]
if sky_supp:
fsupps = glob.glob(os.path.join(
'/global/cfs/cdirs/desi/target/catalogs/dr9sv/0.37.0/skies/skies-supp',
'skies*.fits'))
fskies += fsupps
scrpt = '\n'.join([
'#!/bin/bash',
'export DESIMODEL="/global/cscratch1/sd/chahah/feasibgs/desimodel_0.12.0"',
'',
'odir="%s"' % dir_out,
'tfile="%s"' % os.path.join(dir_dat, f_svfields),
'',
'export OMP_NUM_THREADS=32',
'',
'mkdir ${odir}',
'rm ${odir}/*.fits',
'', #'export DESI_LOGLEVEL=DEBUG',
'fba_run --targets %s --sky %s --footprint ${tfile} --standards_per_petal 20 --sky_per_petal 80 --write_all_targets --dir ${odir} --overwrite | tee log.o' % (' '.join(fmtls), ' '.join(fskies)),
'',
'fba_merge_results --targets %s --dir ${odir}' % (' '.join(fmtls + fskies))
])
f = open('cori_dr9sv_fba.sh','w')
f.write(scrpt)
f.close()
os.system('sh cori_dr9sv_fba.sh')
os.system('cp cori_dr9sv_fba.sh %s/cori_dr9sv_fba.sh' % dir_out)
return None
def check_fba(sky_supp=False):
''' test target densities in the fiberassign output of DR9SV
'''
# all the fiberassign output files
if not sky_supp:
dir_fba = os.path.join(dir_cfs, 'fba_dr9sv.spec_truth.Apr2020')
else:
dir_fba = os.path.join(dir_cfs, 'fba_dr9sv.spec_truth.sky_supp')
f_fbas = glob.glob(os.path.join(dir_fba, 'fiberassign*.fits'))
# sarah's fiberassign files
#f_fbas = glob.glob(os.path.join(dir_fba, 'fba_dr9sv.sarah',
# 'fiberassign*.fits'))
n_zero = 0
n_nosky = 0
    tbl = ['\t'.join(['TILEID', 'SV1_BGS_TARGET', 'BGS_BRIGHT (0.45)', 'BGS_FAINT (0.25)',
'BGS_FAINT_EXT (0.125)', 'BGS_FIBMAG (0.125)', 'BGS_LOWQ (0.05)',
'MWS', 'STD', 'SKY', 'BAD', 'BLANK'])]
__n_bgs_bright, __n_bgs_faint, __n_bgs_extfaint, __n_bgs_fibmag, __n_bgs_lowq, __n_sky = [], [], [], [], [], []
for i, f in enumerate(f_fbas):
# read in tile
tile_i = fitsio.read(f)
if i == 0:
tile = tile_i
else:
tile = np.concatenate([tile, tile_i])
_n_bgs, _n_bgs_bright, _n_bgs_faint, _n_bgs_extfaint, _n_bgs_fibmag, _n_bgs_lowq = \
bgs_targetclass(tile_i['SV1_BGS_TARGET'])
_n_mws = np.sum(tile_i['SV1_MWS_TARGET'].astype(bool))
_n_std = np.sum(~tile_i['SV1_MWS_TARGET'].astype(bool) & (tile_i['SV1_DESI_TARGET'] &
desi_mask.mask('STD_FAINT|STD_WD|STD_BRIGHT')).astype(bool))
_n_sky = np.sum(tile_i['OBJTYPE'] == 'SKY')
_n_bad = np.sum(tile_i['OBJTYPE'] == 'BAD')
_n_blank = np.sum(tile_i['OBJTYPE'] == '')
if _n_bgs + _n_mws + _n_std + _n_sky + _n_bad + _n_blank < 5000:
print('--- %s ---' % f.split('-')[-1].split('.')[0])
notdesi = ~(tile_i['SV1_DESI_TARGET'].astype(bool))
notbgs = ~(tile_i['SV1_BGS_TARGET'].astype(bool))
notmws = ~(tile_i['SV1_MWS_TARGET'].astype(bool))
notsky = (tile_i['OBJTYPE'] != 'SKY')
notbad = (tile_i['OBJTYPE'] != 'BAD')
print(5000 - (_n_bgs + _n_mws + _n_std + _n_sky + _n_bad))
print(np.sum(notdesi & notbgs & notmws))
print(np.sum(notdesi & notbgs & notmws & notsky & notbad))
print(tile_i['OBJTYPE'][notdesi & notbgs & notmws & notsky &
notbad] )
raise ValueError
#not_any = ((tile_i['OBJTYPE'] != 'SKY') &
# (tile_i['OBJTYPE'] != 'BAD') &
# (tile_i['SV1_BGS_TARGET'] == 0) &
# (tile_i['SV1_MWS_TARGET'] == 0))
##print(tile_i['SV1_DESI_TARGET'][not_any])
#for i in range(58):
# if np.sum((tile_i['SV1_DESI_TARGET'][not_any] &
# desi_mask.mask(i)).astype(bool)) > 0:
# print('%i %s' %
# (np.sum((tile_i['SV1_DESI_TARGET'][not_any] & desi_mask.mask(i)).astype(bool)),
# desi_mask.bitname(i)))
__n_bgs_bright.append(_n_bgs_bright/_n_bgs)
__n_bgs_faint.append(_n_bgs_faint/_n_bgs)
__n_bgs_extfaint.append(_n_bgs_extfaint/_n_bgs)
__n_bgs_fibmag.append(_n_bgs_fibmag/_n_bgs)
__n_bgs_lowq.append(_n_bgs_lowq/_n_bgs)
print('---------------------------------')
print('tiles: %s' % os.path.basename(f))
print('total n_bgs = %i' % _n_bgs)
print(' nobj frac (expected frac)')
print(' ------------------------------------')
print(' BGS Bright %i %.3f (0.45)' % (_n_bgs_bright, _n_bgs_bright/_n_bgs))
print(' BGS Faint %i %.3f (0.25)' % (_n_bgs_faint, _n_bgs_faint/_n_bgs))
print(' BGS Ext.Faint %i %.3f (0.125)' % (_n_bgs_extfaint, _n_bgs_extfaint/_n_bgs))
print(' BGS Fib.Mag %i %.3f (0.125)' % (_n_bgs_fibmag, _n_bgs_fibmag/_n_bgs))
print(' BGS Low Q. %i %.3f (0.05)' % (_n_bgs_lowq, _n_bgs_lowq/_n_bgs))
print(' SKY %i' % _n_sky)
print(' BAD %i' % _n_bad)
__n_sky.append(_n_sky)
tbl.append('\t'.join([
'%s' % f.split('-')[-1].split('.')[0],
'%i' % _n_bgs,
'%i (%.3f)' % (_n_bgs_bright, _n_bgs_bright/_n_bgs),
'%i (%.3f)' % (_n_bgs_faint, _n_bgs_faint/_n_bgs),
'%i (%.3f)' % (_n_bgs_extfaint, _n_bgs_extfaint/_n_bgs),
'%i (%.3f)' % (_n_bgs_fibmag, _n_bgs_fibmag/_n_bgs),
'%i (%.3f)' % (_n_bgs_lowq, _n_bgs_lowq/_n_bgs),
'%i' % _n_mws,
'%i' % _n_std,
'%i' % _n_sky,
'%i' % _n_bad,
'%i' % _n_blank]))
# tiles with no sky targets
if _n_sky == 0: n_nosky += 1
# tiles with no BGS targets
if _n_bgs == 0: n_zero += 1
print('---------------------------------')
print('%i tiles with zero BGS targets' % n_zero)
print('%i tiles with zero SKY targets' % n_nosky)
n_bgs, n_bgs_bright, n_bgs_faint, n_bgs_extfaint, n_bgs_fibmag, n_bgs_lowq = \
bgs_targetclass(tile['SV1_BGS_TARGET'])
n_mws = np.sum(tile['SV1_MWS_TARGET'].astype(bool))
n_std = np.sum((tile['SV1_DESI_TARGET'] &
desi_mask.mask('STD_FAINT|STD_WD|STD_BRIGHT')).astype(bool))
n_sky = np.sum(tile['OBJTYPE'] == 'SKY')
n_bad = np.sum(tile['OBJTYPE'] == 'BAD')
n_blank = np.sum(tile['OBJTYPE'] == '')
print('---------------------------------')
print('total n_bgs = %i' % n_bgs)
print('total n_bgs = %i' % np.sum(tile['SV1_BGS_TARGET'] != 0))
print(' nobj frac (expected frac)')
print(' ------------------------------------')
print(' BGS Bright %i %.3f, %.3f-%.3f (0.45)' % (n_bgs_bright, n_bgs_bright/n_bgs, np.min(__n_bgs_bright), np.max(__n_bgs_bright)))
print(' BGS Faint %i %.3f, %.3f-%.3f (0.25)' % (n_bgs_faint, n_bgs_faint/n_bgs, np.min(__n_bgs_faint), np.max(__n_bgs_faint)))
print(' BGS Ext.Faint %i %.3f, %.3f-%.3f (0.125)' % (n_bgs_extfaint, n_bgs_extfaint/n_bgs, np.min(__n_bgs_extfaint), np.max(__n_bgs_extfaint)))
print(' BGS Fib.Mag %i %.3f, %.3f-%.3f (0.125)' % (n_bgs_fibmag, n_bgs_fibmag/n_bgs, np.min(__n_bgs_fibmag), np.max(__n_bgs_fibmag)))
print(' BGS Low Q. %i %.3f, %.3f-%.3f (0.05)' % (n_bgs_lowq, n_bgs_lowq/n_bgs, np.min(__n_bgs_lowq), np.max(__n_bgs_lowq)))
print(' SKY %i' % (np.mean(__n_sky)))
tbl.append('---------------------------------')
    print('   BGS Bright         %i' % np.sum((tile['SV1_BGS_TARGET'] & 2**0) != 0))
import numpy as np
import math
import random
import matplotlib.pyplot as plt
from scipy import stats
# declare number of particles used for object track estimation
particles = 100
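# `observations` is used below but never defined in this fragment; the value
# here is an assumed placeholder (number of observation time steps) added so
# the snippet runs.
observations = 100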
# declare arrays
likelihood = np.empty(particles) # calculate likelihood of estimate provided by the particle position
estimated = np.empty(observations) # stores estimated path of the particle
# initial particle position
particle_estimate = np.random.uniform(-0.5, 1, (particles))
# -*- coding: utf-8 -*-
"""
Created on Fri Oct 5 14:53:10 2018
@author: gregz
"""
import os.path as op
import sys
from astropy.io import fits
from astropy.table import Table
from utils import biweight_location
import numpy as np
from scipy.interpolate import LSQBivariateSpline, interp1d
from astropy.convolution import Gaussian1DKernel, interpolate_replace_nans
from astropy.convolution import convolve
from scipy.signal import medfilt, savgol_filter
from skimage.feature import register_translation
import argparse as ap
from input_utils import setup_logging
import warnings
from astropy.modeling.models import Polynomial2D
from astropy.modeling.fitting import LevMarLSQFitter
get_newwave = True
def get_script_path():
return op.dirname(op.realpath(sys.argv[0]))
DIRNAME = get_script_path()
blueinfo = [['BL', 'uv', 'multi_503_056_7001', [3640., 4640.], ['LL', 'LU'],
[4350., 4375.]], ['BR', 'orange', 'multi_503_056_7001',
[4660., 6950.], ['RU', 'RL'], [6270., 6470.]]]
redinfo = [['RL', 'red', 'multi_502_066_7002', [6450., 8400.], ['LL', 'LU'],
[7225., 7425.]], ['RR', 'farred', 'multi_502_066_7002',
[8275., 10500.], ['RU', 'RL'], [9280., 9530.]]]
parser = ap.ArgumentParser(add_help=True)
parser.add_argument("-b", "--basedir",
help='''base directory for reductions''',
type=str, default=None)
parser.add_argument("-s", "--side",
help='''blue for LRS2-B and red for LRS2-R''',
type=str, default='blue')
parser.add_argument("-scd", "--scidateobsexp",
help='''Example: "20180112,lrs20000027,exp01"''',
type=str, default=None)
parser.add_argument("-skd", "--skydateobsexp",
help='''Example: "20180112,lrs20000027,exp01"''',
type=str, default=None)
targs = ["-b", "/Users/gregz/cure/reductions",
"-s", "red", "-scd", "20181108,lrs20000025,exp01", "-skd",
"20181108,lrs20000024,exp01"]
args = parser.parse_args(args=targs)
args.log = setup_logging('test_skysub')
if args.scidateobsexp is None:
args.log.error('--scidateobsexp/-scd was not set.')
sys.exit(1)
if args.skydateobsexp is None:
args.log.error('--skydateobsexp/-skd was not set.')
sys.exit(1)
if args.side == 'blue':
list_of_blue = [args.scidateobsexp.split(',') +
args.skydateobsexp.split(',')]
if args.side == 'red':
list_of_red = [args.scidateobsexp.split(',') +
args.skydateobsexp.split(',')]
basedir = op.join(args.basedir, '%s/lrs2/%s/%s/lrs2/%s')
skyline_file = op.join(DIRNAME, 'lrs2_config/%s_skylines.dat')
def make_frame(xloc, yloc, data, wave, dw, Dx, Dy, wstart=5700.,
wend=5800., scale=0.4, seeing_fac=1.3):
seeing = seeing_fac * scale
a, b = data.shape
x = np.arange(xloc.min()-scale,
xloc.max()+1*scale, scale)
y = np.arange(yloc.min()-scale,
yloc.max()+1*scale, scale)
xgrid, ygrid = np.meshgrid(x, y)
zgrid = np.zeros((b,)+xgrid.shape)
area = 3. / 4. * np.sqrt(3.) * 0.59**2
for k in np.arange(b):
sel = np.isfinite(data[:, k])
D = np.sqrt((xloc[:, np.newaxis, np.newaxis] - Dx[k] - xgrid)**2 +
(yloc[:, np.newaxis, np.newaxis] - Dy[k] - ygrid)**2)
W = np.exp(-0.5 / (seeing/2.35)**2 * D**2)
N = W.sum(axis=0)
zgrid[k, :, :] = ((data[sel, k][:, np.newaxis, np.newaxis] *
W[sel]).sum(axis=0) / N / scale**2 / area)
wi = np.searchsorted(wave, wstart, side='left')
    we = np.searchsorted(wave, wend, side='right')
import numpy as np
from scipy.linalg import eigh
import voice_activity_detector
import features_extraction
import statistics
import utils
def get_sigma(ubm, space_dimension):
sigma = np.zeros(shape=(len(ubm.covariances) * len(ubm.covariances[0])))
k = 0
for i in range(len(ubm.covariances[0])):
for j in range(len(ubm.covariances)):
sigma[k] = ubm.covariances[j][i]
k += 1
repeat_sigma = np.repeat(np.transpose(sigma)[:, np.newaxis],
space_dimension, axis=1)
return repeat_sigma
def save_i_vector_model(path, i_vector, speaker, components_number):
f = open(
path + "/ivectors/" + speaker + "_ivector_model_" +
str(components_number) + ".txt",
"wb")
np.save(f, i_vector)
f.close
def load_i_vector_model(path, speaker, components_number):
f = open(
path + "/ivectors/" + speaker + "_ivector_model_" +
str(components_number) + ".txt",
"rb")
i_vector = np.load(f)
f.close
return i_vector
def save_i_vectors(path, i_vectors, speaker, components_number):
f = open(
path + "/ivectors/" + speaker + "_ivector_" + str(
components_number) +
".txt",
"wb")
np.save(f, i_vectors)
f.close
def extract_i_vector_from_signal(ubm, utterance_path, t_matrix,
space_dimension,
mfcc_number, frame_duration, step_duration,
sigma):
t_matrix_divides_sigma = np.divide(t_matrix, sigma)
t_matrix_divides_sigma_transpose = np.transpose(t_matrix_divides_sigma)
identity_matrix = np.eye(space_dimension, dtype=float)
vad_object = voice_activity_detector.Vad(utterance_path, 2)
signal_samples, sample_rate = vad_object.get_speech_signal()
del vad_object
mfcc = features_extraction.FeaturesExtraction(mfcc_number, True,
frame_duration,
step_duration)
features = mfcc.extract_mfcc_from_signal(signal_samples, sample_rate)
log_likelihood = statistics.log_likelihood_computation(features, ubm)
n, f, s = statistics.statistics_computation(log_likelihood, features)
# first order statistics are centered by the mean vector
f = np.subtract(f, np.multiply(np.transpose(
np.repeat(n[:, np.newaxis], np.shape(ubm.means)[1], axis=1)),
np.transpose(ubm.means)))
# i-vector computation
i1 = np.matmul(np.transpose(
np.multiply(t_matrix_divides_sigma,
np.repeat(
np.transpose(np.repeat(n, np.shape(features)[1]))[:,
np.newaxis], space_dimension, axis=1))), t_matrix)
i2 = np.matmul(np.linalg.pinv(np.add(identity_matrix, i1)),
t_matrix_divides_sigma_transpose)
i3 = []
for i in range(np.shape(f)[1]):
if i == 0:
i3 = np.transpose(f)[i]
else:
i3 = np.concatenate((i3, np.transpose(f)[i]), axis=0)
    i_vector = np.matmul(i2, i3)
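    # Note (added): the computation above is the standard i-vector posterior
    # mean, i = (I + T' Sigma^-1 N T)^-1 T' Sigma^-1 f, with N and f the
    # zeroth- and mean-centred first-order Baum-Welch statistics. The original
    # fragment ends here; returning the i-vector is the assumed final step.
    return i_vector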
import numpy as np
import torch
from torch.utils.data import DataLoader
from torch.utils.data.sampler import BatchSampler
from tqdm import tqdm
class BalancedBatchSampler(BatchSampler):
"""
BatchSampler - from a MNIST-like dataset, samples n_classes and within these classes samples n_samples.
Returns batches of size n_classes * n_samples
"""
def __init__(self, dataset, n_classes, n_samples):
loader = DataLoader(dataset)
self.labels_list = []
for i, data in enumerate(tqdm(loader)):
label = data["label"]
# print(label)
self.labels_list.append(label)
self.labels = torch.LongTensor(self.labels_list)
self.labels_set = list(set(self.labels.numpy()))
print(f"Label set: {self.labels_set}")
self.label_to_indices = {label: np.where(self.labels.numpy() == label)[0]
for label in self.labels_set}
self.array_of_labels = []
for l in self.labels_set:
            np.random.shuffle(self.label_to_indices[l])
import gym
import time
import tensorflow as tf
import tflearn
import numpy as np
import matplotlib
import seaborn as sns
env = gym.make("CartPole-v0")
observation = tflearn.input_data(shape=[None, 4])
net = tflearn.fully_connected(observation, 256, activation="relu")
net = tflearn.fully_connected(net, 256, activation="relu")
net = tflearn.fully_connected(net, 256, activation="relu")
out = tflearn.fully_connected(net, 2, activation="softmax")
reward_holder = tf.placeholder(tf.float32, [None])
action_holder = tf.placeholder(tf.int32, [None])
responsible_outputs = tf.gather(tf.reshape(out, [-1]), tf.range(0, tf.shape(out)[0] * tf.shape(out)[1], 2) + action_holder)
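# The gather above selects, for each sample i in the batch, the probability of
# the chosen action from the flattened [batch, 2] softmax output: the flat
# index of row i, action a is 2*i + a, and tf.range(0, 2*batch, 2) supplies
# the 2*i offsets.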
loss = -tf.reduce_mean(tf.log(responsible_outputs) * reward_holder)
optimizer = tf.train.AdamOptimizer()
update = optimizer.minimize(loss)
gamma = 0.99
def discount_reward(rewards):
running_reward = 0
result = np.zeros_like(rewards)
for i in reversed(range(len(rewards))):
result[i] = rewards[i] + gamma * running_reward
running_reward += rewards[i]
return result
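# Worked example (added note): with gamma = 0.99 and rewards [1, 1, 1] the
# function returns [1 + 0.99*2, 1 + 0.99*1, 1 + 0.99*0] = [2.98, 1.99, 1.0];
# each reward is boosted by gamma times the undiscounted sum of the rewards
# that follow it (note this differs from the usual recursive discounted
# return, which would give [2.9701, 1.99, 1.0]).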
num_episodes = 1500
max_time = 200
all_rewards = []
saver = tf.train.Saver()
train_data = []
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
for i in range(num_episodes):
obs = env.reset()
episode_reward = 0
ep_history = []
for j in range(max_time):
#Choose an action
a_one_hot = sess.run(out, feed_dict={observation: [obs]}).reshape(2)
action = np.random.choice(a_one_hot, p=a_one_hot)
            action = np.argmax(a_one_hot == action)
#%% [markdown]
# # Evaluate clustering concordance
#%%
import datetime
import pprint
import time
import matplotlib.pyplot as plt
import networkx as nx
import numpy as np
import pandas as pd
import seaborn as sns
from anytree import LevelOrderGroupIter, Node, NodeMixin, Walker
from matplotlib.colors import ListedColormap
from mpl_toolkits.axes_grid1 import make_axes_locatable
from sklearn.metrics import adjusted_rand_score, rand_score
from giskard.plot import stacked_barplot
from graspologic.align import OrthogonalProcrustes, SeedlessProcrustes
from graspologic.cluster import DivisiveCluster
from graspologic.embed import (
AdjacencySpectralEmbed,
OmnibusEmbed,
select_dimension,
selectSVD,
)
from graspologic.plot import pairplot
from graspologic.utils import (
augment_diagonal,
binarize,
is_fully_connected,
multigraph_lcc_intersection,
pass_to_ranks,
to_laplacian,
)
from pkg.data import load_adjacency, load_maggot_graph, load_node_meta
from pkg.io import savefig
from pkg.plot import set_theme
from pkg.utils import get_paired_inds, get_paired_subgraphs, set_warnings
from src.visualization import CLASS_COLOR_DICT as palette
from src.visualization import adjplot # TODO fix graspologic version and replace here
from pkg.flow import signal_flow
from pkg.utils import get_paired_inds
set_warnings()
t0 = time.time()
def stashfig(name, **kwargs):
foldername = "cluster_concordance"
savefig(name, foldername=foldername, **kwargs)
set_theme()
#%% [markdown]
# ### Load the data
#%%
mg = load_maggot_graph()
mg = mg[mg.nodes["paper_clustered_neurons"] | mg.nodes["accessory_neurons"]]
mg.to_largest_connected_component()
mg.fix_pairs()
mg.nodes["sf"] = signal_flow(mg.sum.adj)
mg.nodes["_inds"] = range(len(mg.nodes))
lp_inds, rp_inds = get_paired_inds(mg.nodes)
(mg.nodes.iloc[lp_inds]["pair"] == mg.nodes.iloc[rp_inds].index).all()
#%% [markdown]
# ## Evaluate cluster concordance between same or different hemispheres
#%% [markdown]
# ## Run multiple rounds of embedding/clustering each hemisphere independently
#%%
def preprocess_adjs(adjs, method="ase"):
    """Preprocessing necessary prior to embedding a graph; operates on a list.
    Parameters
    ----------
    adjs : list of adjacency matrices
        Graphs to preprocess.
    method : str, optional
        Embedding the preprocessing is intended for: "ase" (adjacency spectral
        embedding, the default) or "lse" (Laplacian spectral embedding).
    Returns
    -------
    list of ndarray
        The preprocessed adjacency (or Laplacian) matrices.
    """
adjs = [pass_to_ranks(a) for a in adjs]
adjs = [a + 1 / a.size for a in adjs]
if method == "ase":
adjs = [augment_diagonal(a) for a in adjs]
elif method == "lse": # haven't really used much. a few params to look at here
adjs = [to_laplacian(a) for a in adjs]
return adjs
def svd(X, n_components=None):
return selectSVD(X, n_components=n_components, algorithm="full")[0]
n_omni_components = 8 # this is used for all of the embedings initially
n_svd_components = 16 # this is for the last step
method = "ase" # one could also do LSE
n_init = 1
cluster_kws = dict(affinity=["euclidean", "manhattan", "cosine"])
rows = []
for side in ["left", "right"]:
# TODO this is ignoring the contralateral connections!
print("Preprocessing...")
# side_mg = mg[mg.nodes[side]]
# side_mg.to_largest_connected_component()
# adj = side_mg.sum.adj
if side == "left":
inds = lp_inds
else:
inds = rp_inds
    adj = mg.sum.adj[np.ix_(inds, inds)]
# Copyright 2020 Makani Technologies LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Scoring functions relating to loads."""
import json
import os
import makani
from makani.analysis.aero import apparent_wind_util
from makani.analysis.control import geometry
from makani.control import control_types
from makani.lib.python import c_helpers
from makani.lib.python.batch_sim import scoring_functions
import numpy as np
from scipy import interpolate
import scoring_functions_util as scoring_util
_FLIGHT_MODE_HELPER = c_helpers.EnumHelper('FlightMode', control_types)
class YbAccelerationScoringFunction(
scoring_functions.DoubleSidedLimitScoringFunction):
"""Tests if the body-y acceleration falls outside of acceptable limits."""
def __init__(self, bad_lower_limit, good_lower_limit, good_upper_limit,
bad_upper_limit, severity):
super(YbAccelerationScoringFunction, self).__init__(
'Acceleration', 'm/s^2', bad_lower_limit, good_lower_limit,
good_upper_limit, bad_upper_limit, severity)
def GetSystemLabels(self):
return ['loads']
def GetValue(self, output):
return np.array([output['yb_accel_min'],
output['yb_accel_max']])
def GetOutput(self, timeseries):
return {
'yb_accel_min': np.min(timeseries['yb_accel']),
'yb_accel_max': np.max(timeseries['yb_accel'])
}
def GetTimeSeries(self, params, sim, control):
yb_accel = self._SelectTelemetry(sim, control, 'wing_acc')['y']
return {'yb_accel': yb_accel}
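# Note (added): each scoring function below follows the same pattern --
# GetTimeSeries pulls the relevant telemetry, GetOutput reduces it to summary
# statistics, and GetValue returns the value(s) that are compared against the
# good/bad limits passed to the base-class constructor.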
class ZgAccelerationScoringFunction(
scoring_functions.SingleSidedLimitScoringFunction):
"""Tests the kite vertical acceleration in ground frame."""
def __init__(self, good_limit, bad_limit, severity):
super(ZgAccelerationScoringFunction, self).__init__(
'Wing Accel Zg', 'm/s^2', good_limit, bad_limit, severity)
def GetSystemLabels(self):
return ['controls']
def GetValue(self, output):
return np.array([output['zg_accel_min'],
output['zg_accel_max']])
def GetOutput(self, timeseries):
return {
'zg_accel_min': np.min(timeseries['zg_accel']),
'zg_accel_max': np.max(timeseries['zg_accel'])
}
def GetTimeSeries(self, params, sim, control):
zg_accel = self._SelectTelemetry(sim, control, 'wing_acc')['z']
return {'zg_accel': zg_accel}
class MaxServoMoment(
scoring_functions.SingleSidedLimitScoringFunction):
"""Tests if a maximum hinge moment limit is met on any of the servos."""
def __init__(self, good_limit, bad_limit, severity):
super(MaxServoMoment, self).__init__(
'Max Servo Moment', 'N.m', good_limit, bad_limit, severity)
def GetSystemLabels(self):
return ['loads']
def GetValue(self, output):
return output['servo_moment_max']
def GetOutput(self, timeseries):
max_moment = np.max(timeseries['servo_torques_abs'], axis=0)
return {
'servo_moment_max': max_moment.tolist()
}
def GetTimeSeries(self, params, sim, control):
servo_torques = self._SelectTelemetry(sim, control,
'servo_shaft_torques')
servo_torques_abs = np.abs(np.array(servo_torques))
return {'servo_torques_abs': servo_torques_abs}
class MaxElevatorServoMoment(
scoring_functions.SingleSidedLimitScoringFunction):
"""Tests the maximum hinge moment for the elevator servo pair."""
def __init__(self, good_limit, bad_limit, severity):
super(MaxElevatorServoMoment, self).__init__(
'Max Elevator Servo Moment', 'N.m', good_limit, bad_limit, severity)
def GetSystemLabels(self):
return ['loads']
def GetValue(self, output):
return output['elev_hm_max']
def GetOutput(self, timeseries):
return {'elev_hm_max': np.max(timeseries['elev_hm'])}
def GetTimeSeries(self, params, sim, control):
servo_torques = self._SelectTelemetry(sim, control,
'servo_shaft_torques')
if scoring_util.IsSelectionValid(servo_torques):
elev_hm_1 = servo_torques[:, 6]
elev_hm_2 = servo_torques[:, 7]
else:
# Servo_torques are returned as np.array([float('nan')]) for flight logs.
elev_hm_1 = np.array([float('nan')])
elev_hm_2 = np.array([float('nan')])
summed_moment = np.abs(elev_hm_1) + np.abs(elev_hm_2)
return {'elev_hm': summed_moment}
class MaxRudderServoMoment(
scoring_functions.SingleSidedLimitScoringFunction):
"""Tests the maximum hinge moment for the rudder servo pair."""
def __init__(self, good_limit, bad_limit, severity):
super(MaxRudderServoMoment, self).__init__(
'Max Rudder Servo Moment', 'N.m', good_limit, bad_limit, severity)
def GetSystemLabels(self):
return ['loads']
def GetValue(self, output):
return output['rud_hm_max']
def GetOutput(self, timeseries):
return {'rud_hm_max': np.max(timeseries['rud_hm'])}
def GetTimeSeries(self, params, sim, control):
servo_torques = self._SelectTelemetry(sim, control,
'servo_shaft_torques')
if scoring_util.IsSelectionValid(servo_torques):
rud_hm_1 = servo_torques[:, 8]
rud_hm_2 = servo_torques[:, 9]
else:
# Servo_torques are returned as np.array([float('nan')]) for flight logs.
rud_hm_1 = np.array([float('nan')])
rud_hm_2 = np.array([float('nan')])
summed_moment = np.abs(rud_hm_1) + np.abs(rud_hm_2)
return {'rud_hm': summed_moment}
class MaxRotorInPlaneMoment(
scoring_functions.SingleSidedLimitScoringFunction):
"""Tests if the limit on maximum rotor in-plane moment is met."""
def __init__(self, good_limit, bad_limit, severity):
super(MaxRotorInPlaneMoment, self).__init__(
'Max Rotor In-plane Moment', 'N.m', good_limit, bad_limit, severity)
# TODO: Pull stability tables based on rotor rev.
path = 'database/m600/rotor_rev4_stability_tables.json'
with open(os.path.join(makani.HOME, path), 'r') as f:
lookup_tables = json.load(f)
self._omegas = lookup_tables['omegas']
self._v_freestreams = lookup_tables['v_freestreams']
self._my_a_cw = np.array(lookup_tables['My_a_cw'])
self._mz_a_cw = np.array(lookup_tables['Mz_a_cw'])
self._my_a_ccw = np.array(lookup_tables['My_a_ccw'])
self._mz_a_ccw = np.array(lookup_tables['Mz_a_ccw'])
def GetSystemLabels(self):
return ['loads', 'experimental']
def GetValue(self, output):
return output['rotor_moment_max']
def GetOutput(self, timeseries):
rotor_moments = timeseries['rotor_moments']
m_max = 0.0
for nr in rotor_moments:
m_res = np.linalg.norm(rotor_moments[nr], axis=1)
if m_res.size != 0:
m_max = max(m_max, m_res.max())
return {'rotor_moment_max': m_max}
def GetTimeSeries(self, params, sim, control):
# TODO: This scoring function takes a while to evaluate, needs
# some attention to reduce execution time.
# Table look-up
v = self._v_freestreams
o = self._omegas
my_cw_lookup = interpolate.RectBivariateSpline(v, o, self._my_a_cw.T,
kx=1, ky=1)
mz_cw_lookup = interpolate.RectBivariateSpline(v, o, self._mz_a_cw.T,
kx=1, ky=1)
my_ccw_lookup = interpolate.RectBivariateSpline(v, o, self._my_a_ccw.T,
kx=1, ky=1)
mz_ccw_lookup = interpolate.RectBivariateSpline(v, o, self._mz_a_ccw.T,
kx=1, ky=1)
rotors_dir = params['system_params']['rotors']['dir']
rotors_axis = params['system_params']['rotors']['axis']
# Note: rotors_inertia is rotational inertia of rotor and motor.
rotors_inertia = params['system_params']['rotors']['I']
param_names = ['airspeed', 'alpha', 'beta',
'rotor_speeds', 'rotor_gyro_moments', 'body_rates']
(vapp, alpha, beta, rotor_omega, gyro_moments_xyz,
body_rates) = self._SelectTelemetry(sim, control, param_names)
# Transformation: Standard kite body (b) to standard hub fixed (h)
# (x-forward, z-downward)
dcm_b2h = np.array(geometry.AngleToDcm(0.0, np.deg2rad(-3.0), 0.0))
# Transformation: Geometric hub fixed (gh, X-rearward, Z-upward)) to
# standard hub fixed (h)
dcm_gh2h = np.array(geometry.AngleToDcm(0.0, np.pi, 0.0))
# Kite apparent speed components in (h)
vk = vapp[np.newaxis].T * np.matmul(dcm_b2h, np.transpose(
np.array([[-np.cos(alpha)*np.cos(beta)],
[-np.sin(beta)],
[-np.sin(alpha)*np.cos(beta)]]), (2, 0, 1)))[:, :, 0]
# Rotor apparent speed in spherical coordinates
va = np.linalg.norm(vk, axis=1)
a = -np.arctan2(np.hypot(vk[:, 1], vk[:, 2]), vk[:, 0])
t = -np.arctan2(vk[:, 2], vk[:, 1])
# Geometric wind aligned (gw) to geometric hub fixed (gh)
# TODO: Vectorize this function.
dcm_gw2gh = np.ndarray((len(t), 3, 3))
for i in range(len(t)):
dcm_gw2gh[i, :, :] = geometry.AngleToDcm(0.0, 0.0, t[i])
# Gyroscopic moment components in (h)
if scoring_util.IsSelectionValid(gyro_moments_xyz):
gyro_moment_y = gyro_moments_xyz['y']
gyro_moment_z = gyro_moments_xyz['z']
else:
angular_momentum_h = (rotors_inertia[0, :] * rotor_omega
* rotors_dir[0, :])
axis = np.concatenate([rotors_axis['x'], rotors_axis['y'],
rotors_axis['z']])
angular_momentum_b = np.multiply(
np.transpose(angular_momentum_h[np.newaxis], (1, 0, 2)),
axis[np.newaxis])
body_omega = np.concatenate([[body_rates['x']], [body_rates['y']],
[body_rates['z']]])
gyro_moment_b = np.cross(angular_momentum_b,
np.transpose(body_omega[np.newaxis], (2, 1, 0)),
axis=1)
gyro_moment_y = gyro_moment_b[:, 1, :]
gyro_moment_z = gyro_moment_b[:, 2, :]
gyro_moment = np.zeros((gyro_moment_y.shape[0], gyro_moment_y.shape[1],
3, 1))
gyro_moment[:, :, 1, 0] = gyro_moment_y
gyro_moment[:, :, 2, 0] = gyro_moment_z
m_gyro = np.matmul(dcm_b2h, gyro_moment)[:, :, :, 0]
v_freestream_in_range = np.logical_and(
va >= np.min(self._v_freestreams),
va <= np.max(self._v_freestreams))
# Loop on 8 rotors
m_totals = {}
for nr in range(0, 8):
rotor_omega_cur = rotor_omega[:, nr]
omega_in_range = np.logical_and(
rotor_omega_cur >= np.min(self._omegas),
rotor_omega_cur <= np.max(self._omegas))
in_range = np.logical_and(omega_in_range, v_freestream_in_range)
# Table look-up
if rotors_dir[0, nr] > 0:
my_aero_w = (my_cw_lookup(va[in_range], rotor_omega_cur[in_range],
grid=False) * a[in_range])
mz_aero_w = (mz_cw_lookup(va[in_range], rotor_omega_cur[in_range],
grid=False) * a[in_range])
else:
my_aero_w = (my_ccw_lookup(va[in_range], rotor_omega_cur[in_range],
grid=False) * a[in_range])
mz_aero_w = (mz_ccw_lookup(va[in_range], rotor_omega_cur[in_range],
grid=False) * a[in_range])
# Aerodynamic moment components in (h)
m_aero_w = np.zeros((my_aero_w.shape[0], 3, 1))
m_aero_w[:, 1, 0] = my_aero_w
m_aero_w[:, 2, 0] = mz_aero_w
m_aero = np.matmul(dcm_gh2h, np.matmul(dcm_gw2gh[in_range, :, :],
m_aero_w))[:, :, 0]
# Total resultant in-plane moment
m_totals[nr] = m_aero + m_gyro[in_range, nr]
return {
'rotor_moments': m_totals
}
class MaxWingBendingFailureIndex(
scoring_functions.SingleSidedLimitScoringFunction):
"""Tests if the limit on maximum wing bending is met."""
def __init__(self, good_limit, bad_limit, severity,
aero_tension_limit=1.0, accel_limit=1.0, domega_limit=1.0):
super(MaxWingBendingFailureIndex, self).__init__(
'Max Wing Bending Failure Index', '-', good_limit, bad_limit, severity)
self._aero_tension_limit = aero_tension_limit
self._accel_limit = accel_limit
self._domega_limit = domega_limit
def GetSystemLabels(self):
return ['loads']
def GetValue(self, output):
return output['wing_bending_failure_index']
def GetOutput(self, timeseries):
wing_bending_failure_indices = timeseries['wing_bending_failure_indices']
return {
'wing_bending_failure_index': np.max(wing_bending_failure_indices)
}
def GetTimeSeries(self, params, sim, control):
# Exclude Perched, PilotHover, HoverAscend and HoverDescend flight modes.
flight_mode_exclusion_list = ['kFlightModePilotHover', 'kFlightModePerched',
'kFlightModeHoverAscend',
'kFlightModeHoverDescend']
flight_modes = []
for name in _FLIGHT_MODE_HELPER.Names():
if name not in flight_mode_exclusion_list:
flight_modes.append(name)
tension, wing_acc, omega_i, domega_i, time_i = (
self._SelectTelemetry(sim, control,
['tether_tension', 'wing_acc',
'body_rates', 'angular_acc', 'time'],
flight_modes=flight_modes))
# Check if body rates data exist. These may not exist if the relevant
# flight modes do not exist.
if not (scoring_util.IsSelectionValid(omega_i) and
scoring_util.IsSelectionValid(wing_acc)):
return {'wing_bending_failure_indices': np.array(float('nan'))}
# Create angular acceleration data for flight logs.
if scoring_util.IsSelectionValid(domega_i):
domega = domega_i
else:
domega = {'x': np.gradient(omega_i['x'], time_i),
'y': | np.gradient(omega_i['y'], time_i) | numpy.gradient |
# https://stackoverflow.com/questions/34276663/tkinter-gui-layout-using-frames-and-grid
import time
import traceback
import multiprocessing as mp
from multiprocessing.managers import BaseManager
import threading
import logging
import numpy as np
import tkinter as tk
from PIL import Image, ImageTk
from config import Config
from message import Message
from messagebus_manager import MessageBusManager, ProcessNames
#class MessageBusManager(BaseManager):
# pass
class ProcessMessages (threading.Thread):
def __init__(self, _name, config, mbus, on_state_change, on_error):
threading.Thread.__init__(self)
self.name = _name
self.config = config
self.message_bus = mbus
self.callback = on_state_change
self.on_error_callback = on_error
self.daemon = True
return
def run(self):
self.config.log.info("control tower message processing thread has started (%s)" % self.name )
self.message_bus.subscribe(topic = "map_updates", subscriber=self.name)
while True:
try:
                msg = self.message_bus.receive(self.name, latest_message=True) # latest_message=True because we only care about the latest (discard older messages)
self.config.log.info("control tower received message %s" % msg.cmd )
data = | np.array(msg.params['grid']) | numpy.array |
import importlib
import time
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy.spatial.distance import jensenshannon
from scipy.stats import gaussian_kde
from ..core.prior import PriorDict
from ..core.sampler.base_sampler import SamplerError
from ..core.utils import logger, reflect
from ..gw.source import PARAMETER_SETS
class ProposalCycle(object):
def __init__(self, proposal_list):
self.proposal_list = proposal_list
self.weights = [prop.weight for prop in self.proposal_list]
self.normalized_weights = [w / sum(self.weights) for w in self.weights]
self.weighted_proposal_list = [
np.random.choice(self.proposal_list, p=self.normalized_weights)
for _ in range(10 * int(1 / min(self.normalized_weights)))
]
self.nproposals = len(self.weighted_proposal_list)
self._position = 0
@property
def position(self):
return self._position
@position.setter
def position(self, position):
self._position = np.mod(position, self.nproposals)
def get_proposal(self):
prop = self.weighted_proposal_list[self._position]
self.position += 1
return prop
def __str__(self):
string = "ProposalCycle:\n"
for prop in self.proposal_list:
string += f" {prop}\n"
return string
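# Usage sketch for ProposalCycle (added for illustration; the calls below are
# assumptions based on the classes defined later in this module):
#
#   cycle = ProposalCycle([FixedGaussianProposal(priors, weight=3),
#                          DifferentialEvolutionProposal(priors, weight=1)])
#   prop = cycle.get_proposal()  # draws from a pre-shuffled 3:1 weighted list
#
# get_proposal() walks the pre-built weighted list, so repeated calls return
# proposals in proportion to their weights.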
class BaseProposal(object):
_accepted = 0
_rejected = 0
__metaclass__ = ABCMeta
def __init__(self, priors, weight=1, subset=None):
self._str_attrs = ["acceptance_ratio", "n"]
self.parameters = priors.non_fixed_keys
self.weight = weight
self.subset = subset
# Restrict to a subset
if self.subset is not None:
self.parameters = [p for p in self.parameters if p in subset]
self._str_attrs.append("parameters")
self.ndim = len(self.parameters)
self.prior_boundary_dict = {key: priors[key].boundary for key in priors}
self.prior_minimum_dict = {key: np.max(priors[key].minimum) for key in priors}
self.prior_maximum_dict = {key: np.min(priors[key].maximum) for key in priors}
self.prior_width_dict = {key: np.max(priors[key].width) for key in priors}
@property
def accepted(self):
return self._accepted
@accepted.setter
def accepted(self, accepted):
self._accepted = accepted
@property
def rejected(self):
return self._rejected
@rejected.setter
def rejected(self, rejected):
self._rejected = rejected
@property
def acceptance_ratio(self):
if self.n == 0:
return np.nan
else:
return self.accepted / self.n
@property
def n(self):
return self.accepted + self.rejected
def __str__(self):
msg = [f"{type(self).__name__}("]
for attr in self._str_attrs:
val = getattr(self, attr, "N/A")
if isinstance(val, (float, int)):
val = f"{val:1.2g}"
msg.append(f"{attr}:{val},")
return "".join(msg) + ")"
def apply_boundaries(self, point):
for key in self.parameters:
boundary = self.prior_boundary_dict[key]
if boundary is None:
continue
elif boundary == "periodic":
point[key] = self.apply_periodic_boundary(key, point[key])
elif boundary == "reflective":
point[key] = self.apply_reflective_boundary(key, point[key])
else:
raise SamplerError(f"Boundary {boundary} not implemented")
return point
def apply_periodic_boundary(self, key, val):
minimum = self.prior_minimum_dict[key]
width = self.prior_width_dict[key]
return minimum + np.mod(val - minimum, width)
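    # Worked example for the periodic wrap above (illustration): with a prior
    # on [0, 2*pi) and a proposed value of 7.0, the return value is
    # 0 + np.mod(7.0 - 0, 2*pi) ~= 0.717, i.e. the point re-enters the support
    # from the lower edge instead of being rejected.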
def apply_reflective_boundary(self, key, val):
minimum = self.prior_minimum_dict[key]
width = self.prior_width_dict[key]
val_normalised = (val - minimum) / width
val_normalised_reflected = reflect(np.array(val_normalised))
return minimum + width * val_normalised_reflected
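    # Worked example for the reflective fold above (illustration): with a prior
    # on [0, 1] and a proposed value of 1.2, val_normalised = 1.2 and
    # reflect(1.2) = 0.8, so the value returned is 0 + 1 * 0.8 = 0.8.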
def __call__(self, chain):
sample, log_factor = self.propose(chain)
sample = self.apply_boundaries(sample)
return sample, log_factor
@abstractmethod
def propose(self, chain):
"""Propose a new point
This method must be overwritten by implemented proposals. The propose
method is called by __call__, then boundaries applied, before returning
the proposed point.
Parameters
----------
chain: bilby.core.sampler.bilby_mcmc.chain.Chain
The chain to use for the proposal
Returns
-------
proposal: bilby.core.sampler.bilby_mcmc.Sample
The proposed point
log_factor: float
The natural-log of the additional factor entering the acceptance
probability to ensure detailed balance. For symmetric proposals,
a value of 0 should be returned.
"""
pass
@staticmethod
def check_dependencies(warn=True):
"""Check the dependencies required to use the proposal
Parameters
----------
warn: bool
If true, print a warning
Returns
-------
check: bool
If true, dependencies exist
"""
return True
class FixedGaussianProposal(BaseProposal):
"""A proposal using a fixed non-correlated Gaussian distribution
Parameters
----------
priors: bilby.core.prior.PriorDict
The set of priors
weight: float
Weighting factor
subset: list
A list of keys for which to restrict the proposal to (other parameters
will be kept fixed)
sigma: float
The scaling factor for proposals
"""
def __init__(self, priors, weight=1, subset=None, sigma=0.01):
super(FixedGaussianProposal, self).__init__(priors, weight, subset)
self.sigmas = {}
for key in self.parameters:
if np.isinf(self.prior_width_dict[key]):
self.prior_width_dict[key] = 1
if isinstance(sigma, float):
self.sigmas[key] = sigma
elif isinstance(sigma, dict):
self.sigmas[key] = sigma[key]
else:
raise SamplerError("FixedGaussianProposal sigma not understood")
def propose(self, chain):
sample = chain.current_sample
for key in self.parameters:
sigma = self.prior_width_dict[key] * self.sigmas[key]
sample[key] += sigma * np.random.randn()
log_factor = 0
return sample, log_factor
class AdaptiveGaussianProposal(BaseProposal):
def __init__(
self,
priors,
weight=1,
subset=None,
sigma=1,
scale_init=1e0,
stop=1e5,
target_facc=0.234,
):
super(AdaptiveGaussianProposal, self).__init__(priors, weight, subset)
self.sigmas = {}
for key in self.parameters:
if np.isinf(self.prior_width_dict[key]):
self.prior_width_dict[key] = 1
if isinstance(sigma, (float, int)):
self.sigmas[key] = sigma
elif isinstance(sigma, dict):
self.sigmas[key] = sigma[key]
else:
raise SamplerError("AdaptiveGaussianProposal sigma not understood")
self.target_facc = target_facc
self.scale = scale_init
self.stop = stop
self._str_attrs.append("scale")
self._last_accepted = 0
def propose(self, chain):
sample = chain.current_sample
self.update_scale(chain)
if np.random.random() < 1e-3:
factor = 1e1
elif np.random.random() < 1e-4:
factor = 1e2
else:
factor = 1
for key in self.parameters:
sigma = factor * self.scale * self.prior_width_dict[key] * self.sigmas[key]
sample[key] += sigma * np.random.randn()
log_factor = 0
return sample, log_factor
def update_scale(self, chain):
"""
The adaptation of the scale follows (35)/(36) of https://arxiv.org/abs/1409.7215
"""
if 0 < self.n < self.stop:
s_gamma = (self.stop / self.n) ** 0.2 - 1
if self.accepted > self._last_accepted:
self.scale += s_gamma * (1 - self.target_facc) / 100
else:
self.scale -= s_gamma * self.target_facc / 100
self._last_accepted = self.accepted
self.scale = max(self.scale, 1 / self.stop)
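    # Numerical sketch of the update above: with stop=1e5 and n=1e3,
    # s_gamma = (1e5 / 1e3) ** 0.2 - 1 ~= 1.51, so an accepted step grows the
    # scale by ~1.51 * (1 - 0.234) / 100 ~= 0.012 and a rejected step shrinks
    # it by ~1.51 * 0.234 / 100 ~= 0.0035, steering the acceptance fraction
    # toward target_facc.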
class DifferentialEvolutionProposal(BaseProposal):
"""A proposal using Differential Evolution
Parameters
----------
priors: bilby.core.prior.PriorDict
The set of priors
weight: float
Weighting factor
subset: list
A list of keys for which to restrict the proposal to (other parameters
will be kept fixed)
mode_hopping_frac: float
The fraction of proposals which use 'mode hopping'
"""
def __init__(self, priors, weight=1, subset=None, mode_hopping_frac=0.5):
super(DifferentialEvolutionProposal, self).__init__(priors, weight, subset)
self.mode_hopping_frac = mode_hopping_frac
def propose(self, chain):
theta = chain.current_sample
theta1 = chain.random_sample
theta2 = chain.random_sample
if np.random.rand() > self.mode_hopping_frac:
gamma = 1
else:
# Base jump size
gamma = np.random.normal(0, 2.38 / np.sqrt(2 * self.ndim))
# Scale uniformly in log between 0.1 and 10 times
gamma *= np.exp(np.log(0.1) + np.log(100.0) * np.random.rand())
for key in self.parameters:
theta[key] += gamma * (theta2[key] - theta1[key])
log_factor = 0
return theta, log_factor
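    # Sketch of the jump sizes produced above: a proposal either applies the
    # full difference vector between two past samples (gamma = 1) or draws
    # gamma ~ N(0, 2.38 / sqrt(2 * ndim)) and rescales it log-uniformly by a
    # factor between 0.1 and 10 (the exp(log(0.1) + log(100) * rand) term).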
class UniformProposal(BaseProposal):
"""A proposal using uniform draws from the prior support
Parameters
----------
priors: bilby.core.prior.PriorDict
The set of priors
weight: float
Weighting factor
subset: list
A list of keys for which to restrict the proposal to (other parameters
will be kept fixed)
"""
def __init__(self, priors, weight=1, subset=None):
super(UniformProposal, self).__init__(priors, weight, subset)
def propose(self, chain):
sample = chain.current_sample
for key in self.parameters:
sample[key] = np.random.uniform(
self.prior_minimum_dict[key], self.prior_maximum_dict[key]
)
log_factor = 0
return sample, log_factor
class PriorProposal(BaseProposal):
"""A proposal using draws from the prior distribution
Note: for priors which use interpolation, this proposal can be problematic
    as the proposal gets pickled in multiprocessing. Either use serial
processing (npool=1) or fall back to a UniformProposal.
Parameters
----------
priors: bilby.core.prior.PriorDict
The set of priors
weight: float
Weighting factor
subset: list
A list of keys for which to restrict the proposal to (other parameters
will be kept fixed)
"""
def __init__(self, priors, weight=1, subset=None):
super(PriorProposal, self).__init__(priors, weight, subset)
self.priors = PriorDict({key: priors[key] for key in self.parameters})
def propose(self, chain):
sample = chain.current_sample
lnp_theta = self.priors.ln_prob(sample.as_dict(self.parameters))
prior_sample = self.priors.sample()
for key in self.parameters:
sample[key] = prior_sample[key]
lnp_thetaprime = self.priors.ln_prob(sample.as_dict(self.parameters))
log_factor = lnp_theta - lnp_thetaprime
return sample, log_factor
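    # Note on the returned Hastings factor (sketch): the proposal density here
    # is the prior itself, so q(theta'|theta) = pi(theta') and
    # q(theta|theta') = pi(theta); log_factor = ln pi(theta) - ln pi(theta')
    # therefore cancels the prior ratio in the acceptance probability.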
_density_estimate_doc = """ A proposal using draws from a {estimator} fit to the chain
Parameters
----------
priors: bilby.core.prior.PriorDict
The set of priors
weight: float
Weighting factor
subset: list
A list of keys for which to restrict the proposal to (other parameters
will be kept fixed)
first_fit: int
The number of steps to take before first fitting the KDE
fit_multiplier: int
The multiplier for the next fit
nsamples_for_density: int
The number of samples to use when fitting the KDE
fallback: bilby.core.sampler.bilby_mcmc.proposal.BaseProposal
A proposal to use before first training
scale_fits: int
A scaling factor for both the initial and subsequent updates
"""
class DensityEstimateProposal(BaseProposal):
def __init__(
self,
priors,
weight=1,
subset=None,
first_fit=1000,
fit_multiplier=10,
nsamples_for_density=1000,
fallback=AdaptiveGaussianProposal,
scale_fits=1,
):
super(DensityEstimateProposal, self).__init__(priors, weight, subset)
self.nsamples_for_density = nsamples_for_density
self.fallback = fallback(priors, weight, subset)
self.fit_multiplier = fit_multiplier * scale_fits
# Counters
self.steps_since_refit = 0
self.next_refit_time = first_fit * scale_fits
self.density = None
self.trained = False
self._str_attrs.append("trained")
density_name = None
__doc__ = _density_estimate_doc.format(estimator=density_name)
def _fit(self, dataset):
raise NotImplementedError
def _evaluate(self, point):
raise NotImplementedError
def _sample(self, nsamples=None):
raise NotImplementedError
def refit(self, chain):
current_density = self.density
start = time.time()
# Draw two (possibly overlapping) data sets for training and verification
dataset = []
verification_dataset = []
nsamples_for_density = min(chain.position, self.nsamples_for_density)
for _ in range(nsamples_for_density):
s = chain.random_sample
dataset.append([s[key] for key in self.parameters])
s = chain.random_sample
verification_dataset.append([s[key] for key in self.parameters])
# Fit the density
self.density = self._fit(np.array(dataset).T)
# Print a log message
took = time.time() - start
logger.info(
f"{self.density_name} construction at {self.steps_since_refit} finished"
f" for length {chain.position} chain, took {took:0.2f}s."
f" Current accept-ratio={self.acceptance_ratio:0.2f}"
)
# Reset counters for next training
self.steps_since_refit = 0
self.next_refit_time *= self.fit_multiplier
# Verify training hasn't overconstrained
new_draws = np.atleast_2d(self._sample(1000))
verification_dataset = np.array(verification_dataset)
fail_parameters = []
for ii, key in enumerate(self.parameters):
std_draws = np.std(new_draws[:, ii])
std_verification = np.std(verification_dataset[:, ii])
if std_draws < 0.1 * std_verification:
fail_parameters.append(key)
if len(fail_parameters) > 0:
logger.info(
f"{self.density_name} construction failed verification and is discarded"
)
self.density = current_density
else:
self.trained = True
def propose(self, chain):
self.steps_since_refit += 1
# Check if we refit
testA = self.steps_since_refit >= self.next_refit_time
if testA:
self.refit(chain)
# If KDE is yet to be fitted, use the fallback
if self.trained is False:
return self.fallback.propose(chain)
        # Grab the current sample and its probability under the KDE
theta = chain.current_sample
ln_p_theta = self._evaluate(list(theta.as_dict(self.parameters).values()))
# Sample and update theta
new_sample = self._sample(1)
for key, val in zip(self.parameters, new_sample):
theta[key] = val
# Calculate the probability of the new sample and the KDE
ln_p_thetaprime = self._evaluate(list(theta.as_dict(self.parameters).values()))
# Calculate Q(theta|theta') / Q(theta'|theta)
log_factor = ln_p_theta - ln_p_thetaprime
return theta, log_factor
class KDEProposal(DensityEstimateProposal):
density_name = "Gaussian KDE"
__doc__ = _density_estimate_doc.format(estimator=density_name)
def _fit(self, dataset):
return gaussian_kde(dataset)
def _evaluate(self, point):
return self.density.logpdf(point)[0]
def _sample(self, nsamples=None):
return np.atleast_1d(np.squeeze(self.density.resample(nsamples)))
class GMMProposal(DensityEstimateProposal):
density_name = "Gaussian Mixture Model"
__doc__ = _density_estimate_doc.format(estimator=density_name)
def _fit(self, dataset):
from sklearn.mixture import GaussianMixture
density = GaussianMixture(n_components=10)
density.fit(dataset.T)
return density
def _evaluate(self, point):
return np.squeeze(self.density.score_samples(np.atleast_2d(point)))
def _sample(self, nsamples=None):
return np.squeeze(self.density.sample(n_samples=nsamples)[0])
def check_dependencies(warn=True):
if importlib.util.find_spec("sklearn") is None:
if warn:
logger.warning(
"Unable to utilise GMMProposal as sklearn is not installed"
)
return False
else:
return True
class NormalizingFlowProposal(DensityEstimateProposal):
density_name = "Normalizing Flow"
__doc__ = _density_estimate_doc.format(estimator=density_name) + (
"""
js_factor: float
The factor to use in determining the max-JS factor to terminate
training.
max_training_epochs: int
        The maximum number of training steps to take
"""
)
def __init__(
self,
priors,
weight=1,
subset=None,
first_fit=1000,
fit_multiplier=10,
max_training_epochs=1000,
scale_fits=1,
nsamples_for_density=1000,
js_factor=10,
fallback=AdaptiveGaussianProposal,
):
super(NormalizingFlowProposal, self).__init__(
priors=priors,
weight=weight,
subset=subset,
first_fit=first_fit,
fit_multiplier=fit_multiplier,
nsamples_for_density=nsamples_for_density,
fallback=fallback,
scale_fits=scale_fits,
)
self.setup_flow()
self.setup_optimizer()
self.max_training_epochs = max_training_epochs
self.js_factor = js_factor
def setup_flow(self):
if self.ndim < 3:
self.setup_basic_flow()
else:
self.setup_NVP_flow()
def setup_NVP_flow(self):
from .flows import NVPFlow
self.flow = NVPFlow(
features=self.ndim,
hidden_features=self.ndim * 2,
num_layers=2,
num_blocks_per_layer=2,
batch_norm_between_layers=True,
batch_norm_within_layers=True,
)
def setup_basic_flow(self):
from .flows import BasicFlow
self.flow = BasicFlow(features=self.ndim)
def setup_optimizer(self):
from torch import optim
self.optimizer = optim.Adam(self.flow.parameters())
def get_training_data(self, chain):
training_data = []
nsamples_for_density = min(chain.position, self.nsamples_for_density)
for _ in range(nsamples_for_density):
s = chain.random_sample
training_data.append([s[key] for key in self.parameters])
return training_data
def _calculate_js(self, validation_samples, training_samples_draw):
# Calculate the maximum JS between the validation and draw
max_js = 0
for i in range(self.ndim):
A = validation_samples[:, i]
B = training_samples_draw[:, i]
xmin = np.min([np.min(A), np.min(B)])
xmax = np.min([np.max(A), np.max(B)])
xval = np.linspace(xmin, xmax, 100)
Apdf = gaussian_kde(A)(xval)
Bpdf = gaussian_kde(B)(xval)
js = jensenshannon(Apdf, Bpdf)
max_js = max(max_js, js)
return np.power(max_js, 2)
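    # Sketch: the value returned above is the squared Jensen-Shannon divergence
    # of the worst-matching 1D marginal; train() compares it against
    # js_factor / nsamples_for_density (e.g. 10 / 1000 = 0.01) to decide when
    # the flow has converged.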
def train(self, chain):
logger.info("Starting NF training")
import torch
start = time.time()
training_samples = np.array(self.get_training_data(chain))
validation_samples = np.array(self.get_training_data(chain))
training_tensor = torch.tensor(training_samples, dtype=torch.float32)
max_js_threshold = self.js_factor / self.nsamples_for_density
for epoch in range(1, self.max_training_epochs + 1):
self.optimizer.zero_grad()
loss = -self.flow.log_prob(inputs=training_tensor).mean()
loss.backward()
self.optimizer.step()
# Draw from the current flow
self.flow.eval()
training_samples_draw = (
self.flow.sample(self.nsamples_for_density).detach().numpy()
)
self.flow.train()
if np.mod(epoch, 10) == 0:
max_js_bits = self._calculate_js(
validation_samples, training_samples_draw
)
if max_js_bits < max_js_threshold:
logger.info(
f"Training complete after {epoch} steps, "
f"max_js_bits={max_js_bits:0.5f}<{max_js_threshold}"
)
break
took = time.time() - start
logger.info(
f"Flow training step ({self.steps_since_refit}) finished"
f" for length {chain.position} chain, took {took:0.2f}s."
f" Current accept-ratio={self.acceptance_ratio:0.2f}"
)
self.steps_since_refit = 0
self.next_refit_time *= self.fit_multiplier
self.trained = True
def propose(self, chain):
import torch
self.steps_since_refit += 1
theta = chain.current_sample
# Check if we retrain the NF
testA = self.steps_since_refit >= self.next_refit_time
if testA:
self.train(chain)
if self.trained is False:
return self.fallback.propose(chain)
self.flow.eval()
theta_prime_T = self.flow.sample(1)
logp_theta_prime = self.flow.log_prob(theta_prime_T).detach().numpy()[0]
theta_T = torch.tensor(
np.atleast_2d([theta[key] for key in self.parameters]), dtype=torch.float32
)
logp_theta = self.flow.log_prob(theta_T).detach().numpy()[0]
log_factor = logp_theta - logp_theta_prime
flow_sample_values = np.atleast_1d(np.squeeze(theta_prime_T.detach().numpy()))
for key, val in zip(self.parameters, flow_sample_values):
theta[key] = val
return theta, float(log_factor)
def check_dependencies(warn=True):
if importlib.util.find_spec("nflows") is None:
if warn:
logger.warning(
"Unable to utilise NormalizingFlowProposal as nflows is not installed"
)
return False
else:
return True
class FixedJumpProposal(BaseProposal):
def __init__(self, priors, jumps=1, subset=None, weight=1, scale=1e-4):
super(FixedJumpProposal, self).__init__(priors, weight, subset)
self.scale = scale
if isinstance(jumps, (int, float)):
self.jumps = {key: jumps for key in self.parameters}
elif isinstance(jumps, dict):
self.jumps = jumps
else:
raise SamplerError("jumps not understood")
def propose(self, chain):
sample = chain.current_sample
for key, jump in self.jumps.items():
sign = | np.random.randint(2) | numpy.random.randint |
import torch
import torch.utils.data as utils
import DCE_matt as dce
import pydcemri.dcemri as classic
import copy
import numpy as np
import model
import pickle
import time
import train
from tqdm import tqdm
def run_simulations(hp, SNR=15, eval=False, var_seq=False):
print(hp.device)
if hp.network.nn == 'lsq':
hp.simulations.num_samples = hp.simulations.num_samples_leval
rep1 = hp.acquisition.rep1-1
rep2 = hp.acquisition.rep2-1
if hp.create_data:
# create simulation parameters
test = np.random.uniform(0, 1, (hp.simulations.num_samples, 1))
vp = hp.simulations.vp_min + (test * (hp.simulations.vp_max - hp.simulations.vp_min))
test = np.random.uniform(0, 1, (hp.simulations.num_samples, 1))
ve = hp.simulations.ve_min + (test * (hp.simulations.ve_max - hp.simulations.ve_min))
test = np.random.uniform(0, 1, (hp.simulations.num_samples, 1))
kep = hp.simulations.kep_min + (test * (hp.simulations.kep_max - hp.simulations.kep_min))
Tonset = np.random.uniform(hp.simulations.Tonset_min, hp.simulations.Tonset_max, (hp.simulations.num_samples, 1))
test = np.random.uniform(0, 1, (hp.simulations.num_samples, 1))
R1 = hp.simulations.R1_min + (test * (hp.simulations.R1_max - hp.simulations.R1_min))
hp.acquisition.FAlist = np.array([hp.acquisition.FA2])
del test
hp.acquisition.timing = np.arange(0, rep2 + rep1) * hp.simulations.time / 60
num_time = len(hp.acquisition.timing)
X_dw = np.zeros((hp.simulations.num_samples, num_time))
R1eff = np.zeros((hp.simulations.num_samples, num_time))
C = np.zeros((hp.simulations.num_samples, num_time))
test = np.random.uniform(0, 1, (hp.simulations.num_samples))
# vary the Hct value from 0.3 to 0.6
if hp.network.aif:
Hct = 0.3 + (test * (0.3))
else:
Hct = None
aif = hp.aif.aif
aif['ab'] /= (1-hp.aif.Hct)
if hp.network.full_aif or eval:
AIF_curves = np.zeros((len(Hct), rep2))
for aa in tqdm(range(len(kep))):
if hp.network.aif:
aif = hp.aif.aif.copy()
aif['ab'] /= (1-Hct[aa])
C[aa, :] = dce.Cosine8AIF_ExtKety(hp.acquisition.timing, aif, kep[aa][0], (Tonset[aa][0] + rep1*hp.simulations.time)/60,
ve[aa][0], vp[aa][0])
R1eff[aa, :] = classic.con_to_R1eff(C[aa, :], R1[aa][0], hp.acquisition.r1)
X_dw[aa, :] = classic.r1eff_to_dce(R1eff[aa, :], hp.acquisition.TR, hp.acquisition.FAlist)
if hp.network.full_aif or eval:
AIF_curves[aa] = dce.Cosine8AIF(hp.acquisition.timing, aif['ab'], aif['ar'], aif['ae'], aif['mb'],
aif['mm'], aif['mr'], aif['me'], aif['tr'], aif['t0'])
# scale the signal to the baseline signal
S0_out = np.mean(X_dw[:, :rep2//10], axis=1)
dce_signal_scaled = X_dw / S0_out[:, None]
# vary SNR from 7 to 100
if SNR == 'all':
SNR = np.linspace(7, 100, num=hp.simulations.num_samples)
noise = np.random.normal(0, 1/SNR, (num_time, hp.simulations.num_samples)).T
dce_signal_noisy = dce_signal_scaled + noise
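        # Note (added): SNR is defined relative to the unit baseline signal, so
        # the noise standard deviation is 1/SNR (e.g. SNR=15 gives sigma ~= 0.067).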
del noise, X_dw, R1eff, C
hp.acquisition.timing = torch.FloatTensor(hp.acquisition.timing)
# if not lsq, convert to concentration and save data
if hp.network.nn != 'lsq':
S0 = np.mean(dce_signal_noisy[:, :rep2//10], axis=1)
R1eff2 = dce.dce_to_r1eff(dce_signal_noisy, S0, R1.squeeze(), hp.acquisition.TR, hp.acquisition.FA2)
C1 = dce.r1eff_to_conc(R1eff2, R1, hp.acquisition.r1)
dce_signal_noisy = C1
if hp.network.full_aif or eval:
data = [dce_signal_noisy, hp, Hct, kep, ve, vp, Tonset, AIF_curves]
else:
data = [dce_signal_noisy, hp, Hct, kep, ve, vp, Tonset]
# change name when varying SNR or acquisition points
if eval:
if var_seq:
hp.create_name_copy = hp.create_name.replace('.p', '')+'seq_'+str(hp.acquisition.rep2)+'.p'
else:
hp.create_name_copy = hp.create_name.replace('.p', '')+'_'+str(SNR)+'.p'
else:
hp.create_name_copy = hp.create_name
pickle.dump(data, open(hp.create_name_copy, "wb"))
hp.acquisition.timing = hp.acquisition.timing.to(hp.device)
else:
print('load simulation data')
begin = time.time()
if eval:
if var_seq:
hp.create_name_copy = hp.create_name.replace('.p', '')+'seq_'+str(hp.acquisition.rep2)+'.p'
else:
hp.create_name_copy = hp.create_name.replace('.p', '')+'_'+str(SNR)+'.p'
else:
hp.create_name_copy = hp.create_name
if hp.network.full_aif or eval:
dce_signal_noisy, hp_data, Hct, kep, ve, vp, Tonset, AIF_curves = pickle.load(open(hp.create_name_copy, "rb"))
else:
dce_signal_noisy, hp_data, Hct, kep, ve, vp, Tonset = pickle.load(open(hp.create_name_copy, "rb"))
time_passed = time.time() - begin
print('{} data points, loading time:{:2f} s'.format(len(dce_signal_noisy), time_passed))
hp.acquisition = hp_data.acquisition
hp.aif = hp_data.aif
hp.acquisition.timing = hp.acquisition.timing.to(hp.device)
# executing non-linear least squares fit on data
if hp.network.nn == 'lsq':
out = np.zeros((4, hp.simulations.num_samples_leval))
for i in tqdm(range(hp.simulations.num_samples)):
aif = hp.aif.aif.copy()
aif['ab'] /= (1-Hct[i])
params = dce.fit_tofts_model(np.expand_dims(dce_signal_noisy[i], axis=0), hp.acquisition.timing.cpu().numpy(), aif,
X0=(np.mean(kep), np.mean(Tonset)/60, np.mean(ve), np.mean(vp)))
out[0, i] = params[0]
out[1, i] = params[1]
out[2, i] = params[2]
out[3, i] = params[3]
# loading model for evaluation on neural networks
elif eval:
net = model.DCE_NET(hp).to(hp.device)
net.load_state_dict(torch.load('pretrained/pretrained_'+hp.exp_name+'.pt'))
net.to(hp.device)
if hp.network.full_aif:
Hct = np.concatenate([Hct[:, np.newaxis], AIF_curves], axis=1)
# start training for neural networks
else:
if hp.network.full_aif:
Hct = np.concatenate([Hct[:, np.newaxis], AIF_curves], axis=1)
if hp.pretrained:
net = model.DCE_NET(hp).to(hp.device)
net.load_state_dict(torch.load(hp.pretrain_name))
net.to(hp.device)
net = train.train(dce_signal_noisy, hp, net=net, Hct=Hct,
orig_params=torch.Tensor([np.squeeze(kep),
np.squeeze(ve),
np.squeeze(vp),
(np.squeeze(Tonset)+rep1*hp.simulations.time) / 60]))
else:
net = train.train(dce_signal_noisy, hp, Hct=Hct,
orig_params=torch.Tensor([np.squeeze(kep),
np.squeeze(ve),
np.squeeze(vp),
(np.squeeze(Tonset)+rep1*hp.simulations.time) / 60]))
torch.save(net.state_dict(), 'pretrained/pretrained_'+hp.exp_name+'.pt')
# evaluate on current dataset
if hp.network.nn != 'lsq':
out = predict_DCE(dce_signal_noisy, copy.deepcopy(net), hp, Hct=Hct)
param_results = sim_results(out, hp, kep, ve, vp, Tonset)
return param_results
def predict_DCE(C1, net, hp, Hct=None, one_dim=True):
net.eval()
first_params = True
C1[np.isnan(C1)] = 0
# perform interpolation for FCN when acquisition points is lower than max rep
if hp.network.nn == 'linear' and C1.shape[1] < hp.max_rep:
delta = (C1.shape[1]-1) / (hp.max_rep-1)
C = np.zeros((C1.shape[0], hp.max_rep))
for i in tqdm(range(len(C1))):
C[i] = np.array([train.interpolate(C1[i], j*delta) for j in range(hp.max_rep)])
C1 = C
hp.acquisition.timing = torch.arange(hp.max_rep, device=hp.device) * hp.simulations.time / 60
print('using full network with voxel-wise aif')
# temporal framework
if one_dim:
if hp.network.full_aif:
C1 = np.concatenate([Hct, C1], axis=1)
else:
C1 = np.concatenate([Hct[:, np.newaxis], C1], axis=1)
ke = torch.zeros(len(C1))
ve = torch.zeros(len(C1))
vp = torch.zeros(len(C1))
dt = torch.zeros(len(C1))
X = torch.zeros((len(C1), 160))
# spatiotemporal framework
else:
Hct = np.expand_dims(Hct, axis=(1, 2, 3))
Hct = np.repeat(np.repeat(Hct, C1.shape[1], axis=1), C1.shape[2], axis=2)
C1 = np.concatenate([Hct, C1], axis=3)
C1 = np.moveaxis(C1, 3, 1)
ke = torch.zeros((C1.shape[0], C1.shape[2], C1.shape[3]))
ve = torch.zeros((C1.shape[0], C1.shape[2], C1.shape[3]))
vp = torch.zeros((C1.shape[0], C1.shape[2], C1.shape[3]))
dt = torch.zeros((C1.shape[0], C1.shape[2], C1.shape[3]))
X = torch.zeros((C1.shape[0], 160, C1.shape[2], C1.shape[3]))
C1 = torch.from_numpy(C1.astype(np.float32))
inferloader = utils.DataLoader(C1,
batch_size=hp.training.val_batch_size,
shuffle=False,
drop_last=False)
# perform inference
size = hp.training.val_batch_size
with torch.no_grad():
for i, X_batch in enumerate(tqdm(inferloader, position=0, leave=True), 0):
X_batch = X_batch.to(hp.device)
if hp.network.full_aif:
X_dw, ket, dtt, vet, vpt = net(X_batch[:, hp.acquisition.rep2:], Hct=X_batch[:, :hp.acquisition.rep2])
else:
X_dw, ket, dtt, vet, vpt = net(X_batch[:, 1:], Hct=X_batch[:, :1])
ke[i*size:(i+1)*size] = ket.cpu().squeeze()
ve[i*size:(i+1)*size] = vet.cpu().squeeze()
vp[i*size:(i+1)*size] = vpt.cpu().squeeze()
dt[i*size:(i+1)*size] = dtt.cpu().squeeze()
X[i*size:(i+1)*size] = X_dw.cpu().squeeze()
# if first_params:
# ke = ket.cpu().numpy()
# dt = dtt.cpu().numpy()
# ve = vet.cpu().numpy()
# vp = vpt.cpu().numpy()
# X = X_dw.cpu().numpy()
# first_params = False
# else:
# ke = np.concatenate((ke, ket.cpu().numpy()), axis=0)
# dt = np.concatenate((dt, dtt.cpu().numpy()), axis=0)
# ve = np.concatenate((ve, vet.cpu().numpy()), axis=0)
# vp = np.concatenate((vp, vpt.cpu().numpy()), axis=0)
# X = np.concatenate((X, X_dw.cpu().numpy()), axis=0)
ke = np.array(ke)
ve = np.array(ve)
vp = np.array(vp)
dt = np.array(dt)
X = np.array(X)
params = [ke, dt, ve, vp, X]
return params
def sim_results(paramsNN_full, hp, kep, ve, vp, Tonset, Hct=None):
# calculate the random and systematic error of every parameter
rep1 = hp.acquisition.rep1 - 1
error_ke = paramsNN_full[0] - np.squeeze(kep)
randerror_ke = np.std(error_ke)
syserror_ke = np.mean(error_ke)
del error_ke
error_ve = paramsNN_full[2] - np.squeeze(ve)
randerror_ve = np.std(error_ve)
syserror_ve = np.mean(error_ve)
del error_ve
error_vp = paramsNN_full[3] - np.squeeze(vp)
randerror_vp = np.std(error_vp)
syserror_vp = np.mean(error_vp)
del error_vp
error_dt = paramsNN_full[1] - (np.squeeze(Tonset) + rep1 * hp.simulations.time) / 60
randerror_dt = np.std(error_dt)
syserror_dt = np.mean(error_dt)
del error_dt
normke = np.mean(kep)
normve = np.mean(ve)
normvp = np.mean(vp)
normdt = | np.mean(Tonset / 60) | numpy.mean |
import numpy as np
from skopt.learning import GaussianProcessRegressor, RandomForestRegressor
from skopt.learning.gaussian_process.kernels import Matern, WhiteKernel
from scipy.optimize import fmin_l_bfgs_b
from .acq import *
from tqdm import tqdm_notebook
def transform(x, space):
return (x - space[None, :, 0]) / (space[:, 1] - space[:, 0])[None, :]
def reverse_transform(x, space):
return x * (space[:, 1] - space[:, 0])[None, :] + space[None, :, 0]
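# Illustrative example (not part of the original file): with a search space
# [[0, 10], [5, 15]], a point [5, 10] maps to [0.5, 0.5] in the unit cube and
# back again:
#
#   space = np.array([[0.0, 10.0], [5.0, 15.0]])
#   x = np.array([[5.0, 10.0]])
#   transform(x, space)                             # -> array([[0.5, 0.5]])
#   reverse_transform(transform(x, space), space)   # -> array([[ 5., 10.]])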
def gpbo_cycle(ndim, space, target_f, n_iters=10, acq_function=ei, model=None, n_multi_start=100, show_progress=True):
xrange = (lambda title, n: tqdm_notebook(range(n), postfix=title)) if show_progress else (lambda title, n: range(n))
space = | np.array(space) | numpy.array |
import vsketch
import numpy as np
from shapely.geometry import LineString, MultiLineString
from shapely.ops import clip_by_rect
import sys
sys.path.append('./')
from tiles import *
# Declare global constants
ROWS = 150
COLS = 150
SCALE = 50
# Store global state here
GRID = np.zeros((ROWS, COLS, 2))
VISITED = np.zeros((ROWS, COLS))
FRONTIER = []
TILES = [ SINGLETON, STAIRCASE_1, STAIRCASE_2, ROD_1, ROD_2, ROD_3, PLANE_1, PLANE_2, L1, L2, L3, L4, BIG ]
class TilingSketch(vsketch.SketchClass):
def to_cartesian(self, i, j):
return (j-i) * SCALE, (i+j) * SCALE / (3 ** .5)
def hatch(self, coords, num_lines=5):
p1, p2, p3 = coords
lines = | np.arange(num_lines) | numpy.arange |
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Helper functions to work with distributions."""
import numbers
import numpy as np
import scipy.integrate
from astropy.coordinates import Angle
from astropy.time import TimeDelta
__all__ = [
"get_random_state",
"sample_sphere",
"sample_sphere_distance",
"sample_powerlaw",
"sample_times",
"normalize",
"draw",
"pdf",
]
def normalize(func, x_min, x_max):
"""Normalize a 1D function over a given range."""
def f(x):
return func(x) / scipy.integrate.quad(func, x_min, x_max)[0]
return f
def pdf(func):
"""One-dimensional PDF of a given radial surface density."""
def f(x):
return x * func(x)
return f
def draw(low, high, size, dist, random_state="random-seed", *args, **kwargs):
"""Allows drawing of random numbers from any distribution."""
from .inverse_cdf import InverseCDFSampler
n = 1000
x = np.linspace(low, high, n)
pdf = dist(x)
sampler = InverseCDFSampler(pdf=pdf, random_state=random_state)
idx = sampler.sample(size)
x_sampled = np.interp(idx, np.arange(n), x)
return np.squeeze(x_sampled)
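# Minimal usage sketch for draw() (assumed example, not from the original
# module): sample 100 radii from a uniform radial surface density on [0, 1]
# by drawing from the 1D PDF r -> r * func(r):
#
#   values = draw(0.0, 1.0, size=100, dist=pdf(lambda r: np.ones_like(r)))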
def get_random_state(init):
"""Get a `numpy.random.RandomState` instance.
The purpose of this utility function is to have a flexible way
to initialise a `~numpy.random.RandomState` instance,
a.k.a. a random number generator (``rng``).
See :ref:`dev_random` for usage examples and further information.
Parameters
----------
init : {int, 'random-seed', 'global-rng', `~numpy.random.RandomState`}
Available options to initialise the RandomState object:
* ``int`` -- new RandomState instance seeded with this integer
(calls `~numpy.random.RandomState` with ``seed=init``)
* ``'random-seed'`` -- new RandomState instance seeded in a random way
(calls `~numpy.random.RandomState` with ``seed=None``)
* ``'global-rng'``, return the RandomState singleton used by ``numpy.random``.
* `~numpy.random.RandomState` -- do nothing, return the input.
Returns
-------
random_state : `~numpy.random.RandomState`
RandomState instance.
"""
if isinstance(init, (numbers.Integral, np.integer)):
return | np.random.RandomState(init) | numpy.random.RandomState |
"""
Various low-dimensional dynamical systems in Python.
For flows that occur on unbounded intervals (e.g. non-autonomous systems),
coordinates are transformed to a basis where the domain remains bounded.
Requirements:
+ numpy
+ scipy
+ sdeint (for integration with noise)
+ numba (optional, for faster integration)
"""
import numpy as np
from .base import DynSys, DynSysDelay, staticjit
class Lorenz(DynSys):
@staticjit
def _rhs(x, y, z, t, beta, rho, sigma):
xdot = sigma * (y - x)
ydot = x * (rho - z) - y
zdot = x * y - beta * z
return xdot, ydot, zdot
@staticjit
def _jac(x, y, z, t, beta, rho, sigma):
row1 = [-sigma, sigma, 0]
row2 = [rho - z, -1, -x]
row3 = [y, x, -beta]
return [row1, row2, row3]
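# Usage sketch (the trajectory call below is an assumption about the DynSys
# base class API and may differ in the actual package):
#
#   model = Lorenz()
#   # traj = model.make_trajectory(1000)  # integrate 1000 points of the flow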
class LorenzBounded(DynSys):
@staticjit
def _rhs(x, y, z, t, beta, r, rho, sigma):
f = 1 - (x ** 2 + y ** 2 + z ** 2) / r ** 2
xdot = sigma * (y - x) * f
ydot = (x * (rho - z) - y) * f
zdot = (x * y - beta * z) * f
return xdot, ydot, zdot
class LorenzCoupled(DynSys):
@staticjit
def _rhs(x1, y1, z1, x2, y2, z2, t, beta, eps, rho, rho1, rho2, sigma):
x1dot = sigma * (y1 - x1)
y1dot = x1 * (rho1 - z1) - y1
z1dot = x1 * y1 - beta * z1
x2dot = sigma * (y2 - x2) + eps * (x1 - x2)
y2dot = x2 * (rho2 - z2) - y2
z2dot = x2 * y2 - beta * z2
return x1dot, y1dot, z1dot, x2dot, y2dot, z2dot
class Lorenz96(DynSys):
def rhs(self, X, t):
Xdot = np.zeros_like(X)
Xdot[0] = (X[1] - X[-2]) * X[-1] - X[0] + self.f
Xdot[1] = (X[2] - X[-1]) * X[0] - X[1] + self.f
Xdot[-1] = (X[0] - X[-3]) * X[-2] - X[-1] + self.f
Xdot[2:-1] = (X[3:] - X[:-3]) * X[1:-2] - X[2:-1] + self.f
return Xdot
class Lorenz84(DynSys):
@staticjit
def _rhs(x, y, z, t, a, b, f, g):
xdot = -a * x - y ** 2 - z ** 2 + a * f
ydot = -y + x * y - b * x * z + g
zdot = -z + b * x * y + x * z
return xdot, ydot, zdot
class Rossler(DynSys):
@staticjit
def _rhs(x, y, z, t, a, b, c):
xdot = -y - z
ydot = x + a * y
zdot = b + z * (x - c)
return xdot, ydot, zdot
class Thomas(DynSys):
@staticjit
def _rhs(x, y, z, t, a, b):
xdot = -a * x + b * np.sin(y)
ydot = -a * y + b * np.sin(z)
zdot = -a * z + b * np.sin(x)
return xdot, ydot, zdot
class ThomasLabyrinth(Thomas):
pass
class DoublePendulum(DynSys):
@staticjit
def _rhs(th1, th2, p1, p2, t, d, m):
g = 9.82
pre = 6 / (m * d ** 2)
denom = 16 - 9 * np.cos(th1 - th2) ** 2
th1_dot = pre * (2 * p1 - 3 * np.cos(th1 - th2) * p2) / denom
th2_dot = pre * (8 * p2 - 3 * np.cos(th1 - th2) * p1) / denom
p1_dot = (
-0.5
* (m * d ** 2)
* (th1_dot * th2_dot * np.sin(th1 - th2) + 3 * (g / d) * np.sin(th1))
)
p2_dot = (
-0.5
* (m * d ** 2)
* (-th1_dot * th2_dot * np.sin(th1 - th2) + 3 * (g / d) * np.sin(th2))
)
return th1_dot, th2_dot, p1_dot, p2_dot
@staticjit
def _postprocessing(th1, th2, p1, p2):
return np.sin(th1), np.sin(th2), p1, p2
class SwingingAtwood(DynSys):
@staticjit
def _rhs(r, th, pr, pth, t, m1, m2):
g = 9.82
rdot = pr / (m1 + m2)
thdot = pth / (m1 * r ** 2)
prdot = pth ** 2 / (m1 * r ** 3) - m2 * g + m1 * g * np.cos(th)
pthdot = -m1 * g * r * np.sin(th)
return rdot, thdot, prdot, pthdot
@staticjit
def _postprocessing(r, th, pr, pth):
return r, np.sin(th), pr, pth
class GuckenheimerHolmes(DynSys):
@staticjit
def _rhs(x, y, z, t, a, b, c, d, e, f):
xdot = a * x - b * y + c * z * x + d * z * (x ** 2 + y ** 2)
ydot = a * y + b * x + c * z * y
zdot = e - z ** 2 - f * (x ** 2 + y ** 2) - a * z ** 3
return xdot, ydot, zdot
class HenonHeiles(DynSys):
@staticjit
def _rhs(x, y, px, py, t, lam):
xdot = px
ydot = py
pxdot = -x - 2 * lam * x * y
pydot = -y - lam * (x ** 2 - y ** 2)
return xdot, ydot, pxdot, pydot
class Halvorsen(DynSys):
@staticjit
def _rhs(x, y, z, t, a, b):
xdot = -a * x - b * (y + z) - y ** 2
ydot = -a * y - b * (z + x) - z ** 2
zdot = -a * z - b * (x + y) - x ** 2
return xdot, ydot, zdot
class Chua(DynSys):
@staticjit
def _rhs(x, y, z, t, alpha, beta, m0, m1):
ramp_x = m1 * x + 0.5 * (m0 - m1) * (np.abs(x + 1) - np.abs(x - 1))
xdot = alpha * (y - x - ramp_x)
ydot = x - y + z
zdot = -beta * y
return xdot, ydot, zdot
class MultiChua(DynSys):
def diode(self, x):
m, c = self.m, self.c
total = m[-1] * x
for i in range(1, 6):
total += 0.5 * (m[i - 1] - m[i]) * (np.abs(x + c[i]) - np.abs(x - c[i]))
return total
def rhs(self, X, t):
x, y, z = X
xdot = self.a * (y - self.diode(x))
ydot = x - y + z
zdot = -self.b * y
return (xdot, ydot, zdot)
class Duffing(DynSys):
@staticjit
def _rhs(x, y, z, t, alpha, beta, delta, gamma, omega):
xdot = y
ydot = -delta * y - beta * x - alpha * x ** 3 + gamma * np.cos(z)
zdot = omega
return xdot, ydot, zdot
@staticjit
def _postprocessing(x, y, z):
return x, y, np.cos(z)
class MackeyGlass(DynSysDelay):
@staticjit
def _rhs(x, xt, t, beta, gamma, n, tau):
xdot = beta * (xt / (1 + xt ** n)) - gamma * x
return xdot
class IkedaDelay(DynSysDelay):
@staticjit
def _rhs(x, xt, t, c, mu, tau, x0):
xdot = mu * np.sin(xt - x0) - c * x
return xdot
class SprottDelay(IkedaDelay):
pass
class VossDelay(DynSysDelay):
@staticjit
def _rhs(x, xt, t, alpha, tau):
f = -10.44 * xt ** 3 - 13.95 * xt ** 2 - 3.63 * xt + 0.85
xdot = -alpha * x + f
return xdot
class ScrollDelay(DynSysDelay):
@staticjit
def _rhs(x, xt, t, alpha, beta, tau):
f = np.tanh(10 * xt)
xdot = -alpha * xt + beta * f
return xdot
class PiecewiseCircuit(DynSysDelay):
@staticjit
def _rhs(x, xt, t, alpha, beta, c, tau):
f = -((xt / c) ** 3) + 3 * xt / c
xdot = -alpha * xt + beta * f
return xdot
# ## this was not chaotic
# class ENSODelay(DynSysDelay):
# @staticjit
# def _rhs(x, xt, t, alpha, beta, tau):
# xdot = x - x**3 - alpha * xt + beta
# return xdot
class DoubleGyre(DynSys):
@staticjit
def _rhs(x, y, z, t, alpha, eps, omega):
a = eps * np.sin(z)
b = 1 - 2 * eps * np.sin(z)
f = a * x ** 2 + b * x
dx = -alpha * np.pi * np.sin(np.pi * f) * np.cos(np.pi * y)
dy = alpha * np.pi * np.cos(np.pi * f) * np.sin(np.pi * y) * (2 * a * x + b)
dz = omega
return dx, dy, dz
@staticjit
def _postprocessing(x, y, z):
return x, y, np.sin(z)
class BlinkingRotlet(DynSys):
@staticjit
def _rotlet(r, theta, a, b, bc):
"""A rotlet velocity field"""
kappa = a ** 2 + (b ** 2 * r ** 2) / a ** 2 - 2 * b * r * np.cos(theta)
gamma = (1 - r ** 2 / a ** 2) * (a ** 2 - (b ** 2 * r ** 2) / a ** 2)
iota = (b ** 2 * r) / a ** 2 - b * np.cos(theta)
zeta = b ** 2 + r ** 2 - 2 * b * r * np.cos(theta)
nu = a ** 2 + b ** 2 - (2 * b ** 2 * r ** 2) / a ** 2
vr = b * np.sin(theta) * (-bc * (gamma / kappa ** 2) - 1 / kappa + 1 / zeta)
vth = (
bc * (gamma * iota) / kappa ** 2
+ bc * r * nu / (a ** 2 * kappa)
+ iota / kappa
- (r - b * np.cos(theta)) / zeta
)
return vr, vth
@staticjit
def _protocol(t, tau, stiffness=20):
return 0.5 + 0.5 * np.tanh(tau * stiffness * np.sin(2 * np.pi * t / tau))
def rhs(self, X, t):
r, theta, tt = X
weight = self._protocol(tt, self.tau)
dr1, dth1 = self._rotlet(r, theta, self.a, self.b, self.bc)
dr2, dth2 = self._rotlet(r, theta, self.a, -self.b, self.bc)
dr = weight * dr1 + (1 - weight) * dr2
dth = (weight * dth1 + (1 - weight) * dth2) / r
dtt = 1
return self.sigma * dr, self.sigma * dth, dtt
def _postprocessing(self, r, th, tt):
return r * np.cos(th), r * np.sin(th), np.sin(2 * np.pi * tt / self.tau)
class BlinkingVortex(BlinkingRotlet):
pass
class OscillatingFlow(DynSys):
@staticjit
def _rhs(x, y, z, t, b, k, omega, u):
f = x + b * np.sin(z)
dx = u * np.cos(k * y) * np.sin(k * f)
dy = -u * np.sin(k * y) * np.cos(k * f)
dz = omega
return dx, dy, dz
def _postprocessing(self, x, y, z):
return np.cos(self.k * x), y, np.sin(z)
class BickleyJet(DynSys):
@staticjit
def _rhs(y, x, z, t, ell, eps, k, omega, sigma, u):
sechy = 1 / np.cosh(y / ell)
inds = np.arange(3)
un = k[inds] * (x - z * sigma[inds])
dx = u * sechy ** 2 * (-1 - 2 * np.dot(np.cos(un), eps) * np.tanh(y / ell))
dy = ell * u * sechy ** 2 * np.dot(eps * k, np.sin(un))
dz = omega
return dy, dx, dz
def _postprocessing(self, x, y, z):
km = np.min(self.k)
sm = np.min(self.sigma)
return x, np.sin(km * y), np.sin(self.omega * z * km * sm)
class ArnoldBeltramiChildress(DynSys):
@staticjit
def _rhs(x, y, z, t, a, b, c):
dx = a * np.sin(z) + c * np.cos(y)
dy = b * np.sin(x) + a * np.cos(z)
dz = c * np.sin(y) + b * np.cos(x)
return dx, dy, dz
@staticjit
def _postprocessing(x, y, z):
return np.sin(x), np.cos(y), np.sin(z)
class JerkCircuit(DynSys):
@staticjit
def _rhs(x, y, z, t, eps, y0):
xdot = y
ydot = z
zdot = -z - x - eps * (np.exp(y / y0) - 1)
return xdot, ydot, zdot
class ForcedBrusselator(DynSys):
@staticjit
def _rhs(x, y, z, t, a, b, f, w):
xdot = a + x ** 2 * y - (b + 1) * x + f * np.cos(z)
ydot = b * x - x ** 2 * y
zdot = w
return xdot, ydot, zdot
@staticjit
def _postprocessing(x, y, z):
return x, y, np.sin(z)
class WindmiReduced(DynSys):
@staticjit
def _rhs(i, v, p, t, a1, b1, b2, b3, d1, vsw):
idot = a1 * (vsw - v)
vdot = b1 * i - b2 * p ** 1 / 2 - b3 * v
pdot = (
vsw ** 2 - p ** (5 / 4) * vsw ** (1 / 2) * (1 + np.tanh(d1 * (i - 1))) / 2
)
return idot, vdot, pdot
class MooreSpiegel(DynSys):
@staticjit
def _rhs(x, y, z, t, a, b, eps):
xdot = y
ydot = a * z
zdot = -z + eps * y - y * x ** 2 - b * x
return xdot, ydot, zdot
class CoevolvingPredatorPrey(DynSys):
@staticjit
def _rhs(x, y, alpha, t, a1, a2, a3, b1, b2, d1, d2, delta, k1, k2, k4, vv):
xdot = x * (
-((a3 * y) / (1 + b2 * x))
+ (a1 * alpha * (1 - k1 * x * (-alpha + alpha * delta))) / (1 + b1 * alpha)
- d1
* (
1
- k2 * (-(alpha ** 2) + (alpha * delta) ** 2)
+ k4 * (-(alpha ** 4) + (alpha * delta) ** 4)
)
)
ydot = (-d2 + (a2 * x) / (1 + b2 * x)) * y
alphadot = vv * (
-((a1 * k1 * x * alpha * delta) / (1 + b1 * alpha))
- d1 * (-2 * k2 * alpha * delta ** 2 + 4 * k4 * alpha ** 3 * delta ** 4)
)
return xdot, ydot, alphadot
class KawczynskiStrizhak(DynSys):
@staticjit
def _rhs(x, y, z, t, beta, gamma, kappa, mu):
xdot = gamma * (y - x ** 3 + 3 * mu * x)
ydot = -2 * mu * x - y - z + beta
zdot = kappa * (x - z)
return xdot, ydot, zdot
class BelousovZhabotinsky(DynSys):
@staticjit
def _rhs(
x,
z,
v,
t,
c1,
c10,
c11,
c12,
c13,
c2,
c3,
c4,
c5,
c6,
c7,
c8,
c9,
ci,
kf,
t0,
y0,
yb1,
yb2,
yb3,
z0,
):
ybar = (1 / y0) * yb1 * z * v / (yb2 * x + yb3 + kf)
if x < 0.0:
x = 0
rf = (ci - z0 * z) * np.sqrt(x)
xdot = c1 * x * ybar + c2 * ybar + c3 * x ** 2 + c4 * rf + c5 * x * z - kf * x
zdot = (c6 / z0) * rf + c7 * x * z + c8 * z * v + c9 * z - kf * z
vdot = c10 * x * ybar + c11 * ybar + c12 * x ** 2 + c13 * z * v - kf * v
return xdot * t0, zdot * t0, vdot * t0
class IsothermalChemical(DynSys):
@staticmethod
def _rhs(alpha, beta, gamma, t, delta, kappa, mu, sigma):
alphadot = mu * (kappa + gamma) - alpha * beta ** 2 - alpha
betadot = (alpha * beta ** 2 + alpha - beta) / sigma
gammadot = (beta - gamma) / delta
return alphadot, betadot, gammadot
class VallisElNino(DynSys):
@staticmethod
def _rhs(x, y, z, t, b, c, p):
xdot = b * y - c * (x + p)
ydot = -y + x * z
zdot = -z - x * y + 1
return xdot, ydot, zdot
class RabinovichFabrikant(DynSys):
@staticjit
def _rhs(x, y, z, t, a, g):
xdot = y * (z - 1 + x ** 2) + g * x
ydot = x * (3 * z + 1 - x ** 2) + g * y
zdot = -2 * z * (a + x * y)
return (xdot, ydot, zdot)
class NoseHoover(DynSys):
@staticjit
def _rhs(x, y, z, t, a):
xdot = y
ydot = -x + y * z
zdot = a - y ** 2
return xdot, ydot, zdot
class Dadras(DynSys):
@staticjit
def _rhs(x, y, z, t, c, e, o, p, r):
xdot = y - p * x + o * y * z
ydot = r * y - x * z + z
zdot = c * x * y - e * z
return xdot, ydot, zdot
class RikitakeDynamo(DynSys):
@staticjit
def _rhs(x, y, z, t, a, mu):
xdot = -mu * x + y * z
ydot = -mu * y + x * (z - a)
zdot = 1 - x * y
return xdot, ydot, zdot
class NuclearQuadrupole(DynSys):
@staticjit
def _rhs(q1, q2, p1, p2, t, a, b, d):
q1dot = a * p1
q2dot = a * p2
p1dot = (
-(a * q1)
+ (3 * b * (q1 ** 2 - q2 ** 2)) / np.sqrt(2)
- d * q1 * (q1 ** 2 + q2 ** 2)
)
p2dot = -(q2 * (a + 3 * np.sqrt(2) * b * q1 + d * (q1 ** 2 + q2 ** 2)))
return q1dot, q2dot, p1dot, p2dot
class PehlivanWei(DynSys):
@staticjit
def _rhs(x, y, z, t):
xdot = y - y * z
ydot = y + y * z - 2 * x
zdot = 2 - x * y - y ** 2
return xdot, ydot, zdot
class SprottTorus(DynSys):
@staticjit
def _rhs(x, y, z, t):
xdot = y + 2 * x * y + x * z
ydot = 1 - 2 * x ** 2 + y * z
zdot = x - x ** 2 - y ** 2
return xdot, ydot, zdot
class SprottJerk(DynSys):
@staticjit
def _rhs(x, y, z, t, mu):
xdot = y
ydot = z
zdot = -x + y ** 2 - mu * z
return xdot, ydot, zdot
## Not chaotic
# class JerkCircuit(DynSys):
# def rhs(self, X, t):
# x, y, z = X
# xdot = y
# ydot = z
# zdot = -z - x - self.eps*(np.exp(y/self.y0) - 1)
# return (xdot, ydot, zdot)
class SprottA(DynSys):
@staticjit
def _rhs(x, y, z, t):
xdot = y
ydot = -x + y * z
zdot = 1 - y ** 2
return xdot, ydot, zdot
class SprottB(DynSys):
@staticjit
def _rhs(x, y, z, t):
xdot = y * z
ydot = x - y
zdot = 1 - x * y
return xdot, ydot, zdot
class SprottC(DynSys):
@staticjit
def _rhs(x, y, z, t):
xdot = y * z
ydot = x - y
zdot = 1 - x ** 2
return xdot, ydot, zdot
class SprottD(DynSys):
@staticjit
def _rhs(x, y, z, t):
xdot = -y
ydot = x + z
zdot = x * z + 3 * y ** 2
return xdot, ydot, zdot
class SprottE(DynSys):
@staticjit
def _rhs(x, y, z, t):
xdot = y * z
ydot = x ** 2 - y
zdot = 1 - 4 * x
return xdot, ydot, zdot
class SprottF(DynSys):
@staticjit
def _rhs(x, y, z, t, a):
xdot = y + z
ydot = -x + a * y
zdot = x ** 2 - z
return xdot, ydot, zdot
class SprottG(DynSys):
@staticjit
def _rhs(x, y, z, t, a):
xdot = a * x + z
ydot = x * z - y
zdot = -x + y
return xdot, ydot, zdot
class SprottH(DynSys):
@staticjit
def _rhs(x, y, z, t, a):
xdot = -y + z ** 2
ydot = x + a * y
zdot = x - z
return xdot, ydot, zdot
class SprottI(DynSys):
@staticjit
def _rhs(x, y, z, t, a):
xdot = -a * y
ydot = x + z
zdot = x + y ** 2 - z
return xdot, ydot, zdot
class SprottJ(DynSys):
@staticjit
def _rhs(x, y, z, t):
xdot = 2 * z
ydot = -2 * y + z
zdot = -x + y + y ** 2
return (xdot, ydot, zdot)
class SprottK(DynSys):
@staticjit
def _rhs(x, y, z, t, a):
xdot = x * y - z
ydot = x - y
zdot = x + a * z
return xdot, ydot, zdot
class SprottL(DynSys):
@staticjit
def _rhs(x, y, z, t, a, b):
xdot = y + b * z
ydot = a * x ** 2 - y
zdot = 1 - x
return xdot, ydot, zdot
class SprottM(DynSys):
@staticjit
def _rhs(x, y, z, t, a):
xdot = -z
ydot = -(x ** 2) - y
zdot = a * (1 + x) + y
return xdot, ydot, zdot
class SprottN(DynSys):
@staticjit
def _rhs(x, y, z, t):
xdot = -2 * y
ydot = x + z ** 2
zdot = 1 + y - 2 * z
return xdot, ydot, zdot
class SprottO(DynSys):
@staticjit
def _rhs(x, y, z, t, a):
xdot = y
ydot = x - z
zdot = x + x * z + a * y
return xdot, ydot, zdot
class SprottP(DynSys):
@staticjit
def _rhs(x, y, z, t, a):
xdot = a * y + z
ydot = -x + y ** 2
zdot = x + y
return xdot, ydot, zdot
class SprottQ(DynSys):
@staticjit
def _rhs(x, y, z, t, a, b):
xdot = -z
ydot = x - y
zdot = a * x + y ** 2 + b * z
return (xdot, ydot, zdot)
class SprottR(DynSys):
@staticjit
def _rhs(x, y, z, t, a, b):
xdot = a - y
ydot = b + z
zdot = x * y - z
return xdot, ydot, zdot
class SprottS(DynSys):
@staticjit
def _rhs(x, y, z, t):
xdot = -x - 4 * y
ydot = x + z ** 2
zdot = 1 + x
return xdot, ydot, zdot
class SprottMore(DynSys):
@staticjit
def _rhs(x, y, z, t):
xdot = y
ydot = -x - np.sign(z) * y
zdot = y ** 2 - np.exp(-(x ** 2))
return xdot, ydot, zdot
class Arneodo(DynSys):
@staticjit
def _rhs(x, y, z, t, a, b, c, d):
xdot = y
ydot = z
zdot = -a * x - b * y - c * z + d * x ** 3
return xdot, ydot, zdot
class Coullet(Arneodo):
pass
class Rucklidge(DynSys):
@staticjit
def _rhs(x, y, z, t, a, b):
xdot = -a * x + b * y - y * z
ydot = x
zdot = -z + y ** 2
return xdot, ydot, zdot
class Sakarya(DynSys):
@staticjit
def _rhs(x, y, z, t, a, b, c, h, p, q, r, s):
xdot = a * x + h * y + s * y * z
ydot = -b * y - p * x + q * x * z
zdot = c * z - r * x * y
return xdot, ydot, zdot
class LiuChen(Sakarya):
pass
class RayleighBenard(DynSys):
@staticjit
def _rhs(x, y, z, t, a, b, r):
xdot = a * (y - x)
ydot = r * y - x * z
zdot = x * y - b * z
return xdot, ydot, zdot
class Finance(DynSys):
@staticjit
def _rhs(x, y, z, t, a, b, c):
xdot = (1 / b - a) * x + z + x * y
ydot = -b * y - x ** 2
zdot = -x - c * z
return xdot, ydot, zdot
class Bouali2(DynSys):
@staticjit
def _rhs(x, y, z, t, a, b, bb, c, g, m, y0):
xdot = a * x * (y0 - y) - b * z
ydot = -g * y * (1 - x ** 2)
zdot = -m * x * (1.5 - bb * z) - c * z
return xdot, ydot, zdot
class Bouali(Bouali2):
pass
class LuChenCheng(DynSys):
@staticjit
def _rhs(x, y, z, t, a, b, c):
xdot = -(a * b) / (a + b) * x - y * z + c
ydot = a * y + x * z
zdot = b * z + x * y
return xdot, ydot, zdot
class LuChen(DynSys):
@staticjit
def _rhs(x, y, z, t, a, b, c):
xdot = a * (y - x)
ydot = -x * z + c * y
zdot = x * y - b * z
return xdot, ydot, zdot
class QiChen(DynSys):
@staticjit
def _rhs(x, y, z, t, a, b, c):
xdot = a * (y - x) + y * z
ydot = c * x + y - x * z
zdot = x * y - b * z
return xdot, ydot, zdot
class ZhouChen(DynSys):
@staticjit
def _rhs(x, y, z, t, a, b, c, d, e):
xdot = a * x + b * y + y * z
ydot = c * y - x * z + d * y * z
zdot = e * z - x * y
return xdot, ydot, zdot
class BurkeShaw(DynSys):
@staticjit
def _rhs(x, y, z, t, e, n):
xdot = -n * (x + y)
ydot = y - n * x * z
zdot = n * x * y + e
return xdot, ydot, zdot
class Chen(DynSys):
@staticjit
def _rhs(x, y, z, t, a, b, c):
xdot = a * (y - x)
ydot = (c - a) * x - x * z + c * y
zdot = x * y - b * z
return xdot, ydot, zdot
class ChenLee(DynSys):
@staticjit
def _rhs(x, y, z, t, a, b, c):
xdot = a * x - y * z
ydot = b * y + x * z
zdot = c * z + x * y / 3
return xdot, ydot, zdot
class WangSun(DynSys):
@staticjit
def _rhs(x, y, z, t, a, b, d, e, f, q):
xdot = a * x + q * y * z
ydot = b * x + d * y - x * z
zdot = e * z + f * x * y
return xdot, ydot, zdot
class YuWang(DynSys):
@staticjit
def _rhs(x, y, z, t, a, b, c, d):
xdot = a * (y - x)
ydot = b * x - c * x * z
zdot = np.exp(x * y) - d * z
return xdot, ydot, zdot
class YuWang2(DynSys):
@staticjit
def _rhs(x, y, z, t, a, b, c, d):
xdot = a * (y - x)
ydot = b * x - c * x * z
zdot = np.cosh(x * y) - d * z
return xdot, ydot, zdot
class SanUmSrisuchinwong(DynSys):
@staticjit
def _rhs(x, y, z, t, a):
xdot = y - x
ydot = -z * np.tanh(x)
zdot = -a + x * y + np.abs(y)
return xdot, ydot, zdot
class DequanLi(DynSys):
@staticjit
def _rhs(x, y, z, t, a, c, d, eps, f, k):
xdot = a * (y - x) + d * x * z
ydot = k * x + f * y - x * z
zdot = c * z + x * y - eps * x ** 2
return xdot, ydot, zdot
class PanXuZhou(DequanLi):
pass
class Tsucs2(DequanLi):
pass
class ArnoldWeb(DynSys):
@staticjit
def _rhs(p1, p2, x1, x2, z, t, mu, w):
denom = 4 + | np.cos(z) | numpy.cos |
# Copyright 2019 The TensorNetwork Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from typing import List, Union, Tuple, Optional
# pylint: disable=line-too-long
from tensornetwork.contractors.custom_path_solvers.pathsolvers import full_solve_complete
def ncon_solver(tensors: List[np.ndarray],
labels: List[List[int]],
max_branch: Optional[int] = None):
"""
Solve for the contraction order of a tensor network (encoded in the `ncon`
syntax) that minimizes the computational cost.
Args:
tensors: list of the tensors in the network.
labels: list of the tensor connections (in standard `ncon` format).
max_branch: maximum number of contraction paths to search at each step.
Returns:
np.ndarray: the cheapest contraction order found (in ncon format).
float: the cost of the network contraction, given as log10(total_FLOPS).
bool: specifies if contraction order is guaranteed optimal.
"""
# build log-adjacency matrix
log_adj = ncon_to_adj(tensors, labels)
# run search algorithm
order, costs, is_optimal = full_solve_complete(log_adj, max_branch=max_branch)
# put contraction order back into ncon format
con_order = ord_to_ncon(labels, order)
return con_order, costs, is_optimal
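# Hedged usage sketch (added for illustration, not part of the original
# module): the tensor shapes and ncon labels below are assumptions chosen
# only to show how `ncon_solver` is called.
def _ncon_solver_example():
  """Solve the contraction order of a small three-tensor chain."""
  a = np.random.rand(2, 3)
  b = np.random.rand(3, 4)
  c = np.random.rand(4, 2)
  labels = [[-1, 1], [1, 2], [2, -2]]  # positive labels are contracted indices
  con_order, cost, is_optimal = ncon_solver([a, b, c], labels)
  return con_order, cost, is_optimal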
def ncon_to_adj(tensors: List[np.ndarray], labels: List[List[int]]):
"""
Create a log-adjacency matrix, where element [i,j] is the log10 of the total
dimension of the indices connecting ith and jth tensors, for a network
defined in the `ncon` syntax.
Args:
tensors: list of the tensors in the network.
labels: list of the tensor connections (in standard `ncon` format).
Returns:
np.ndarray: the log-adjacency matrix.
"""
# process inputs
N = len(labels)
ranks = [len(labels[i]) for i in range(N)]
flat_labels = np.hstack([labels[i] for i in range(N)])
tensor_counter = np.hstack(
[i * np.ones(ranks[i], dtype=int) for i in range(N)])
index_counter = np.hstack([np.arange(ranks[i]) for i in range(N)])
# build log-adjacency index-by-index
log_adj = np.zeros([N, N])
unique_labels = np.unique(flat_labels)
for ele in unique_labels:
# identify tensor/index location of each edge
tnr = tensor_counter[flat_labels == ele]
ind = index_counter[flat_labels == ele]
if len(ind) == 1: # external index
log_adj[tnr[0], tnr[0]] += np.log10(tensors[tnr[0]].shape[ind[0]])
elif len(ind) == 2: # internal index
if tnr[0] != tnr[1]: # ignore partial traces
log_adj[tnr[0], tnr[1]] += np.log10(tensors[tnr[0]].shape[ind[0]])
log_adj[tnr[1], tnr[0]] += np.log10(tensors[tnr[0]].shape[ind[0]])
return log_adj
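# For illustration (added; the shapes are assumed): if tensors 0 and 1 share a
# single internal index of dimension 10, then log_adj[0, 1] == log_adj[1, 0]
# == 1.0 (log10 of 10), while an uncontracted index of dimension 4 on tensor 0
# contributes log10(4) to the diagonal entry log_adj[0, 0].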
def ord_to_ncon(labels: List[List[int]], orders: np.ndarray):
"""
Produces a `ncon` compatible index contraction order from the sequence of
pairwise contractions.
Args:
labels: list of the tensor connections (in standard `ncon` format).
orders: array of dim (2,N-1) specifying the set of N-1 pairwise
tensor contractions.
Returns:
np.ndarray: the contraction order (in `ncon` format).
"""
N = len(labels)
orders = orders.reshape(2, N - 1)
new_labels = [np.array(labels[i]) for i in range(N)]
con_order = np.zeros([0], dtype=int)
# remove all partial trace indices
for counter, temp_label in enumerate(new_labels):
uni_inds, counts = np.unique(temp_label, return_counts=True)
tr_inds = uni_inds[np.flatnonzero(counts == 2)]
con_order = np.concatenate((con_order, tr_inds))
new_labels[counter] = temp_label[np.isin(temp_label, uni_inds[counts == 1])]
for i in range(N - 1):
# find common indices between tensor pair
cont_many, A_cont, B_cont = np.intersect1d(
new_labels[orders[0, i]], new_labels[orders[1, i]], return_indices=True)
temp_labels = np.append(
np.delete(new_labels[orders[0, i]], A_cont),
np.delete(new_labels[orders[1, i]], B_cont))
con_order = list(np.concatenate((con_order, cont_many), axis=0))
# build new set of labels
new_labels[orders[0, i]] = temp_labels
del new_labels[orders[1, i]]
return con_order
def ncon_cost_check(tensors: List[np.ndarray],
labels: List[Union[List[int], Tuple[int]]],
con_order: Optional[Union[List[int], str]] = None):
"""
Checks the computational cost of an `ncon` contraction (without actually
doing the contraction). Ignore the cost contributions from partial traces
(which are always sub-leading).
Args:
tensors: list of the tensors in the network.
labels: length-N list of lists (or tuples) specifying the network
connections. The jth entry of the ith list in labels labels the edge
connected to the jth index of the ith tensor. Labels should be positive
integers for internal indices and negative integers for free indices.
con_order: optional argument to specify the order for contracting the
positive indices. Defaults to ascending order if omitted.
Returns:
float: the cost of the network contraction, given as log10(total_FLOPS).
"""
  total_cost = float('-inf')
N = len(tensors)
tensor_dims = [np.array(np.log10(ele.shape)) for ele in tensors]
connect_list = [np.array(ele) for ele in labels]
# generate contraction order if necessary
flat_connect = np.concatenate(connect_list)
if con_order is None:
con_order = np.unique(flat_connect[flat_connect > 0])
else:
con_order = np.array(con_order)
# do all partial traces
for counter, temp_connect in enumerate(connect_list):
uni_inds, counts = np.unique(temp_connect, return_counts=True)
tr_inds = np.isin(temp_connect, uni_inds[counts == 1])
tensor_dims[counter] = tensor_dims[counter][tr_inds]
connect_list[counter] = temp_connect[tr_inds]
con_order = con_order[np.logical_not(
np.isin(con_order, uni_inds[counts == 2]))]
# do all binary contractions
while len(con_order) > 0:
# identify tensors to be contracted
cont_ind = con_order[0]
locs = [
ele for ele in range(len(connect_list))
if sum(connect_list[ele] == cont_ind) > 0
]
# identify indices to be contracted
c1 = connect_list.pop(locs[1])
c0 = connect_list.pop(locs[0])
cont_many, A_cont, B_cont = np.intersect1d(
c0, c1, assume_unique=True, return_indices=True)
    # identify dimensions of the contracted tensors
d1 = tensor_dims.pop(locs[1])
d0 = tensor_dims.pop(locs[0])
single_cost = np.sum(d0) + np.sum(d1) - np.sum(d0[A_cont])
total_cost = single_cost + np.log10(1 + 10**(total_cost - single_cost))
# update lists
    tensor_dims.append(np.append(np.delete(d0, A_cont), np.delete(d1, B_cont)))
'''
Copyright 2022 Airbus SAS
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
"""
Common file providing methods of the func manager (mainly the smooth max and its derivative) to other repositories
"""
# pylint: disable=unsubscriptable-object
import numpy as np
from sos_trades_core.tools.base_functions.exp_min import compute_dfunc_with_exp_min, compute_func_with_exp_min
import warnings
def smooth_maximum(cst, alpha=3):
"""
    Smooth (soft) maximum of the values in cst, computed as an exponentially
    weighted average whose sharpness is controlled by alpha.
"""
max_exp = 650 # max value for exponent input, higher value gives infinity
min_exp = -300
    max_alphax = np.max(np.multiply(alpha, cst))
k = max_alphax - max_exp
# Deal with underflow . max with exp(-300)
exp_func = np.maximum(min_exp, np.multiply(alpha, cst) - k)
den = np.sum(np.exp(exp_func))
num = np.sum(cst * np.exp(exp_func))
if den != 0:
result = num / den
else:
result = np.max(cst)
print('Warning in smooth_maximum! den equals 0, hard max is used')
return result
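# Hedged example (added for illustration, not part of the original module):
# the constraint values and alpha below are arbitrary assumptions, used only
# to show that the smoothed maximum stays close to the hard maximum.
def _smooth_maximum_example():
    cst = np.array([0.2, 0.5, 0.9])
    return smooth_maximum(cst, alpha=3), np.max(cst)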
def smooth_maximum_vect(cst, alpha=3):
"""
Vectorized version of smooth_maximum function
"""
cst_array = np.array(cst)
max_exp = 650 # max value for exponent input, higher value gives infinity
min_exp = -300
max_alphax = np.amax(alpha * cst_array, axis=1)
k = max_alphax - max_exp
# Deal with underflow . max with exp(-300)
exp_func = np.maximum(min_exp, alpha * cst_array -
np.repeat(k, cst_array.shape[1]).reshape(cst_array.shape))
den = np.sum(np.exp(exp_func), axis=1)
num = np.sum(cst_array * np.exp(exp_func), axis=1)
result = np.where(den != 0, num / den, np.amax(cst_array, axis=1))
if (den == 0).any():
print('Warning in smooth_maximum! den equals 0, hard max is used')
return result
def get_dsmooth_dvariable(cst, alpha=3):
max_exp = 650.0 # max value for exponent input, higher value gives infinity
min_exp = -300
alphaxcst = alpha * np.array(cst)
max_alphax = np.max(alphaxcst)
#index_max = alphaxcst.index(max_alphax)
k = max_alphax - max_exp
exp_func = np.maximum(min_exp, alpha * np.array(cst) - k)
den = np.sum(np.exp(exp_func))
num = np.sum(np.array(cst) * np.exp(exp_func))
d_den = []
d_num = []
grad_value = []
for elem in cst:
if alpha * elem == max_alphax:
dden = np.sum([-alpha * np.exp(max(min_exp, alpha * elem_cst - k))
for elem_cst in cst if elem_cst * alpha != max_alphax])
# derivative of den wto cstmax is 0
dden = dden + 0.0
d_den.append(dden)
dnum = np.sum([-alpha * elem_cst * np.exp(max(min_exp, alpha * elem_cst - k))
for elem_cst in cst if elem_cst * alpha != max_alphax])
dnum = dnum + 1.0 * np.exp(alpha * np.array(elem) - k)
d_num.append(dnum)
#grad_val_i = dnum / den - (num / den) * (dden / den)
else:
exp_func = max(min_exp, alpha * elem - k)
dden = alpha * np.exp(exp_func)
d_den.append(dden)
dnum = elem * (alpha * np.exp(exp_func)
) + np.exp(exp_func)
d_num.append(dnum)
# add if den != 0
grad_val_i = dnum / den - (num / den) * (dden / den)
grad_value.append(grad_val_i)
return grad_value
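# Hedged sanity check (added for illustration, not part of the original
# module): compare the analytic gradient with a central finite difference.
# The test vector, alpha and step size are assumptions; the two results
# should agree closely.
def _check_dsmooth_against_finite_differences():
    cst = np.array([0.1, 0.4, 0.3])
    alpha = 3
    analytic = get_dsmooth_dvariable(cst, alpha=alpha)
    step = 1.0e-7
    numeric = []
    for i in range(cst.size):
        up = cst.copy()
        up[i] += step
        down = cst.copy()
        down[i] -= step
        numeric.append((smooth_maximum(up, alpha) - smooth_maximum(down, alpha)) / (2.0 * step))
    return analytic, numeric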
def get_dsmooth_dvariable_vect(cst, alpha=3):
cst_array = np.array(cst)
max_exp = 650.0 # max value for exponent input, higher value gives infinity
min_exp = -300
alphaxcst = alpha * cst_array
max_alphax = np.amax(alphaxcst, axis=1)
k = max_alphax - max_exp
exp_func = np.maximum(min_exp, alpha * cst_array -
np.repeat(k, cst_array.shape[1]).reshape(cst_array.shape))
den = np.sum(np.exp(exp_func), axis=1)
num = np.sum(cst_array * np.exp(exp_func), axis=1)
# Vectorized calculation
exp_func = np.maximum(min_exp, alpha * cst_array -
np.repeat(k, cst_array.shape[1]).reshape(cst_array.shape))
dden = alpha * np.exp(exp_func)
dnum = cst_array * (alpha * np.exp(exp_func)
) + np.exp(exp_func)
grad_value = dnum / np.repeat(den, cst_array.shape[1]).reshape(cst_array.shape) - (np.repeat(num, cst_array.shape[1]).reshape(
cst_array.shape) / np.repeat(den, cst_array.shape[1]).reshape(cst_array.shape)) * (dden / np.repeat(den, cst_array.shape[1]).reshape(cst_array.shape))
# Special case for max element
max_elem = np.amax(cst_array * np.sign(alpha), axis=1) * np.sign(alpha)
non_max_idx = np.array([cst_array[i] != max_elem[i]
for i in np.arange(cst_array.shape[0])]).reshape(cst_array.shape[0], cst_array.shape[1])
dden_max = np.sum(-alpha * non_max_idx *
np.exp(np.maximum(min_exp, alpha * cst_array - np.repeat(k, cst_array.shape[1]).reshape(cst_array.shape))), axis=1)
dnum_max = np.sum(-alpha * cst_array * non_max_idx *
np.exp(np.maximum(min_exp, alpha * cst_array - np.repeat(k, cst_array.shape[1]).reshape(cst_array.shape))), axis=1)
# derivative of den wto cstmax is 0
dden_max = dden_max + 0.0
dnum_max = dnum_max + 1.0 * np.exp(alpha * max_elem - k)
grad_val_max = dnum_max / den - (num / den) * (dden_max / den)
for i in np.arange(cst_array.shape[0]):
grad_value[i][np.logical_not(non_max_idx)[i]] = grad_val_max[i]
return grad_value
def soft_maximum_vect(cst, k=7e2):
"""
    Soft maximum function returning the row-wise maximum of an array of values
    while always remaining above or equal to the true maximum and keeping the
    gradient continuous.
    The default value of k is intended for arrays scaled between [-1.0, 1.0];
    the formula will overflow if k*max(array)>=710.
Quasi-arithmetic mean function.
https://www.johndcook.com/blog/2010/01/13/soft-maximum/
https://www.johndcook.com/blog/2010/01/20/how-to-compute-the-soft-maximum/
"""
cst_array = np.array(cst)
cst_array_limited = np.sign(cst_array)*compute_func_with_exp_min(np.abs(cst_array), 1.0E-15/k)
if 'complex' in str(cst_array.dtype):
cst_array_limited += np.imag(cst_array)*1j
if np.amax(abs(cst_array_limited))*k>709:
raise ValueError('The absolute value of k*max(cst_array) is too high and would cause a floating point error')
result = np.log(np.sum(np.exp(k*cst_array_limited), axis=1))/k
return result
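# Hedged example (added for illustration, not part of the original module):
# the input values below are assumptions; the soft maximum of each row stays
# at or above the corresponding hard maximum.
def _soft_maximum_example():
    cst = np.array([[0.1, 0.5, -0.2],
                    [0.3, 0.3, 0.2]])
    return soft_maximum_vect(cst), np.amax(cst, axis=1)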
def get_dsoft_maximum_vect(cst, k=7e2):
"""
Return derivative of soft maximum
"""
cst_array = np.array(cst)
    cst_array_limited = np.sign(cst_array)*compute_func_with_exp_min(np.abs(cst_array), 1.0E-15/k)
import numpy as np
import sys
entrada = sys.argv[1]
try:
    arquivo = open(entrada,'r')  # open the file
except:
    print("File not found!")
    exit(0)
linhas = arquivo.readlines()  # read the file's lines into the variable linhas
arquivo.close()  # close the file
processos = []  # list of page references (processes)
'''
The input consists of a series of integers,
one per line, giving,
first, the number of frames available in RAM and,
then, the sequence of memory references.
'''
'''
The output consists of lines containing the abbreviation of each of the
three algorithms and the number of page faults obtained by each of them.
'''
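# Illustrative input (added for clarity; the numbers are made up): a file whose
# first line is "3" and whose remaining lines are "0", "4", "1", "4", ... asks
# for a simulation with 3 frames and the page-reference sequence 0, 4, 1, 4, ...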
for i in linhas:
processos.append(int(i.replace("\n","")))
quantMaxMoldura = processos[0]  # maximum number of frames available
del(processos[0])
def segundaChance(passoApasso = False):
    '''Assume that the R bit of every page is cleared after every 4 (four) memory references.'''
    moldura = []  # frame list: each entry holds the page and its reference bit
    filaMolduraEnvelhecimento = []  # queue from the oldest to the newest of the pages currently in the frames
    numFaltas = 0  # the fault counter is incremented every time a page is brought into the frames
    for indice,processo in enumerate(processos):
        if (indice%4 == 0 and indice != 0):  # after every 4 memory references, clear the R bit of every page in the frames
            for k in moldura:
                k[1] = False
            if (passoApasso):
                print("R bits of the pages in the frames were reset!\n")
        if len(moldura) < quantMaxMoldura:  # if there is still a free frame, just add the page
            try:  # check whether the page is already in the frames; if so, do nothing
                list(np.array(moldura)[:,0]).index(processo)
            except (ValueError , IndexError):  # if it has not been added to the frames yet, add it
                moldura.append([processo,True])  # store the page and its reference bit
                numFaltas+=1
                filaMolduraEnvelhecimento.append(processo)
        else:
            try:  # if the page is in the frames with R bit False, set it to True
                moldura[moldura.index([processo,False])][1] = True
            except ValueError:  # reached when the page is not there with R bit False, or not in the frames at all
                try:
                    moldura.index([processo,True])  # check whether the page is present with R bit True; if so, do nothing
                except:  # reached when the page is not in the frames; some page must leave, namely the oldest one whose reference bit is 0
                    numFaltas+=1
                    for indice, i in enumerate(filaMolduraEnvelhecimento):
                        if moldura[list(np.array(moldura)[:,0]).index(i)][1] == False:  # find which page in the frames has to be replaced
                            indiceSubstituicao = list(np.array(moldura)[:,0]).index(i)  # index, in the frame list, of the page to be replaced
                            moldura[indiceSubstituicao][0] = processo  # put the new page in the frame
                            moldura[indiceSubstituicao][1] = True  # set its R bit to True
                            filaMolduraEnvelhecimento+=filaMolduraEnvelhecimento[:indice]  # move the pages that were not replaced to the end of the queue
                            filaMolduraEnvelhecimento+=[processo]  # append the page that just entered to the end of the queue
                            del(filaMolduraEnvelhecimento[:(indice+1)])  # drop from the front of the queue the pages moved to the end and the page that was evicted
                            break
        if (passoApasso):
            for i in moldura:
                print(i)
            print("Page",processo,"arrived!")
            print("Number of faults:",numFaltas)
            print("Current queue:",filaMolduraEnvelhecimento,"\n")
    return numFaltas
def otimo(passoApasso = False):
    moldura = []  # frame list: each entry holds the page and its next-use distance
    numFaltas = 0  # the fault counter is incremented every time a page is brought into the frames
    for indice,processo in enumerate(processos):
        if len(moldura) < quantMaxMoldura:  # if there is still a free frame, just add the page with an empty next-use value (None)
            try:  # check whether the page is already in the frames
                list(np.array(moldura)[:,0]).index(processo)
            except (ValueError , IndexError):
                moldura.append([processo,-1])  # store the page with an empty next-use value; -1 is adopted as the empty marker
                numFaltas+=1
        else:
            try:
                list(np.array(moldura)[:,0]).index(processo)  # check whether the page is already in the frames; if so, do nothing besides resetting the frame values
                for i in moldura:
                    i[1] = -1
            except ValueError:  # otherwise find which page must be replaced: the one whose next use is farthest in the future
                numFaltas+=1
                for k in range(len(moldura)):  # walk over the pages in the frames
                    indiceSubstituicao = list(np.array(moldura)[:,0]).index(moldura[k][0])  # index, in the frame list, of the candidate page
                    try:  # reached when the page is referenced again somewhere ahead
                        indiceOP = processos[(indice+1):].index(moldura[k][0]) + 1
                        moldura[indiceSubstituicao][1] = indiceOP  # store its next-use distance
                    except ValueError:  # the page is never referenced again, so give it an infinite distance
                        indiceOP = float('inf')  # inf stands for an infinitely large number
                        moldura[indiceSubstituicao][1] = indiceOP  # store its next-use distance
                maiorDaMoldura = max(list(np.array(moldura)[:,1]))  # largest next-use distance in the frames
                indiceMaiorDaMoldura = list(np.array(moldura)[:,1]).index(maiorDaMoldura)  # index of the page with the largest distance
                moldura[indiceMaiorDaMoldura][0] = processo  # replace the first page found with the largest distance
                moldura[indiceMaiorDaMoldura][1] = -1  # empty next-use value for the page that just entered
        if (passoApasso):
            for i in moldura:
                print(i)
            print("Page",processo,"arrived!")
            print("Number of faults:",numFaltas,"\n")
    return numFaltas
def conjuntoDeTrabalho(passoApasso = False):
    moldura = []  # [PAGE, R BIT, LAST USE]
    numFaltas = 0
    limiar = quantMaxMoldura/2 + 1
    for indice,processo in enumerate(processos):
        if (indice%4 == 0 and indice != 0):  # after every 4 memory references, clear the R bit of every page in the frames
            for k in moldura:
                k[1] = False
            if (passoApasso):
                print("R bits of the pages in the frames were reset!\n")
        if len(moldura) < quantMaxMoldura :  # if there is still a free frame, just add the page
            try:  # check whether the page has already been added; if so, do nothing
                indiceNaMoldura = list(np.array(moldura)[:,0]).index(processo)
# Copyright (c) 2003-2019 by <NAME>
#
# TreeCorr is free software: redistribution and use in source and binary forms,
# with or without modification, are permitted provided that the following
# conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions, and the disclaimer given in the accompanying LICENSE
# file.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the disclaimer given in the documentation
# and/or other materials provided with the distribution.
from __future__ import print_function
import numpy as np
import os
import coord
import time
import treecorr
from test_helper import get_from_wiki, get_script_name, do_pickle, CaptureLog
from test_helper import assert_raises, timer, assert_warns
from numpy import sin, cos, tan, arcsin, arccos, arctan, arctan2, pi
@timer
def test_direct():
# If the catalogs are small enough, we can do a direct calculation to see if comes out right.
# This should exactly match the treecorr result if brute_force=True
ngal = 200
s = 10.
rng = np.random.RandomState(8675309)
x1 = rng.normal(0,s, (ngal,) )
y1 = rng.normal(0,s, (ngal,) )
w1 = rng.random_sample(ngal)
g11 = rng.normal(0,0.2, (ngal,) )
g21 = rng.normal(0,0.2, (ngal,) )
x2 = rng.normal(0,s, (ngal,) )
y2 = rng.normal(0,s, (ngal,) )
w2 = rng.random_sample(ngal)
g12 = rng.normal(0,0.2, (ngal,) )
g22 = rng.normal(0,0.2, (ngal,) )
cat1 = treecorr.Catalog(x=x1, y=y1, w=w1, g1=g11, g2=g21)
cat2 = treecorr.Catalog(x=x2, y=y2, w=w2, g1=g12, g2=g22)
min_sep = 1.
max_sep = 50.
nbins = 50
bin_size = np.log(max_sep/min_sep) / nbins
gg = treecorr.GGCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins, brute=True)
gg.process(cat1, cat2)
true_npairs = np.zeros(nbins, dtype=int)
true_weight = np.zeros(nbins, dtype=float)
true_xip = np.zeros(nbins, dtype=complex)
true_xim = np.zeros(nbins, dtype=complex)
for i in range(ngal):
# It's hard to do all the pairs at once with numpy operations (although maybe possible).
# But we can at least do all the pairs for each entry in cat1 at once with arrays.
rsq = (x1[i]-x2)**2 + (y1[i]-y2)**2
r = np.sqrt(rsq)
logr = np.log(r)
expmialpha = ((x1[i]-x2) - 1j*(y1[i]-y2)) / r
ww = w1[i] * w2
xip = ww * (g11[i] + 1j*g21[i]) * (g12 - 1j*g22)
xim = ww * (g11[i] + 1j*g21[i]) * (g12 + 1j*g22) * expmialpha**4
index = np.floor(np.log(r/min_sep) / bin_size).astype(int)
mask = (index >= 0) & (index < nbins)
np.add.at(true_npairs, index[mask], 1)
np.add.at(true_weight, index[mask], ww[mask])
np.add.at(true_xip, index[mask], xip[mask])
np.add.at(true_xim, index[mask], xim[mask])
true_xip /= true_weight
true_xim /= true_weight
print('true_npairs = ',true_npairs)
print('diff = ',gg.npairs - true_npairs)
np.testing.assert_array_equal(gg.npairs, true_npairs)
print('true_weight = ',true_weight)
print('diff = ',gg.weight - true_weight)
np.testing.assert_allclose(gg.weight, true_weight, rtol=1.e-5, atol=1.e-8)
print('true_xip = ',true_xip)
print('gg.xip = ',gg.xip)
print('gg.xip_im = ',gg.xip_im)
np.testing.assert_allclose(gg.xip, true_xip.real, rtol=1.e-4, atol=1.e-8)
np.testing.assert_allclose(gg.xip_im, true_xip.imag, rtol=1.e-4, atol=1.e-8)
print('true_xim = ',true_xim)
print('gg.xim = ',gg.xim)
print('gg.xim_im = ',gg.xim_im)
np.testing.assert_allclose(gg.xim, true_xim.real, rtol=1.e-4, atol=1.e-8)
np.testing.assert_allclose(gg.xim_im, true_xim.imag, rtol=1.e-4, atol=1.e-8)
try:
import fitsio
except ImportError:
print('Skipping FITS tests, since fitsio is not installed')
return
# Check that running via the corr2 script works correctly.
config = treecorr.config.read_config('configs/gg_direct.yaml')
cat1.write(config['file_name'])
cat2.write(config['file_name2'])
treecorr.corr2(config)
data = fitsio.read(config['gg_file_name'])
np.testing.assert_allclose(data['r_nom'], gg.rnom)
np.testing.assert_allclose(data['npairs'], gg.npairs)
np.testing.assert_allclose(data['weight'], gg.weight)
np.testing.assert_allclose(data['xip'], gg.xip, rtol=1.e-3)
np.testing.assert_allclose(data['xip_im'], gg.xip_im, rtol=1.e-3)
np.testing.assert_allclose(data['xim'], gg.xim, rtol=1.e-3)
np.testing.assert_allclose(data['xim_im'], gg.xim_im, rtol=1.e-3)
# Repeat with binslop = 0.
# And don't do any top-level recursion so we actually test not going to the leaves.
gg = treecorr.GGCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins, bin_slop=0,
max_top=0)
gg.process(cat1, cat2)
np.testing.assert_array_equal(gg.npairs, true_npairs)
np.testing.assert_allclose(gg.weight, true_weight, rtol=1.e-5, atol=1.e-8)
np.testing.assert_allclose(gg.xip, true_xip.real, rtol=1.e-4, atol=1.e-8)
np.testing.assert_allclose(gg.xip_im, true_xip.imag, rtol=1.e-4, atol=1.e-8)
print('true_xim = ',true_xim)
print('gg.xim = ',gg.xim)
print('gg.xim_im = ',gg.xim_im)
print('diff = ',gg.xim - true_xim.real)
print('max diff = ',np.max(np.abs(gg.xim - true_xim.real)))
print('rel diff = ',(gg.xim - true_xim.real)/true_xim.real)
# This is the one that is highly affected by the approximation from averaging the shears
# before projecting, rather than averaging each shear projected to its own connecting line.
np.testing.assert_allclose(gg.xim, true_xim.real, rtol=1.e-3, atol=3.e-4)
np.testing.assert_allclose(gg.xim_im, true_xim.imag, atol=1.e-3)
# Check a few basic operations with a GGCorrelation object.
do_pickle(gg)
gg2 = gg.copy()
gg2 += gg
np.testing.assert_allclose(gg2.npairs, 2*gg.npairs)
np.testing.assert_allclose(gg2.weight, 2*gg.weight)
np.testing.assert_allclose(gg2.meanr, 2*gg.meanr)
np.testing.assert_allclose(gg2.meanlogr, 2*gg.meanlogr)
np.testing.assert_allclose(gg2.xip, 2*gg.xip)
np.testing.assert_allclose(gg2.xip_im, 2*gg.xip_im)
np.testing.assert_allclose(gg2.xim, 2*gg.xim)
np.testing.assert_allclose(gg2.xim_im, 2*gg.xim_im)
gg2.clear()
gg2 += gg
np.testing.assert_allclose(gg2.npairs, gg.npairs)
np.testing.assert_allclose(gg2.weight, gg.weight)
np.testing.assert_allclose(gg2.meanr, gg.meanr)
np.testing.assert_allclose(gg2.meanlogr, gg.meanlogr)
np.testing.assert_allclose(gg2.xip, gg.xip)
np.testing.assert_allclose(gg2.xip_im, gg.xip_im)
np.testing.assert_allclose(gg2.xim, gg.xim)
np.testing.assert_allclose(gg2.xim_im, gg.xim_im)
ascii_name = 'output/gg_ascii.txt'
gg.write(ascii_name, precision=16)
gg3 = treecorr.GGCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins)
gg3.read(ascii_name)
np.testing.assert_allclose(gg3.npairs, gg.npairs)
np.testing.assert_allclose(gg3.weight, gg.weight)
np.testing.assert_allclose(gg3.meanr, gg.meanr)
np.testing.assert_allclose(gg3.meanlogr, gg.meanlogr)
np.testing.assert_allclose(gg3.xip, gg.xip)
np.testing.assert_allclose(gg3.xip_im, gg.xip_im)
np.testing.assert_allclose(gg3.xim, gg.xim)
np.testing.assert_allclose(gg3.xim_im, gg.xim_im)
fits_name = 'output/gg_fits.fits'
gg.write(fits_name)
gg4 = treecorr.GGCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins)
gg4.read(fits_name)
np.testing.assert_allclose(gg4.npairs, gg.npairs)
np.testing.assert_allclose(gg4.weight, gg.weight)
np.testing.assert_allclose(gg4.meanr, gg.meanr)
np.testing.assert_allclose(gg4.meanlogr, gg.meanlogr)
np.testing.assert_allclose(gg4.xip, gg.xip)
np.testing.assert_allclose(gg4.xip_im, gg.xip_im)
np.testing.assert_allclose(gg4.xim, gg.xim)
np.testing.assert_allclose(gg4.xim_im, gg.xim_im)
with assert_raises(TypeError):
gg2 += config
gg4 = treecorr.GGCorrelation(min_sep=min_sep/2, max_sep=max_sep, nbins=nbins)
with assert_raises(ValueError):
gg2 += gg4
gg5 = treecorr.GGCorrelation(min_sep=min_sep, max_sep=max_sep*2, nbins=nbins)
with assert_raises(ValueError):
gg2 += gg5
gg6 = treecorr.GGCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins*2)
with assert_raises(ValueError):
gg2 += gg6
@timer
def test_direct_spherical():
# Repeat in spherical coords
ngal = 100
s = 10.
rng = np.random.RandomState(8675309)
x1 = rng.normal(0,s, (ngal,) )
y1 = rng.normal(0,s, (ngal,) ) + 200 # Put everything at large y, so small angle on sky
z1 = rng.normal(0,s, (ngal,) )
w1 = rng.random_sample(ngal)
g11 = rng.normal(0,0.2, (ngal,) )
g21 = rng.normal(0,0.2, (ngal,) )
x2 = rng.normal(0,s, (ngal,) )
y2 = rng.normal(0,s, (ngal,) ) + 200
z2 = rng.normal(0,s, (ngal,) )
w2 = rng.random_sample(ngal)
g12 = rng.normal(0,0.2, (ngal,) )
g22 = rng.normal(0,0.2, (ngal,) )
ra1, dec1 = coord.CelestialCoord.xyz_to_radec(x1,y1,z1)
ra2, dec2 = coord.CelestialCoord.xyz_to_radec(x2,y2,z2)
cat1 = treecorr.Catalog(ra=ra1, dec=dec1, ra_units='rad', dec_units='rad', w=w1, g1=g11, g2=g21)
cat2 = treecorr.Catalog(ra=ra2, dec=dec2, ra_units='rad', dec_units='rad', w=w2, g1=g12, g2=g22)
min_sep = 1.
max_sep = 10.
nbins = 50
bin_size = np.log(max_sep/min_sep) / nbins
gg = treecorr.GGCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins,
sep_units='deg', brute=True)
gg.process(cat1, cat2)
r1 = np.sqrt(x1**2 + y1**2 + z1**2)
r2 = np.sqrt(x2**2 + y2**2 + z2**2)
x1 /= r1; y1 /= r1; z1 /= r1
x2 /= r2; y2 /= r2; z2 /= r2
north_pole = coord.CelestialCoord(0*coord.radians, 90*coord.degrees)
true_npairs = np.zeros(nbins, dtype=int)
true_weight = np.zeros(nbins, dtype=float)
true_xip = np.zeros(nbins, dtype=complex)
true_xim = np.zeros(nbins, dtype=complex)
rad_min_sep = min_sep * coord.degrees / coord.radians
c1 = [coord.CelestialCoord(r*coord.radians, d*coord.radians) for (r,d) in zip(ra1, dec1)]
c2 = [coord.CelestialCoord(r*coord.radians, d*coord.radians) for (r,d) in zip(ra2, dec2)]
for i in range(ngal):
for j in range(ngal):
rsq = (x1[i]-x2[j])**2 + (y1[i]-y2[j])**2 + (z1[i]-z2[j])**2
r = np.sqrt(rsq)
logr = np.log(r)
index = np.floor(np.log(r/rad_min_sep) / bin_size).astype(int)
if index < 0 or index >= nbins:
continue
# Rotate shears to coordinates where line connecting is horizontal.
# Original orientation is where north is up.
theta1 = 90*coord.degrees - c1[i].angleBetween(north_pole, c2[j])
theta2 = 90*coord.degrees - c2[j].angleBetween(north_pole, c1[i])
exp2theta1 = np.cos(2*theta1) + 1j * np.sin(2*theta1)
exp2theta2 = np.cos(2*theta2) + 1j * np.sin(2*theta2)
g1 = g11[i] + 1j * g21[i]
g2 = g12[j] + 1j * g22[j]
g1 *= exp2theta1
g2 *= exp2theta2
ww = w1[i] * w2[j]
xip = ww * g1 * np.conjugate(g2)
xim = ww * g1 * g2
true_npairs[index] += 1
true_weight[index] += ww
true_xip[index] += xip
true_xim[index] += xim
true_xip /= true_weight
true_xim /= true_weight
print('true_npairs = ',true_npairs)
print('diff = ',gg.npairs - true_npairs)
np.testing.assert_array_equal(gg.npairs, true_npairs)
print('true_weight = ',true_weight)
print('diff = ',gg.weight - true_weight)
np.testing.assert_allclose(gg.weight, true_weight, rtol=1.e-5, atol=1.e-8)
print('true_xip = ',true_xip)
print('gg.xip = ',gg.xip)
print('gg.xip_im = ',gg.xip_im)
np.testing.assert_allclose(gg.xip, true_xip.real, rtol=1.e-4, atol=1.e-8)
np.testing.assert_allclose(gg.xip_im, true_xip.imag, rtol=1.e-4, atol=1.e-8)
print('true_xim = ',true_xim)
print('gg.xim = ',gg.xim)
print('gg.xim_im = ',gg.xim_im)
np.testing.assert_allclose(gg.xim, true_xim.real, rtol=1.e-4, atol=1.e-8)
np.testing.assert_allclose(gg.xim_im, true_xim.imag, rtol=1.e-4, atol=1.e-8)
try:
import fitsio
except ImportError:
print('Skipping FITS tests, since fitsio is not installed')
return
# Check that running via the corr2 script works correctly.
config = treecorr.config.read_config('configs/gg_direct_spherical.yaml')
cat1.write(config['file_name'])
cat2.write(config['file_name2'])
treecorr.corr2(config)
data = fitsio.read(config['gg_file_name'])
np.testing.assert_allclose(data['r_nom'], gg.rnom)
np.testing.assert_allclose(data['npairs'], gg.npairs)
np.testing.assert_allclose(data['weight'], gg.weight)
np.testing.assert_allclose(data['xip'], gg.xip, rtol=1.e-3)
np.testing.assert_allclose(data['xip_im'], gg.xip_im, rtol=1.e-3)
np.testing.assert_allclose(data['xim'], gg.xim, rtol=1.e-3)
np.testing.assert_allclose(data['xim_im'], gg.xim_im, rtol=1.e-3)
# Repeat with binslop = 0
# And don't do any top-level recursion so we actually test not going to the leaves.
gg = treecorr.GGCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins,
sep_units='deg', bin_slop=0, max_top=0)
gg.process(cat1, cat2)
np.testing.assert_array_equal(gg.npairs, true_npairs)
np.testing.assert_allclose(gg.weight, true_weight, rtol=1.e-5, atol=1.e-8)
np.testing.assert_allclose(gg.xip, true_xip.real, rtol=1.e-3, atol=1.e-6)
np.testing.assert_allclose(gg.xip_im, true_xip.imag, rtol=1.e-3, atol=1.e-6)
diff = np.abs(gg.xim - true_xim.real)
reldiff = diff / true_xim.real
np.testing.assert_allclose(gg.xim, true_xim.real, rtol=1.e-3, atol=2.e-4)
np.testing.assert_allclose(gg.xim_im, true_xim.imag, rtol=1.e-3, atol=2.e-4)
@timer
def test_pairwise():
# Test the pairwise option.
ngal = 1000
s = 10.
rng = np.random.RandomState(8675309)
x1 = rng.normal(0,s, (ngal,) )
y1 = rng.normal(0,s, (ngal,) )
w1 = rng.random_sample(ngal)
g11 = rng.normal(0,0.2, (ngal,) )
g21 = rng.normal(0,0.2, (ngal,) )
x2 = rng.normal(0,s, (ngal,) )
y2 = rng.normal(0,s, (ngal,) )
w2 = rng.random_sample(ngal)
g12 = rng.normal(0,0.2, (ngal,) )
g22 = rng.normal(0,0.2, (ngal,) )
w1 = np.ones_like(w1)
w2 = np.ones_like(w2)
cat1 = treecorr.Catalog(x=x1, y=y1, w=w1, g1=g11, g2=g21)
cat2 = treecorr.Catalog(x=x2, y=y2, w=w2, g1=g12, g2=g22)
min_sep = 5.
max_sep = 50.
nbins = 10
bin_size = np.log(max_sep/min_sep) / nbins
gg = treecorr.GGCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins)
with assert_warns(FutureWarning):
gg.process_pairwise(cat1, cat2)
gg.finalize(cat1.varg, cat2.varg)
true_npairs = np.zeros(nbins, dtype=int)
true_weight = np.zeros(nbins, dtype=float)
true_xip = np.zeros(nbins, dtype=complex)
true_xim = np.zeros(nbins, dtype=complex)
rsq = (x1-x2)**2 + (y1-y2)**2
r = np.sqrt(rsq)
logr = np.log(r)
expmialpha = ((x1-x2) - 1j*(y1-y2)) / r
ww = w1 * w2
xip = ww * (g11 + 1j*g21) * (g12 - 1j*g22)
xim = ww * (g11 + 1j*g21) * (g12 + 1j*g22) * expmialpha**4
index = np.floor(np.log(r/min_sep) / bin_size).astype(int)
mask = (index >= 0) & (index < nbins)
np.add.at(true_npairs, index[mask], 1)
np.add.at(true_weight, index[mask], ww[mask])
np.add.at(true_xip, index[mask], xip[mask])
np.add.at(true_xim, index[mask], xim[mask])
true_xip /= true_weight
true_xim /= true_weight
np.testing.assert_array_equal(gg.npairs, true_npairs)
np.testing.assert_allclose(gg.weight, true_weight, rtol=1.e-5, atol=1.e-8)
np.testing.assert_allclose(gg.xip, true_xip.real, rtol=1.e-4, atol=1.e-8)
np.testing.assert_allclose(gg.xip_im, true_xip.imag, rtol=1.e-4, atol=1.e-8)
np.testing.assert_allclose(gg.xim, true_xim.real, rtol=1.e-4, atol=1.e-8)
np.testing.assert_allclose(gg.xim_im, true_xim.imag, rtol=1.e-4, atol=1.e-8)
# If cats have names, then the logger will mention them.
# Also, test running with optional args.
cat1.name = "first"
cat2.name = "second"
with CaptureLog() as cl:
gg.logger = cl.logger
with assert_warns(FutureWarning):
gg.process_pairwise(cat1, cat2, metric='Euclidean', num_threads=2)
assert "for cats first, second" in cl.output
@timer
def test_gg():
# cf. http://adsabs.harvard.edu/abs/2002A%26A...389..729S for the basic formulae I use here.
#
# Use gamma_t(r) = gamma0 r^2/r0^2 exp(-r^2/2r0^2)
# i.e. gamma(r) = -gamma0 exp(-r^2/2r0^2) (x+iy)^2 / r0^2
#
# The Fourier transform is: gamma~(k) = -2 pi gamma0 r0^4 k^2 exp(-r0^2 k^2/2) / L^2
# P(k) = (1/2pi) <|gamma~(k)|^2> = 2 pi gamma0^2 r0^8 k^4 / L^4 exp(-r0^2 k^2)
# xi+(r) = (1/2pi) int( dk k P(k) J0(kr) )
# = pi/16 gamma0^2 (r0/L)^2 exp(-r^2/4r0^2) (r^4 - 16r^2r0^2 + 32r0^4)/r0^4
# xi-(r) = (1/2pi) int( dk k P(k) J4(kr) )
# = pi/16 gamma0^2 (r0/L)^2 exp(-r^2/4r0^2) r^4/r0^4
# Note: I'm not sure I handled the L factors correctly, but the units at the end need
# to be gamma^2, so it needs to be (r0/L)^2.
gamma0 = 0.05
r0 = 10.
if __name__ == "__main__":
ngal = 1000000
L = 50.*r0 # Not infinity, so this introduces some error. Our integrals were to infinity.
tol_factor = 1
else:
ngal = 100000
L = 50.*r0
# Rather than have a single set tolerance, we tune the tolerances for the above
# __main__ setup, but scale up by a factor of 5 for the quicker run.
tol_factor = 5
rng = np.random.RandomState(8675309)
x = (rng.random_sample(ngal)-0.5) * L
y = (rng.random_sample(ngal)-0.5) * L
r2 = (x**2 + y**2)/r0**2
g1 = -gamma0 * np.exp(-r2/2.) * (x**2-y**2)/r0**2
g2 = -gamma0 * np.exp(-r2/2.) * (2.*x*y)/r0**2
cat = treecorr.Catalog(x=x, y=y, g1=g1, g2=g2, x_units='arcmin', y_units='arcmin')
gg = treecorr.GGCorrelation(bin_size=0.1, min_sep=1., max_sep=100., sep_units='arcmin',
verbose=1)
gg.process(cat)
# log(<R>) != <logR>, but it should be close:
print('meanlogr - log(meanr) = ',gg.meanlogr - np.log(gg.meanr))
np.testing.assert_allclose(gg.meanlogr, np.log(gg.meanr), atol=1.e-3)
r = gg.meanr
temp = np.pi/16. * gamma0**2 * (r0/L)**2 * np.exp(-0.25*r**2/r0**2)
true_xip = temp * (r**4 - 16.*r**2*r0**2 + 32.*r0**4)/r0**4
true_xim = temp * r**4/r0**4
print('gg.xip = ',gg.xip)
print('true_xip = ',true_xip)
print('ratio = ',gg.xip / true_xip)
print('diff = ',gg.xip - true_xip)
print('max diff = ',max(abs(gg.xip - true_xip)))
# It's within 10% everywhere except at the zero crossings.
np.testing.assert_allclose(gg.xip, true_xip, rtol=0.1 * tol_factor, atol=1.e-7 * tol_factor)
print('xip_im = ',gg.xip_im)
np.testing.assert_allclose(gg.xip_im, 0, atol=2.e-7 * tol_factor)
print('gg.xim = ',gg.xim)
print('true_xim = ',true_xim)
print('ratio = ',gg.xim / true_xim)
print('diff = ',gg.xim - true_xim)
print('max diff = ',max(abs(gg.xim - true_xim)))
np.testing.assert_allclose(gg.xim, true_xim, rtol=0.1 * tol_factor, atol=2.e-7 * tol_factor)
print('xim_im = ',gg.xim_im)
np.testing.assert_allclose(gg.xim_im, 0, atol=1.e-7 * tol_factor)
# Should also work as a cross-correlation with itself
gg.process(cat,cat)
np.testing.assert_allclose(gg.meanlogr, np.log(gg.meanr), atol=1.e-3)
assert max(abs(gg.xip - true_xip)) < 3.e-7 * tol_factor
assert max(abs(gg.xip_im)) < 2.e-7 * tol_factor
assert max(abs(gg.xim - true_xim)) < 3.e-7 * tol_factor
assert max(abs(gg.xim_im)) < 1.e-7 * tol_factor
# We check the accuracy of the MapSq calculation below in test_mapsq.
# Here we just check that it runs, round trips correctly through an output file,
# and gives the same answer when run through corr2.
mapsq, mapsq_im, mxsq, mxsq_im, varmapsq = gg.calculateMapSq()
print('mapsq = ',mapsq)
print('mxsq = ',mxsq)
mapsq_file = 'output/gg_m2.txt'
gg.writeMapSq(mapsq_file, precision=16)
data = np.genfromtxt(os.path.join('output','gg_m2.txt'), names=True)
np.testing.assert_allclose(data['Mapsq'], mapsq)
np.testing.assert_allclose(data['Mxsq'], mxsq)
# Check that we get the same result using the corr2 function:
cat.write(os.path.join('data','gg.dat'))
config = treecorr.read_config('configs/gg.yaml')
config['verbose'] = 0
config['precision'] = 8
treecorr.corr2(config)
corr2_output = np.genfromtxt(os.path.join('output','gg.out'), names=True, skip_header=1)
print('gg.xip = ',gg.xip)
print('from corr2 output = ',corr2_output['xip'])
print('ratio = ',corr2_output['xip']/gg.xip)
print('diff = ',corr2_output['xip']-gg.xip)
np.testing.assert_allclose(corr2_output['xip'], gg.xip, rtol=1.e-4)
print('gg.xim = ',gg.xim)
print('from corr2 output = ',corr2_output['xim'])
print('ratio = ',corr2_output['xim']/gg.xim)
print('diff = ',corr2_output['xim']-gg.xim)
np.testing.assert_allclose(corr2_output['xim'], gg.xim, rtol=1.e-4)
print('xip_im from corr2 output = ',corr2_output['xip_im'])
print('max err = ',max(abs(corr2_output['xip_im'])))
np.testing.assert_allclose(corr2_output['xip_im'], 0, atol=2.e-7 * tol_factor)
print('xim_im from corr2 output = ',corr2_output['xim_im'])
print('max err = ',max(abs(corr2_output['xim_im'])))
np.testing.assert_allclose(corr2_output['xim_im'], 0, atol=2.e-7 * tol_factor)
# Check m2 output
corr2_output2 = np.genfromtxt(os.path.join('output','gg_m2.out'), names=True)
print('mapsq = ',mapsq)
print('from corr2 output = ',corr2_output2['Mapsq'])
print('ratio = ',corr2_output2['Mapsq']/mapsq)
print('diff = ',corr2_output2['Mapsq']-mapsq)
np.testing.assert_allclose(corr2_output2['Mapsq'], mapsq, rtol=1.e-4)
print('mxsq = ',mxsq)
print('from corr2 output = ',corr2_output2['Mxsq'])
print('ratio = ',corr2_output2['Mxsq']/mxsq)
print('diff = ',corr2_output2['Mxsq']-mxsq)
np.testing.assert_allclose(corr2_output2['Mxsq'], mxsq, rtol=1.e-4)
# OK to have m2 output, but not gg
del config['gg_file_name']
treecorr.corr2(config)
corr2_output2 = np.genfromtxt(os.path.join('output','gg_m2.out'), names=True)
np.testing.assert_allclose(corr2_output2['Mapsq'], mapsq, rtol=1.e-4)
np.testing.assert_allclose(corr2_output2['Mxsq'], mxsq, rtol=1.e-4)
try:
import fitsio
except ImportError:
print('Skipping FITS tests, since fitsio is not installed')
return
# Check the fits write option
out_file_name = os.path.join('output','gg_out.fits')
gg.write(out_file_name)
data = fitsio.read(out_file_name)
np.testing.assert_allclose(data['r_nom'], np.exp(gg.logr))
np.testing.assert_allclose(data['meanr'], gg.meanr)
np.testing.assert_allclose(data['meanlogr'], gg.meanlogr)
np.testing.assert_allclose(data['xip'], gg.xip)
np.testing.assert_allclose(data['xim'], gg.xim)
np.testing.assert_allclose(data['xip_im'], gg.xip_im)
np.testing.assert_allclose(data['xim_im'], gg.xim_im)
np.testing.assert_allclose(data['sigma_xip'], np.sqrt(gg.varxip))
np.testing.assert_allclose(data['sigma_xim'], np.sqrt(gg.varxim))
np.testing.assert_allclose(data['weight'], gg.weight)
np.testing.assert_allclose(data['npairs'], gg.npairs)
# Check the read function
gg2 = treecorr.GGCorrelation(bin_size=0.1, min_sep=1., max_sep=100., sep_units='arcmin')
gg2.read(out_file_name)
np.testing.assert_allclose(gg2.logr, gg.logr)
np.testing.assert_allclose(gg2.meanr, gg.meanr)
np.testing.assert_allclose(gg2.meanlogr, gg.meanlogr)
np.testing.assert_allclose(gg2.xip, gg.xip)
np.testing.assert_allclose(gg2.xim, gg.xim)
np.testing.assert_allclose(gg2.xip_im, gg.xip_im)
np.testing.assert_allclose(gg2.xim_im, gg.xim_im)
np.testing.assert_allclose(gg2.varxip, gg.varxip)
np.testing.assert_allclose(gg2.varxim, gg.varxim)
np.testing.assert_allclose(gg2.weight, gg.weight)
np.testing.assert_allclose(gg2.npairs, gg.npairs)
assert gg2.coords == gg.coords
assert gg2.metric == gg.metric
assert gg2.sep_units == gg.sep_units
assert gg2.bin_type == gg.bin_type
# Also check the Schneider version.
mapsq, mapsq_im, mxsq, mxsq_im, varmapsq = gg.calculateMapSq(m2_uform='Schneider')
print('Schneider mapsq = ',mapsq)
print('mxsq = ',mxsq)
print('max = ',max(abs(mxsq)))
# And GamSq.
gamsq, vargamsq = gg.calculateGamSq()
print('gamsq = ',gamsq)
gamsq, vargamsq, gamsq_e, gamsq_b, vargamsq_eb = gg.calculateGamSq(eb=True)
print('gamsq_e = ',gamsq_e)
print('gamsq_b = ',gamsq_b)
# The Gamsq columns were already output in the above m2_output run of corr2.
np.testing.assert_allclose(corr2_output2['Gamsq'], gamsq, rtol=1.e-4)
@timer
def test_mapsq():
# Use the same gamma(r) as in test_gg.
# This time, rather than use a smaller catalog in the nosetests run, we skip the run
# in that case and just read in the output file. This way we can test the Map^2 formulae
# on the more precise output.
# When running from the command line, the output file is made from scratch.
gamma0 = 0.05
r0 = 10.
L = 50.*r0
cat_name = os.path.join('data','gg_map.dat')
out_name = os.path.join('data','gg_map.out')
gg = treecorr.GGCorrelation(bin_size=0.1, min_sep=1, nbins=47, sep_units='arcmin',
verbose=1)
if __name__ == "__main__":
ngal = 1000000
        rng = np.random.RandomState(8675309)
from __future__ import (absolute_import, division,
print_function, unicode_literals)
import numpy as np
from pyapprox.multivariate_polynomials import PolynomialChaosExpansion
from pyapprox.utilities import evaluate_tensor_product_function,\
gradient_of_tensor_product_function
def get_quadrature_weights_from_samples(compute_basis_matrix,indices,samples):
"""
    Get the quadrature weights from a set of samples. The number of samples
    must equal the number of terms in the polynomial basis.
Parameters
----------
compute_basis_matrix : callable compute_basis_matrix(samples,indices)
Function used to construct the basis matrix eavluate at samples.
indices : np.ndarray (num_vars, num_indices)
The mutivariate indices definining the polynomial basis
samples : np.ndarray (num_vars, num_samples)
The samples for which quadrature weights are desired.
Return
------
weights : np.ndarray (num_samples)
The quadrature weights.
"""
assert samples.shape[1]==indices.shape[1]
basis_matrix = compute_basis_matrix(indices,samples)
basis_matrix_inv = np.linalg.inv(basis_matrix)
weights = basis_matrix_inv[0,:]
return weights
def leja_objective_and_gradient(samples, leja_sequence, poly, new_indices,
coeff, weight_function, weight_function_deriv,
deriv_order=0):
"""
Evaluate the Leja objective at a set of samples.
Parameters
----------
samples : np.ndarray (num_vars, num_samples)
The sample at which to evaluate the leja_objective
leja_sequence : np.ndarray (num_vars, num_leja_samples)
The sample already in the Leja sequence
deriv_order : integer
        Flag specifying whether to compute gradients of the objective
new_indices : np.ndarray (num_vars, num_new_indices)
The new indices that are considered when choosing next sample
in the Leja sequence
coeff : np.ndarray (num_indices, num_new_indices)
The coefficient of the approximation that interpolates the polynomial
terms specified by new_indices
Return
------
residuals : np.ndarray(num_new_indices,num_samples):
objective_vals : np.ndarray (num_samples)
The values of the objective at samples
objective_grads : np.ndarray (num_vars,num_samples)
The gradient of the objective at samples. Return only
if deriv_order==1
"""
assert samples.ndim == 2
num_vars, num_samples = samples.shape
assert num_samples == 1
indices = poly.indices.copy()
poly.set_indices(new_indices)
basis_matrix_for_new_indices_at_samples = poly.basis_matrix(
samples,{'deriv_order':deriv_order})
if deriv_order==1:
basis_deriv_matrix_for_new_indices_at_samples = \
basis_matrix_for_new_indices_at_samples[1:,:]
basis_matrix_for_new_indices_at_samples = \
basis_matrix_for_new_indices_at_samples[:1,:]
poly.set_indices(indices)
basis_matrix_at_samples = poly.basis_matrix(
samples[:,:1],{'deriv_order':deriv_order})
if deriv_order==1:
basis_deriv_matrix_at_samples = basis_matrix_at_samples[1:,:]
basis_matrix_at_samples = basis_matrix_at_samples[:1,:]
weights = weight_function(samples)
# to avoid division by zero
weights = np.maximum(weights,0)
assert weights.ndim==1
sqrt_weights = np.sqrt(weights)
poly_vals = np.dot(basis_matrix_at_samples,coeff)
unweighted_residual = basis_matrix_for_new_indices_at_samples-poly_vals
residual = sqrt_weights*unweighted_residual
num_residual_entries = residual.shape[1]
if deriv_order==0:
return (residual,)
poly_derivs = np.dot(basis_deriv_matrix_at_samples,coeff)
weight_derivs = weight_function_deriv(samples)
unweighted_residual_derivs = \
poly_derivs-basis_deriv_matrix_for_new_indices_at_samples
jacobian = np.zeros((num_residual_entries,num_vars),dtype=float)
I = np.where(weights>0)[0]
for dd in range(num_vars):
jacobian[I,dd]=(
unweighted_residual[0,I]*weight_derivs[dd,I]/(2.0*sqrt_weights[I])-
unweighted_residual_derivs[dd,I]*sqrt_weights[I])
assert residual.ndim==2
return residual, jacobian
def compute_coefficients_of_leja_interpolant(leja_sequence, poly,new_indices,
weight_function):
weights = weight_function(leja_sequence)
# to avoid division by zero
weights = np.maximum(weights, 0)
assert weights.ndim == 1
sqrt_weights = np.sqrt(weights)
indices = poly.indices.copy()
poly.set_indices(new_indices)
#basis_matrix_for_new_indices_at_leja = poly.basis_matrix(leja_sequence)
basis_matrix_for_new_indices_at_leja = (
poly.basis_matrix(leja_sequence).T*sqrt_weights).T
poly.set_indices(indices)
# replace with more efficient procedure that just updates LU and
    # uses backwards substitution
#basis_matrix_at_leja = poly.basis_matrix(leja_sequence)
basis_matrix_at_leja = (poly.basis_matrix(leja_sequence).T*sqrt_weights).T
out = np.linalg.lstsq(
basis_matrix_at_leja, basis_matrix_for_new_indices_at_leja, rcond=None)
coeffs = out[0]
return coeffs
def leja_objective(samples, leja_sequence, poly, new_indices, coeff,
weight_function, weight_function_deriv):
objective_vals = np.empty((samples.shape[1]),dtype=float)
for ii in range(samples.shape[1]):
residual = leja_objective_and_gradient(
samples[:,ii:ii+1], leja_sequence, poly, new_indices, coeff,
weight_function, weight_function_deriv)[0]
objective_vals[ii] = 0.5*np.dot(residual.squeeze(),residual.squeeze())
return objective_vals
def compute_finite_difference_derivative(func,sample,fd_eps=1e-6):
assert sample.ndim==2
num_vars = sample.shape[0]
fd_samples = np.empty((num_vars,num_vars+1))
fd_samples[:,0] = sample[:,0].copy()
for dd in range(num_vars):
fd_samples[:,dd+1]=sample[:,0].copy()
fd_samples[dd,dd+1]+=fd_eps
objective_at_fd_samples = func(fd_samples)
fd_deriv = np.empty((num_vars,1))
for dd in range(num_vars):
fd_deriv[dd]=\
(objective_at_fd_samples[dd+1]-objective_at_fd_samples[0])/(fd_eps)
return fd_deriv
class LejaObjective():
def __init__(self,poly,weight_function,weight_function_deriv):
self.poly=poly
self.unscaled_weight_function = weight_function
self.unscaled_weight_function_deriv = weight_function_deriv
self.set_scale(1)
def set_scale(self,scale):
"""
scale objective function by a scalar. This can make finding values of
small objectives easier.
"""
assert scale>0
self.scale=max(scale,1e-8)
self.weight_function = \
lambda x: self.unscaled_weight_function(x)/self.scale
self.weight_function_deriv = \
lambda x: self.unscaled_weight_function_deriv(x)/self.scale
def precompute_residual_and_jacobian(self,sample,leja_sequence,
new_indices,coeffs):
self.sample=sample
if sample.ndim==1:
sample = sample[:,np.newaxis]
self.residual, self.jacobian = leja_objective_and_gradient(
sample, leja_sequence, self.poly, new_indices, coeffs,
self.weight_function, self.weight_function_deriv, deriv_order=1)
def gradient(self,sample,leja_sequence,new_indices,coeffs):
assert np.allclose(sample,self.sample)
if sample.ndim==1:
sample = sample[:,np.newaxis]
# from functools import partial
# func = partial(leja_objective,leja_sequence=leja_sequence,
# poly=self.poly,
# new_indices=new_indices, coeff=coeffs,
# weight_function=self.weight_function,
# weight_function_deriv=self.weight_function_deriv)
# fd_eps=1e-7
# fd_deriv = -compute_finite_difference_derivative(
# func,sample,fd_eps=fd_eps)
gradient = -np.dot(self.jacobian.T,self.residual)
#print('gk',sample,gradient)
return gradient
def __call__(self,sample,leja_sequence,new_indices,coeffs):
self.precompute_residual_and_jacobian(
sample, leja_sequence,new_indices,coeffs)
val = -0.5*np.dot(self.residual,self.residual)
#print('val',sample,val)
return val
def jacobian(self,sample,leja_sequence,new_indices,coeffs):
assert np.allclose(sample,self.sample)
return self.jacobian
def plot(self,leja_sequence,poly,new_indices,coeffs,ranges):
import matplotlib.pyplot as plt
if leja_sequence.shape[0]==1:
num_samples = 400
samples = np.linspace(
ranges[0],ranges[1],num_samples).reshape(1,num_samples)
objective_vals = -leja_objective(
samples, leja_sequence, poly, new_indices, coeffs,
self.weight_function, self.weight_function_deriv)
# print(self.weight_function(samples))
# unweighted_objective_vals = -leja_objective(
# samples, leja_sequence, poly, new_indices, coeffs,
# lambda x: np.ones(x.shape[1]), lambda x: np.zeros(x.shape[1]))
# print(unweighted_objective_vals)
#objective_vals = np.array([self(samples[:,ii],leja_sequence,new_indices,coeffs) for ii in range(num_samples)]).squeeze()
plt.plot(samples[0,:],objective_vals,lw=3)
plt.plot(leja_sequence[0,:],leja_sequence[0,:]*0.0,'o',
label='Leja sequence')
#plt.ylim(-1,1)
from scipy.optimize import fmin_l_bfgs_b
def optimize(obj,initial_guess,ranges,objective_args,scale):
bounds = []
for i in range(len(ranges)//2):
bounds.append([ranges[2*i],ranges[2*i+1]])
obj.set_scale(scale)
tol = 1e-8
callback=None
out = fmin_l_bfgs_b(
func=obj, x0=initial_guess, bounds=bounds,
args=objective_args,
factr = tol/np.finfo(float).eps, pgtol = tol, maxiter=1000,
iprint=0,
#approx_grad=True)
fprime=obj.gradient,callback=callback)
optimal_sample = out[0]
obj_val = out[1]*obj.scale
num_fn_evals = out[2]['funcalls']
#print ('initial_guess',initial_guess)
#print ('\tFunction evaluations: %d'%(num_fn_evals))
#print (optimal_sample)
obj.set_scale(1)
return optimal_sample, obj_val
def get_initial_guesses_1d(leja_sequence,ranges):
eps = 1e-6 # must be larger than optimization tolerance
intervals = np.sort(leja_sequence)
if ranges[0] != None and (leja_sequence.min()>ranges[0]+eps):
        intervals = np.hstack(([[ranges[0]]],intervals))
# -*- coding: utf-8 -*-
import numpy as np
import unittest
from ..utils import ShearFrame
from .. import *
class TestShearFrame(unittest.TestCase):
def setUp(self):
self.n = 5
self.m = 1e3
self.k = 1e6
self.shear_frame = ShearFrame(self.n, self.m, self.k)
def get_natural_frequencies(self):
k, m, n = self.k, self.m, self.n
freqs = np.array([
2 * np.sqrt(k / m) * np.sin(np.pi / 2 * (2*r-1) / (2*n+1))
for r in range(1, self.n+1)])
return freqs
def get_mode_shapes(self):
Q = []
for r in range(1, self.n+1):
q = np.array([np.sin(i*np.pi*(2*r-1)/(2*self.n+1))
for i in range(1, self.n+1)])
q /= np.linalg.norm(q, 2)
Q.append(q)
Q = np.array(Q).T
return Q
def test_sf_k(self):
assert self.k == self.shear_frame.k
def test_sf_m(self):
assert self.m == self.shear_frame.m
def test_sf_n(self):
assert self.n == self.shear_frame.n
def test_eigvals(self):
fn_true = self.get_natural_frequencies()
M, K = self.shear_frame.M, self.shear_frame.K
l, Q = np.linalg.eig(np.linalg.solve(M, K))
fn = np.sqrt(l)
fn.sort()
np.testing.assert_almost_equal(fn_true, fn)
def test_eigvecs(self):
M, K = self.shear_frame.M, self.shear_frame.K
l, Q = np.linalg.eig(np.linalg.solve(M, K))
n = np.argsort(l)
Q = Q[:, n]
Q_true = self.get_mode_shapes()
        np.testing.assert_almost_equal(np.abs(Q), np.abs(Q_true))
#-------------------------------------------
#
# FILENAME: IFI_compare_RadIA_PIREP.py
#
# CREATED: 12.15.2021 - dserke
#
# PURPOSE: 1) ingest matched RadIA and PIREPs csv file, 2) manipulate and plot the data
#
#-------------------------------------------
#-------------------------------------------
# IMPORT LIBRARIES
#-------------------------------------------
import pandas as pd
import geopandas as gpd
import numpy as np
from numpy import *
import csv
import wradlib as wrl
import matplotlib as mpl
from matplotlib import pyplot as plt
from matplotlib.colors import ListedColormap
from mpl_toolkits.axes_grid1 import make_axes_locatable
import warnings
warnings.filterwarnings('ignore')
#-------------------------------------------
# DEFINE INPUT PATHS
#-------------------------------------------
# ... define raduis (r) of earth in km
r_km = 6378.1
ft_TO_m = 0.3048
nbins = 1832.0
range_res_m = 250.0
bw_deg = 1.0 # half power beam width (deg)
vol_deg = [0.5, 1.5, 2.5, 3.5, 4.5]
lat_KBBX = 39.4969580
lon_KBBX = -121.6316557
alt_KBBX_m = 221.0 * ft_TO_m
sitecoords = (lon_KBBX, lat_KBBX, alt_KBBX_m)
# ... define base path dir
base_path_dir = '/d1/serke/projects/'
# ... paths to Rv3 INTs and PIRP csv data files
Rv3PIR_dir = base_path_dir+'RADIA_FAA/data/RadIAv3PIREPs/'
# ... names of Rv3 INTs and PIRP csv data files
# ... NOTE: currently, these files just represent ICICLE F17
Rv3PIR_FZDZ_name = 'exfout_MrmsPostProcessor_fzdz_interest.csv'
Rv3PIR_SSLW_name = 'exfout_MrmsPostProcessor_slw_interest.csv'
Rv3PIR_PIRP_name = 'exmatch_MrmsPostProcessor.csv'
# ... path to NEXRAD site location csv
nexrad_sites_dir = base_path_dir+'case_studies/SNOWIE_2017/data/RadIA_data/nexrad_site_data/'
nexrad_sites_name = 'nexrad_site_whdr.csv'
#-------------------------------------------
# LOAD INPUT DATASETS
#-------------------------------------------
# ... radar data into radar object
Rv3PIR_FZDZ = pd.read_csv(Rv3PIR_dir+Rv3PIR_FZDZ_name, header=0, index_col=0)
Rv3PIR_SSLW = pd.read_csv(Rv3PIR_dir+Rv3PIR_SSLW_name, header=0, index_col=0)
Rv3PIR_PIRP = pd.read_csv(Rv3PIR_dir+Rv3PIR_PIRP_name, header=0, index_col=0)
# ... radar site location dataset
nexrad_sites = pd.read_csv(nexrad_sites_dir+nexrad_sites_name, header=0, index_col=1)
# ... low res countries dataset
countries = gpd.read_file(gpd.datasets.get_path("naturalearth_lowres"))
#-------------------------------------------
# MANIPULATE INPUT DATA
#-------------------------------------------
# Data from full month Feb2019 ICICLE have a few missing RadIA matchups (rows)
# ... find missing integers in RadIA FZDZ/SSLW lists
def find_missing(input):
return [x for x in range(input[0], input[-1]+1)
if x not in input]
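# For illustration (added; not in the original script): find_missing([1, 2, 4, 7])
# returns [3, 5, 6], i.e. the integers absent from the inclusive index range.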
missing_SSLW_inds = find_missing(Rv3PIR_SSLW.index)
missing_FZDZ_inds = find_missing(Rv3PIR_FZDZ.index)
# ... exclude the inds missing from FZDZ/SSLW dfs from the PIRP df
Rv3PIR_PIRP.drop(Rv3PIR_PIRP.index[[missing_SSLW_inds]], inplace=True)
# ... exclude ind 0 from the PIRP df
#Rv3PIR_PIRP.drop(Rv3PIR_PIRP.index[[0]], inplace=True)
Rv3PIR_FZDZ.index = Rv3PIR_FZDZ.index-1
Rv3PIR_SSLW.index = Rv3PIR_SSLW.index-1
# ... define function for distance between two lat/lon points
def haversine_distance(lat1, lon1, lat2, lon2):
phi1 = np.radians(lat1)
phi2 = np.radians(lat2)
delta_phi = np.radians(lat2 - lat1)
delta_lambda = np.radians(lon2 - lon1)
    a = np.sin(delta_phi / 2)**2 + np.cos(phi1) * np.cos(phi2) * np.sin(delta_lambda / 2)**2
    c = 2 * np.arctan2(np.sqrt(a), np.sqrt(1 - a))
    return r_km * c
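# Illustrative check (added; not in the original script): the great-circle
# distance from the KBBX radar site to an arbitrary nearby point, e.g.
# haversine_distance(lat_KBBX, lon_KBBX, 39.0, -121.0), comes out to roughly
# 78 km with the r_km defined above.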
from typing import Any, Dict, Union
import numpy as np
from numpy.core.defchararray import center
import panda_gym
from panda_gym.envs.core import Task
from panda_gym.utils import distance
class ReachBimanual(Task):
def __init__(
self,
sim,
get_ee_position0,
get_ee_position1,
reward_type="sparse",
distance_threshold=0.05,
goal_range=0.35,
has_object = False,
absolute_pos = False,
obj_not_in_hand_rate = 1,
) -> None:
super().__init__(sim)
self.has_object = has_object
self.absolute_pos = absolute_pos
self.object_size = 0.04
self.reward_type = reward_type
self.distance_threshold = distance_threshold
self.obj_not_in_hand_rate = obj_not_in_hand_rate
self.get_ee_position0 = get_ee_position0
self.get_ee_position1 = get_ee_position1
self.goal_range_low = np.array([goal_range / 4, goal_range / 4, -goal_range/1.5])
self.goal_range_high = np.array([goal_range, goal_range, goal_range/1.5])
obj_xyz_range=[0.3, 0.3, 0]
self.obj_range_low = np.array([0.1, -obj_xyz_range[1] / 2, self.object_size/2])
self.obj_range_high = np.array(obj_xyz_range) + self.obj_range_low
with self.sim.no_rendering():
self._create_scene()
self.sim.place_visualizer(target_position=np.zeros(3), distance=0.9, yaw=45, pitch=-30)
self._max_episode_steps = 50
def _create_scene(self) -> None:
self.sim.create_plane(z_offset=-0.4)
self.sim.create_table(length=1., width=0.7, height=0.4, x_offset=-0.575)
self.sim.create_table(length=1., width=0.7, height=0.4, x_offset=0.575)
self.sim.create_sphere(
body_name="target0",
radius=0.02,
mass=0.0,
ghost=True,
position=np.zeros(3),
rgba_color=np.array([0.1, 0.9, 0.1, 0.3]),
)
self.sim.create_sphere(
body_name="target1",
radius=0.02,
mass=0.0,
ghost=True,
position=np.zeros(3),
rgba_color=np.array([0.9, 0.1, 0.1, 0.3]),
)
self.sim.create_sphere(
body_name="target2",
radius=0.03,
mass=0.0,
ghost=True,
position=np.zeros(3),
rgba_color=np.array([0.1, 0.1, 0.9, 0.5]),
)
if self.has_object:
self.sim.create_box(
body_name="object0",
half_extents=np.ones(3) * self.object_size / 2,
mass=0.5,
position=np.array([0.0, 0.0, self.object_size / 2]),
rgba_color=np.array([0.1, 0.9, 0.1, 1.0]),
)
self.sim.create_box(
body_name="object1",
                # remaining arguments completed by analogy with "object0" above (assumed values)
                half_extents=np.ones(3) * self.object_size / 2,
                mass=0.5,
                position=np.array([0.0, 0.0, self.object_size / 2]),
                rgba_color=np.array([0.9, 0.1, 0.1, 1.0]),
            )
"""Lower-level plotting tools.
Routines that users wishing for more fine-grained control may wish to use:
- ``make_1d_axes``
- ``make_2d_axes``
to create a set of axes and legend proxies.
"""
import numpy as np
import pandas
import matplotlib.pyplot as plt
from scipy.stats import gaussian_kde
from matplotlib.gridspec import GridSpec as GS, GridSpecFromSubplotSpec as SGS
try:
from astropy.visualization import hist
except ImportError:
pass
try:
from anesthetic.kde import fastkde_1d, fastkde_2d
except ImportError:
pass
import matplotlib.cbook as cbook
from matplotlib.cbook import normalize_kwargs
import matplotlib.lines as mlines
from matplotlib.ticker import MaxNLocator
from matplotlib.colors import LinearSegmentedColormap
from matplotlib.transforms import Affine2D
from anesthetic.utils import check_bounds, nest_level
from anesthetic.utils import (sample_compression_1d, quantile,
triangular_sample_compression_2d,
iso_probability_contours,
iso_probability_contours_from_samples,
scaled_triangulation, match_contour_to_contourf)
from anesthetic.boundary import cut_and_normalise_gaussian
class AxesSeries(pandas.Series):
"""Anesthetic's axes version of `pandas.Series`."""
@property
def _constructor(self):
return AxesSeries
@property
def _constructor_expanddim(self):
return AxesDataFrame
class AxesDataFrame(pandas.DataFrame):
"""Anesthetic's axes version of `pandas.DataFrame`."""
@property
def _constructor(self):
return AxesDataFrame
@property
def _constructor_sliced(self):
return AxesSeries
def axlines(self, params, values, **kwargs):
"""Add vertical and horizontal lines across all axes.
Parameters
----------
params : str or list(str)
parameter label(s).
Should match the size of `values`.
values : float or list(float)
value(s) at which vertical and horizontal lines shall be added.
Should match the size of `params`.
kwargs
Any kwarg that can be passed to `plt.axvline` or `plt.axhline`.
"""
params = np.ravel(params)
values = np.ravel(values)
if params.size != values.size:
raise ValueError("The sizes of `params` and `values` must match "
"exactly, but params.size=%s and values.size=%s."
% (params.size, values.size))
for i, param in enumerate(params):
if param in self.columns:
for ax in self.loc[:, param]:
if ax is not None:
ax.axvline(values[i], **kwargs)
if param in self.index:
for ax in self.loc[param, self.columns != param]:
if ax is not None:
ax.axhline(values[i], **kwargs)
def axspans(self, params, vmins, vmaxs, **kwargs):
"""Add vertical and horizontal spans across all axes.
Parameters
----------
params : str or list(str)
parameter label(s).
Should match the size of `vmins` and `vmaxs`.
vmins : float or list(float)
Minimum value of the vertical and horizontal axes spans.
Should match the size of `params`.
vmaxs : float or list(float)
Maximum value of the vertical and horizontal axes spans.
Should match the size of `params`.
kwargs
Any kwarg that can be passed to `plt.axvspan` or `plt.axhspan`.
"""
kwargs = normalize_kwargs(kwargs, dict(color=['c']))
params = np.ravel(params)
vmins = np.ravel(vmins)
vmaxs = np.ravel(vmaxs)
if params.size != vmins.size:
raise ValueError("The sizes of `params`, `vmins` and `vmaxs` must "
"match exactly, but params.size=%s, "
"vmins.size=%s and vmaxs.size=%s."
% (params.size, vmins.size, vmaxs.size))
for i, param in enumerate(params):
if param in self.columns:
for ax in self.loc[:, param]:
if ax is not None:
ax.axvspan(vmins[i], vmaxs[i], **kwargs)
if param in self.index:
for ax in self.loc[param, self.columns != param]:
if ax is not None:
ax.axhspan(vmins[i], vmaxs[i], **kwargs)
def make_1d_axes(params, **kwargs):
"""Create a set of axes for plotting 1D marginalised posteriors.
Parameters
----------
params: list(str)
names of parameters.
tex: dict(str:str), optional
Dictionary mapping params to tex plot labels.
fig: matplotlib.figure.Figure, optional
Figure to plot on.
Default: matplotlib.pyplot.figure()
ncols: int
Number of columns in the plot
option, default ceil(sqrt(num_params))
subplot_spec: matplotlib.gridspec.GridSpec, optional
gridspec to plot array as part of a subfigure
Default: None
Returns
-------
fig: matplotlib.figure.Figure
New or original (if supplied) figure object
axes: pandas.Series(matplotlib.axes.Axes)
Pandas array of axes objects
"""
axes = AxesSeries(index=np.atleast_1d(params), dtype=object)
axes[:] = None
tex = kwargs.pop('tex', {})
fig = kwargs.pop('fig') if 'fig' in kwargs else plt.figure()
ncols = kwargs.pop('ncols', int(np.ceil(np.sqrt(len(axes)))))
nrows = int(np.ceil(len(axes)/float(ncols)))
if 'subplot_spec' in kwargs:
grid = SGS(nrows, ncols, wspace=0,
subplot_spec=kwargs.pop('subplot_spec'))
else:
grid = GS(nrows, ncols, wspace=0)
if kwargs:
raise TypeError('Unexpected **kwargs: %r' % kwargs)
tex = {p: tex[p] if p in tex else p for p in axes.index}
for p, g in zip(axes.index, grid):
axes[p] = ax = fig.add_subplot(g)
ax.set_xlabel(tex[p])
ax.set_yticks([])
for x, ax in axes.dropna().iteritems():
ax.xaxis.set_major_locator(MaxNLocator(2, integer=True))
return fig, axes
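# Minimal usage sketch for ``make_1d_axes`` (illustrative, not part of the
# original module; parameter names and tex labels are made up):
#
#     fig, axes = make_1d_axes(['x0', 'x1'], tex={'x0': '$x_0$', 'x1': '$x_1$'})
#     kde_plot_1d(axes['x0'], np.random.randn(1000))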
def make_2d_axes(params, **kwargs):
"""Create a set of axes for plotting 2D marginalised posteriors.
Parameters
----------
params: lists of parameters
Can be either:
* list(str) if the x and y axes are the same
* [list(str),list(str)] if the x and y axes are different
Strings indicate the names of the parameters
tex: dict(str:str), optional
Dictionary mapping params to tex plot labels.
Default: params
upper, lower, diagonal: logical, optional
Whether to create 2D marginalised plots above or below the
diagonal, or to create a 1D marginalised plot on the diagonal.
Default: True
fig: matplotlib.figure.Figure, optional
Figure to plot on.
Default: matplotlib.pyplot.figure()
ticks: str
If 'outer', plot ticks only on the very left and very bottom.
If 'inner', plot ticks also in inner subplots.
If None, plot no ticks at all.
Default: 'outer'
subplot_spec: matplotlib.gridspec.GridSpec, optional
gridspec to plot array as part of a subfigure.
Default: None
Returns
-------
fig: matplotlib.figure.Figure
New or original (if supplied) figure object
axes: pandas.DataFrame(matplotlib.axes.Axes)
Pandas array of axes objects
"""
if nest_level(params) == 2:
xparams, yparams = params
else:
xparams = yparams = params
ticks = kwargs.pop('ticks', 'outer')
upper = kwargs.pop('upper', True)
lower = kwargs.pop('lower', True)
diagonal = kwargs.pop('diagonal', True)
axes = AxesDataFrame(index=np.atleast_1d(yparams),
columns=np.atleast_1d(xparams),
dtype=object)
axes[:][:] = None
all_params = list(axes.columns) + list(axes.index)
for j, y in enumerate(axes.index):
for i, x in enumerate(axes.columns):
if all_params.index(x) < all_params.index(y):
if lower:
axes[x][y] = -1
elif all_params.index(x) > all_params.index(y):
if upper:
axes[x][y] = +1
elif diagonal:
axes[x][y] = 0
axes.dropna(axis=0, how='all', inplace=True)
axes.dropna(axis=1, how='all', inplace=True)
tex = kwargs.pop('tex', {})
tex = {p: tex[p] if p in tex else p for p in all_params}
fig = kwargs.pop('fig') if 'fig' in kwargs else plt.figure()
spec = kwargs.pop('subplot_spec', None)
if axes.shape[0] != 0 and axes.shape[1] != 0:
if spec is not None:
grid = SGS(*axes.shape, hspace=0, wspace=0, subplot_spec=spec)
else:
grid = GS(*axes.shape, hspace=0, wspace=0)
if kwargs:
raise TypeError('Unexpected **kwargs: %r' % kwargs)
if axes.size == 0:
return fig, axes
position = axes.copy()
axes[:][:] = None
for j, y in enumerate(axes.index[::-1]):
for i, x in enumerate(axes.columns):
if position[x][y] is not None:
sx = list(axes[x].dropna())
sx = sx[0] if sx else None
sy = list(axes.T[y].dropna())
sy = sy[0] if sy else None
axes[x][y] = fig.add_subplot(grid[axes.index.size-1-j, i],
sharex=sx, sharey=sy)
if position[x][y] == 0:
axes[x][y].twin = axes[x][y].twinx()
axes[x][y].twin.set_yticks([])
axes[x][y].twin.set_ylim(0, 1.1)
axes[x][y].set_zorder(axes[x][y].twin.get_zorder() + 1)
axes[x][y].lines = axes[x][y].twin.lines
axes[x][y].patches = axes[x][y].twin.patches
axes[x][y].collections = axes[x][y].twin.collections
axes[x][y].containers = axes[x][y].twin.containers
make_diagonal(axes[x][y])
axes[x][y].position = 'diagonal'
axes[x][y].twin.xaxis.set_major_locator(
MaxNLocator(3, prune='both'))
else:
if position[x][y] == 1:
axes[x][y].position = 'upper'
elif position[x][y] == -1:
axes[x][y].position = 'lower'
axes[x][y].yaxis.set_major_locator(
MaxNLocator(3, prune='both'))
axes[x][y].xaxis.set_major_locator(
MaxNLocator(3, prune='both'))
for y, ax in axes.bfill(axis=1).iloc[:, 0].dropna().iteritems():
ax.set_ylabel(tex[y])
for x, ax in axes.ffill(axis=0).iloc[-1, :].dropna().iteritems():
ax.set_xlabel(tex[x])
# left and right ticks and labels
for y, ax in axes.iterrows():
ax_ = ax.dropna()
if len(ax_) and ticks == 'inner':
for i, a in enumerate(ax_):
if i == 0: # first column
if a.position == 'diagonal' and len(ax_) == 1:
a.tick_params('y', left=False, labelleft=False)
else:
a.tick_params('y', left=True, labelleft=True)
elif a.position == 'diagonal': # not first column
tl = a.yaxis.majorTicks[0].tick1line.get_markersize()
a.tick_params('y', direction='out', length=tl/2,
left=True, labelleft=False)
else: # not diagonal and not first column
a.tick_params('y', direction='inout',
left=True, labelleft=False)
elif len(ax_) and ticks == 'outer': # no inner ticks
for a in ax_[1:]:
a.tick_params('y', left=False, labelleft=False)
elif len(ax_) and ticks is None: # no ticks at all
for a in ax_:
a.tick_params('y', left=False, right=False,
labelleft=False, labelright=False)
else:
raise ValueError(
"ticks=%s was requested, but ticks can only be one of "
"['outer', 'inner', None]." % ticks)
# bottom and top ticks and labels
for x, ax in axes.iteritems():
ax_ = ax.dropna()
if len(ax_):
if ticks == 'inner':
for i, a in enumerate(ax_):
if i == len(ax_) - 1: # bottom row
a.tick_params('x', bottom=True, labelbottom=True)
else: # not bottom row
a.tick_params('x', direction='inout',
bottom=True, labelbottom=False)
if a.position == 'diagonal':
a.twin.tick_params('x', direction='inout',
bottom=True, labelbottom=False)
elif ticks == 'outer': # no inner ticks
for a in ax_[:-1]:
a.tick_params('x', bottom=False, labelbottom=False)
elif ticks is None: # no ticks at all
for a in ax_:
a.tick_params('x', bottom=False, top=False,
labelbottom=False, labeltop=False)
else:
raise ValueError(
"ticks=%s was requested, but ticks can only be one of "
"['outer', 'inner', None]." % ticks)
return fig, axes
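# Minimal usage sketch for ``make_2d_axes`` (illustrative, not part of the
# original module): a lower-triangle plus diagonal grid with outer ticks only.
#
#     fig, axes = make_2d_axes(['x0', 'x1', 'x2'], upper=False, ticks='outer')
#     # axes is an AxesDataFrame; axes['x0']['x1'] is the (x0, x1) panel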
def fastkde_plot_1d(ax, data, *args, **kwargs):
"""Plot a 1d marginalised distribution.
This functions as a wrapper around matplotlib.axes.Axes.plot, with a kernel
density estimation computation provided by the package fastkde in between.
All remaining keyword arguments are passed onwards.
Parameters
----------
ax: matplotlib.axes.Axes
axis object to plot on
data: np.array
Uniformly weighted samples to generate kernel density estimator.
xmin, xmax: float
lower/upper prior bound
optional, default None
levels: list
values at which to draw iso-probability lines.
optional, default [0.95, 0.68]
facecolor: bool or string
If set to True then the 1d plot will be shaded with the value of the
        ``color`` kwarg. Set to a string such as 'blue', 'k', 'r', 'C1' etc.
to define the color of the shading directly.
optional, default False
Returns
-------
lines: matplotlib.lines.Line2D
A list of line objects representing the plotted data (same as
matplotlib matplotlib.axes.Axes.plot command)
"""
kwargs = normalize_kwargs(
kwargs,
dict(linewidth=['lw'], linestyle=['ls'], color=['c'],
facecolor=['fc'], edgecolor=['ec']))
if len(data) == 0:
return np.zeros(0), np.zeros(0)
if data.max()-data.min() <= 0:
return
levels = kwargs.pop('levels', [0.95, 0.68])
xmin = kwargs.pop('xmin', None)
xmax = kwargs.pop('xmax', None)
density = kwargs.pop('density', False)
cmap = kwargs.pop('cmap', None)
color = kwargs.pop('color', (next(ax._get_lines.prop_cycler)['color']
if cmap is None else cmap(0.68)))
facecolor = kwargs.pop('facecolor', False)
if 'edgecolor' in kwargs:
edgecolor = kwargs.pop('edgecolor')
if edgecolor:
color = edgecolor
else:
edgecolor = color
q = kwargs.pop('q', '5sigma')
q = quantile_plot_interval(q=q)
try:
x, p, xmin, xmax = fastkde_1d(data, xmin, xmax)
except NameError:
raise ImportError("You need to install fastkde to use fastkde")
p /= p.max()
i = ((x > quantile(x, q[0], p)) & (x < quantile(x, q[1], p)))
area = np.trapz(x=x[i], y=p[i]) if density else 1
ans = ax.plot(x[i], p[i]/area, color=color, *args, **kwargs)
ax.set_xlim(xmin, xmax, auto=True)
if facecolor and facecolor not in [None, 'None', 'none']:
if facecolor is True:
facecolor = color
c = iso_probability_contours(p[i], contours=levels)
cmap = basic_cmap(facecolor)
fill = []
for j in range(len(c)-1):
fill.append(ax.fill_between(x[i], p[i], where=p[i] >= c[j],
color=cmap(c[j]), edgecolor=edgecolor))
return ans, fill
return ans
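# Usage sketch (illustrative): draw a shaded 1D KDE on a single axis; fastkde
# must be installed for this to work.
#
#     fig, axes = make_1d_axes('x0')
#     fastkde_plot_1d(axes['x0'], np.random.randn(1000), facecolor=True)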
def kde_plot_1d(ax, data, *args, **kwargs):
"""Plot a 1d marginalised distribution.
This functions as a wrapper around matplotlib.axes.Axes.plot, with a kernel
density estimation computation provided by scipy.stats.gaussian_kde in
between. All remaining keyword arguments are passed onwards.
Parameters
----------
ax: matplotlib.axes.Axes
axis object to plot on.
data: np.array
Samples to generate kernel density estimator.
weights: np.array, optional
Sample weights.
ncompress: int, optional
Degree of compression. Default 1000
xmin, xmax: float
lower/upper prior bound.
optional, default None
levels: list
values at which to draw iso-probability lines.
optional, default [0.95, 0.68]
facecolor: bool or string
If set to True then the 1d plot will be shaded with the value of the
        ``color`` kwarg. Set to a string such as 'blue', 'k', 'r', 'C1' etc.
to define the color of the shading directly.
optional, default False
Returns
-------
lines: matplotlib.lines.Line2D
A list of line objects representing the plotted data (same as
matplotlib matplotlib.axes.Axes.plot command)
"""
if len(data) == 0:
        return np.zeros(0), np.zeros(0)
from math import fabs
import numpy as np
from numba import jit
from numba.extending import overload
@overload(np.clip)
def np_clip(a, a_min, a_max, out=None):
"""
Numba Overload of np.clip
:type a: np.ndarray
:type a_min: int
:type a_max: int
:type out: np.ndarray
:rtype: np.ndarray
"""
    # numba calls an @overload-decorated function with the argument *types* at
    # compile time and expects it to return the implementation to compile, so
    # the clipping loop is wrapped in an inner function here.
    def np_clip_impl(a, a_min, a_max, out=None):
        if out is None:
            out = np.empty_like(a)
        for i in range(len(a)):
            if a[i] < a_min:
                out[i] = a_min
            elif a[i] > a_max:
                out[i] = a_max
            else:
                out[i] = a[i]
        return out
    return np_clip_impl
@jit(nopython=True)
def convolve(data, kernel):
"""
Convolution 1D Array
:type data: np.ndarray
:type kernel: np.ndarray
:rtype: np.ndarray
"""
size_data = len(data)
size_kernel = len(kernel)
size_out = size_data - size_kernel + 1
out = np.array([np.nan] * size_out)
kernel = np.flip(kernel)
for i in range(size_out):
window = data[i:i + size_kernel]
out[i] = sum([window[j] * kernel[j] for j in range(size_kernel)])
return out
@jit(nopython=True)
def sma(data, period):
"""
Simple Moving Average
:type data: np.ndarray
:type period: int
:rtype: np.ndarray
"""
size = len(data)
out = np.array([np.nan] * size)
for i in range(period - 1, size):
window = data[i - period + 1:i + 1]
out[i] = np.mean(window)
return out
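# Usage sketch (illustrative): a period-3 SMA of [1, 2, 3, 4, 5] gives
# [nan, nan, 2.0, 3.0, 4.0] -- the first period-1 entries stay undefined.
#
#     sma(np.array([1.0, 2.0, 3.0, 4.0, 5.0]), 3)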
@jit(nopython=True)
def wma(data, period):
"""
Weighted Moving Average
:type data: np.ndarray
:type period: int
:rtype: np.ndarray
"""
weights = np.arange(period, 0, -1)
weights = weights / weights.sum()
out = convolve(data, weights)
return np.concatenate((np.array([np.nan] * (len(data) - len(out))), out))
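# Note on the weighting (illustrative): for period=3 the kernel is [3, 2, 1]/6
# and, because `convolve` flips it, the most recent sample gets weight 3/6,
# the previous one 2/6 and the oldest 1/6.
#
#     wma(np.array([1.0, 2.0, 3.0, 4.0, 5.0]), 3)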
@jit(nopython=True)
def cma(data):
"""
Cumulative Moving Average
:type data: np.ndarray
:rtype: np.ndarray
"""
size = len(data)
out = np.array([np.nan] * size)
last_sum = np.array([np.nan] * size)
last_sum[1] = sum(data[:2])
for i in range(2, size):
last_sum[i] = last_sum[i - 1] + data[i]
out[i] = last_sum[i] / (i + 1)
return out
@jit(nopython=True)
def ema(data, period, smoothing=2.0):
"""
Exponential Moving Average
:type data: np.ndarray
:type period: int
:type smoothing: float
:rtype: np.ndarray
"""
size = len(data)
weight = smoothing / (period + 1)
out = np.array([np.nan] * size)
out[0] = data[0]
for i in range(1, size):
out[i] = (data[i] * weight) + (out[i - 1] * (1 - weight))
out[:period - 1] = np.nan
return out
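# Usage sketch (illustrative): with period=3 the smoothing weight is
# 2 / (3 + 1) = 0.5, so ema([1, 2, 3, 4, 5], 3) evaluates to
# [nan, nan, 2.25, 3.125, 4.0625] (the first period-1 entries are masked).
#
#     ema(np.array([1.0, 2.0, 3.0, 4.0, 5.0]), 3)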
@jit(nopython=True)
def ewma(data, period, alpha=1.0):
"""
Exponential Weighted Moving Average
:type data: np.ndarray
:type period: int
:type alpha: float
:rtype: np.ndarray
"""
weights = (1 - alpha) ** np.arange(period)
weights /= np.sum(weights)
out = convolve(data, weights)
return np.concatenate((np.array([np.nan] * (len(data) - len(out))), out))
@jit(nopython=True)
def dema(data, period, smoothing=2.0):
"""
Double Exponential Moving Average
:type data: np.ndarray
:type period: int
:type smoothing: float
:rtype: np.ndarray
"""
return (2 * ema(data, period, smoothing)) - ema(ema(data, period, smoothing), period, smoothing)
@jit(nopython=True)
def trix(data, period, smoothing=2.0):
"""
Triple Exponential Moving Average
:type data: np.ndarray
:type period: int
:type smoothing: float
:rtype: np.ndarray
"""
return ((3 * ema(data, period, smoothing) - (3 * ema(ema(data, period, smoothing), period, smoothing))) +
ema(ema(ema(data, period, smoothing), period, smoothing), period, smoothing))
@jit(nopython=True)
def macd(data, fast, slow, smoothing=2.0):
"""
Moving Average Convergence Divergence
:type data: np.ndarray
:type fast: int
:type slow: int
:type smoothing: float
:rtype: np.ndarray
"""
return ema(data, fast, smoothing) - ema(data, slow, smoothing)
@jit(nopython=True)
def stoch(c_close, c_high, c_low, period_k, period_d):
"""
Stochastic
:type c_close: np.ndarray
:type c_high: np.ndarray
:type c_low: np.ndarray
:type period_k: int
:type period_d: int
:rtype: (np.ndarray, np.ndarray)
"""
size = len(c_close)
k = np.array([np.nan] * size)
for i in range(period_k - 1, size):
e = i + 1
s = e - period_k
ml = np.min(c_low[s:e])
k[i] = ((c_close[i] - ml) / (np.max(c_high[s:e]) - ml)) * 100
return k, sma(k, period_d)
@jit(nopython=True)
def kdj(c_close, c_high, c_low, period_rsv=9, period_k=3, period_d=3, weight_k=3, weight_d=2):
"""
KDJ
:type c_close: np.ndarray
:type c_high: np.ndarray
:type c_low: np.ndarray
:type period_rsv: int
:type period_k: int
:type period_d: int
:type weight_k: int
:type weight_d: int
:rtype: (np.ndarray, np.ndarray, np.ndarray)
"""
size = len(c_close)
rsv = np.array([np.nan] * size)
for i in range(period_k - 1, size):
e = i + 1
s = e - period_k
ml = np.min(c_low[s:e])
rsv[i] = ((c_close[i] - ml) / (np.max(c_high[s:e]) - ml)) * 100
k = sma(rsv, period_rsv)
d = sma(k, period_d)
return k, d, (weight_k * k) - (weight_d * d)
@jit(nopython=True)
def wpr(c_close, c_high, c_low, period):
"""
William %R
:type c_close: np.ndarray
:type c_high: np.ndarray
:type c_low: np.ndarray
:type period: int
:rtype: (np.ndarray, np.ndarray)
"""
size = len(c_close)
out = np.array([np.nan] * size)
for i in range(period - 1, size):
e = i + 1
s = e - period
mh = np.max(c_high[s:e])
out[i] = ((mh - c_close[i]) / (mh - np.min(c_low[s:e]))) * -100
return out
@jit(nopython=True)
def rsi(data, period, smoothing=2.0, f_sma=True, f_clip=True, f_abs=True):
"""
    Relative Strength Index
:type data: np.ndarray
:type period: int
:type smoothing: float
:type f_sma: bool
:type f_clip: bool
:type f_abs: bool
:rtype: np.ndarray
"""
size = len(data)
delta = np.array([np.nan] * size)
up = np.array([np.nan] * size)
down = np.array([np.nan] * size)
delta = np.diff(data)
if f_clip:
up, down = np.clip(delta, a_min=0, a_max=np.max(delta)), np.clip(delta, a_min=np.min(delta), a_max=0)
else:
up, down = delta.copy(), delta.copy()
up[delta < 0] = 0.0
down[delta > 0] = 0.0
if f_abs:
for i, x in enumerate(down):
down[i] = fabs(x)
else:
down = np.abs(down)
rs = sma(up, period) / sma(down, period) if f_sma else ema(up, period - 1, smoothing) / ema(
down, period - 1, smoothing)
out = np.array([np.nan] * size)
out[1:] = (100 - 100 / (1 + rs))
return out
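# Usage sketch (illustrative): RSI over a float close series; f_sma switches
# between the SMA- and EMA-based averages of gains and losses.
#
#     closes = np.array([44.0, 44.5, 44.2, 44.9, 45.4, 45.2, 45.8, 46.0])
#     rsi(closes, 4)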
@jit(nopython=True)
def srsi(data, period, smoothing=2.0, f_sma=True, f_clip=True, f_abs=True):
"""
    Stochastic Relative Strength Index
:type data: np.ndarray
:type period: int
:type smoothing: float
:type f_sma: bool
:type f_clip: bool
:type f_abs: bool
:rtype: np.ndarray
"""
r = rsi(data, period, smoothing, f_sma, f_clip, f_abs)[period:]
s = np.array([np.nan] * len(r))
for i in range(period - 1, len(r)):
window = r[i + 1 - period:i + 1]
mw = np.min(window)
s[i] = ((r[i] - mw) / (np.max(window) - mw)) * 100
return np.concatenate((np.array([np.nan] * (len(data) - len(s))), s))
@jit(nopython=True)
def bollinger_bands(data, period, dev_up=2.0, dev_down=2.0):
"""
Bollinger Bands
:type data: np.ndarray
:type period: int
:type dev_up: float
:type dev_down: float
:rtype: (np.ndarray, np.ndarray, np.ndarray, np.ndarray)
:return: middle, up, down, width
"""
size = len(data)
bb_up = np.array([np.nan] * size)
bb_down = np.array([np.nan] * size)
bb_width = np.array([np.nan] * size)
bb_mid = sma(data, period)
for i in range(period - 1, size):
std_dev = np.std(data[i - period + 1:i + 1])
mid = bb_mid[i]
bb_up[i] = mid + (std_dev * dev_up)
bb_down[i] = mid - (std_dev * dev_down)
bb_width[i] = bb_up[i] - bb_down[i]
return bb_mid, bb_up, bb_down, bb_width
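# Usage sketch (illustrative): 20-period Bollinger Bands with the usual
# +/- 2 standard deviation envelopes around the SMA midline.
#
#     closes = np.random.random(100) + 100.0
#     mid, up, down, width = bollinger_bands(closes, 20)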
@jit(nopython=True)
def keltner_channel(c_close, c_open, c_high, c_low, period, smoothing=2.0):
"""
Keltner Channel
:type c_close: np.ndarray
:type c_open: np.ndarray
:type c_high: np.ndarray
:type c_low: np.ndarray
:type period: int
:type smoothing: float
:rtype: (np.ndarray, np.ndarray, np.ndarray, np.ndarray)
:return: middle, up, down, width
"""
e = ema(c_close, period, smoothing)
aa = 2 * atr(c_open, c_high, c_low, period)
up = e + aa
down = e - aa
return e, up, down, up - down
@jit(nopython=True)
def donchian_channel(c_high, c_low, period):
"""
Donchian Channel
:type c_high: np.ndarray
:type c_low: np.ndarray
:type period: int
:rtype: (np.ndarray, np.ndarray, np.ndarray, np.ndarray)
:return: middle, up, down, width
"""
size = len(c_high)
out_up = np.array([np.nan] * size)
out_down = np.array([np.nan] * size)
for i in range(period - 1, size):
e = i + 1
s = e - period
out_up[i] = np.max(c_high[s:e])
out_down[i] = np.min(c_low[s:e])
return (out_up + out_down) / 2, out_up, out_down, out_up - out_down
@jit(nopython=True)
def heiken_ashi(c_open, c_high, c_low, c_close):
"""
Heiken Ashi
:type c_open: np.ndarray
:type c_high: np.ndarray
:type c_low: np.ndarray
:type c_close: np.ndarray
:rtype: (np.ndarray, np.ndarray, np.ndarray, np.ndarray)
:return: open, high, low, close
"""
ha_close = (c_open + c_high + c_low + c_close) / 4
ha_open = np.empty_like(ha_close)
ha_open[0] = (c_open[0] + c_close[0]) / 2
for i in range(1, len(c_close)):
ha_open[i] = (c_open[i - 1] + c_close[i - 1]) / 2
ha_high = np.maximum(np.maximum(ha_open, ha_close), c_high)
ha_low = np.minimum(np.minimum(ha_open, ha_close), c_low)
return ha_open, ha_high, ha_low, ha_close
@jit(nopython=True)
def ichimoku(data, tenkansen=9, kinjunsen=26, senkou_b=52, shift=26):
"""
Ichimoku
:type data: np.ndarray
:type tenkansen: int
:type kinjunsen: int
:type senkou_b: int
:type shift: int
:rtype: (np.ndarray, np.ndarray, np.ndarray, np.ndarray, np.ndarray)
:return: tenkansen, kinjunsen, chikou, senkou a, senkou b
"""
size = len(data)
n_tenkansen = np.array([np.nan] * size)
n_kinjunsen = np.array([np.nan] * size)
n_senkou_b = np.array([np.nan] * (size + shift))
for i in range(tenkansen - 1, size):
window = data[i + 1 - tenkansen:i + 1]
n_tenkansen[i] = (np.max(window) + np.min(window)) / 2
for i in range(kinjunsen - 1, size):
window = data[i + 1 - kinjunsen:i + 1]
n_kinjunsen[i] = (np.max(window) + np.min(window)) / 2
for i in range(senkou_b - 1, size):
window = data[i + 1 - senkou_b:i + 1]
n_senkou_b[i + shift] = (np.max(window) + np.min(window)) / 2
return \
n_tenkansen, n_kinjunsen, np.concatenate(((data[shift:]), (np.array([np.nan] * (size - shift))))), \
np.concatenate((np.array([np.nan] * shift), ((n_tenkansen + n_kinjunsen) / 2))), n_senkou_b
@jit(nopython=True)
def volume_profile(c_close, c_volume, bins=10):
"""
Volume Profile
:type c_close: np.ndarray
:type c_volume: np.ndarray
:type bins: int
:rtype: (np.ndarray, np.ndarray)
:return: count, price
"""
min_close = np.min(c_close)
max_close = np.max(c_close)
norm = 1.0 / (max_close - min_close)
sum_h = np.array([0.0] * bins)
for i in range(len(c_close)):
sum_h[int((c_close[i] - min_close) * bins * norm)] += c_volume[i] ** 2
sq = np.sqrt(sum_h)
return sq / sum(sq), np.linspace(min_close, max_close, bins)
@jit(nopython=True)
def tr(c_open, c_high, c_low):
"""
True Range
:type c_open: np.ndarray
:type c_high: np.ndarray
:type c_low: np.ndarray
:rtype: np.ndarray
"""
return np.maximum(np.maximum(c_open - c_low, np.abs(c_high - c_open)), np.abs(c_low - c_open))
@jit(nopython=True)
def atr(c_open, c_high, c_low, period):
"""
Average True Range
:type c_open: np.ndarray
:type c_high: np.ndarray
:type c_low: np.ndarray
:type period: int
:rtype: np.ndarray
"""
return sma(tr(c_open, c_high, c_low), period)
@jit(nopython=True)
def adx(c_open, c_high, c_low, period_adx, period_dm, smoothing=2.0):
"""
    Average Directional Index
:type c_open: np.ndarray
:type c_high: np.ndarray
:type c_low: np.ndarray
:type period_adx: int
:type period_dm: int
:type smoothing: float
:rtype: np.ndarray
"""
up = np.concatenate((np.array([np.nan]), c_high[1:] - c_high[:-1]))
down = np.concatenate((np.array([np.nan]), c_low[:-1] - c_low[1:]))
    dm_up = np.array([0.0] * len(up))  # float array so fractional moves are not truncated
up_ids = up > down
dm_up[up_ids] = up[up_ids]
dm_up[dm_up < 0] = 0
    dm_down = np.array([0.0] * len(down))
down_ids = down > up
dm_down[down_ids] = down[down_ids]
dm_down[dm_down < 0] = 0
avg_tr = atr(c_open, c_high, c_low, period_dm)
dm_up_avg = 100 * ema(dm_up, period_dm, smoothing) / avg_tr
dm_down_avg = 100 * ema(dm_down, period_dm, smoothing) / avg_tr
return ema(100 * np.abs(dm_up_avg - dm_down_avg) / (dm_up_avg + dm_down_avg), period_adx, smoothing)
@jit(nopython=True)
def obv(c_close, c_volume):
"""
On Balance Volume
:type c_close: np.ndarray
:type c_volume: np.ndarray
:rtype: np.ndarray
"""
size = len(c_close)
out = np.array([np.nan] * size)
out[0] = 1
for i in range(1, size):
if c_close[i] > c_close[i - 1]:
out[i] = out[i - 1] + c_volume[i]
elif c_close[i] < c_close[i - 1]:
out[i] = out[i - 1] - c_volume[i]
else:
out[i] = out[i - 1]
return out
@jit(nopython=True)
def momentum(data, period):
"""
Momentum
:type data: np.ndarray
:type period: int
:rtype: np.ndarray
"""
size = len(data)
out = np.array([np.nan] * size)
for i in range(period - 1, size):
out[i] = data[i] - data[i - period + 1]
return out
@jit(nopython=True)
def roc(data, period):
"""
Rate Of Change
:type data: np.ndarray
:type period: int
:rtype: np.ndarray
"""
size = len(data)
out = np.array([np.nan] * size)
for i in range(period - 1, size):
p = data[i - period + 1]
out[i] = ((data[i] - p) / p) * 100
return out
@jit(nopython=True)
def aroon(data, period):
"""
Aroon
:type data: np.ndarray
:type period: int
:rtype: (np.ndarray, np.ndarray)
"""
size = len(data)
out_up = np.array([np.nan] * size)
out_down = np.array([np.nan] * size)
for i in range(period - 1, size):
window = np.flip(data[i + 1 - period:i + 1])
out_up[i] = ((period - window.argmax()) / period) * 100
out_down[i] = ((period - window.argmin()) / period) * 100
return out_up, out_down
@jit(nopython=True)
def cmf(c_close, c_high, c_low, c_volume, period):
"""
Chaikin Money Flow
:type c_close: np.ndarray
:type c_high: np.ndarray
:type c_low: np.ndarray
:type c_volume: np.ndarray
:type period: int
:rtype: np.ndarray
"""
size = len(c_close)
out = np.array([np.nan] * size)
for i in range(period - 1, size):
e = i + 1
s = e - period
w_close = c_close[s:e]
w_high = c_high[s:e]
w_low = c_low[s:e]
w_vol = c_volume[s:e]
out[i] = sum((((w_close - w_low) - (w_high - w_close)) / (w_high - w_low)) * w_vol) / sum(w_vol)
return out
@jit(nopython=True)
def vix(c_close, c_low, period):
"""
Volatility Index
:type c_close: np.ndarray
:type c_low: np.ndarray
:type period: int
:rtype: np.ndarray
"""
size = len(c_close)
out = np.array([np.nan] * size)
for i in range(period - 1, size):
hc = np.max(c_close[i + 1 - period:i + 1])
out[i] = ((hc - c_low[i]) / hc) * 100
return out
@jit(nopython=True)
def fdi(c_close, period):
"""
Fractal Dimension Index
:type c_close: np.ndarray
:type period: int
:rtype: np.ndarray
"""
size = len(c_close)
out = np.array([np.nan] * size)
for i in range(period - 1, size):
window = c_close[i + 1 - period:i + 1]
pdiff = 0
length = 0
hc = np.max(window)
lc = np.min(window)
for j in (range(1, period - 1)):
if hc > lc:
diff = (window[-j] - lc) / (hc - lc)
length += np.sqrt(((diff - pdiff) + (1 / (period ** 2))) ** 2) if j > 1 else 0
pdiff = diff
out[i] = (1 + (np.log(length) + np.log(2)) / np.log(2 * period)) if length > 0 else 0
return out
@jit(nopython=True)
def entropy(c_close, c_volume, period, bins=2):
"""
Entropy (Experimental)
:type c_close: np.ndarray
:type c_volume: np.ndarray
:type period: int
:type bins: int
:rtype: np.ndarray
"""
size = len(c_close)
out = np.array([np.nan] * size)
sum_f = 0
for i in range(period - 1, size):
e = i + 1
s = e - period
close_w = c_close[s:e]
volume_w = c_volume[s:e]
min_w = np.min(close_w)
norm = 1.0 / (np.max(close_w) - min_w)
sum_h = np.array([0.0] * bins)
for j in range(period):
sum_h[int((close_w[j] - min_w) * bins * norm)] += volume_w[j] ** 2
count = np.sqrt(sum_h)
count = count / sum(count)
        count = count[np.nonzero(count)]
        # completion sketch (the source is truncated here): Shannon entropy of
        # the normalised, volume-weighted price histogram
        out[i] = -np.sum(count * np.log2(count))
    return out
"""
Some functions for working with the Abel habit model.
"""
import numpy as np
from numpy import sqrt, exp
from scipy.stats import norm
import quantecon as qe
inv_sqrt_2pi = 1 / sqrt(2 * np.pi)
class AbelModel:
"""
Represents the model.
"""
def __init__(self, β=0.99,
γ=2.5,
ρ=0.9,
σ=0.002,
x0=0.1,
α=1,
grid_size=60):
self.β, self.γ, self.ρ, self.σ = β, γ, ρ, σ
self.α, self.x0 = α, x0
# derived constants
self.b = x0 + σ**2 * (1 - γ)
self.k0 = β * exp(self.b * (1 - γ) + σ**2 * (1 - γ)**2 / 2)
self.k1 = (ρ - α) * (1 - γ)
# Parameters in the stationary distribution
self.svar = σ**2 / (1 - ρ**2)
self.ssd = sqrt(self.svar)
self.smean = self.b / (1 - ρ)
# A discrete approximation of the stationary dist
std_range, n = 3, 20
mc = qe.tauchen(0, 1, std_range, n)
w_vec = mc.state_values
self.sx_vec = self.smean + self.ssd * w_vec
self.sp_vec = mc.P[0, :] # Any row
# A grid of points for interpolation
a, b = self.smean + 3 * self.ssd, self.smean - 3 * self.ssd
self.x_grid = np.linspace(a, b, grid_size)
def sim_state(self, x0=None, num_paths=1000, ts_length=1000):
"""
Simulate the state process. If x0 is None, then
draw from the stationary distribution.
"""
ρ, b, σ = self.ρ, self.b, self.σ
X = np.ones((num_paths, ts_length))
W = np.random.randn(num_paths, ts_length)
if x0 is None:
X[:, 0] = self.smean
else:
X[:, 0] = x0
for t in range(ts_length-1):
X[:, t+1] = ρ * X[:, t] + b + σ * W[:, t+1]
return X
def A(self, g, Ag, std_range=3, shock_state_size=20):
"""
Apply A to g and return Ag. The argument g is a vector, which is
converted to a function by linear interpolation.
Integration uses Gaussian quadrature.
"""
# Unpack parameters
β, γ, ρ, σ, x0, α = self.β, self.γ, self.ρ, self.σ, self.x0, self.α
b, k0, k1 = self.b, self.k0, self.k1
# Extract state and probs for N(0, 1) shocks
mc = qe.tauchen(0, 1, std_range, shock_state_size)
w_vec = mc.state_values
p_vec = mc.P[0, :] # Any row, all columns
# Interpolate g and allocate memory for new g
g_func = lambda x: np.interp(x, self.x_grid, g)
# Apply the operator K to g, computing Kg and || Kg ||
for (i, x) in enumerate(self.x_grid):
mf = k0 * exp(k1 * x)
Ag[i] = mf * np.dot(g_func(ρ * x + b + w_vec), p_vec)
# Calculate the norm of Ag
Ag_func = lambda x: np.interp(x, self.x_grid, Ag)
r = np.sqrt(np.dot(Ag_func(self.sx_vec)**2, self.sp_vec))
return r
def local_spec_rad_iterative(self, tol=1e-7, max_iter=5000):
"""
Compute the spectral radius of the operator A associated with the Abel
        model self via the local spectral radius
"""
n = len(self.x_grid)
g_in = np.ones(n)
g_out = np.ones(n)
error = tol + 1
r = 1
i = 1
while error > tol and i < max_iter:
s = self.A(g_in, g_out)
new_r = s**(1/i)
error = abs(new_r - r)
g_in = np.copy(g_out)
i += 1
r = new_r
print(f"Converged in {i} iterations")
return r
def local_spec_rad_simulation(self, num_paths=1000, ts_length=1000):
X = self.sim_state(num_paths=num_paths, ts_length=ts_length)
A = self.k0 * np.exp(self.k1 * X)
A = np.prod(A, axis=1)
return A.mean()**(1/ts_length)
def spec_rad_analytic(self):
# Unpack parameters
β, γ, ρ, σ, x0, α = self.β, self.γ, self.ρ, self.σ, self.x0, self.α
b, k0, k1 = self.b, self.k0, self.k1
s = k1 * b / (1 - ρ)
t = k1**2 * σ**2 / (2 * (1 - ρ)**2)
return k0 * exp(s + t)
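    # Usage sketch (illustrative): the three estimates of the spectral radius
    # should roughly agree for the default parameterisation.
    #
    #     m = AbelModel()
    #     m.spec_rad_analytic()
    #     m.local_spec_rad_iterative()
    #     m.local_spec_rad_simulation()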
def calin_test(self):
"""
Implements the contraction test of Calin et al. A return value < 1
indicates contraction.
"""
# Unpack
ρ, σ, γ, x0 = self.ρ, self.σ, self.γ, self.x0
α, k0, k1, b = self.α, self.k0, self.k1, self.b
# Set up
phi = norm()
theta = x0 + σ**2 * (1 + ρ - α) * (1 - γ)
z = abs(ρ * k1) * σ / (1 - abs(ρ))
t1 = k0 * (1 + 2 * phi.cdf(z))
t2 = x0 * k1
t3 = σ**2 * (1 - γ)**2 * (ρ - α) * (2 + ρ - α) / 2
t4 = (ρ * k1 * σ)**2 / (2 * (1 - abs(ρ))**2)
t5 = abs(ρ * k1 * theta) / (1 - abs(ρ))
        return t1 * exp(t2 + t3 + t4 + t5)
"""
Tools for making FSPS templates
"""
import os
from collections import OrderedDict
import numpy as np
import astropy.units as u
from astropy.cosmology import WMAP9
FLAM_CGS = u.erg/u.second/u.cm**2/u.Angstrom
LINE_CGS = 1.e-17*u.erg/u.second/u.cm**2
try:
from dust_attenuation.baseclasses import BaseAttAvModel
except:
BaseAttAvModel = object
from astropy.modeling import Parameter
import astropy.units as u
try:
from fsps import StellarPopulation
except:
# Broken, but imports
StellarPopulation = object
from . import utils
from . import templates
DEFAULT_LABEL = 'fsps_tau{tau:3.1f}_logz{logzsol:4.2f}_tage{tage:4.2f}_av{Av:4.2f}'
WG00_DEFAULTS = dict(geometry='shell', dust_type='mw',
dust_distribution='homogeneous')
class Zafar15(BaseAttAvModel):
"""
Quasar extinction curve from Zafar et al. (2015)
https://ui.adsabs.harvard.edu/abs/2015A%26A...584A.100Z/abstract
"""
name = 'Zafar+15'
#bump_ampl = 1.
Rv = 2.21 # err 0.22
@staticmethod
def Alam(mu, Rv):
"""
klam, eq. 1
"""
x = 1/mu
# My fit
coeffs = np.array([0.05694421, 0.57778243, -0.12417444])
Alam = np.polyval(coeffs, x)*2.21/Rv
# Only above x > 5.90
fuv = x > 5.90
if fuv.sum() > 0:
Afuv = 1/Rv*(-4.678+2.355*x + 0.622*(x-5.90)**2) + 1.
Alam[fuv] = Afuv[fuv]
return Alam
def evaluate(self, x, Av):
if not hasattr(x, 'unit'):
xin = np.atleast_1d(x)*u.micron
else:
xin = np.atleast_1d(x)
mu = xin.to(u.micron).value
alam = self.Alam(mu, self.Rv) #*self.Rv
# Rv = Av/EBV
# EBV=Av/Rv
# Ax = Alam/Av
#
# klam = Alam/EBV
# Alam = klam*EBV = klam*Av/Rv
return np.maximum(alam*Av, 0.)
class ExtinctionModel(BaseAttAvModel):
"""
Modify `dust_extinction.averages.G03_SMCBar` to work as Att
"""
#from dust_extinction.averages import G03_SMCBar
#SMCBar = G03_SMCBar()
curve_type = 'smc'
init_curve = None
#@property
def _curve_model(self):
if self.init_curve == self.curve_type:
return 0
if self.curve_type.upper() == 'SMC':
from dust_extinction.averages import G03_SMCBar as curve
elif self.curve_type.upper() == 'LMC':
from dust_extinction.averages import G03_LMCAvg as curve
elif self.curve_type.upper() in ['MW','F99']:
from dust_extinction.parameter_averages import F99 as curve
else:
raise ValueError(f'curve_type {self.curve_type} not recognized')
self.curve = curve()
self.init_curve = self.curve_type
def evaluate(self, x, Av):
self._curve_model()
if not hasattr(x, 'unit'):
xin = np.atleast_1d(x)*u.Angstrom
else:
xin = np.atleast_1d(x)
xinv = 1./xin.to(u.micron)
if self.curve_type.upper() in ['MW','F99']:
curve = self.curve
klam = curve.evaluate(1/np.clip(xinv,
0.301/u.micron, 9.99/u.micron),
Rv=curve.Rv)
else:
klam = self.curve.evaluate(1/np.clip(xinv,
0.301/u.micron, 9.99/u.micron))
return klam*Av
class SMC(BaseAttAvModel):
"""
Modify `dust_extinction.averages.G03_SMCBar` to work as Att
"""
from dust_extinction.averages import G03_SMCBar
SMCBar = G03_SMCBar()
def evaluate(self, x, Av):
if not hasattr(x, 'unit'):
xin = np.atleast_1d(x)*u.Angstrom
else:
xin = np.atleast_1d(x)
xinv = 1./xin.to(u.micron)
klam = self.SMCBar.evaluate(1/np.clip(xinv,
0.301/u.micron, 9.99/u.micron))
return klam*Av
class Reddy15(BaseAttAvModel):
"""
Attenuation curve from Reddy et al. (2015)
With optional UV bump
https://ui.adsabs.harvard.edu/abs/2015ApJ...806..259R/abstract
"""
name = 'Reddy+15'
#bump_ampl = 1.
bump_ampl = Parameter(description="Amplitude of UV bump",
default=2., min=0., max=10.)
bump_gamma = 0.04
bump_x0 = 0.2175
Rv = 2.505
@staticmethod
def _left(mu):
"""
klam, mu < 0.6 micron
"""
return -5.726 + 4.004/mu - 0.525/mu**2 + 0.029/mu**3 + 2.505
@staticmethod
def _right(mu):
"""
klam, mu > 0.6 micron
"""
return -2.672 - 0.010/mu + 1.532/mu**2 - 0.412/mu**3 + 2.505
@property
def koffset(self):
"""
Force smooth transition at 0.6 micron
"""
return self._left(0.6) - self._right(0.6)
def evaluate(self, x, Av, bump_ampl):
if not hasattr(x, 'unit'):
xin = np.atleast_1d(x)*u.Angstrom
else:
xin = np.atleast_1d(x)
mu = xin.to(u.micron).value
left = mu < 0.6
klam = mu*0.
# Reddy Eq. 8
kleft = self._left(mu)
kright = self._right(mu)
klam[left] = self._left(mu[left])
klam[~left] = self._right(mu[~left]) + self.koffset
# Rv = Av/EBV
# EBV=Av/Rv
# klam = Alam/EBV
# Alam = klam*EBV = klam*Av/Rv
return np.maximum((klam + self.uv_bump(mu, bump_ampl))*Av/self.Rv, 0.)
def uv_bump(self, mu, bump_ampl):
"""
Drude profile for computing the UV bump.
Parameters
----------
x: np array (float)
expects wavelengths in [micron]
x0: float
Central wavelength of the UV bump (in microns).
gamma: float
Width (FWHM) of the UV bump (in microns).
ampl: float
Amplitude of the UV bump.
Returns
-------
np array (float)
lorentzian-like Drude profile
Raises
------
ValueError
Input x values outside of defined range
"""
return bump_ampl * (mu**2 * self.bump_gamma**2 /
((mu**2 - self.bump_x0**2)**2 +
mu**2 * self.bump_gamma**2))
class KC13(BaseAttAvModel):
"""
Kriek & Conroy (2013) attenuation model, extends Noll 2009 with UV bump
amplitude correlated with the slope, delta.
Slightly different from KC13 since the N09 model uses Leitherer (2002)
below 1500 Angstroms.
"""
name = 'Kriek+Conroy2013'
delta = Parameter(description="delta: slope of the power law",
default=0., min=-3., max=3.)
#extra_bump = 1.
extra_params = {'extra_bump':1.}
def _init_N09(self):
from dust_attenuation import averages, shapes, radiative_transfer
# Allow extrapolation
shapes.x_range_N09 = [0.9e-4, 2.e8]
averages.x_range_C00 = [0.9e-4, 2.e8]
averages.x_range_L02 = [0.9e-4, 0.18]
self.N09 = shapes.N09()
def evaluate(self, x, Av, delta):
import dust_attenuation
if not hasattr(self, 'N09'):
self._init_N09()
#Av = np.polyval(self.coeffs['Av'], tau_V)
x0 = 0.2175
gamma = 0.0350
ampl = (0.85 - 1.9*delta)*self.extra_params['extra_bump']
if not hasattr(x, 'unit'):
xin = np.atleast_1d(x)*u.Angstrom
else:
xin = x
if dust_attenuation.__version__ >= '0.0.dev131':
return self.N09.evaluate(xin, x0, gamma, ampl, delta, Av)
else:
return self.N09.evaluate(xin, Av, x0, gamma, ampl, delta)
class ParameterizedWG00(BaseAttAvModel):
coeffs = {'Av': np.array([-0.001, 0.026, 0.643, -0.016]),
'x0': np.array([ 3.067e-19, -7.401e-18, 6.421e-17, -2.370e-16,
3.132e-16, 2.175e-01]),
'gamma': np.array([ 2.101e-06, -4.135e-05, 2.719e-04,
-7.178e-04, 3.376e-04, 4.270e-02]),
'ampl': np.array([-1.906e-03, 4.374e-02, -3.501e-01,
1.228e+00, -2.151e+00, 8.880e+00]),
'slope': np.array([-4.084e-05, 9.984e-04, -8.893e-03,
3.670e-02, -7.325e-02, 5.891e-02])}
    # scale factor / switch for the UV bump (0 turns the bump off)
    include_bump = 0.25
wg00_coeffs = {'geometry': 'shell',
'dust_type': 'mw',
'dust_distribution': 'homogeneous'}
name = 'ParameterizedWG00'
# def __init__(self, Av=1.0, **kwargs):
# """
# Version of the N09 curves fit to the WG00 curves up to tauV=10
# """
# from dust_attenuation import averages, shapes, radiative_transfer
#
# # Allow extrapolation
# shapes.x_range_N09 = [0.01, 1000]
# averages.x_range_C00 = [0.01, 1000]
# averages.x_range_L02 = [0.01, 0.18]
#
# self.N09 = shapes.N09()
def _init_N09(self):
from dust_attenuation import averages, shapes, radiative_transfer
# Allow extrapolation
shapes.x_range_N09 = [0.009, 2.e8]
averages.x_range_C00 = [0.009, 2.e8]
averages.x_range_L02 = [0.009, 0.18]
self.N09 = shapes.N09()
def get_tau(self, Av):
"""
Get the WG00 tau_V for a given Av
"""
tau_grid = np.arange(0, 10, 0.01)
av_grid = np.polyval(self.coeffs['Av'], tau_grid)
return np.interp(Av, av_grid, tau_grid, left=0., right=tau_grid[-1])
def evaluate(self, x, Av):
import dust_attenuation
if not hasattr(self, 'N09'):
self._init_N09()
tau_V = self.get_tau(Av)
#Av = np.polyval(self.coeffs['Av'], tau_V)
x0 = np.polyval(self.coeffs['x0'], tau_V)
gamma = np.polyval(self.coeffs['gamma'], tau_V)
if self.include_bump:
            ampl = np.polyval(self.coeffs['ampl'], tau_V)
import ipdb
import torch
import torch.nn.functional as F
import time
import os
import sys
import numpy as np
from numpy import nonzero
from imageio import imwrite
from utils import AverageMeter
from models.sal_losses import cc_score, nss_score, similarity, auc_judd, auc_shuff_np
def normalize_data(data):
data_min = np.min(data)
data_max = np.max(data)
data_norm = np.clip((data - data_min) *
(255.0 / (data_max - data_min)),
0, 255).astype(np.uint8)
return data_norm
def save_video_results(output_buffer, save_path):
video_outputs = torch.stack(output_buffer)
for i in range(video_outputs.size()[0]):
save_name = os.path.join(save_path, 'pred_sal_{0:06d}.jpg'.format(i + 9))
imwrite(save_name, normalize_data(video_outputs[i][0].numpy()))
def test(data_loader, model, opt):
print('test')
model.eval()
with torch.no_grad():
batch_time = AverageMeter()
data_time = AverageMeter()
end_time = time.time()
output_buffer = []
previous_video_id = ''
cc = AverageMeter()
nss = AverageMeter()
sim = AverageMeter()
auc_j = AverageMeter()
for i, (data, targets, valid) in enumerate(data_loader):
data_time.update(time.time() - end_time)
if not opt.no_cuda:
targets['salmap'] = targets['salmap'].cuda()
targets['binmap'] = targets['binmap'].cuda()
valid['sal'] = valid['sal'].cuda()
inputs = data['rgb']
curr_batch_size = inputs.size()[0]
targets['salmap'] = targets['salmap'].float()
targets['binmap'] = targets['binmap'].float()
valid['sal'] = valid['sal'].float()
while inputs.size()[0] < opt.batch_size:
inputs = torch.cat((inputs, inputs[0:1, :]), 0)
while data['audio'].size(0) < opt.batch_size:
data['audio'] = torch.cat((data['audio'], data['audio'][0:1, :]), 0)
outputs = model(inputs, data['audio'])
            # ipdb.set_trace()  # debugging breakpoint left in the original; disabled here
outputs['sal'][-1] = outputs['sal'][-1][0:curr_batch_size, :]
cc_test = cc_score(outputs['sal'][-1], targets['salmap'], valid['sal'])
nss_test = nss_score(outputs['sal'][-1], targets['binmap'], valid['sal'])
sim_test = similarity(outputs['sal'][-1], targets['salmap'])
auc_j_test = auc_judd(outputs['sal'][-1], targets['binmap'])
auc_j.update(torch.mean(auc_j_test), nonzero(valid['sal'])[:, 0].size(0))
if not opt.no_sigmoid_in_test:
outputs['sal'] = torch.sigmoid(outputs['sal'][-1])
if sum(valid['sal']) > 0:
cc_tmp = cc_test / nonzero(valid['sal'])[:, 0].size(0)
                nss_tmp = nss_test / nonzero(valid['sal'])[:, 0].size(0)
# -*- coding: utf-8 -*-
from _functools import partial
from numerik import nr_ls
import numpy as np
# noinspection PyAugmentAssignment
def calc_xieq(
n0,
mm,
z,
s_index,
kc,
nu_ij,
neq_0,
xieq_0,
t_abs,
method,
max_it,
tol,
method_loops,
notify_status_func,
process_func_handle):
"""Newton method for non-linear algebraic system, with line-search
:return: tuple with neq, xieq, f_0
    :param n0: np.matrix (n x 1) - mol - feed
    :param mm: np.matrix (n x 1) - molar mass
    :param z: np.matrix (n x 1) - charge - feed
    :param s_index: int - solvent index
    :param kc: np.matrix (nr x 1) - equilibrium "constant" of reaction j @ T
    :param nu_ij: np.matrix (n x nr) - stoich. coefficient of component i in reaction j
    :param t_abs: float - temperature T in Kelvin
    :param neq_0: np.matrix (n x 1) - mol - initial estimate at equilibrium
    :param xieq_0: np.matrix (nr x 1) - extent of reaction j - initial estimate at equilibrium
    :param method: str - method in use: 'ideal_solution', 'davies', 'debye-hueckel'
    :param max_it: int - maximum number of iterations
    :param tol: float - error, maximum tolerance
    :param method_loops: list (1 x 2): completed loops [backtrack, total]
    :param notify_status_func: function to call on each iteration update
    :param process_func_handle: function to call for cancellation
"""
n = len(n0)
nr = nu_ij.shape[1]
mm_0 = mm[s_index].item()
a_m = a_m_d_h(t_abs)
meq_0 = neq_0 / (mm_0 * neq_0[s_index])
ionic_str_eq_0 = 1 / 2.0 * np.power(z, 2).T * meq_0
gammaeq = np.ones([n, 1])
gammaeq_0 = gammaeq
neq = neq_0
meq = meq_0
xieq = xieq_0
ionic_str_eq = ionic_str_eq_0
m0_ref = 1 / 1000.0 # ref. 1mol/kgsolvent conv. to mol/gsolvent
ionic_str_eq_adim = ionic_str_eq / m0_ref
f = partial(
f_gl_0_ideal,
n0=n0,
nu_ij=nu_ij,
n=n,
nr=nr,
kc=kc,
mm_0=mm_0,
s_index=s_index)
j = partial(
jac_ideal,
n0=n0,
nu_ij=nu_ij,
n=n,
nr=nr,
kc=kc,
mm_0=mm_0,
s_index=s_index)
# x is [n_0, n_1, n_2, ..., n_n, xi_1, xi_2, xi_3, ..., xi_{nr}]
x0 = np.concatenate([neq_0, xieq_0])
component_order = [s_index]
component_order.extend(
[index for (index, x) in enumerate(meq_0) if index != s_index]
)
component_order_map = \
sorted([(origindex, orderedindex) for (orderedindex, origindex)
in enumerate(component_order)], key=lambda y: y[0])
return_to_original_indexes = [x[1] for x in component_order_map]
if method == 'ideal_solution':
# case already contemplated above
pass
elif method == 'davies' or method == 'debye-hueckel':
# x is [n_0, m_1, m_2, ..., m_n, xi_1, xi_2, ..., xi_{nr}, \gamma_1,
# \gamma_2, ..., \gamma_n, I]
ordered_meq_0 = np.matrix(
[meq_0[index].item() for index in component_order]
).T
ordered_gammaeq_0 = np.matrix(
[gammaeq_0[index].item() for index in component_order]
).T
ordered_nu_ij = np.concatenate(
[nu_ij[index] for index in component_order]
)
ordered_n0 = np.matrix(
[n0[index].item() for index in component_order]
).T
ordered_z = np.matrix(
[z[index].item() for index in component_order]
).T
ordered_neq_0 = np.matrix(
[neq_0[index].item() for index in component_order]
).T
if method == 'davies':
ordered_gammaeq_0[0] = gamma_solvent_id(mm_0, ordered_meq_0[1:])
ordered_gammaeq_0[1:] = np.multiply(
gamma_davies(ordered_z[1:], ionic_str_eq_adim, a_m),
gamma_setchenow(ordered_z[1:], ionic_str_eq_adim, 0.1))
f = partial(
f_gl_0_davies,
n0=ordered_n0,
nu_ij=ordered_nu_ij,
n=n,
nr=nr,
kc=kc,
z=ordered_z,
mm_0=mm_0,
a_m=a_m)
j = partial(
jac_davies,
n0=ordered_n0,
nu_ij=ordered_nu_ij,
n=n,
nr=nr,
kc=kc,
z=ordered_z,
mm_0=mm_0,
a_m=a_m)
elif method == 'debye-hueckel':
ordered_gammaeq_0[0] = gamma_solvent_id(mm_0, ordered_meq_0[1:])
ordered_gammaeq_0[1:] = np.multiply(
gamma_d_h(ordered_z[1:], ionic_str_eq_adim, a_m),
gamma_setchenow(ordered_z[1:], ionic_str_eq_adim, 0.1))
f = partial(
f_gl_0_d_h,
n0=ordered_n0,
nu_ij=ordered_nu_ij,
n=n,
nr=nr,
kc=kc,
z=ordered_z,
mm_0=mm_0,
a_m=a_m)
j = partial(
jac_d_h,
n0=ordered_n0,
nu_ij=ordered_nu_ij,
n=n,
nr=nr,
kc=kc,
z=ordered_z,
mm_0=mm_0,
a_m=a_m)
x0 = np.concatenate(
[
ordered_neq_0[0],
ordered_meq_0[1:],
xieq_0,
ordered_gammaeq_0,
ionic_str_eq_0
])
progress_k, stop, outer_it_k, outer_it_j, \
lambda_ls, accum_step, x, \
diff, f_val, lambda_ls_y, \
method_loops = \
nr_ls(x0=x0,
f=f,
j=j,
tol=tol,
max_it=max_it,
inner_loop_condition=lambda x_vec:
all([item >= 0 for item in
x_vec[0:n]]),
notify_status_func=notify_status_func,
method_loops=method_loops,
process_func_handle=process_func_handle)
if method == 'ideal_solution':
neq = x[0:n]
xieq = x[n:n + nr]
meq = neq / (neq[s_index] * mm_0)
gammaeq = gammaeq_0
ionic_str_eq = 1 / 2.0 * np.power(z, 2).T * meq
elif method == 'davies' or method == 'debye-hueckel':
neq0 = x[0:n][0]
meq[1:n] = x[0:n][1:n]
meq[0] = 1 / mm_0
neq = meq * neq0 * mm_0
# Reorder to output original order
meq = meq[return_to_original_indexes]
neq = neq[return_to_original_indexes]
xieq = x[n:n + nr]
gammaeq = x[n + nr:n + nr + n][return_to_original_indexes]
ionic_str_eq = x[n + nr + n]
return neq, meq, xieq, gammaeq, ionic_str_eq, method_loops
def f_gl_0_ideal(x, n0, nu_ij, n, nr, kc, mm_0, s_index):
neq = x[0:n, 0]
n0_mm0 = neq[s_index] * mm_0
m0_ref = 1 / 1000.0 # ref. 1mol/kgsolvent conv. to mol/gsolvent
meq = neq / n0_mm0 # mol/gsolvent
xieq = x[n:n + nr, 0]
result = np.matrix(np.empty([n + nr, 1], dtype=float))
result[0:n] = -neq + n0 + nu_ij * xieq
result[n:n + nr] = -kc + np.prod(np.power(meq / m0_ref, nu_ij), 0).T
return result
def jac_ideal(x, n0, nu_ij, n, nr, kc, mm_0, s_index):
neq = x[0:n, 0]
n0_mm0 = neq[s_index] * mm_0
m0_ref = 1 / 1000.0 # ref. 1mol/kgsolvent conv. to mol/gsolvent
meq = neq / n0_mm0
diag_1_ov_meq = np.diagflat(np.power(meq, -1), 0)
diag_quotient = np.diagflat(np.prod(np.power(meq / m0_ref, nu_ij), 0))
result = np.matrix(np.zeros([n + nr, n + nr], dtype=float))
result[0:n, 0:n] = -1 * np.eye(n).astype(float)
result[0:n, n:n + nr] = nu_ij
# Return Jacobian terms as n calculated from molality (m)
result[n:n + nr, 0:n] = \
diag_quotient * nu_ij.T * diag_1_ov_meq * 1 / n0_mm0
return result
def f_gl_0_davies(x, n0, nu_ij, n, nr, kc, z, mm_0, a_m):
# f(x) = 0, objective function set for Davies model.
neq = np.zeros([n, 1])
meq = np.zeros([n, 1])
# x is [n0 m1 m2 ... m_n xi1 xi2 ... xi_nr gamma1 gamma2 ... gamma_n
# ionic_str]
neq[0] = x[0]
meq[1:n] = x[1:n]
xieq = x[n:n + nr]
gammaeq = x[n + nr:n + nr + n]
ionic_str = x[n + nr + n]
# calculate neq for all components
n0_mm0 = neq[0] * mm_0
meq[0] = 1 / mm_0
neq = meq * n0_mm0
m0_ref = 1 / 1000.0 # ref. 1mol/kgsolvent conv. to mol/gsolvent
ionic_str_adim = ionic_str / m0_ref
result = np.matrix(np.empty([n + nr + n + 1, 1], dtype=float))
result[0:n] = -neq + n0 + nu_ij * xieq
result[n:n + nr] = -kc + np.multiply(
np.prod(np.power(meq / m0_ref, nu_ij), 0).T,
np.prod(np.power(gammaeq, nu_ij), 0).T
)
result[n + nr] = \
-gammaeq[0] + gamma_solvent_id(mm_0, meq[1:n])
result[n + nr + 1:n + nr + n] = \
- gammaeq[1:] + \
+ np.multiply(
gamma_davies(z[1:], ionic_str_adim, a_m),
gamma_setchenow(z[1:], ionic_str_adim, 0.1))
result[n + nr + n] = \
-ionic_str + 1 / 2.0 * np.power(z, 2).T * meq
return result
def jac_davies(x, n0, nu_ij, n, nr, kc, z, mm_0, a_m):
# j(x), Jacobian matrix for Davies model.
neq = np.zeros([n, 1])
meq = np.zeros([n, 1])
# x is [n0 m1 m2 ... m_n xi1 xi2 ... xi_nr gamma1 gamma2 ... gamma_n
# ionic_str]
neq[0] = x[0].item()
meq[1:n] = x[1:n]
xieq = x[n:n + nr]
gammaeq = x[n + nr:n + nr + n]
ionic_str = x[n + nr + n].item()
# calculate neq for all components
n0_mm0 = (neq[0] * mm_0).item()
meq[0] = 1 / mm_0
neq = meq * n0_mm0
m0_ref = 1 / 1000.0 # ref. 1mol/kgsolvent conv. to mol/gsolvent
ionic_str_adim = ionic_str / m0_ref
sqrt_ionic_str_adim = np.sqrt(ionic_str_adim)
diag_quotient = np.diagflat(
np.prod(
np.power(
np.multiply(gammaeq, meq / m0_ref),
nu_ij),
0)
)
result = np.matrix(
np.zeros([n + nr + n + 1, n + nr + n + 1], dtype=float)
)
result[0:n, 0:n] = \
np.diagflat(
np.concatenate(
[-1.0 * np.matrix([1]),
-n0_mm0 * np.matrix(np.ones([n - 1, 1]))]
)
)
result[1:n, 0] = -meq[1:] * mm_0
result[0:n, n:n + nr] = nu_ij
result[n + nr:n + nr + n + 1, n + nr: n + nr + n + 1] = \
-1.0 * np.eye(n + 1)
result[n:n + nr, 0:n] = \
diag_quotient * nu_ij.T * np.diagflat(
np.concatenate(
[np.matrix(0.0), 1 / meq[1:]]
)
)
result[n:n + nr, n + nr:n + nr + n] = \
diag_quotient * nu_ij.T * np.diagflat(
1 / gammaeq
)
gamma0_ov_phi = np.exp(-1.0 * mm_0 * sum(meq[1:])).item()
result[n + nr, 1:n] = -1.0 * mm_0 * gamma0_ov_phi
factor_1 = \
sqrt_ionic_str_adim / (1 + sqrt_ionic_str_adim) \
- 0.3 * ionic_str_adim
dfactor_1_di = \
(1 / m0_ref) * (-0.3 + 1 / (2 * sqrt_ionic_str_adim *
(1 + sqrt_ionic_str_adim) ** 2))
factor_2 = np.power(10,
-a_m *
np.power(z[1:], 2) *
factor_1 +
(1 -
np.power(np.sign(z[1:]), 2)) *
0.1 *
ionic_str_adim)
result[n + nr + 1:n + nr + n, n + nr + n] = \
np.multiply(
np.log(10.0) * (
-a_m * np.power(z[1:], 2) *
dfactor_1_di +
(1 - np.power(np.sign(z[1:]), 2)) * 0.1 / m0_ref),
factor_2)
result[n + nr + n, 1:n] = \
1 / 2.0 * np.power(z[1:].T, 2.0)
return result
def f_gl_0_d_h(x, n0, nu_ij, n, nr, kc, z, mm_0, a_m):
# f(x) = 0, objective function set for Debye-Hueckel model.
neq = np.zeros([n, 1])
meq = np.zeros([n, 1])
# x is [n0 m1 m2 ... m_n xi1 xi2 ... xi_nr gamma1 gamma2 ... gamma_n
# ionic_str]
neq[0] = x[0]
meq[1:n] = x[1:n]
xieq = x[n:n + nr]
gammaeq = x[n + nr:n + nr + n]
ionic_str = x[n + nr + n]
# calculate neq for all components
n0_mm0 = neq[0] * mm_0
meq[0] = 1 / mm_0
neq = meq * n0_mm0
m0_ref = 1 / 1000.0 # ref. 1mol/kgsolvent conv. to mol/gsolvent
ionic_str_adim = ionic_str / m0_ref
result = np.matrix(np.empty([n + nr + n + 1, 1], dtype=float))
result[0:n] = -neq + n0 + nu_ij * xieq
result[n:n + nr] = -kc + np.multiply(
np.prod(np.power(meq / m0_ref, nu_ij), 0).T,
np.prod(np.power(gammaeq, nu_ij), 0).T
)
result[n + nr] = \
-gammaeq[0] + gamma_solvent_id(mm_0, meq[1:n])
result[n + nr + 1:n + nr + n] = \
- gammaeq[1:] + \
+ np.multiply(
gamma_d_h(z[1:], ionic_str_adim, a_m),
gamma_setchenow(z[1:], ionic_str_adim, 0.1))
result[n + nr + n] = \
-ionic_str + 1 / 2.0 * np.power(z, 2).T * meq
return result
def jac_d_h(x, n0, nu_ij, n, nr, kc, z, mm_0, a_m):
# j(x), Jacobian matrix for Debye-Hueckel model.
neq = np.zeros([n, 1])
meq = np.zeros([n, 1])
# x is [n0 m1 m2 ... m_n xi1 xi2 ... xi_nr gamma1 gamma2 ... gamma_n
# ionic_str]
neq[0] = x[0].item()
meq[1:n] = x[1:n]
xieq = x[n:n + nr]
gammaeq = x[n + nr:n + nr + n]
ionic_str = x[n + nr + n].item()
# calculate neq for all components
n0_mm0 = (neq[0] * mm_0).item()
meq[0] = 1 / mm_0
neq = meq * n0_mm0
m0_ref = 1 / 1000.0 # ref. 1mol/kgsolvent conv. to mol/gsolvent
ionic_str_adim = ionic_str / m0_ref
sqrt_ionic_str_adim = np.sqrt(ionic_str_adim)
diag_quotient = np.diagflat(
np.prod(
np.power(
np.multiply(gammaeq, meq / m0_ref),
nu_ij),
0)
)
result = np.matrix(
np.zeros([n + nr + n + 1, n + nr + n + 1], dtype=float)
)
result[0:n, 0:n] = \
np.diagflat(
np.concatenate(
[-1.0 * np.matrix([1]),
-n0_mm0 * np.matrix(np.ones([n - 1, 1]))]
)
)
result[1:n, 0] = -meq[1:] * mm_0
result[0:n, n:n + nr] = nu_ij
result[n + nr:n + nr + n + 1, n + nr: n + nr + n + 1] = \
-1.0 * np.eye(n + 1)
result[n:n + nr, 0:n] = \
diag_quotient * nu_ij.T * np.diagflat(
np.concatenate(
                # completed by analogy with jac_davies above
                [np.matrix(0.0), 1 / meq[1:]]
            )
        )
import random
import numpy as np
from scipy.sparse import csc_matrix, lil_matrix
def binary_search(array, x):
"""
Binary search
:param array: array: Must be sorted
:param x: value to search
:return: position where it is found, -1 if not found
"""
lower = 0
upper = len(array)
while lower < upper: # use < instead of <=
mid = lower + (upper - lower) // 2 # // is the integer division
val = array[mid]
if x == val:
return mid
elif x > val:
if lower == mid:
break
lower = mid
elif x < val:
upper = mid
return -1
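# Usage sketch (illustrative): the array must already be sorted.
#
#     binary_search([1, 3, 5, 7], 5)   # -> 2
#     binary_search([1, 3, 5, 7], 4)   # -> -1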
def slice(A: csc_matrix, rows, cols):
"""
CSC matrix sub-matrix view
Only works if rows is sorted
:param A: CSC matrix to get the view from
:param rows: array of selected rows: must be sorted! to use the binary search
:param cols: array of columns: should be sorted
:return:
"""
n_rows = len(rows)
n_cols = len(cols)
n = 0
p = 0
new_val = np.empty(A.nnz)
new_row_ind = np.empty(A.nnz)
new_col_ptr = np.empty(n_cols + 1)
new_col_ptr[p] = 0
for j in cols: # sliced columns
for k in range(A.indptr[j], A.indptr[j + 1]): # columns from A
found_idx = binary_search(rows, A.indices[k]) # look for the row index of A in the rows vector
if found_idx > -1:
new_val[n] = A.data[k]
new_row_ind[n] = found_idx
n += 1
p += 1
new_col_ptr[p] = n
new_col_ptr[p] = n
new_val = np.resize(new_val, n)
new_row_ind = np.resize(new_row_ind, n)
return csc_matrix((new_val, new_row_ind, new_col_ptr), shape=(n_rows, n_cols))
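# Usage sketch (illustrative): take rows [0, 2] and columns [1, 2] of a small
# sparse matrix ("rows" must be sorted for the binary search to work).
#
#     A = csc_matrix(np.array([[1, 0, 2],
#                              [0, 3, 0],
#                              [4, 0, 5]]))
#     slice(A, rows=[0, 2], cols=[1, 2]).toarray()   # -> [[0, 2], [0, 5]]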
def csc_sub_matrix(Am, Annz, Ap, Ai, Ax, rows, cols):
"""
CSC matrix sub-matrix slice
Works for sorted and unsorted versions of "rows", but "rows" cannot contain duplicates
:param Am: number of rows
:param Annz: number of non-zero entries
:param Ap: Column pointers
:param Ai: Row indices
:param Ax: Data
:param rows: array of selected rows: must be sorted! to use the binary search
:param cols: array of columns: should be sorted
:return: new_val, new_row_ind, new_col_ptr, n_rows, n_cols
"""
n_rows = len(rows)
n_cols = len(cols)
nnz = 0
p = 0
new_val = np.empty(Annz)
new_row_ind = np.empty(Annz)
new_col_ptr = np.empty(n_cols + 1)
new_col_ptr[p] = 0
# generate lookup -> index lookup
lookup = np.zeros(Am, dtype=int)
lookup[rows] = np.arange(len(rows), dtype=int)
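    # rows that were not selected keep the default lookup value 0, so membership is
    # re-checked below with `rows[ii] == i` before an entry is copied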
for j in cols: # sliced columns
for k in range(Ap[j], Ap[j + 1]): # columns from A
# row index translation to the "rows" space
i = Ai[k]
ii = lookup[i]
if rows[ii] == i: # entry found
new_val[nnz] = Ax[k]
new_row_ind[nnz] = ii
nnz += 1
p += 1
new_col_ptr[p] = nnz
new_col_ptr[p] = nnz
new_val = np.resize(new_val, nnz)
new_row_ind = np.resize(new_row_ind, nnz)
return new_val, new_row_ind, new_col_ptr, n_rows, n_cols
def slice2(A: csc_matrix, rows, cols):
"""
CSC matrix sub-matrix view
Works for unsorted versions of rows, but rows cannot contain repetitions
:param A: CSC matrix to get the view from
:param rows: array of selected rows: must be sorted! to use the binary search
:param cols: array of columns: should be sorted
:return:
"""
new_val, new_row_ind, new_col_ptr, n_rows, n_cols = csc_sub_matrix(Am=A.shape[0], Annz=A.nnz,
Ap=A.indptr, Ai=A.indices, Ax=A.data,
rows=rows, cols=cols)
return csc_matrix((new_val, new_row_ind, new_col_ptr), shape=(n_rows, n_cols))
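# Illustrative check (editor's example): `slice2` (and `slice`, when `rows` is sorted)
# should reproduce SciPy's own row/column selection.
#   >>> A = csc_matrix(np.arange(16).reshape(4, 4))
#   >>> rows, cols = [0, 2], [1, 3]
#   >>> (slice2(A, rows, cols) != A[rows, :][:, cols]).nnz
#   0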
def slice_r(A: csc_matrix, rows):
"""
CSC matrix sub-matrix view
:param A: CSC matrix to get the view from
:param rows: array of selected rows: must be sorted! to use the binary search
:return:
"""
n_rows = len(rows)
n_cols = A.shape[1]
n = 0
p = 0
    new_val = np.empty(A.nnz)
from io import FileIO
import numpy as np
from scipy import ndimage
from matplotlib import pyplot as plt
import yaml
import csv
from PIL import Image
import ReferenceModification.LibFunctions as lib
class TrackMap:
def __init__(self, map_name) -> None:
self.map_name = map_name
# map info
self.resolution = None
self.origin = None
self.n_obs = None
self.map_height = None
self.map_width = None
self.start_pose = None
self.obs_size = None
self.end_goal = None
self.map_img = None
self.dt_img = None
self.obs_img = None #TODO: combine to single image with dt for faster scan
self.load_map()
self.ss = None
self.wpts = None
self.t_pts = None
self.nvecs = None
self.ws = None
self.ref_pts = None # std wpts that aren't expanded
self.ss_normal = None # not expanded
self.diffs = None
self.l2s = None
try:
# raise FileNotFoundError
self._load_csv_track()
except FileNotFoundError:
print(f"Problem Loading map - generate new one")
def load_map(self):
file_name = 'maps/' + self.map_name + '.yaml'
with open(file_name) as file:
documents = yaml.full_load(file)
yaml_file = dict(documents.items())
try:
self.resolution = yaml_file['resolution']
self.origin = yaml_file['origin']
self.n_obs = yaml_file['n_obs']
self.obs_size = yaml_file['obs_size']
map_img_path = 'maps/' + yaml_file['image']
self.start_pose = np.array(yaml_file['start_pose'])
except Exception as e:
print(f"Problem loading, check key: {e}")
raise FileIO("Problem loading map yaml file")
self.end_goal = self.start_pose[0:2]
self.map_img = np.array(Image.open(map_img_path).transpose(Image.FLIP_TOP_BOTTOM))
self.map_img = self.map_img.astype(np.float64)
if len(self.map_img.shape) == 3:
self.map_img = self.map_img[:, :, 0]
        self.obs_img = np.zeros_like(self.map_img)
import os, sys, math
import torch
import matplotlib.pyplot as plt
def convert_grid2prob(grid, threshold=0.1, temperature=1):
threshold = torch.max(grid) - threshold*(torch.max(grid)-torch.min(grid))
grid[grid>threshold] = torch.tensor(float('inf'))
prob = torch.exp(-temperature*grid) / torch.sum(torch.exp(-temperature*grid))
return prob
def convert_coords2px(coords, x_range, y_range, x_max_px, y_max_px, y_flip=False):
if not isinstance(x_range, (tuple, list)):
x_range = (0, x_range)
if not isinstance(y_range, (tuple, list)):
y_range = (0, y_range)
x_ratio_coords2idx = x_max_px / (x_range[1]-x_range[0])
y_ratio_coords2idx = y_max_px / (y_range[1]-y_range[0])
px_idx_x = coords[:,0]*x_ratio_coords2idx
if y_flip:
px_idx_y = y_max_px-coords[:,1]*y_ratio_coords2idx
else:
px_idx_y = coords[:,1]*y_ratio_coords2idx
px_idx_x[px_idx_x>=x_max_px] = x_max_px-1
px_idx_y[px_idx_y>=y_max_px] = y_max_px-1
px_idx_x[px_idx_x<0] = 0
px_idx_y[px_idx_y<0] = 0
px_idx = torch.stack((px_idx_x, px_idx_y), dim=1)
return px_idx.int()
def convert_px2cell(pxs, x_grid, y_grid, device='cuda'): # pixel to grid cell index
cell_idx = torch.zeros_like(pxs)
for i in range(pxs.shape[0]):
cell_idx[i,0] = torch.where( pxs[i,0]>=torch.tensor(x_grid).to(device) )[0][-1]
cell_idx[i,1] = torch.where( pxs[i,1]>=torch.tensor(y_grid).to(device) )[0][-1]
return cell_idx.int()
def get_weight(grid, index, sigma=1, rho=0):
# grid is HxW
# index is a pair of numbers
# return weight in [0,1]
grid = grid.cpu()
index = index.cpu()
if sigma <= 0: # one-hot
weight = torch.zeros_like(grid)
weight[index[1],index[0]] = 1
return weight
if not isinstance(sigma, (tuple, list)):
sigma = (sigma, sigma)
sigma_x, sigma_y = sigma[0], sigma[1]
x = torch.arange(0, grid.shape[0])
y = torch.arange(0, grid.shape[1])
x, y = torch.meshgrid(x, y)
in_exp = -1/(2*(1-rho**2)) * ((x-index[1])**2/(sigma_x**2)
+ (y-index[0])**2/(sigma_y**2)
- 2*rho*(x-index[0])/(sigma_x)*(y-index[1])/(sigma_y))
z = 1/(2*math.pi*sigma_x*sigma_y*math.sqrt(1-rho**2)) * torch.exp(in_exp)
weight = z/z.max()
weight[weight<0.1] = 0
return weight
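# Illustrative usage (editor's example): with sigma <= 0 the target weight collapses
# to a one-hot grid at (index[1], index[0]); with a positive sigma it becomes a
# truncated Gaussian bump whose peak is normalised to 1.
#   >>> g = torch.zeros(5, 5)
#   >>> get_weight(g, torch.tensor([2, 3]), sigma=0)[3, 2]
#   tensor(1.)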
def loss_nll(data, label, device='cuda'):
# data is the energy grid, label should be the index (i,j) meaning which grid to choose
# data - BxCxHxW
# label - BxC
weight = torch.tensor([]).to(device) # in batch
for i in range(data.shape[0]):
w = get_weight(data[i,0,:,:], label[i,:])
weight = torch.cat((weight, w.unsqueeze(0).to(device))) # Gaussian fashion [CxHxW]
numerator_in_log = torch.logsumexp(-data+torch.log(weight.unsqueeze(1)), dim=(2,3))
denominator_in_log = torch.logsumexp(-data, dim=(2,3))
l2 = torch.sum(torch.pow(data,2),dim=(2,3)) / (data.shape[2]*data.shape[3])
nll = - numerator_in_log + denominator_in_log + 0.00*l2
return torch.mean(nll)
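# In equation form (editor's note), with E the predicted energy grid and w the Gaussian
# target weights built by get_weight, the per-sample loss computed above is
#     NLL = -log( sum_ij w_ij * exp(-E_ij) ) + log( sum_ij exp(-E_ij) )
# i.e. the negative log of the weight-averaged Boltzmann probability of the grid,
# evaluated with logsumexp for numerical stability (plus an L2 penalty on E whose
# coefficient is set to 0.00 here).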
def loss_mse(data, labels): # for batch
# data, labels - BxMxC
squared_diff = torch.square(data-labels)
squared_sum = torch.sum(squared_diff, dim=2) # BxM
loss = squared_sum/data.shape[0] # BxM
return loss
def loss_msle(data, labels): # for batch
# data, labels - BxMxC
squared_diff = torch.square(torch.log(data)-torch.log(labels))
squared_sum = torch.sum(squared_diff, dim=2) # BxM
loss = squared_sum/data.shape[0] # BxM
return loss
def loss_mae(data, labels): # for batch
# data, labels - BxMxC
abs_diff = torch.abs(data-labels)
abs_sum = torch.sum(abs_diff, dim=2) # BxM
loss = abs_sum/data.shape[0] # BxM
return loss
if __name__ == '__main__':
import numpy as np
from pathlib import Path
from torchvision import transforms
sys.path.append(str(Path(__file__).resolve().parents[1]))
from data_handle.data_handler import ToTensor, Rescale
from data_handle.data_handler import ImageStackDataset, DataHandler
project_dir = Path(__file__).resolve().parents[2]
data_dir = os.path.join(project_dir, 'Data/MAD_1n1e')
csv_path = os.path.join(project_dir, 'Data/MAD_1n1e/all_data.csv')
composed = transforms.Compose([Rescale((200,200), tolabel=False), ToTensor()])
dataset = ImageStackDataset(csv_path=csv_path, root_dir=data_dir, channel_per_image=1, transform=composed, T_channel=False)
myDH = DataHandler(dataset, batch_size=2, shuffle=False, validation_prop=0.2, validation_cache=5)
img = torch.cat((dataset[0]['image'].unsqueeze(0), dataset[1]['image'].unsqueeze(0)), dim=0) # BxCxHxW
label = torch.cat((dataset[0]['label'].unsqueeze(0), dataset[1]['label'].unsqueeze(0)), dim=0)
print(img.shape)
print(label)
    x_grid = np.arange(0, 201, 8)
import numpy as np
from itertools import product
from itertools import permutations
import matplotlib.pyplot as plt
import pickle
import os
import stimulus
import parameters
import analysis
class Motifs:
def __init__(self, data_dir, file_prefix, N = None):
self.motifs = {}
self.motif_sizes = [2,3,4]
data_files = os.listdir(data_dir)
for f in data_files:
if f.startswith(file_prefix):
print('Processing ', f)
self.current_filename = f
W, v = self.make_matrix(data_dir + f, 'elim_lesion', N)
print(type(W))
if type(W) is list:
for i,w1 in enumerate(W):
self.find_motifs(w1, v)
else:
self.find_motifs(W, v)
self.print_motif_list()
def make_matrix(self, filename, method, N):
x = pickle.load(open(filename, 'rb'))
beh_threshold = 0.1
val_th = 0.1
ind_accurate = np.where(np.array(x['accuracy_hist']) > 0.98)[0]
#N = np.argmax(ind_accurate)
#N = 6
print('N = ', N)
if method == 'elim_lesion' or method == 'elim':
parameters.update_parameters(x['par'])
s = stimulus.Stimulus()
trial_info = s.generate_trial()
if method == 'lesion':
significant_weights_rnn = x['model_performance']['accuracy'][-1] - x['lesion_accuracy_rnn'][0,:,:] > beh_threshold
significant_weights_out = x['model_performance']['accuracy'][-1] - x['lesion_accuracy_out'][0,:,:] > beh_threshold
v = np.array([0]*x['parameters']['num_exc_units'] + [1]*x['parameters']['num_inh_units'] \
+ [2]*x['parameters']['n_output'])
W = np.vstack((significant_weights_rnn, significant_weights_out))
d = W.shape[0] - W.shape[1]
W = np.hstack((W, np.zeros((W.shape[0], d))))
elif method == 'elim':
num_units = 50 - N
w1 = np.zeros((num_units, num_units))
w2 = np.zeros((3, num_units))
ind = np.where(x['gate_hist'][N]>0)[0]
for i in range(num_units):
for j in range(num_units):
w1[i,j] = x['weights_hist'][N]['w_rnn'][ind[i], ind[j]] > val_th
for j in range(3):
w2[j,i] = x['weights_hist'][N]['w_out'][j, ind[i]] > val_th
n_exc = int(np.sum(x['gate_hist'][N][:x['par']['num_exc']]))
n_inh = int(np.sum(x['gate_hist'][N][x['par']['num_exc']:]))
v = np.array([0]*n_exc + [1]*n_inh + [2]*x['par']['n_output'])
W = np.vstack((w1, w2))
d = W.shape[0] - W.shape[1]
W = np.hstack((W, np.zeros((W.shape[0], d))))
elif method == 'elim_lesion':
num_units = 50 - N
r = analysis.lesion_weights(trial_info, x['par']['h_init'], x['par']['syn_x_init'], x['par']['syn_u_init'], \
x['weights_hist'][N], x['gate_hist'][N])
#plt.imshow(np.squeeze(r['lesion_accuracy_rnn']), aspect='auto', interpolation = 'none')
#plt.colorbar()
#plt.show()
w1_full = np.tile(x['accuracy_hist'][N],(x['par']['n_hidden'],x['par']['n_hidden'])) - np.squeeze(r['lesion_accuracy_rnn']) > beh_threshold
w2_full = np.tile(x['accuracy_hist'][N],(x['par']['n_output'],x['par']['n_hidden'])) - np.squeeze(r['lesion_accuracy_out']) > beh_threshold
w1 = np.zeros((num_units, num_units))
w2 = np.zeros((3, num_units))
ind = np.where(x['gate_hist'][N]>0)[0]
for i in range(num_units):
for j in range(num_units):
w1[i,j] = w1_full[ind[i], ind[j]]
for j in range(3):
w2[j,i] = w2_full[j, ind[i]]
#plt.imshow(w1, aspect='auto', interpolation = 'none')
#plt.colorbar()
#plt.show()
print('accuracy ', x['accuracy_hist'][N])
n_exc = int(np.sum(x['gate_hist'][N][:x['par']['num_exc']]))
n_inh = int(np.sum(x['gate_hist'][N][x['par']['num_exc']:]))
v = np.array([0]*n_exc + [1]*n_inh + [2]*x['par']['n_output'])
W = np.vstack((w1, w2))
d = W.shape[0] - W.shape[1]
W = np.hstack((W, np.zeros((W.shape[0], d))))
plt.imshow(W, aspect='auto', interpolation = 'none')
plt.colorbar()
plt.show()
print(v)
elif method == 'stacked':
W = []
for i in range(x['W_rnn'].shape[0]):
w1 = np.reshape(x['W_rnn'][i,:], (50,50))>0.2
w2 = np.reshape(x['W_out'][i,:], (3,50))>0.2
v = np.array([0]*40 + [1]*10 + [2]*3)
W1 = np.vstack((w1, w2))
d = W1.shape[0] - W1.shape[1]
W1 = np.hstack((W1, np.zeros((W1.shape[0], d))))
W.append(W1)
return W, v
def connection_probs(self):
unique_labels = np.unique(self.v).tolist() # [Inhibitory, Excitatory, Output]
N = len(unique_labels)
total = np.zeros([N,N], dtype=np.float32)
connection = np.zeros([N,N], dtype=np.float32)
        for (i, v_in), (j, v_out) in product(enumerate(self.v), enumerate(self.v)):
l_in = unique_labels.index(v_in)
l_out = unique_labels.index(v_out)
if i != j:
total[l_in, l_out] += 1
if self.W[j,i] > 0:
connection[l_in, l_out] += 1
self.p_connection = np.zeros((N,N), dtype = np.float32)
for n1, n2 in product(range(N), range(N)):
self.p_connection[n1, n2] = connection[n1, n2]/total[n1,n2] if total[n1,n2] != 0 else -1
def find_motifs(self, W ,v):
W, v = self.prune_network(W, v)
for i in self.motif_sizes:
self.find_motif_set_size(W, v, i)
def return_motifs(self):
return self.motifs
def find_motif_set_size(self,W, v, c):
N = W.shape[0]
for i0 in range(N):
ind0 = np.where((W[:, i0] > 0) + (W[i0, :] > 0))[0]
for i1 in np.setdiff1d(ind0, i0):
if c == 2:
self.motif_properties(W, v, [i0, i1])
else:
ind1 = np.where((W[:, i1] > 0) + (W[i1, :] > 0))[0]
for i2 in np.setdiff1d(ind1,[i0,i1]):
if c == 3:
self.motif_properties(W, v, [i0, i1, i2])
else:
ind2 = np.where((W[:, i2] > 0) + (W[i2, :] > 0))[0]
for i3 in np.setdiff1d(ind2,[i0,i1,i2]):
if c == 4:
self.motif_properties(W, v, [i0, i1, i2, i3])
else:
ind3 = np.where((W[:, i3] > 0) + (W[i3, :] > 0))[0]
for i4 in np.setdiff1d(ind3,[i0,i1,i2,i3]):
if c == 5:
self.motif_properties(W, v, [i0, i1, i2, i3, i4])
else:
                                        ind4 = np.where((W[:, i4] > 0) + (W[i4, :] > 0))
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
os.environ["MKL_THREADING_LAYER"] = "GNU"
os.environ["CUDA_VISIBLE_DEVICES"] = "3"
import numpy as np
import numpy.random as npr
import tensorflow as tf
import pickle
import sys
import argparse
import traceback
#my imports
from pddm.policies.policy_random import Policy_Random
from pddm.utils.helper_funcs import *
from pddm.regressors.dynamics_model import Dyn_Model
from pddm.policies.mpc_rollout import MPCRollout
from pddm.utils.loader import Loader
from pddm.utils.saver import Saver
from pddm.utils.data_processor import DataProcessor
from pddm.utils.data_structures import *
from pddm.utils.convert_to_parser_args import convert_to_parser_args
from pddm.utils import config_reader
##############added by Hamada##########
from pddm.utils.utils import eei_inverted_pendulum
#from pddm.palameter.palameter
SCRIPT_DIR = os.path.dirname(__file__)
def run_job(args, simulation_version,control_delta,reward_type,use_different_env,save_dir=None):
# Continue training from an existing iteration
if args.continue_run>-1:
save_dir = os.path.join(SCRIPT_DIR, args.continue_run_filepath)
tf.reset_default_graph()
with tf.Session(config=get_gpu_config(args.use_gpu, args.gpu_frac)) as sess:
##############################################
### initialize some commonly used parameters (from args)
##############################################
env_name = args.env_name
continue_run = args.continue_run
K = args.K
num_iters = args.num_iters
num_trajectories_per_iter = args.num_trajectories_per_iter
horizon = args.horizon
### set seeds
npr.seed(args.seed)
tf.set_random_seed(args.seed)
#######################
### hardcoded args
#######################
### data types
args.tf_datatype = tf.float32
args.np_datatype = np.float32
### supervised learning noise, added to the training dataset
args.noiseToSignal = 0.01
### these are for *during* MPC rollouts,
# they allow you to run the H-step candidate actions on the real dynamics
# and compare the model's predicted outcomes vs. the true outcomes
execute_sideRollouts = False
plot_sideRollouts = False
########################################
### create loader, env, rand policy
########################################
loader = Loader(save_dir)
env, dt_from_xml = create_env(env_name)
if use_different_env:
if env_name=="pddm_vreacher-v0":
differnt_env, dt_from_xml = create_env('pddm_vreacher_ngr-v0')
elif env_name=="pddm_vreacher-v11":
differnt_env, dt_from_xml = create_env('pddm_vreacher_ngr-v11')
elif env_name == "pddm_vreacher-v14":
differnt_env, dt_from_xml = create_env('pddm_vreacher_ngr-v14')
elif env_name == "pddm_vreacher-v214":
differnt_env, dt_from_xml = create_env('pddm_vreacher_ngr-v214')
elif env_name == "pddm_cheetah-v0":
differnt_env, dt_from_xml = create_env('pddm_cheetah_cgr-v0')
elif env_name == "pddm_cheetah-v2":
differnt_env, dt_from_xml = create_env('pddm_cheetah_cgr-v2')
elif env_name == "pddm_cheetah-v6":
differnt_env, dt_from_xml = create_env('pddm_cheetah_cgr-v6')
elif env_name == "pddm_cheetah_pre-v2":
differnt_env, dt_from_xml = create_env('pddm_cheetah_pre_cgr-v2')
elif env_name == "pddm_vreacher_ngr-v214":
differnt_env, dt_from_xml = create_env('pddm_vreacher-v214')
elif env_name == "pddm_v4reacher-v4":
differnt_env, dt_from_xml = create_env('pddm_v4reacher_ngr-v4')
#env.env.env.render()
args.dt_from_xml = dt_from_xml
random_policy = Policy_Random(env.env)
if use_different_env:
random_policy_diff = Policy_Random(differnt_env.env)
##hamada added
### set seeds
#env.env.env.seed(args.seed)
#doing a render here somehow allows it to not produce a seg fault error later when visualizing
if args.visualize_MPC_rollout:
render_env(env)
render_stop(env)
print("simulation frame skip and dt check {} and {} ".format(env.env.env.frame_skip,env.env.env.dt))
#################################################
### initialize or load in info
#################################################
#check for a variable which indicates that we should duplicate each data point
#e.g., for baoding, since ballA/B are interchangeable, we store as 2 different points
if 'duplicateData_switchObjs' in dir(env.unwrapped_env):
duplicateData_switchObjs = True
indices_for_switching = [env.unwrapped_env.objInfo_start1, env.unwrapped_env.objInfo_start2,
env.unwrapped_env.targetInfo_start1, env.unwrapped_env.targetInfo_start2]
else:
duplicateData_switchObjs = False
indices_for_switching=[]
#initialize data processor
data_processor = DataProcessor(args, duplicateData_switchObjs, indices_for_switching)
#start a fresh run
if continue_run==-1:
#random training/validation data
if args.load_existing_random_data:
rollouts_trainRand, rollouts_valRand = loader.load_initialData()
else:
"""#training
rollouts_trainRand = collect_random_rollouts(
env, random_policy, args.num_rand_rollouts_train,
args.rand_rollout_length, dt_from_xml, args)
#validation
rollouts_valRand = collect_random_rollouts(
env, random_policy, args.num_rand_rollouts_val,
args.rand_rollout_length, dt_from_xml, args)"""
if use_different_env:
print("use_different_env")
# training
rollouts_trainRand = collect_random_rollouts(
env, random_policy, args.num_rand_rollouts_train,
args.rand_rollout_length, dt_from_xml, args)
# validation
rollouts_valRand = collect_random_rollouts(
env, random_policy, args.num_rand_rollouts_val,
args.rand_rollout_length, dt_from_xml, args)
# convert (rollouts --> dataset)
dataset_trainRand = data_processor.convertRolloutsToDatasets(
rollouts_trainRand)
dataset_valRand = data_processor.convertRolloutsToDatasets(
rollouts_valRand)
####for different env
# training different env
rollouts_trainRand_diff = collect_random_rollouts(
differnt_env, random_policy_diff, args.num_rand_rollouts_train,
args.rand_rollout_length, dt_from_xml, args)
# validation different env
rollouts_valRand_diff = collect_random_rollouts(
differnt_env, random_policy_diff, args.num_rand_rollouts_val,
args.rand_rollout_length, dt_from_xml, args)
# convert (rollouts --> dataset)
dataset_trainRand_diff = data_processor.convertRolloutsToDatasets(
rollouts_trainRand_diff)
dataset_valRand_diff = data_processor.convertRolloutsToDatasets(
rollouts_valRand_diff)
# concat this dataset with the existing dataset_trainRand
dataset_trainRand = concat_datasets(dataset_trainRand,
dataset_trainRand_diff)
dataset_valRand = concat_datasets(dataset_valRand,
dataset_valRand_diff)
else: #default
rollouts_trainRand = collect_random_rollouts(
env, random_policy, args.num_rand_rollouts_train,
args.rand_rollout_length, dt_from_xml, args)
# validation
rollouts_valRand = collect_random_rollouts(
env, random_policy, args.num_rand_rollouts_val,
args.rand_rollout_length, dt_from_xml, args)
# convert (rollouts --> dataset)
dataset_trainRand = data_processor.convertRolloutsToDatasets(
rollouts_trainRand)
dataset_valRand = data_processor.convertRolloutsToDatasets(
rollouts_valRand)
#onPol train/val data
dataset_trainOnPol = Dataset()
rollouts_trainOnPol = []
rollouts_valOnPol = []
#lists for saving
trainingLoss_perIter = []
rew_perIter = []
scores_perIter = []
trainingData_perIter = []
if simulation_version=="inverted_pendulum" or simulation_version=="reacher":
EEIss_perIter=[]
ERss_perIter = []
ENEss_perIter = []
if use_different_env:
rew_perIter_diff = []
scores_perIter_diff = []
#initialize counter
counter = 0
#continue from an existing run
else:
#load data
iter_data = loader.load_iter(continue_run-1)
#random data
rollouts_trainRand, rollouts_valRand = loader.load_initialData()
#onPol data
rollouts_trainOnPol = iter_data.train_rollouts_onPol
rollouts_valOnPol = iter_data.val_rollouts_onPol
#convert (rollouts --> dataset)
dataset_trainRand = data_processor.convertRolloutsToDatasets(
rollouts_trainRand)
dataset_valRand = data_processor.convertRolloutsToDatasets(
rollouts_valRand)
#lists for saving
trainingLoss_perIter = iter_data.training_losses
rew_perIter = iter_data.rollouts_rewardsPerIter
scores_perIter = iter_data.rollouts_scoresPerIter
trainingData_perIter = iter_data.training_numData
if simulation_version=="inverted_pendulum" or simulation_version=="reacher":
EEIss_perIter=iter_data.rollouts_EEIPerIter
ERss_perIter = iter_data.rollouts_ERPerIter
ENEss_perIter = iter_data.rollouts_ENEPerIter
#initialize counter
counter = continue_run
#how many iters to train for
num_iters += continue_run
### check data dims
inputSize, outputSize, acSize = check_dims(dataset_trainRand, env)
### amount of data
numData_train_rand = get_num_data(rollouts_trainRand)
if use_different_env:
numData_train_rand = get_num_data(rollouts_trainRand)+get_num_data(rollouts_trainRand_diff)
##############################################
### dynamics model + controller
##############################################
dyn_models = Dyn_Model(inputSize, outputSize, acSize, sess, params=args)
mpc_rollout = MPCRollout(env, dyn_models, random_policy,
execute_sideRollouts, plot_sideRollouts, args,save_dir,counter,simulation_version,control_delta,reward_type)
if use_different_env:
mpc_rollout_diff = MPCRollout(differnt_env, dyn_models, random_policy,
execute_sideRollouts, plot_sideRollouts, args, save_dir, counter,simulation_version, control_delta, reward_type)
### init TF variables
sess.run(tf.global_variables_initializer())
##############################################
### saver
##############################################
saver = Saver(save_dir, sess)
if use_different_env:
print("use_different_env")
rollouts_valRand = rollouts_valRand + rollouts_valRand_diff
rollouts_trainRand = rollouts_trainRand + rollouts_trainRand_diff
saver.save_initialData(args, rollouts_trainRand, rollouts_valRand)
##############################################
### THE MAIN LOOP
##############################################
firstTime = True
rollouts_info_prevIter, list_mpes, list_scores, list_rewards = None, None, None, None
while counter < num_iters:
#init vars for this iteration
saver_data = DataPerIter()
saver.iter_num = counter
#onPolicy validation doesn't exist yet, so just make it same as rand validation
if counter==0:
rollouts_valOnPol = rollouts_valRand
#convert (rollouts --> dataset)
dataset_trainOnPol = data_processor.convertRolloutsToDatasets(
rollouts_trainOnPol)
dataset_valOnPol = data_processor.convertRolloutsToDatasets(
rollouts_valOnPol)
# amount of data
numData_train_onPol = get_num_data(rollouts_trainOnPol)
# mean/std of all data
data_processor.update_stats(dyn_models, dataset_trainRand, dataset_trainOnPol)
#preprocess datasets to mean0/std1 + clip actions
preprocessed_data_trainRand = data_processor.preprocess_data(
dataset_trainRand)
preprocessed_data_valRand = data_processor.preprocess_data(
dataset_valRand)
preprocessed_data_trainOnPol = data_processor.preprocess_data(
dataset_trainOnPol)
preprocessed_data_valOnPol = data_processor.preprocess_data(
dataset_valOnPol)
#convert datasets (x,y,z) --> training sets (inp, outp)
inputs, outputs = data_processor.xyz_to_inpOutp(
preprocessed_data_trainRand)
inputs_val, outputs_val = data_processor.xyz_to_inpOutp(
preprocessed_data_valRand)
inputs_onPol, outputs_onPol = data_processor.xyz_to_inpOutp(
preprocessed_data_trainOnPol)
inputs_val_onPol, outputs_val_onPol = data_processor.xyz_to_inpOutp(
preprocessed_data_valOnPol)
#####################################
## Training the model
#####################################
if (not (args.print_minimal)):
print("\n#####################################")
print("Training the dynamics model..... iteration ", counter)
print("#####################################\n")
print(" amount of random data: ", numData_train_rand)
print(" amount of onPol: data: ", numData_train_onPol)
### copy train_onPol until it's big enough
if len(inputs_onPol)>0:
while inputs_onPol.shape[0]<inputs.shape[0]:
inputs_onPol = np.concatenate([inputs_onPol, inputs_onPol])
outputs_onPol = np.concatenate(
[outputs_onPol, outputs_onPol])
### copy val_onPol until it's big enough
while inputs_val_onPol.shape[0]<args.batchsize:
inputs_val_onPol = np.concatenate(
[inputs_val_onPol, inputs_val_onPol], 0)
outputs_val_onPol = np.concatenate(
[outputs_val_onPol, outputs_val_onPol], 0)
#re-initialize all vars (randomly) if training from scratch
##restore model if doing continue_run
if args.warmstart_training:
if firstTime:
if continue_run>0:
restore_path = save_dir + '/models/model_aggIter' + str(continue_run-1) + '.ckpt'
saver.tf_saver.restore(sess, restore_path)
print("\n\nModel restored from ", restore_path, "\n\n")
else:
sess.run(tf.global_variables_initializer())
#number of training epochs
if counter==0: nEpoch_use = args.nEpoch_init
else: nEpoch_use = args.nEpoch
#train model or restore model
if args.always_use_savedModel:
if continue_run>0:
restore_path = save_dir + '/models/model_aggIter' + str(continue_run-1) + '.ckpt'
else:
restore_path = save_dir + '/models/finalModel.ckpt'
saver.tf_saver.restore(sess, restore_path)
print("\n\nModel restored from ", restore_path, "\n\n")
#empty vars, for saving
training_loss = 0
training_lists_to_save = dict(
training_loss_list = 0,
val_loss_list_rand = 0,
val_loss_list_onPol = 0,
val_loss_list_xaxis = 0,
rand_loss_list = 0,
onPol_loss_list = 0,)
else:
## train model
training_loss, training_lists_to_save = dyn_models.train(
inputs,
outputs,
inputs_onPol,
outputs_onPol,
nEpoch_use,
inputs_val=inputs_val,
outputs_val=outputs_val,
inputs_val_onPol=inputs_val_onPol,
outputs_val_onPol=outputs_val_onPol)
#saving rollout info
rollouts_info = []
list_rewards = []
list_scores = []
list_mpes = []
list_EEIss =[]
list_ERss = []
list_ENEss = []
list_rewards_diff=[]
list_scores_diff=[]
if not args.print_minimal:
print("\n#####################################")
print("performing on-policy MPC rollouts... iter ", counter)
print("#####################################\n")
for rollout_num in range(num_trajectories_per_iter):
###########################################
########## perform 1 MPC rollout
###########################################
if not args.print_minimal:
print("\n####################### Performing MPC rollout #",
rollout_num)
#reset env randomly
starting_observation, starting_state = env.reset(return_start_state=True)
rollout_info = mpc_rollout.perform_rollout(
starting_state,
starting_observation,
counter,
rollout_num,
controller_type=args.controller_type,
take_exploratory_actions=False)
if use_different_env:
rollout_info_diff = mpc_rollout_diff.perform_rollout(
starting_state,
starting_observation,
counter,
rollout_num,
controller_type=args.controller_type,
take_exploratory_actions=False)
################added by Hamada####
# Note: can sometimes set take_exploratory_actions=True
# in order to use ensemble disagreement for exploration
###########################################
####### save rollout info (if long enough)
###########################################
if len(rollout_info['observations']) > K:
list_rewards.append(rollout_info['rollout_rewardTotal'])
list_scores.append(rollout_info['rollout_meanFinalScore'])
list_mpes.append(np.mean(rollout_info['mpe_1step']))
rollouts_info.append(rollout_info)
if use_different_env:
rollouts_info.append(rollout_info_diff)
list_rewards_diff.append(rollout_info_diff['rollout_rewardTotal'])
list_scores_diff.append(rollout_info_diff['rollout_meanFinalScore'])
if simulation_version == "inverted_pendulum" or simulation_version == "reacher" :
list_EEIss.append(rollout_info['Final_EEI'])
list_ERss.append(rollout_info['Final_ER'])
list_ENEss.append(rollout_info['Final_ENE'])
rollouts_info_prevIter = rollouts_info.copy()
# visualize, if desired
if args.visualize_MPC_rollout:
print("\n\nPAUSED FOR VISUALIZATION. Continue when ready to visualize.")
import IPython
IPython.embed()
for vis_index in range(len(rollouts_info)):
visualize_rendering(rollouts_info[vis_index], env, args)
#########################################################
### aggregate some random rollouts into training data
#########################################################
num_rand_rollouts = 5
rollouts_rand = collect_random_rollouts(
env, random_policy, num_rand_rollouts, args.rollout_length,
dt_from_xml, args)
#convert (rollouts --> dataset)
dataset_rand_new = data_processor.convertRolloutsToDatasets(
rollouts_rand)
#concat this dataset with the existing dataset_trainRand
dataset_trainRand = concat_datasets(dataset_trainRand,
dataset_rand_new)
if use_different_env:
rollouts_rand_diff = collect_random_rollouts(
differnt_env, random_policy_diff, num_rand_rollouts, args.rollout_length,
dt_from_xml, args)
# convert (rollouts --> dataset)
dataset_rand_new_diff = data_processor.convertRolloutsToDatasets(rollouts_rand_diff)
# concat this dataset with the existing dataset_trainRand
dataset_trainRand = concat_datasets(dataset_trainRand,
dataset_rand_new_diff)
#########################################################
### aggregate MPC rollouts into train/val
#########################################################
num_mpc_rollouts = len(rollouts_info)
print("length MPC rollout {}".format(num_mpc_rollouts))
rollouts_train = []
rollouts_val = []
for i in range(num_mpc_rollouts):
rollout = Rollout(rollouts_info[i]['observations'],
rollouts_info[i]['actions'],
rollouts_info[i]['rollout_rewardTotal'],
rollouts_info[i]['starting_state'])
if i<int(num_mpc_rollouts * 0.9):
rollouts_train.append(rollout)
else:
rollouts_val.append(rollout)
#aggregate into training data
if counter==0: rollouts_valOnPol = []
rollouts_trainOnPol = rollouts_trainOnPol + rollouts_train
rollouts_valOnPol = rollouts_valOnPol + rollouts_val
#########################################################
### save everything about this iter of model training
#########################################################
trainingData_perIter.append(numData_train_rand +
numData_train_onPol)
trainingLoss_perIter.append(training_loss)
### stage relevant info for saving
saver_data.training_numData = trainingData_perIter
saver_data.training_losses = trainingLoss_perIter
saver_data.training_lists_to_save = training_lists_to_save
# Note: the on-policy rollouts include curr iter's rollouts
# (so next iter can be directly trained on these)
saver_data.train_rollouts_onPol = rollouts_trainOnPol
saver_data.val_rollouts_onPol = rollouts_valOnPol
saver_data.normalization_data = data_processor.get_normalization_data()
saver_data.counter = counter
### save all info from this training iteration
saver.save_model()
saver.save_training_info(saver_data)
#########################################################
### save everything about this iter of MPC rollouts
#########################################################
# append onto rewards/scores
rew_perIter.append([np.mean(list_rewards), np.std(list_rewards)])
scores_perIter.append([np.mean(list_scores), np.std(list_scores)])
##aded hamada
if simulation_version=="inverted_pendulum" or simulation_version=="reacher":
print("EEI {}".format(np.array(list_EEIss)))
print("EEI shape {}".format( | np.array(list_EEIss) | numpy.array |
#!/usr/bin/env python3
import os
import glob
import re
import sys
import math
TIMEOUT = 100
# use cases and their directory names
tests = [
"CP3-4.8.5", "CP1-4.8.5", "CP3-4.8.9", "CP1-4.8.9",
"noSeqCon-CP3-4.8.5", "noSeqCon-CP1-4.8.5", "noSeqCon-CP3-4.8.9", "noSeqCon-CP1-4.8.9",
"nolambda-CP3-4.8.5", "nolambda-CP1-4.8.5", "nolambda-CP3-4.8.9", "nolambda-CP1-4.8.9"
]
loc_orig_5 = os.path.join('Logs_DLL_8.20', 'Logs_orig_4.8.5', '*.trace')
loc_orig_9 = os.path.join('Logs_DLL_8.20', 'Logs_orig_4.8.9', '*.trace')
loc_noseqcon_5 = os.path.join('Logs_DLL_8.20', 'Logs_noseqcon_4.8.5', '*.trace')
loc_noseqcon_9 = os.path.join('Logs_DLL_8.20', 'Logs_noseqcon_4.8.9', '*.trace')
loc_nolambda_5 = os.path.join('Logs_DLL_8.20', 'Logs_nolambda_4.8.5', '*.trace')
loc_nolambda_9 = os.path.join('Logs_DLL_8.20', 'Logs_nolambda_4.8.9', '*.trace')
file_orig_5 = glob.glob(loc_orig_5)
file_orig_9 = glob.glob(loc_orig_9)
file_noseqcon_5 = glob.glob(loc_noseqcon_5)
file_noseqcon_9 = glob.glob(loc_noseqcon_9)
file_nolambda_5 = glob.glob(loc_nolambda_5)
file_nolambda_9 = glob.glob(loc_nolambda_9)
allinfo_Expand = {}
allinfo_Remove = {}
allinfo_InsertAfter = {}
allinfo_InsertBefore = {}
def get_time (files, index):
for f in files:
outfile = open(f, 'r')
data = outfile.readlines()
outfile.close()
for i in range(0, len(data)):
if 'Verifying Impl$$_module.__default.Expand ...' in data[i]:
time = re.findall("\[([0-9.]*) s, ([0-9.]*) proof obligations\] ([a-z]+)", data[i + 1])
if len(time) > 0:
if time[0][2] == "verified":
if 'CP3' in f:
allinfo_Expand[tests[index]] = allinfo_Expand.get(tests[index], [])
allinfo_Expand[tests[index]] += [float(time[0][0])]
else:
allinfo_Expand[tests[index+1]] = allinfo_Expand.get(tests[index+1], [])
allinfo_Expand[tests[index+1]] += [float(time[0][0])]
else:
if time[0][2] == "timed":
if 'CP3' in f:
allinfo_Expand[tests[index]] = allinfo_Expand.get(tests[index], [])
allinfo_Expand[tests[index]] += [float(TIMEOUT)]
else:
allinfo_Expand[tests[index+1]] = allinfo_Expand.get(tests[index+1], [])
allinfo_Expand[tests[index+1]] += [float(TIMEOUT)]
else:
allinfo_Expand[tests[index]] = allinfo_Expand.get(tests[index], [])
allinfo_Expand[tests[index+1]] = allinfo_Expand.get(tests[index+1], [])
if 'Verifying Impl$$_module.__default.Remove ...' in data[i]:
time = re.findall("\[([0-9.]*) s, ([0-9.]*) proof obligations\] ([a-z]+)", data[i + 1])
if len(time) > 0:
if time[0][2] == "verified":
if 'CP3' in f:
allinfo_Remove[tests[index]] = allinfo_Remove.get(tests[index], [])
allinfo_Remove[tests[index]] += [float(time[0][0])]
else:
allinfo_Remove[tests[index+1]] = allinfo_Remove.get(tests[index+1], [])
allinfo_Remove[tests[index+1]] += [float(time[0][0])]
else:
if time[0][2] == "timed":
if 'CP3' in f:
allinfo_Remove[tests[index]] = allinfo_Remove.get(tests[index], [])
allinfo_Remove[tests[index]] += [float(TIMEOUT)]
else:
allinfo_Remove[tests[index+1]] = allinfo_Remove.get(tests[index+1], [])
allinfo_Remove[tests[index+1]] += [float(TIMEOUT)]
else:
allinfo_Remove[tests[index]] = allinfo_Remove.get(tests[index], [])
allinfo_Remove[tests[index+1]] = allinfo_Remove.get(tests[index+1], [])
if 'Verifying Impl$$_module.__default.InsertAfter ...' in data[i]:
time = re.findall("\[([0-9.]*) s, ([0-9.]*) proof obligations\] ([a-z]+)", data[i + 1])
if len(time) > 0:
if time[0][2] == "verified":
if 'CP3' in f:
allinfo_InsertAfter[tests[index]] = allinfo_InsertAfter.get(tests[index], [])
allinfo_InsertAfter[tests[index]] += [float(time[0][0])]
else:
allinfo_InsertAfter[tests[index+1]] = allinfo_InsertAfter.get(tests[index+1], [])
allinfo_InsertAfter[tests[index+1]] += [float(time[0][0])]
else:
if time[0][2] == "timed":
if 'CP3' in f:
allinfo_InsertAfter[tests[index]] = allinfo_InsertAfter.get(tests[index], [])
allinfo_InsertAfter[tests[index]] += [float(TIMEOUT)]
else:
allinfo_InsertAfter[tests[index+1]] = allinfo_InsertAfter.get(tests[index+1], [])
allinfo_InsertAfter[tests[index+1]] += [float(TIMEOUT)]
else:
allinfo_InsertAfter[tests[index]] = allinfo_InsertAfter.get(tests[index], [])
allinfo_InsertAfter[tests[index+1]] = allinfo_InsertAfter.get(tests[index+1], [])
if 'Verifying Impl$$_module.__default.InsertBefore ...' in data[i]:
time = re.findall("\[([0-9.]*) s, ([0-9.]*) proof obligations\] ([a-z]+)", data[i + 1])
if len(time) > 0:
if time[0][2] == "verified":
if 'CP3' in f:
allinfo_InsertBefore[tests[index]] = allinfo_InsertBefore.get(tests[index], [])
allinfo_InsertBefore[tests[index]] += [float(time[0][0])]
else:
allinfo_InsertBefore[tests[index+1]] = allinfo_InsertBefore.get(tests[index+1], [])
allinfo_InsertBefore[tests[index+1]] += [float(time[0][0])]
else:
if time[0][2] == "timed":
if 'CP3' in f:
allinfo_InsertBefore[tests[index]] = allinfo_InsertBefore.get(tests[index], [])
allinfo_InsertBefore[tests[index]] += [float(TIMEOUT)]
else:
allinfo_InsertBefore[tests[index+1]] = allinfo_InsertBefore.get(tests[index+1], [])
allinfo_InsertBefore[tests[index+1]] += [float(TIMEOUT)]
else:
allinfo_InsertBefore[tests[index]] = allinfo_InsertBefore.get(tests[index], [])
allinfo_InsertBefore[tests[index+1]] = allinfo_InsertBefore.get(tests[index+1], [])
get_time(file_orig_5, 0)
get_time(file_orig_9, 2)
get_time(file_noseqcon_5, 4)
get_time(file_noseqcon_9, 6)
get_time(file_nolambda_5, 8)
get_time(file_nolambda_9, 10)
# print(allinfo_Expand)
# print(allinfo_Remove)
# print(allinfo_InsertAfter)
# print(allinfo_InsertBefore)
# print a CSV
def show_csv(allinfo, info):
for test in tests:
if test in allinfo:
times = allinfo[test]
print(test + ", " + info),
for i in times:
print(", " + str(i)),
print ("\n"),
# show_csv(allinfo_Expand, "Expand")
# show_csv(allinfo_Remove, "Remove")
# show_csv(allinfo_InsertAfter, "InsertAfter")
# show_csv(allinfo_InsertBefore, "InsertBefore")
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
matplotlib.rcParams.update({'font.size': 20})
Expand_cp3_5 = np.array(allinfo_Expand[tests[0]])
Expand_cp1_5 = np.array(allinfo_Expand[tests[1]])
Expand_cp3_9 = np.array(allinfo_Expand[tests[2]])
Expand_cp1_9 = np.array(allinfo_Expand[tests[3]])
Expand_noseqcon_cp3_5 = np.array(allinfo_Expand[tests[4]])
Expand_noseqcon_cp1_5 = np.array(allinfo_Expand[tests[5]])
Expand_noseqcon_cp3_9 = np.array(allinfo_Expand[tests[6]])
Expand_noseqcon_cp1_9 = np.array(allinfo_Expand[tests[7]])
Expand_nolambda_cp3_5 = np.array(allinfo_Expand[tests[8]])
Expand_nolambda_cp1_5 = np.array(allinfo_Expand[tests[9]])
Expand_nolambda_cp3_9 = np.array(allinfo_Expand[tests[10]])
Expand_nolambda_cp1_9 = np.array(allinfo_Expand[tests[11]])
Expand_cp3_5_mean = np.mean(Expand_cp3_5)
Expand_cp3_5_std = np.std(Expand_cp3_5)
Expand_cp1_5_mean = np.mean(Expand_cp1_5)
Expand_cp1_5_std = np.std(Expand_cp1_5)
Expand_cp3_9_mean = np.mean(Expand_cp3_9)
Expand_cp3_9_std = np.std(Expand_cp3_9)
Expand_cp1_9_mean = np.mean(Expand_cp1_9)
Expand_cp1_9_std = np.std(Expand_cp1_9)
Expand_noseqcon_cp3_5_mean = np.mean(Expand_noseqcon_cp3_5)
Expand_noseqcon_cp3_5_std = np.std(Expand_noseqcon_cp3_5)
Expand_noseqcon_cp1_5_mean = np.mean(Expand_noseqcon_cp1_5)
Expand_noseqcon_cp1_5_std = np.std(Expand_noseqcon_cp1_5)
Expand_noseqcon_cp3_9_mean = np.mean(Expand_noseqcon_cp3_9)
Expand_noseqcon_cp3_9_std = np.std(Expand_noseqcon_cp3_9)
Expand_noseqcon_cp1_9_mean = np.mean(Expand_noseqcon_cp1_9)
from robot.ur_robot import URRobot
from vision.realsense_d415_tcp import RealsenseD415TCP
import utils.utils as utils
import vision.utils as visionutils
from utils.config_loader import ConfigLoader
from datetime import datetime
import numpy as np
import cv2
import json
import time
from scipy import optimize
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import argparse
import os
class Configuration(object):
def __init__(self, config_file):
config = ConfigLoader.load(args.config_file)
#creates a class which instance attributes are based on the config dictionary
for k, v in config.items():
setattr(self, k, v)
# Checkerboard size as a tuple.
self.checkerboard_size = (self.checkerboard_size, self.checkerboard_size)
self.reference_point_offset = np.array(self.reference_point_offset)
self.tool_orientation = np.array(self.tool_orientation)
self.workspace_limits = np.array(self.workspace_limits)
@staticmethod
def dump_sample_file():
config = {
"calibration_type": "EYE_IN_HAND",
"robot_config_file":"PATH/TO/FILE",
"camera_config_file":"PATH/TO/FILE",
"workspace_limits": [[-0.490, 0.390], [-0.645, -0.185], [0.462, 0.710]],
"calib_grid_step": 0.05,
"reference_point_offset": [[0.74550],[-0.00895],[0.04900], [1]],
"tool_orientation": [1.226,-2.890,0.00],
"checkerboard_size": 3
}
with open('configurations/sample_configuration.json', 'w') as fp:
json.dump(config, fp)
def calibrate(config):
# Construct 3D calibration grid across workspace
gridspace_x = np.linspace(config.workspace_limits[0][0], config.workspace_limits[0][1], int(1 + (config.workspace_limits[0][1] - config.workspace_limits[0][0])/config.calib_grid_step))
gridspace_y = np.linspace(config.workspace_limits[1][0], config.workspace_limits[1][1], int(1 + (config.workspace_limits[1][1] - config.workspace_limits[1][0])/config.calib_grid_step))
gridspace_z = np.linspace(config.workspace_limits[2][0], config.workspace_limits[2][1], int(1 + (config.workspace_limits[2][1] - config.workspace_limits[2][0])/config.calib_grid_step))
calib_grid_x, calib_grid_y, calib_grid_z = np.meshgrid(gridspace_x, gridspace_y, gridspace_z)
num_calib_grid_pts = calib_grid_x.shape[0]*calib_grid_x.shape[1]*calib_grid_x.shape[2]
calib_grid_x.shape = (num_calib_grid_pts,1)
calib_grid_y.shape = (num_calib_grid_pts,1)
calib_grid_z.shape = (num_calib_grid_pts,1)
calib_grid_pts = np.concatenate((calib_grid_x, calib_grid_y, calib_grid_z), axis=1)
#measured_pts: points generated by sampling out of the config.workspace_limits[] + checkerboard offset from tool.
# It is the position of the tool when taking a picture, ideally this is the position of the center of the checkerboard in robot world coordinates.
# This is achieved easily when the camera is fixed and the robot moves the checkerboard in the image.
# As the robot position + checkerboard offset from tool = the position of the center of the fixed checkerboard in robot world coordinates.
measured_pts = []
#obseverved_pts: This is the position X,Y,Z in meters of the center of the checkerboard with respect to the origin of the camera in the camera world coordinates.
observed_pts = []
#observed_pix: Pixel locations of the center of the checkerboard in the image.
observed_pix = []
print(f'Going to calibrate in {num_calib_grid_pts} different points.')
# Connect to the robot
print('Connecting to robot...')
robot = URRobot(config.robot_config_file)
# Slow down robot to SAFE values
robot.activate_safe_mode()
robot.go_home()
# Connect to the camera
print('Connecting to camera...')
camera = RealsenseD415TCP(config.camera_config_file)
# Move robot to each calibration point in workspace
print('Collecting data...')
for calib_pt_idx in range(num_calib_grid_pts):
tool_position = calib_grid_pts[calib_pt_idx,:]
print('Calibration point: ', calib_pt_idx, '/', num_calib_grid_pts)
robot.move_to_pose(tool_position, config.tool_orientation)
time.sleep(1)
# Wait for a coherent pair of frames: depth and color
camera_color_img, camera_depth_img = camera.get_state()
if not (camera_depth_img is None and camera_color_img is None):
checkerboard_pix = visionutils.find_checkerboard(camera_color_img, config.checkerboard_size)
if checkerboard_pix is not None:
checkerboard_z = camera_depth_img[checkerboard_pix[1]][checkerboard_pix[0]]
checkerboard_x = np.multiply(checkerboard_pix[0]-camera.intrinsics[0][2], checkerboard_z/camera.intrinsics[0][0])
checkerboard_y = np.multiply(checkerboard_pix[1]-camera.intrinsics[1][2], checkerboard_z/camera.intrinsics[1][1])
if checkerboard_z != 0:
observed_pts.append([checkerboard_x, checkerboard_y, checkerboard_z])
observed_pix.append(checkerboard_pix)
# Get current robot pose
current_pose = robot.get_cartesian_pose()
if config.calibration_type == "EYE_IN_HAND":
rot_vec = np.array(current_pose)
rot_vec.shape = (1,6)
T_be = utils.V2T(rot_vec)
invT_be = np.linalg.inv(T_be)
# Save calibration point and observed checkerboard center
checker2tool = np.dot(invT_be, config.reference_point_offset)
checker2tool = checker2tool[:3, 0]
measured_pts.append(checker2tool)
print('Measured points: ', checker2tool)
else: # "EYE_TO_HAND"
tool_position = current_pose[:3] + config.reference_point_offset.flatten()[:3]
measured_pts.append(tool_position)
print('Measured points: ', tool_position)
# Save calibration point and observed checkerboard center
print('Observed points: ', [checkerboard_x,checkerboard_y,checkerboard_z])
print('Checkerboard pix: ', checkerboard_pix)
else:
print('checkerboard Z == 0')
else:
print('No checkerboard found')
else:
print('No depth or color frames')
# Move robot back to home pose
measured_pts = np.asarray(measured_pts)
observed_pts = np.asarray(observed_pts)
observed_pix = np.asarray(observed_pix)
world2camera = np.eye(4)
print('Total valid points: ', measured_pts.shape[0], '/', num_calib_grid_pts)
# Estimate rigid transform with SVD (from <NAME>)
def get_rigid_transform(A, B):
assert len(A) == len(B)
N = A.shape[0]; # Total points
centroid_A = np.mean(A, axis=0) # Find centroids
centroid_B = np.mean(B, axis=0)
AA = A - np.tile(centroid_A, (N, 1)) # Centre the points
BB = B - np.tile(centroid_B, (N, 1))
H = np.dot(np.transpose(AA), BB) # Dot is matrix multiplication for array
U, S, Vt = np.linalg.svd(H) # Find the rotation matrix R
R = np.dot(Vt.T, U.T)
if np.linalg.det(R) < 0: # Special reflection case
Vt[2,:] *= -1
R = np.dot(Vt.T, U.T)
t = np.dot(-R, centroid_A.T) + centroid_B.T # Find the traslation t
return R, t
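    # Editor's note: get_rigid_transform is the standard Kabsch/SVD alignment. With the
    # centred point sets AA and BB, H = AA^T * BB and the SVD H = U * S * V^T give the
    # rotation R = V * U^T (the last row of V^T is flipped if det(R) < 0, to rule out a
    # reflection); the translation is then t = centroid_B - R * centroid_A.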
def get_rigid_transform_error(z_scale):
nonlocal measured_pts, observed_pts, observed_pix, world2camera
# Apply z offset and compute new observed points using camera intrinsics
observed_z = observed_pts[:,2:] * z_scale
observed_x = np.multiply(observed_pix[:,[0]]-camera.intrinsics[0][2],observed_z/camera.intrinsics[0][0])
observed_y = np.multiply(observed_pix[:,[1]]-camera.intrinsics[1][2],observed_z/camera.intrinsics[1][1])
new_observed_pts = np.concatenate((observed_x, observed_y, observed_z), axis=1)
# Estimate rigid transform between measured points and new observed points
R, t = get_rigid_transform(np.asarray(measured_pts), np.asarray(new_observed_pts))
t.shape = (3,1)
world2camera = np.concatenate((np.concatenate((R, t), axis=1),np.array([[0, 0, 0, 1]])), axis=0)
# Compute rigid transform error
registered_pts = np.dot(R,np.transpose(measured_pts)) + np.tile(t,(1,measured_pts.shape[0]))
error = np.transpose(registered_pts) - new_observed_pts
error = np.sum(np.multiply(error,error))
rmse = np.sqrt(error/measured_pts.shape[0])
return rmse
# Optimize z scale w.r.t. rigid transform error
print('Calibrating...')
z_scale_init = 1
optim_result = optimize.minimize(get_rigid_transform_error, np.asarray(z_scale_init), method='Nelder-Mead')
camera_depth_offset = optim_result.x
# Save camera optimized offset and camera pose
now = datetime.now()
date_time = now.strftime("%Y-%m-%d_%H:%M:%S")
print('Saving...')
path_dir = os.path.join(os.getcwd(), 'calibrations')
if not os.path.exists(path_dir):
os.makedirs(path_dir)
np.savetxt('./calibrations/' + date_time + '_' + config.calibration_type + '_camera_depth_scale.txt', camera_depth_offset, delimiter=' ')
get_rigid_transform_error(camera_depth_offset)
    camera_pose = np.linalg.inv(world2camera)
import numpy as np
import os
import inspect
from scipy.interpolate import RectBivariateSpline
from . import GP_matrix as GP
class Emulator:
"""The Mira-Titan Universe emulator for the halo mass function.
Attributes
-----------------
param_limits : dictionary
Lower and upper limits of the cosmological parameters.
z_arr : array
Redshifts of the emulator output.
"""
# Cosmology parameters
param_names = ['Ommh2', 'Ombh2', 'Omnuh2', 'n_s', 'h', 'sigma_8', 'w_0', 'w_b']
param_limits = {
'Ommh2': (.12, .155),
'Ombh2': (.0215, .0235),
'Omnuh2': (0, .01),
'n_s': (.85, 1.05),
'h': (.55, .85),
'sigma_8': (.7, .9),
'w_0': (-1.3, -.7),
'w_b': (.3, 1.3),
}
# Emulator redshifts
z_arr = np.array([2.02, 1.61, 1.01, 0.656, 0.434, 0.242, 0.101, 0.0])
z_arr_asc = z_arr[::-1]
def __init__(self):
"""No arguments are required when initializing an `Emulator` instance.
Upon initialization, a bunch of matrix operations are performed which
takes a few seconds."""
data_path = os.path.join(os.path.dirname(os.path.abspath(inspect.stack()[0][1])), 'data')
# Basis functions and PCA standardization parameters
# They have different lengths so they are stored in separate files
self.__PCA_means, self.__PCA_transform = [], []
for i in range(len(self.z_arr)):
filename = os.path.join(data_path, 'PCA_mean_std_transform_%d.npy'%i)
_tmp = np.load(filename)
self.__PCA_means.append(_tmp[0,:])
self.__PCA_transform.append(_tmp[1:,:])
self.__GP_means = np.load(os.path.join(data_path, 'GP_params_mean.npy'))
self.__GP_std = np.load(os.path.join(data_path, 'GP_params_std.npy'))
self.__facs = np.load(os.path.join(data_path, 'facs.npy'))
# GP input data
params_design = np.load(os.path.join(data_path, 'params_design_w0wb.npy'))
hyper_params = np.load(os.path.join(data_path, 'hyperparams.npy'))
input_means = np.load(os.path.join(data_path, 'means.npy'))
cov_mat_data = np.load(os.path.join(data_path, 'cov_n.npy'))
N_PC = input_means.shape[2]
self.__GPreg = [GP.GaussianProcess(params_design,
input_means[z_id],
cov_mat_data[z_id],
hyper_params[z_id,:N_PC],
hyper_params[z_id,N_PC:].reshape(N_PC,-1))
for z_id in range(len(self.z_arr))]
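    # Illustrative usage of `predict` below (editor's example; parameter values are
    # arbitrary but lie inside `param_limits`, and the packaged data files must be present):
    #   emu = Emulator()
    #   cosmo = {'Ommh2': 0.14, 'Ombh2': 0.022, 'Omnuh2': 0.001, 'n_s': 0.96,
    #            'h': 0.7, 'sigma_8': 0.8, 'w_0': -1.0, 'w_b': 0.7}
    #   dndlnM, rel_err = emu.predict(cosmo, z=0.0, m=np.logspace(13, 15, 30))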
def predict(self, requested_cosmology, z, m, get_errors=True, N_draw=1000):
"""Emulate the halo mass function dn/dlnM for the desired set of
cosmology parameters, redshifts, and masses.
:param requested_cosmology: The set of cosmology parameters for which
the mass function is requested. The parameters are `Ommh2`, `Ombh2`,
`Omnuh2`, `n_s`, `h`, `sigma_8`, `w_0`, `w_a`.
:type requested_cosmology: dict
:param z: The redshift(s) for which the mass function is requested.
:type z: float or array
:param m: The mass(es) for which the mass function is requested, in
units [Msun/h].
:type z: float or array
:param get_errors: Whether or not to compute error estimates (faster in
the latter case). Default is `True`.
:type get_errors: bool, optional
:param N_draw: How many sample mass functions to draw when computing the
error estimate. Applies only if `get_errors` is `True`.
:type N_draw: int, optional
Returns
-------
HMF: array_like
The mass function dN/dlnM in units[(h/Mpc)^3] and with shape
[len(z), len(m)].
HMF_rel_err: array_like
The relative error on dN/dlnM, with shape [len(z), len(m)]. Returns 0
if `get_errors` is `False`. For requested redshifts that are between
the redshifts for which the underlying emulator is defined, the
weighted errors from the neighboring redshifts are added in
quadrature.
"""
# Validate requested z and m
if np.any(z<0):
raise ValueError("z must be >= 0")
if np.any(z>self.z_arr_asc[-1]):
raise ValueError("z must be <= 2.02")
if np.any(m<1e13):
raise ValueError("m must be >= 1e13")
if np.any(m>1e16):
raise ValueError("m must be <= 1e16")
z = np.atleast_1d(z)
m = np.atleast_1d(m)
# Do we want error estimates?
if not get_errors:
N_draw = 0
# Call the actual emulator
emu_dict = self.predict_raw_emu(requested_cosmology, N_draw=N_draw)
# Set up interpolation grids
HMF_interp_input = np.log(np.nextafter(0,1)) * np.ones((len(self.z_arr_asc), 3001))
for i,emu_z in enumerate(self.z_arr_asc):
            HMF_interp_input[i,:len(emu_dict[emu_z]['HMF'])] = np.log(emu_dict[emu_z]['HMF'])
import os
# os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = '1,3'
import numpy as np
import torch
import time
from tensorboardX import SummaryWriter
from fastprogress import master_bar, progress_bar
from torch.autograd import Variable
from sklearn.utils.class_weight import compute_class_weight
from config import *
from utils.graph_utils import *
from utils.google_tsp_reader import GoogleTSPReader
from utils.plot_utils import *
from models.gcn_model import ResidualGatedGCNModel
from utils.model_utils import *
from datetime import datetime
# setting random seed to 1
if torch.cuda.is_available():
#print("CUDA available, using GPU ID {}".format(config.gpu_id))
dtypeFloat = torch.cuda.FloatTensor
dtypeLong = torch.cuda.LongTensor
torch.cuda.manual_seed_all(1)
else:
#print("CUDA not available")
dtypeFloat = torch.FloatTensor
dtypeLong = torch.LongTensor
torch.manual_seed(1)
def train_one_epoch(net, optimizer, config, master_bar, num_neighbors = 20):
# Set training mode
net.train()
# Assign parameters
num_nodes = config.num_nodes
#num_neighbors = np.random.choice(config.num_neighbors)#config.num_neighbors
batch_size = config.batch_size
batches_per_epoch = config.batches_per_epoch
accumulation_steps = config.accumulation_steps
train_filepath = config.train_filepath
# modify
loss_type = config.loss_type
num_neg = config.num_neg
if loss_type == 'FL':
gamma = config.gamma
else:
gamma = 0
# Load TSP data
dataset = GoogleTSPReader(num_nodes, num_neighbors, batch_size, train_filepath, augmentation = True)
if batches_per_epoch != -1:
batches_per_epoch = min(batches_per_epoch, dataset.max_iter)
else:
batches_per_epoch = dataset.max_iter
# Convert dataset to iterable
dataset = iter(dataset)
# Initially set loss class weights as None
edge_cw = None
# Initialize running data
running_loss = 0.0
# running_err_edges = 0.0
# running_err_tour = 0.0
# running_err_tsp = 0.0
running_pred_tour_len = 0.0
running_gt_tour_len = 0.0
running_nb_data = 0
running_nb_batch = 0
start_epoch = time.time()
for batch_num in progress_bar(range(batches_per_epoch), parent=master_bar):
# Generate a batch of TSPs
try:
batch = next(dataset)
except StopIteration:
break
# Convert batch to torch Variables
x_edges = Variable(torch.LongTensor(batch.edges).type(dtypeLong), requires_grad=False)
x_edges_values = Variable(torch.FloatTensor(batch.edges_values).type(dtypeFloat), requires_grad=False)
x_nodes = Variable(torch.LongTensor(batch.nodes).type(dtypeLong), requires_grad=False)
x_nodes_coord = Variable(torch.FloatTensor(batch.nodes_coord).type(dtypeFloat), requires_grad=False)
y_edges = Variable(torch.LongTensor(batch.edges_target).type(dtypeLong), requires_grad=False)
y_nodes = Variable(torch.LongTensor(batch.nodes_target).type(dtypeLong), requires_grad=False)
# Compute class weights (if uncomputed)
if type(edge_cw) != torch.Tensor:
edge_labels = y_edges.cpu().numpy().flatten()
edge_cw = compute_class_weight("balanced", classes=np.unique(edge_labels), y=edge_labels)
# Forward pass
y_preds, loss = net.forward(x_edges, x_edges_values, x_nodes, x_nodes_coord, y_edges,
edge_cw, num_neg, loss_type, gamma)
loss = loss.mean() # Take mean of loss across multiple GPUs
loss = loss / accumulation_steps # Scale loss by accumulation steps
loss.backward()
# Backward pass
if (batch_num+1) % accumulation_steps == 0:
optimizer.step()
optimizer.zero_grad()
# Compute error metrics and mean tour lengths
# err_edges, err_tour, err_tsp, tour_err_idx, tsp_err_idx = edge_error(y_preds, y_edges, x_edges)
pred_tour_len = mean_tour_len_edges(x_edges_values, y_preds)
gt_tour_len = np.mean(batch.tour_len)
# Update running data
running_nb_data += batch_size
running_loss += batch_size* loss.data.item()* accumulation_steps # Re-scale loss
# running_err_edges += batch_size* err_edges
# running_err_tour += batch_size* err_tour
# running_err_tsp += batch_size* err_tsp
running_pred_tour_len += batch_size* pred_tour_len
running_gt_tour_len += batch_size* gt_tour_len
running_nb_batch += 1
# Log intermediate statistics
result = ('loss:{loss:.4f} pred_tour_len:{pred_tour_len:.3f} gt_tour_len:{gt_tour_len:.3f}'.format(
loss=running_loss/running_nb_data,
pred_tour_len=running_pred_tour_len/running_nb_data,
gt_tour_len=running_gt_tour_len/running_nb_data))
master_bar.child.comment = result
# Compute statistics for full epoch
loss = running_loss/ running_nb_data
err_edges = 0 # running_err_edges/ running_nb_data
err_tour = 0 # running_err_tour/ running_nb_data
err_tsp = 0 # running_err_tsp/ running_nb_data
pred_tour_len = running_pred_tour_len/ running_nb_data
gt_tour_len = running_gt_tour_len/ running_nb_data
return time.time()-start_epoch, loss, err_edges, err_tour, err_tsp, pred_tour_len, gt_tour_len
def metrics_to_str(epoch, time, learning_rate, loss, err_edges, err_tour, err_tsp, pred_tour_len, gt_tour_len, num_neighbors=20):
result = ( 'epoch:{epoch:0>2d}\t'
'time:{time:.1f}h\t'
'lr:{learning_rate:.2e}\t'
'loss:{loss:.4f}\t'
# 'err_edges:{err_edges:.2f}\t'
# 'err_tour:{err_tour:.2f}\t'
# 'err_tsp:{err_tsp:.2f}\t'
'pred_tour_len:{pred_tour_len:.3f}\t'
'gt_tour_len:{gt_tour_len:.3f}\t'
'num_neighbors:{num_neighbors:0>2d}'.format(
epoch=epoch,
time=time/3600,
learning_rate=learning_rate,
loss=loss,
# err_edges=err_edges,
# err_tour=err_tour,
# err_tsp=err_tsp,
pred_tour_len=pred_tour_len,
gt_tour_len=gt_tour_len,
num_neighbors=num_neighbors))
return result
def test(net, config, master_bar, mode='test', num_neighbors = 20):
# Set evaluation mode
net.eval()
# Assign parameters
num_nodes = config.num_nodes
#num_neighbors = np.random.choice(config.num_neighbors)#config.num_neighbors
batch_size = config.batch_size
batches_per_epoch = config.batches_per_epoch
beam_size = config.beam_size
val_filepath = config.val_filepath
test_filepath = config.test_filepath
# modify
num_neg = config.num_neg
loss_type = config.loss_type
if loss_type == 'FL':
gamma = config.gamma
else:
gamma = 0
# Load TSP data
if mode == 'val':
dataset = GoogleTSPReader(num_nodes, num_neighbors, batch_size=batch_size, filepath=val_filepath)
elif mode == 'test':
dataset = GoogleTSPReader(num_nodes, num_neighbors, batch_size=batch_size, filepath=test_filepath)
batches_per_epoch = dataset.max_iter
# Convert dataset to iterable
dataset = iter(dataset)
# Initially set loss class weights as None
edge_cw = None
# Initialize running data
running_loss = 0.0
# running_err_edges = 0.0
# running_err_tour = 0.0
# running_err_tsp = 0.0
running_pred_tour_len = 0.0
running_gt_tour_len = 0.0
running_nb_data = 0
running_nb_batch = 0
with torch.no_grad():
start_test = time.time()
for batch_num in progress_bar(range(batches_per_epoch), parent=master_bar):
# Generate a batch of TSPs
try:
batch = next(dataset)
except StopIteration:
break
# Convert batch to torch Variables
x_edges = Variable(torch.LongTensor(batch.edges).type(dtypeLong), requires_grad=False)
x_edges_values = Variable(torch.FloatTensor(batch.edges_values).type(dtypeFloat), requires_grad=False)
x_nodes = Variable(torch.LongTensor(batch.nodes).type(dtypeLong), requires_grad=False)
x_nodes_coord = Variable(torch.FloatTensor(batch.nodes_coord).type(dtypeFloat), requires_grad=False)
y_edges = Variable(torch.LongTensor(batch.edges_target).type(dtypeLong), requires_grad=False)
y_nodes = Variable(torch.LongTensor(batch.nodes_target).type(dtypeLong), requires_grad=False)
# Compute class weights (if uncomputed)
if type(edge_cw) != torch.Tensor:
edge_labels = y_edges.cpu().numpy().flatten()
edge_cw = compute_class_weight("balanced", classes=np.unique(edge_labels), y=edge_labels)
# Forward pass --- modify
y_preds, loss = net.forward(x_edges, x_edges_values, x_nodes, x_nodes_coord, y_edges,
edge_cw, num_neg, loss_type, gamma)
loss = loss.mean() # Take mean of loss across multiple GPUs
# Compute error metrics
# err_edges, err_tour, err_tsp, tour_err_idx, tsp_err_idx = edge_error(y_preds, y_edges, x_edges)
# Get batch beamsearch tour prediction
if mode == 'val': # Validation: faster 'vanilla' beamsearch
bs_nodes = beamsearch_tour_nodes(
y_preds, beam_size, batch_size, num_nodes, dtypeFloat, dtypeLong, probs_type='logits')
elif mode == 'test': # Testing: beamsearch with shortest tour heuristic
bs_nodes = beamsearch_tour_nodes_shortest(
y_preds, x_edges_values, beam_size, batch_size, num_nodes, dtypeFloat, dtypeLong, probs_type='logits')
# Compute mean tour length
pred_tour_len = mean_tour_len_nodes(x_edges_values, bs_nodes)
gt_tour_len = | np.mean(batch.tour_len) | numpy.mean |
"""
analysis_dev_baseline.py
Obtain fitting parameters for the baseline system, based on the experimental results.
##################### RESULT #####################
drums = (8.242079921128573, -2.193882033832822)
vocals = (10.729872914688878, -3.22347120307927)
bass = (10.359737286288485, -3.277817921881511)
other = (11.848966992443225, -4.081261039251299)
speech = (6.661884937528991, -1.4516773850817029)
"""
import numpy as np
import os
from utils.datasets import get_audio_files_DSD, get_audio_files_librispeech
import matplotlib.pyplot as plt
import scipy.optimize
import librosa.core
colors = plt.rcParams['axes.prop_cycle'].by_key()['color']
# Set to your path
result_folder_path = '/Users/andres.perez/source/ambisonic_rt_estimation/results_dev_baseline'
main_path = '/Volumes/Dinge/datasets' # Path of the dataset
# %% SETUP
fs = 8000
instrument_idx = 1
instruments = ['bass', 'drums', 'other', 'vocals', 'speech']
instrument = instruments[instrument_idx]
result_folder_path = os.path.join(result_folder_path, instrument)
# Number of iterations
I = 10
# Get audio files
subset = 'Dev'
########################
# Length and offset
audio_file_length = 20. # seconds
audio_file_length_samples = int(audio_file_length * fs)
audio_file_offset = 5. # seconds
audio_file_offset_samples = int(audio_file_offset * fs)
if instrument != 'speech':
# Get audio files
# Dataset
audio_files = get_audio_files_DSD(main_path,
mixtures=False,
dataset_instrument=instrument,
dataset_type=subset)
else:
audio_files_all = get_audio_files_librispeech(main_path, dataset_type=subset)
sizes = np.empty(len(audio_files_all))
# Filter out by length
for af_idx, af in enumerate(audio_files_all):
s_t, sr_lib = librosa.core.load(af, sr=None, mono=True)
sizes[af_idx] = s_t.size / sr_lib
# mask = np.logical_and(sizes > audio_file_length, sizes < audio_file_length+audio_file_offset)
mask = sizes > audio_file_length+audio_file_offset
indices = np.argwhere(mask).flatten()
audio_files = np.asarray(audio_files_all)[indices]
N = len(audio_files)
# %% Get data
# File name: "ir_idx" _ "af_idx"
# Some of the af_idx are missing. That's because baseline didn't work. We will just skip those files across all IRs.
# Resulting file: np.asarray([rt60_true[ir_idx], baseline_rt60])
result_types = ['rt60_true', 'baseline_rt60']
T = len(result_types)
results = np.empty((I, N, T))
results.fill(np.nan)
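# NaN marks runs where the baseline failed; the nan-aware statistics below skip them.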
for i in range(I):
for a in range(N):
# Construct file name
file_name = str(i) + '_' + str(a) + '.npy'
file_path = os.path.join(result_folder_path, file_name)
# Ingest it if it exists
if os.path.exists(file_path):
results[i, a] = np.load(file_path)
# %% Statistical analysis
# Sort by increasing true RT60
iii = np.argsort(results[:, 0, 0])
# Mean and std
plt.figure()
plt.title('RT60 Estimation - Mean and std')
plt.grid()
plt.xlabel('IR index')
plt.ylabel('RT60 (s)')
x = np.arange(I)
# True measured RT60
plt.plot(x, results[:, 0, 0][iii], '-o', color=colors[0], markersize=4, label='True')
formats = ['--p']
labels = ['Baseline']
t = 1
mean_values = np.nanmean(results[:, :, t][iii], axis=1)
std_values = np.nanstd (results[:, :, t][iii], axis=1)
plt.errorbar(x+(t/25), mean_values, yerr=std_values, markersize=4,
c=colors[t], fmt=formats[t-1], label=labels[t-1])
## Linear regression
def line(x, m, n):
return m * x + n
p0 = 2, 1 # initial guess
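# Fit the true RT60 as a function of the mean baseline estimate (a calibration line), weighting points by the per-IR std.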
popt, pcov = scipy.optimize.curve_fit(line, mean_values, results[:, 0, 0][iii], p0, sigma=std_values, absolute_sigma=True)
yfit = line(mean_values, *popt)
m, n = popt
plt.plot(x, mean_values*m+n, ':o', markersize=4, c=colors[2], label='mean, linear regression')
plt.legend()
print('INSTRUMENT: ', instrument)
print('--------------------------------------------')
print(' m, n : ', m, n)
print(pcov)
var = np.sum(np.diag(pcov))
std = np.sqrt(var) # joint standard deviation is sqrt of sum of variances https://socratic.org/statistics/random-variables/addition-rules-for-variances
print(std)
# %%
##################### ALL TOGETHER #####################
folder_path = os.path.join('/Users/andres.perez/source/dereverberation/experiment/results_dev_baseline')
instruments = ['bass', 'drums', 'other', 'vocals', 'speech']
C = len(instruments)
r = np.empty((I, N, C))
r.fill(np.nan)
for i in range(I):
for a in range(N):
# Construct file name
file_name = str(i) + '_' + str(a) + '.npy'
for inst_idx, inst in enumerate(instruments):
file_path = os.path.join(folder_path, inst, file_name)
# print(file_path)
# Ingest it if it exists
if os.path.exists(file_path):
r[i, a, inst_idx] = np.load(file_path)[-1]
plt.figure()
plt.title('Baseline - mean dev results')
for inst_idx, inst in enumerate(instruments):
mean_values = np.nanmean(r[:, :, inst_idx][iii], axis=1)
std_values = np.nanstd(r[:, :, inst_idx][iii], axis=1)
# plt.errorbar(np.arange(I), mean_values, yerr=std_values, label=inst)
plt.errorbar(np.arange(I), mean_values, label=inst)
plt.grid()
plt.legend()
# %%
##################### FIGURE 1 - DRUMS AND SPEECH RESULTS #####################
# File name: "ir_idx" _ "af_idx"
# Some of the af_idx are missing. That's because baseline didn't work. We will just skip those files across all IRs.
# Resulting file: np.asarray([rt60_true[ir_idx], baseline_rt60])
instruments = ['speech', 'drums']
# instruments = ['bass', 'drums', 'other', 'vocals', 'speech']
C = len(instruments)
result_types = ['rt60_true', 'baseline_rt60']
T = len(result_types)
results = | np.empty((C, I, N, T)) | numpy.empty |
# Copyright (c) MW-Pose Group, 2020
from .structures import BaseStructure
import numpy as np
import cv2
class Heatmap(BaseStructure):
def __init__(self, heatmap):
super(Heatmap, self).__init__(heatmap)
def resize(self, size):
pass
class HeatmapMask(BaseStructure):
def __init__(self, mask):
super(HeatmapMask, self).__init__(mask)
def resize(self, size):
pass
def get_max_pred(heatmaps):
num_joints = heatmaps.shape[0]
width = heatmaps.shape[2]
heatmaps_reshaped = heatmaps.reshape((num_joints, -1))
idx = np.argmax(heatmaps_reshaped, 1)
maxvals = np.max(heatmaps_reshaped, 1)
maxvals = maxvals.reshape((num_joints, 1))
idx = idx.reshape((num_joints, 1))
preds = np.tile(idx, (1, 2)).astype(np.float32)
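# Convert the flattened argmax index into (x, y): x = idx % width, y = floor(idx / width).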
preds[:, 0] = (preds[:, 0]) % width
preds[:, 1] = np.floor((preds[:, 1]) / width)
pred_mask = np.tile(np.greater(maxvals, 0.0), (1, 2))
pred_mask = pred_mask.astype(np.float32)
preds *= pred_mask
return preds, maxvals
def get_3rd_point(a, b):
"""Return vector c that perpendicular to (a - b)."""
direct = a - b
return b + np.array([-direct[1], direct[0]], dtype=np.float32)
def get_dir(src_point, rot_rad):
"""Rotate the point by `rot_rad` degree."""
sn, cs = np.sin(rot_rad), np.cos(rot_rad)
src_result = [0, 0]
src_result[0] = src_point[0] * cs - src_point[1] * sn
src_result[1] = src_point[0] * sn + src_point[1] * cs
return src_result
def get_affine_transform(center,
scale,
rot,
output_size,
shift=np.array([0, 0], dtype=np.float32),
inv=0):
if not isinstance(scale, np.ndarray) and not isinstance(scale, list):
scale = np.array([scale, scale])
scale_tmp = scale
src_w = scale_tmp[0]
dst_w = output_size[0]
dst_h = output_size[1]
rot_rad = np.pi * rot / 180
src_dir = get_dir([0, src_w * -0.5], rot_rad)
dst_dir = np.array([0, dst_w * -0.5], np.float32)
src = np.zeros((3, 2), dtype=np.float32)
dst = np.zeros((3, 2), dtype=np.float32)
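# Three point pairs (center, rotated offset point, and a perpendicular third point)
# uniquely determine the 2x3 affine matrix computed by cv2.getAffineTransform below.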
src[0, :] = center + scale_tmp * shift
src[1, :] = center + src_dir + scale_tmp * shift
dst[0, :] = [dst_w * 0.5, dst_h * 0.5]
dst[1, :] = np.array([dst_w * 0.5, dst_h * 0.5]) + dst_dir
src[2:, :] = get_3rd_point(src[0, :], src[1, :])
dst[2:, :] = get_3rd_point(dst[0, :], dst[1, :])
if inv:
trans = cv2.getAffineTransform( | np.float32(dst) | numpy.float32 |
"""
Module to read / write Fortran unformatted sequential files.
This is in the spirit of code written by <NAME> and <NAME>.
"""
from __future__ import division, print_function, absolute_import
import warnings
import numpy as np
__all__ = ['FortranFile']
class FortranFile(object):
"""
A file object for unformatted sequential files from Fortran code.
Parameters
----------
filename: file or str
Open file object or filename.
mode : {'r', 'w'}, optional
Read-write mode, default is 'r'.
header_dtype : data-type
Data type of the header. Size and endianness must match the input/output file.
Notes
-----
These files are broken up into records of unspecified types. The size of
each record is given at the start (although the size of this header is not
standard) and the data is written onto disk without any formatting. Fortran
compilers supporting the BACKSPACE statement will write a second copy of
the size to facilitate backwards seeking.
This class only supports files written with both sizes for the record.
It also does not support the subrecords used in Intel and gfortran compilers
for records which are greater than 2GB with a 4-byte header.
An example of an unformatted sequential file in Fortran would be written as::
OPEN(1, FILE=myfilename, FORM='unformatted')
WRITE(1) myvariable
Since this is a non-standard file format, whose contents depend on the
compiler and the endianness of the machine, caution is advised. Files from
gfortran 4.8.0 and gfortran 4.1.2 on x86_64 are known to work.
Consider using Fortran direct-access files or files from the newer Stream
I/O, which can be easily read by `numpy.fromfile`.
Examples
--------
To create an unformatted sequential Fortran file:
>>> from scipy.io import FortranFile
>>> f = FortranFile('test.unf', 'w')
>>> f.write_record(np.array([1,2,3,4,5],dtype=np.int32))
>>> f.write_record(np.linspace(0,1,20).reshape((5,-1)))
>>> f.close()
To read this file:
>>> from scipy.io import FortranFile
>>> f = FortranFile('test.unf', 'r')
>>> print(f.read_ints(dtype=np.int32))
[1 2 3 4 5]
>>> print(f.read_reals(dtype=float).reshape((5,-1)))
[[ 0. 0.05263158 0.10526316 0.15789474]
[ 0.21052632 0.26315789 0.31578947 0.36842105]
[ 0.42105263 0.47368421 0.52631579 0.57894737]
[ 0.63157895 0.68421053 0.73684211 0.78947368]
[ 0.84210526 0.89473684 0.94736842 1. ]]
>>> f.close()
"""
def __init__(self, filename, mode='r', header_dtype=np.uint32):
if header_dtype is None:
raise ValueError('Must specify dtype')
header_dtype = np.dtype(header_dtype)
if header_dtype.kind != 'u':
warnings.warn("Given a dtype which is not unsigned.")
if mode not in 'rw' or len(mode) != 1:
raise ValueError('mode must be either r or w')
if hasattr(filename, 'seek'):
self._fp = filename
else:
self._fp = open(filename, '%sb' % mode)
self._header_dtype = header_dtype
def _read_size(self):
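# Each record's payload is framed by its byte count, written once before and once after the data.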
return np.fromfile(self._fp, dtype=self._header_dtype, count=1)
def write_record(self, s):
"""
Write a record (including sizes) to the file.
Parameters
----------
s : array_like
The data to write.
"""
s = np.array(s, order='F')
| np.array([s.nbytes],dtype=self._header_dtype) | numpy.array |
"""
{This script calculates spread in velocity dispersion (sigma) from mocks for
red and blue galaxies as well as smf for red and blue galaxies. It then
finds a non-gaussian distribution that best fits the error in spread
distributions in each bin.}
"""
from cosmo_utils.utils import work_paths as cwpaths
from scipy.stats import normaltest as nt
from chainconsumer import ChainConsumer
from multiprocessing import Pool
import matplotlib.pyplot as plt
from scipy.stats import chi2
from matplotlib import rc
from scipy import stats
import pandas as pd
import numpy as np
import emcee
import math
import os
__author__ = '{<NAME>}'
rc('font', **{'family': 'sans-serif', 'sans-serif': ['Helvetica']}, size=20)
rc('text', usetex=True)
rc('axes', linewidth=2)
rc('xtick.major', width=2, size=7)
rc('ytick.major', width=2, size=7)
def reading_catls(filename, catl_format='.hdf5'):
"""
Function to read ECO/RESOLVE catalogues.
Parameters
----------
filename: string
path and name of the ECO/RESOLVE catalogue to read
catl_format: string, optional (default = '.hdf5')
type of file to read.
Options:
- '.hdf5': Reads in a catalogue in HDF5 format
Returns
-------
mock_pd: pandas DataFrame
DataFrame with galaxy/group information
Examples
--------
# Specifying `filename`
>>> filename = 'ECO_catl.hdf5'
# Reading in Catalogue
>>> mock_pd = reading_catls(filename, format='.hdf5')
>>> mock_pd.head()
x y z vx vy vz \
0 10.225435 24.778214 3.148386 356.112457 -318.894409 366.721832
1 20.945772 14.500367 -0.237940 168.731766 37.558834 447.436951
2 21.335835 14.808488 0.004653 967.204407 -701.556763 -388.055115
3 11.102760 21.782235 2.947002 611.646484 -179.032089 113.388794
4 13.217764 21.214905 2.113904 120.689598 -63.448833 400.766541
loghalom cs_flag haloid halo_ngal ... cz_nodist vel_tot \
0 12.170 1 196005 1 ... 2704.599189 602.490355
1 11.079 1 197110 1 ... 2552.681697 479.667489
2 11.339 1 197131 1 ... 2602.377466 1256.285409
3 11.529 1 199056 1 ... 2467.277182 647.318259
4 10.642 1 199118 1 ... 2513.381124 423.326770
vel_tan vel_pec ra_orig groupid M_group g_ngal g_galtype \
0 591.399858 -115.068833 215.025116 0 11.702527 1 1
1 453.617221 155.924074 182.144134 1 11.524787 4 0
2 1192.742240 394.485714 182.213220 1 11.524787 4 0
3 633.928896 130.977416 210.441320 2 11.502205 1 1
4 421.064495 43.706352 205.525386 3 10.899680 1 1
halo_rvir
0 0.184839
1 0.079997
2 0.097636
3 0.113011
4 0.057210
"""
## Checking if file exists
if not os.path.exists(filename):
msg = '`filename`: {0} NOT FOUND! Exiting..'.format(filename)
raise ValueError(msg)
## Reading file
if catl_format=='.hdf5':
mock_pd = pd.read_hdf(filename)
else:
msg = '`catl_format` ({0}) not supported! Exiting...'.format(catl_format)
raise ValueError(msg)
return mock_pd
def std_func(bins, mass_arr, vel_arr):
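# Returns, for each stellar-mass bin, the spread (RMS about zero) of the velocities whose masses fall in that bin.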
last_index = len(bins)-1
i = 0
std_arr = []
for index1, bin_edge in enumerate(bins):
cen_deltav_arr = []
if index1 == last_index:
break
for index2, stellar_mass in enumerate(mass_arr):
if stellar_mass >= bin_edge and stellar_mass < bins[index1+1]:
cen_deltav_arr.append(vel_arr[index2])
N = len(cen_deltav_arr)
mean = 0
diff_sqrd_arr = []
for value in cen_deltav_arr:
diff = value - mean
diff_sqrd = diff**2
diff_sqrd_arr.append(diff_sqrd)
mean_diff_sqrd = np.mean(diff_sqrd_arr)
std = np.sqrt(mean_diff_sqrd)
std_arr.append(std)
return std_arr
def get_deltav_sigma_mocks(survey, path):
"""
Calculate spread in velocity dispersion from survey mocks
Parameters
----------
survey: string
Name of survey
path: string
Path to mock catalogs
Returns
---------
std_red_arr: numpy array
Spread in velocity dispersion of red galaxies
centers_red_arr: numpy array
Bin centers of central stellar mass for red galaxies
std_blue_arr: numpy array
Spread in velocity dispersion of blue galaxies
centers_blue_arr: numpy array
Bin centers of central stellar mass for blue galaxies
"""
if survey == 'eco':
mock_name = 'ECO'
num_mocks = 8
min_cz = 3000
max_cz = 7000
mag_limit = -17.33
mstar_limit = 8.9
volume = 151829.26 # Survey volume without buffer [Mpc/h]^3
elif survey == 'resolvea':
mock_name = 'A'
num_mocks = 59
min_cz = 4500
max_cz = 7000
mag_limit = -17.33
mstar_limit = 8.9
volume = 13172.384 # Survey volume without buffer [Mpc/h]^3
elif survey == 'resolveb':
mock_name = 'B'
num_mocks = 104
min_cz = 4500
max_cz = 7000
mag_limit = -17
mstar_limit = 8.7
volume = 4709.8373 # Survey volume without buffer [Mpc/h]^3
std_red_arr = []
centers_red_arr = []
std_blue_arr = []
centers_blue_arr = []
box_id_arr = np.linspace(5001,5008,8)
for box in box_id_arr:
box = int(box)
temp_path = path + '{0}/{1}_m200b_catls/'.format(box,
mock_name)
for num in range(num_mocks):
filename = temp_path + '{0}_cat_{1}_Planck_memb_cat.hdf5'.format(
mock_name, num)
mock_pd = reading_catls(filename)
# Using the same survey definition as in mcmc smf i.e excluding the
# buffer
mock_pd = mock_pd.loc[(mock_pd.cz.values >= min_cz) & \
(mock_pd.cz.values <= max_cz) & \
(mock_pd.M_r.values <= mag_limit) & \
(mock_pd.logmstar.values >= mstar_limit)]
logmstar_arr = mock_pd.logmstar.values
u_r_arr = mock_pd.u_r.values
colour_label_arr = np.empty(len(mock_pd), dtype='str')
# Using definitions from Moffett paper
for idx, value in enumerate(logmstar_arr):
if value <= 9.1:
if u_r_arr[idx] > 1.457:
colour_label = 'R'
else:
colour_label = 'B'
elif value > 9.1 and value < 10.1:
divider = 0.24 * value - 0.7
if u_r_arr[idx] > divider:
colour_label = 'R'
else:
colour_label = 'B'
elif value >= 10.1:
if u_r_arr[idx] > 1.7:
colour_label = 'R'
else:
colour_label = 'B'
colour_label_arr[idx] = colour_label
mock_pd['colour_label'] = colour_label_arr
mock_pd.logmstar = np.log10((10**mock_pd.logmstar) / 2.041)
red_subset_grpids = np.unique(mock_pd.groupid.loc[(mock_pd.\
colour_label == 'R') & (mock_pd.g_galtype == 1)].values)
blue_subset_grpids = np.unique(mock_pd.groupid.loc[(mock_pd.\
colour_label == 'B') & (mock_pd.g_galtype == 1)].values)
# Calculating spread in velocity dispersion for galaxies in groups
# with a red central
red_deltav_arr = []
red_cen_stellar_mass_arr = []
for key in red_subset_grpids:
group = mock_pd.loc[mock_pd.groupid == key]
cen_stellar_mass = group.logmstar.loc[group.g_galtype.\
values == 1].values[0]
mean_cz_grp = np.round(np.mean(group.cz.values),2)
deltav = group.cz.values - len(group)*[mean_cz_grp]
for val in deltav:
red_deltav_arr.append(val)
red_cen_stellar_mass_arr.append(cen_stellar_mass)
# print(max(red_cen_stellar_mass_arr))
if survey == 'eco' or survey == 'resolvea':
# TODO : check if this is actually correct for resolve a
red_stellar_mass_bins = np.linspace(8.6,11.5,6)
elif survey == 'resolveb':
red_stellar_mass_bins = np.linspace(8.4,11.0,6)
std_red = std_func(red_stellar_mass_bins, red_cen_stellar_mass_arr,
red_deltav_arr)
std_red = np.array(std_red)
std_red_arr.append(std_red)
# Calculating spread in velocity dispersion for galaxies in groups
# with a blue central
blue_deltav_arr = []
blue_cen_stellar_mass_arr = []
for key in blue_subset_grpids:
group = mock_pd.loc[mock_pd.groupid == key]
cen_stellar_mass = group.logmstar.loc[group.g_galtype\
.values == 1].values[0]
mean_cz_grp = np.round(np.mean(group.cz.values),2)
deltav = group.cz.values - len(group)*[mean_cz_grp]
for val in deltav:
blue_deltav_arr.append(val)
blue_cen_stellar_mass_arr.append(cen_stellar_mass)
# print(max(blue_cen_stellar_mass_arr))
if survey == 'eco' or survey == 'resolvea':
# TODO : check if this is actually correct for resolve a
blue_stellar_mass_bins = np.linspace(8.6,10.5,6)
elif survey == 'resolveb':
blue_stellar_mass_bins = np.linspace(8.4,10.4,6)
std_blue = std_func(blue_stellar_mass_bins, \
blue_cen_stellar_mass_arr, blue_deltav_arr)
std_blue = np.array(std_blue)
std_blue_arr.append(std_blue)
centers_red = 0.5 * (red_stellar_mass_bins[1:] + \
red_stellar_mass_bins[:-1])
centers_blue = 0.5 * (blue_stellar_mass_bins[1:] + \
blue_stellar_mass_bins[:-1])
centers_red_arr.append(centers_red)
centers_blue_arr.append(centers_blue)
std_red_arr = np.array(std_red_arr)
centers_red_arr = np.array(centers_red_arr)
std_blue_arr = np.array(std_blue_arr)
centers_blue_arr = np.array(centers_blue_arr)
return std_red_arr, std_blue_arr, centers_red_arr, centers_blue_arr
def lnprob(theta, x_vals, y_vals, err_tot):
"""
Calculates log probability for emcee
Parameters
----------
theta: array
Array of parameter values
x_vals: array
Array of x-axis values
y_vals: array
Array of y-axis values
err_tot: array
Array of error values of mass function
Returns
---------
lnp: float
Log probability given a model
chi2: float
Value of chi-squared given a model
"""
m, b = theta
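# Flat (uniform) priors: slope m in (-5.0, 0.5) and intercept b in (0.0, 10.0); outside these bounds lnp = -inf.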
if -5.0 < m < 0.5 and 0.0 < b < 10.0:
try:
model = m * x_vals + b
chi2 = chi_squared(y_vals, model, err_tot)
lnp = -chi2 / 2
if math.isnan(lnp):
raise ValueError
except (ValueError, RuntimeWarning, UserWarning):
lnp = -np.inf
chi2 = -np.inf
else:
chi2 = -np.inf
lnp = -np.inf
return lnp, chi2
def chi_squared(data, model, err_data):
"""
Calculates chi squared
Parameters
----------
data: array
Array of data values
model: array
Array of model values
err_data: float
Error in data values
Returns
---------
chi_squared: float
Value of chi-squared given a model
"""
chi_squared_arr = (data - model)**2 / (err_data**2)
chi_squared = np.sum(chi_squared_arr)
return chi_squared
global model_init
global survey
global path_to_proc
global mf_type
survey = 'resolveb'
machine = 'mac'
mf_type = 'smf'
dict_of_paths = cwpaths.cookiecutter_paths()
path_to_raw = dict_of_paths['raw_dir']
path_to_proc = dict_of_paths['proc_dir']
path_to_external = dict_of_paths['ext_dir']
path_to_data = dict_of_paths['data_dir']
if machine == 'bender':
halo_catalog = '/home/asadm2/.astropy/cache/halotools/halo_catalogs/'\
'vishnu/rockstar/vishnu_rockstar_test.hdf5'
elif machine == 'mac':
halo_catalog = path_to_raw + 'vishnu_rockstar_test.hdf5'
if survey == 'eco':
catl_file = path_to_raw + "eco/eco_all.csv"
elif survey == 'resolvea' or survey == 'resolveb':
catl_file = path_to_raw + "resolve/RESOLVE_liveJune2018.csv"
if survey == 'eco':
path_to_mocks = path_to_data + 'mocks/m200b/eco/'
elif survey == 'resolvea':
path_to_mocks = path_to_data + 'mocks/m200b/resolvea/'
elif survey == 'resolveb':
path_to_mocks = path_to_data + 'mocks/m200b/resolveb/'
std_red_mocks, std_blue_mocks, centers_red_mocks, \
centers_blue_mocks = get_deltav_sigma_mocks(survey, path_to_mocks)
## Histogram of red and blue sigma in bins of central stellar mass, to check whether
## the distribution of values the std is taken over is normal or lognormal
nrows = 2
ncols = 5
if survey == 'eco' or survey == 'resolvea':
red_stellar_mass_bins = np.linspace(8.6,11.5,6)
blue_stellar_mass_bins = np.linspace(8.6,10.5,6)
elif survey == 'resolveb':
red_stellar_mass_bins = np.linspace(8.4,11.0,6)
blue_stellar_mass_bins = np.linspace(8.4,10.4,6)
fig3, axs = plt.subplots(nrows, ncols)
for i in range(0, nrows, 1):
for j in range(0, ncols, 1):
if i == 0: # row 1 for all red bins
axs[i, j].hist(np.log10(std_red_mocks.T[j]), histtype='step', \
color='indianred', linewidth=4, linestyle='-') # first red bin
axs[i, j].set_title('[{0}-{1}]'.format(np.round(
red_stellar_mass_bins[j],2), np.round(
red_stellar_mass_bins[j+1],2)), fontsize=20)
k2, p = nt(np.log10(std_red_mocks.T[j]), nan_policy="omit")
axs[i, j].text(0.7, 0.7, "{0}".format(np.round(p, 2)),
transform=axs[i, j].transAxes)
else: # row 2 for all blue bins
axs[i, j].hist(np.log10(std_blue_mocks.T[j]), histtype='step', \
color='cornflowerblue', linewidth=4, linestyle='-')
axs[i, j].set_title('[{0}-{1}]'.format(np.round(
blue_stellar_mass_bins[j],2), np.round(
blue_stellar_mass_bins[j+1],2)), fontsize=20)
k2, p = nt(np.log10(std_blue_mocks.T[j]), nan_policy="omit")
axs[i, j].text(0.7, 0.7, "{0}".format(np.round(p, 2)),
transform=axs[i, j].transAxes)
for ax in axs.flat:
ax.set(xlabel=r'\boldmath$\sigma \left[km/s\right]$')
for ax in axs.flat:
ax.label_outer()
plt.show()
## Measuring fractional error in sigma of red and blue galaxies in all 5 bins
sigma_av_red = []
frac_err_red = []
for idx in range(len(std_red_mocks.T)):
mean = np.mean(std_red_mocks.T[idx][~np.isnan(std_red_mocks.T[idx])])
sigma_av_red.append(mean)
frac_err = (std_red_mocks.T[idx][~np.isnan(std_red_mocks.T[idx])] \
- mean)/mean
frac_err_red.append(frac_err)
frac_err_red = np.array(frac_err_red, dtype=list)
sigma_av_blue = []
frac_err_blue = []
for idx in range(len(std_blue_mocks.T)):
mean = np.mean(std_blue_mocks.T[idx][~np.isnan(std_blue_mocks.T[idx])])
sigma_av_blue.append(mean)
frac_err = (std_blue_mocks.T[idx][~np.isnan(std_blue_mocks.T[idx])] \
- mean)/mean
frac_err_blue.append(frac_err)
frac_err_blue = np.array(frac_err_blue, dtype=list)
## Fit fractional error distributions
nrows = 2
ncols = 5
if survey == 'eco' or survey == 'resolvea':
red_stellar_mass_bins = np.linspace(8.6,11.5,6)
blue_stellar_mass_bins = np.linspace(8.6,10.5,6)
elif survey == 'resolveb':
red_stellar_mass_bins = np.linspace(8.4,11.0,6)
blue_stellar_mass_bins = np.linspace(8.4,10.4,6)
max_red_arr = np.empty(len(frac_err_red))
max_blue_arr = np.empty(len(frac_err_blue))
for idx in range(len(frac_err_red)):
max_red = plt.hist(frac_err_red[idx], density=True)[0].max()
max_blue = plt.hist(frac_err_blue[idx], density=True)[0].max()
max_red_arr[idx] = max_red + 0.05
max_blue_arr[idx] = max_blue + 0.05
print(np.mean(frac_err_red[idx]))
print(np.mean(frac_err_blue[idx]))
plt.clf()
red_a = []
red_loc = []
red_scale = []
blue_a = []
blue_loc = []
blue_scale = []
fig3, axs = plt.subplots(nrows, ncols)
for i in range(0, nrows, 1):
for j in range(0, ncols, 1):
if i == 0: # row 1 for all red bins
frac_err_arr = frac_err_red[j]
axs[i,j].hist(frac_err_arr, density=True, histtype='step',
linewidth=3, color='k')
# find minimum and maximum of xticks, so we know
# where we should compute theoretical distribution
xt = axs[i,j].get_xticks()
xmin, xmax = min(xt), max(xt)
lnspc = np.linspace(xmin, xmax, len(frac_err_arr))
loc_log, scale_log = stats.logistic.fit(frac_err_arr)
pdf_logistic = stats.logistic.pdf(lnspc, loc_log, scale_log)
axs[i,j].plot(lnspc, pdf_logistic, label="Logistic")
# a_beta, b_beta, loc_beta, scale_beta = stats.beta.fit(frac_err_arr)
# pdf_beta = stats.beta.pdf(lnspc, a_beta, b_beta, loc_beta,
# scale_beta)
# axs[i,j].plot(lnspc, pdf_beta, label="Beta")
loc_norm, scale_norm = stats.norm.fit(frac_err_arr)
pdf_norm = stats.norm.pdf(lnspc, loc_norm, scale_norm)
axs[i,j].plot(lnspc, pdf_norm, label="Normal")
a_sn, loc_sn, scale_sn = stats.skewnorm.fit(frac_err_arr)
pdf_skewnorm = stats.skewnorm.pdf(lnspc, a_sn, loc_sn, scale_sn)
axs[i,j].plot(lnspc, pdf_skewnorm, label="Skew-normal")
red_a.append(a_sn)
red_loc.append(loc_sn)
red_scale.append(scale_sn)
# a_w, loc_w, scale_w = stats.weibull_min.fit(frac_err_arr)
# pdf_weibull = stats.weibull_min.pdf(lnspc, a_w, loc_w, scale_w)
# axs[i,j].plot(lnspc, pdf_weibull, label="Weibull")
# a_g,loc_g,scale_g = stats.gamma.fit(frac_err_arr)
# pdf_gamma = stats.gamma.pdf(lnspc, a_g, loc_g, scale_g)
# axs[i,j].plot(lnspc, pdf_gamma, label="Gamma")
axs[i,j].set_title('[{0}-{1}]'.format(np.round(
red_stellar_mass_bins[j],2), np.round(
red_stellar_mass_bins[j+1],2)),fontsize=20,
color='indianred')
textstr = '\n'.join((
r'$\mu=%.2f$' % (a_sn, ),
r'$loc=%.2f$' % (loc_sn, ),
r'$scale=%.2f$' % (scale_sn, )))
props = dict(boxstyle='round', facecolor='wheat', alpha=0.5)
axs[i,j].set_ylim(0, max_red_arr[j])
# axs[i, j].text(0.4, 0.8, textstr, fontsize=12, bbox=props,
# transform=axs[i, j].transAxes)
else: # row 2 for all blue bins
frac_err_arr = frac_err_blue[j]
axs[i,j].hist(frac_err_arr, density=True, histtype='step',
linewidth=3, color='k')
# find minimum and maximum of xticks, so we know
# where we should compute theoretical distribution
xt = axs[i,j].get_xticks()
xmin, xmax = min(xt), max(xt)
lnspc = np.linspace(xmin, xmax, len(frac_err_arr))
loc_log, scale_log = stats.logistic.fit(frac_err_arr)
pdf_logistic = stats.logistic.pdf(lnspc, loc_log, scale_log)
axs[i,j].plot(lnspc, pdf_logistic, label="Logistic")
# a_beta, b_beta, loc_beta, scale_beta = stats.beta.fit(frac_err_arr)
# pdf_beta = stats.beta.pdf(lnspc, a_beta, b_beta, loc_beta,
# scale_beta)
# axs[i,j].plot(lnspc, pdf_beta, label="Beta")
loc_norm, scale_norm = stats.norm.fit(frac_err_arr)
pdf_norm = stats.norm.pdf(lnspc, loc_norm, scale_norm)
axs[i,j].plot(lnspc, pdf_norm, label="Normal")
a_sn, loc_sn, scale_sn = stats.skewnorm.fit(frac_err_arr)
pdf_skewnorm = stats.skewnorm.pdf(lnspc, a_sn, loc_sn, scale_sn)
axs[i,j].plot(lnspc, pdf_skewnorm, label="Skew-normal")
blue_a.append(a_sn)
blue_loc.append(loc_sn)
blue_scale.append(scale_sn)
# a_w, loc_w, scale_w = stats.weibull_min.fit(frac_err_arr)
# pdf_weibull = stats.weibull_min.pdf(lnspc, a_w, loc_w, scale_w)
# axs[i,j].plot(lnspc, pdf_weibull, label="Weibull")
# a_g, loc_g, scale_g = stats.gamma.fit(frac_err_arr)
# pdf_gamma = stats.gamma.pdf(lnspc, a_g, loc_g,scale_g)
# axs[i,j].plot(lnspc, pdf_gamma, label="Gamma")
axs[i,j].set_title('[{0}-{1}]'.format(np.round(
blue_stellar_mass_bins[j],2), np.round(
blue_stellar_mass_bins[j+1],2)), fontsize=20,
color='cornflowerblue')
textstr = '\n'.join((
r'$\mu=%.2f$' % (a_sn, ),
r'$loc=%.2f$' % (loc_sn, ),
r'$scale=%.2f$' % (scale_sn, )))
props = dict(boxstyle='round', facecolor='wheat', alpha=0.5)
axs[i,j].set_ylim(0, max_blue_arr[j])
# axs[i, j].text(0.4, 0.8, textstr, fontsize=12, bbox=props,
# transform=axs[i, j].transAxes)
red_a = np.array(red_a)
red_loc = np.array(red_loc)
red_scale = np.array(red_scale)
blue_a = np.array(blue_a)
blue_loc = np.array(blue_loc)
blue_scale = np.array(blue_scale)
a_arr = (np.array((red_a, blue_a))).flatten()
loc_arr = (np.array((red_loc, blue_loc))).flatten()
scale_arr = (np.array((red_scale, blue_scale))).flatten()
axs[0,0].legend(loc='center right', prop={'size': 8})
axs[1,2].set(xlabel=r'\boldmath$(\sigma - \bar \sigma )/ \bar \sigma$')
plt.show()
## Simulating errors
np.random.seed(30)
m_true_arr = np.round(np.random.uniform(-4.9, 0.4, size=500),2)
b_true_arr = np.round(np.random.uniform(1, 7, size=500),2)
## Keeping data fixed
m_true = m_true_arr[50]
b_true = b_true_arr[50]
N=10
x = np.sort(10*np.random.rand(N))
samples_arr = []
chi2_arr = []
yerr_arr = []
for i in range(500):
print(i)
## Mimicking non-gaussian errors from mocks
# yerr = stats.skewnorm.rvs(a_arr, loc_arr, scale_arr)
## Corresponding gaussian distributions
var_arr = stats.skewnorm.stats(a_arr, loc_arr, scale_arr, moments='mvsk')[1]
# mu_arr = stats.skewnorm.stats(a_arr, loc_arr, scale_arr, moments='mvsk')[0]
std_arr = np.sqrt(var_arr)
## Simulating gaussian errors with mean of 0 and same sigma as corresponding
## non-gaussian fits
yerr = stats.norm.rvs(np.zeros(10), std_arr)
y = m_true * x + b_true
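# yerr holds fractional deviations, so the data are perturbed multiplicatively: y_new = y * (1 + yerr).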
y_new = y + y*yerr
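# Initialise 64 walkers in a small Gaussian ball around the starting guess (m, b) = (0, 5).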
pos = [0,5] + 1e-4 * np.random.randn(64, 2)
nwalkers, ndim = pos.shape
nsteps = 5000
sampler = emcee.EnsembleSampler(nwalkers, ndim, lnprob,
args=(x, y_new, std_arr))
sampler.run_mcmc(pos, nsteps, store=True, progress=True)
flat_samples = sampler.get_chain(discard=100, thin=15, flat=True)
chi2 = sampler.get_blobs(discard=100, thin=15, flat=True)
samples_arr.append(flat_samples)
chi2_arr.append(chi2)
yerr_arr.append(yerr)
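# (The skew-normal and Gaussian error passes are run separately by toggling the yerr line above;
# each pass's results are stored under the corresponding set of names below.)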
non_gaussian_samples_arr = np.array(samples_arr)
non_gaussian_chi2_arr = np.array(chi2_arr)
non_gaussian_yerr_arr = np.array(yerr_arr)
gaussian_samples_arr = np.array(samples_arr)
gaussian_chi2_arr = np.array(chi2_arr)
gaussian_yerr_arr = | np.array(yerr_arr) | numpy.array |
# MIT License
#
# Copyright (c) 2018-2021 Tskit Developers
# Copyright (c) 2015-2018 University of Oxford
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
Test cases for the low level C interface to tskit.
"""
import collections
import inspect
import itertools
import os
import random
import tempfile
import msprime
import numpy as np
import pytest
import _tskit
import tskit
def get_tracked_sample_counts(st, tracked_samples):
"""
Returns a list giving the number of samples in the specified list
that are in the subtree rooted at each node.
"""
nu = [0 for j in range(st.get_num_nodes())]
for j in tracked_samples:
# Duplicates not permitted.
assert nu[j] == 0
u = j
while u != _tskit.NULL:
nu[u] += 1
u = st.get_parent(u)
return nu
def get_sample_counts(tree_sequence, st):
"""
Returns a list of the sample node counts for the specified tree.
"""
nu = [0 for j in range(st.get_num_nodes())]
for j in range(tree_sequence.get_num_samples()):
u = j
while u != _tskit.NULL:
nu[u] += 1
u = st.get_parent(u)
return nu
class LowLevelTestCase:
"""
Superclass of tests for the low-level interface.
"""
def verify_tree_dict(self, n, pi):
"""
Verifies that the specified tree in dict format is a
consistent coalescent history for a sample of size n.
"""
assert len(pi) <= 2 * n - 1
# _tskit.NULL should not be a node
assert _tskit.NULL not in pi
# verify the root is equal for all samples
root = 0
while pi[root] != _tskit.NULL:
root = pi[root]
for j in range(n):
k = j
while pi[k] != _tskit.NULL:
k = pi[k]
assert k == root
# 0 to n - 1 inclusive should always be nodes
for j in range(n):
assert j in pi
num_children = collections.defaultdict(int)
for j in pi.keys():
num_children[pi[j]] += 1
# nodes 0 to n are samples.
for j in range(n):
assert pi[j] != 0
assert num_children[j] == 0
# All non-sample nodes should be binary
for j in pi.keys():
if j > n:
assert num_children[j] >= 2
def get_example_tree_sequence(
self, sample_size=10, length=1, mutation_rate=1, random_seed=1
):
ts = msprime.simulate(
sample_size,
recombination_rate=0.1,
mutation_rate=mutation_rate,
random_seed=random_seed,
length=length,
)
return ts.ll_tree_sequence
def get_example_tree_sequences(self):
yield self.get_example_tree_sequence()
yield self.get_example_tree_sequence(2, 10)
yield self.get_example_tree_sequence(20, 10)
yield self.get_example_migration_tree_sequence()
def get_example_migration_tree_sequence(self):
pop_configs = [msprime.PopulationConfiguration(5) for _ in range(2)]
migration_matrix = [[0, 1], [1, 0]]
ts = msprime.simulate(
population_configurations=pop_configs,
migration_matrix=migration_matrix,
mutation_rate=1,
record_migrations=True,
random_seed=1,
)
return ts.ll_tree_sequence
def verify_iterator(self, iterator):
"""
Checks that the specified non-empty iterator implements the
iterator protocol correctly.
"""
list_ = list(iterator)
assert len(list_) > 0
for _ in range(10):
with pytest.raises(StopIteration):
next(iterator)
class MetadataTestMixin:
metadata_tables = [
"node",
"edge",
"site",
"mutation",
"migration",
"individual",
"population",
]
class TestTableCollection(LowLevelTestCase):
"""
Tests for the low-level TableCollection class
"""
def test_file_errors(self):
tc1 = _tskit.TableCollection(1)
self.get_example_tree_sequence().dump_tables(tc1)
def loader(*args):
tc = _tskit.TableCollection(1)
tc.load(*args)
for func in [tc1.dump, loader]:
with pytest.raises(TypeError):
func()
for bad_type in [None, [], {}]:
with pytest.raises(TypeError):
func(bad_type)
def test_dump_equality(self, tmp_path):
for ts in self.get_example_tree_sequences():
tc = _tskit.TableCollection(sequence_length=ts.get_sequence_length())
ts.dump_tables(tc)
with open(tmp_path / "tmp.trees", "wb") as f:
tc.dump(f)
with open(tmp_path / "tmp.trees", "rb") as f:
tc2 = _tskit.TableCollection()
tc2.load(f)
assert tc.equals(tc2)
def test_reference_deletion(self):
ts = msprime.simulate(10, mutation_rate=1, random_seed=1)
tc = ts.tables._ll_tables
# Get references to all the tables
tables = [
tc.individuals,
tc.nodes,
tc.edges,
tc.migrations,
tc.sites,
tc.mutations,
tc.populations,
tc.provenances,
]
del tc
for _ in range(10):
for table in tables:
assert len(str(table)) > 0
def test_set_sequence_length_errors(self):
tables = _tskit.TableCollection(1)
with pytest.raises(TypeError):
del tables.sequence_length
for bad_value in ["sdf", None, []]:
with pytest.raises(TypeError):
tables.sequence_length = bad_value
def test_set_sequence_length(self):
tables = _tskit.TableCollection(1)
assert tables.sequence_length == 1
for value in [-1, 1e6, 1e-22, 1000, 2 ** 32, -10000]:
tables.sequence_length = value
assert tables.sequence_length == value
def test_set_metadata_errors(self):
tables = _tskit.TableCollection(1)
with pytest.raises(AttributeError):
del tables.metadata
for bad_value in ["bytes only", 59, 43.4, None, []]:
with pytest.raises(TypeError):
tables.metadata = bad_value
def test_set_metadata(self):
tables = _tskit.TableCollection(1)
assert tables.metadata == b""
for value in [b"foo", b"", "💩".encode(), b"null char \0 in string"]:
tables.metadata = value
tables.metadata_schema = "Test we have two separate fields"
assert tables.metadata == value
def test_set_metadata_schema_errors(self):
tables = _tskit.TableCollection(1)
with pytest.raises(AttributeError):
del tables.metadata_schema
for bad_value in [59, 43.4, None, []]:
with pytest.raises(TypeError):
tables.metadata_schema = bad_value
def test_set_metadata_schema(self):
tables = _tskit.TableCollection(1)
assert tables.metadata_schema == ""
for value in ["foo", "", "💩", "null char \0 in string"]:
tables.metadata_schema = value
tables.metadata = b"Test we have two separate fields"
assert tables.metadata_schema == value
def test_simplify_bad_args(self):
ts = msprime.simulate(10, random_seed=1)
tc = ts.tables._ll_tables
with pytest.raises(TypeError):
tc.simplify()
with pytest.raises(ValueError):
tc.simplify("asdf")
with pytest.raises(TypeError):
tc.simplify([0, 1], keep_unary="sdf")
with pytest.raises(TypeError):
tc.simplify([0, 1], keep_unary_in_individuals="abc")
with pytest.raises(TypeError):
tc.simplify([0, 1], keep_input_roots="sdf")
with pytest.raises(TypeError):
tc.simplify([0, 1], filter_populations="x")
with pytest.raises(_tskit.LibraryError):
tc.simplify([0, -1])
def test_link_ancestors_bad_args(self):
ts = msprime.simulate(10, random_seed=1)
tc = ts.tables._ll_tables
with pytest.raises(TypeError):
tc.link_ancestors()
with pytest.raises(TypeError):
tc.link_ancestors([0, 1])
with pytest.raises(ValueError):
tc.link_ancestors(samples=[0, 1], ancestors="sdf")
with pytest.raises(ValueError):
tc.link_ancestors(samples="sdf", ancestors=[0, 1])
with pytest.raises(_tskit.LibraryError):
tc.link_ancestors(samples=[0, 1], ancestors=[11, -1])
with pytest.raises(_tskit.LibraryError):
tc.link_ancestors(samples=[0, -1], ancestors=[11])
def test_link_ancestors(self):
ts = msprime.simulate(2, random_seed=1)
tc = ts.tables._ll_tables
edges = tc.link_ancestors([0, 1], [3])
assert isinstance(edges, _tskit.EdgeTable)
del edges
assert tc.edges.num_rows == 2
def test_subset_bad_args(self):
ts = msprime.simulate(10, random_seed=1)
tc = ts.tables._ll_tables
with pytest.raises(TypeError):
tc.subset(np.array(["a"]))
with pytest.raises(ValueError):
tc.subset(np.array([[1], [2]], dtype="int32"))
with pytest.raises(TypeError):
tc.subset()
with pytest.raises(_tskit.LibraryError):
tc.subset(np.array([100, 200], dtype="int32"))
def test_union_bad_args(self):
ts = msprime.simulate(10, random_seed=1)
tc = ts.tables._ll_tables
tc2 = tc
with pytest.raises(TypeError):
tc.union(tc2, np.array(["a"]))
with pytest.raises(ValueError):
tc.union(tc2, np.array([0], dtype="int32"))
with pytest.raises(TypeError):
tc.union(tc2)
with pytest.raises(TypeError):
tc.union()
node_mapping = np.arange(ts.num_nodes, dtype="int32")
node_mapping[0] = 1200
with pytest.raises(_tskit.LibraryError):
tc.union(tc2, node_mapping)
node_mapping = np.array(
[node_mapping.tolist(), node_mapping.tolist()], dtype="int32"
)
with pytest.raises(ValueError):
tc.union(tc2, node_mapping)
with pytest.raises(ValueError):
tc.union(tc2, np.array([[1], [2]], dtype="int32"))
def test_equals_bad_args(self):
ts = msprime.simulate(10, random_seed=1242)
tc = ts.tables._ll_tables
with pytest.raises(TypeError):
tc.equals()
with pytest.raises(TypeError):
tc.equals(None)
assert tc.equals(tc)
with pytest.raises(TypeError):
tc.equals(tc, no_such_arg=1)
bad_bool = "x"
with pytest.raises(TypeError):
tc.equals(tc, ignore_metadata=bad_bool)
with pytest.raises(TypeError):
tc.equals(tc, ignore_ts_metadata=bad_bool)
with pytest.raises(TypeError):
tc.equals(tc, ignore_provenance=bad_bool)
with pytest.raises(TypeError):
tc.equals(tc, ignore_timestamps=bad_bool)
def test_asdict(self):
for ts in self.get_example_tree_sequences():
tc = _tskit.TableCollection(sequence_length=ts.get_sequence_length())
ts.dump_tables(tc)
d = tc.asdict()
# Method is tested extensively elsewhere, just basic sanity check here
assert isinstance(d, dict)
assert len(d) > 0
def test_fromdict(self):
for ts in self.get_example_tree_sequences():
tc1 = _tskit.TableCollection(sequence_length=ts.get_sequence_length())
ts.dump_tables(tc1)
d = tc1.asdict()
tc2 = _tskit.TableCollection(sequence_length=0)
tc2.fromdict(d)
assert tc1.equals(tc2)
def test_asdict_bad_args(self):
ts = msprime.simulate(10, random_seed=1242)
tc = ts.tables._ll_tables
for bad_type in [None, 0.1, "str"]:
with pytest.raises(TypeError):
tc.asdict(force_offset_64=bad_type)
def test_fromdict_bad_args(self):
tc = _tskit.TableCollection(0)
for bad_type in [None, 0.1, "str"]:
with pytest.raises(TypeError):
tc.fromdict(bad_type)
class TestIbd:
def test_uninitialised(self):
result = _tskit.IbdResult.__new__(_tskit.IbdResult)
with pytest.raises(SystemError):
result.get(0, 1)
with pytest.raises(SystemError):
result.print_state()
with pytest.raises(SystemError):
result.total_segments
def test_find_bad_args(self):
ts = msprime.simulate(10, random_seed=1)
tc = ts.tables._ll_tables
with pytest.raises(TypeError):
tc.find_ibd()
for bad_samples in ["sdf", None, {}]:
with pytest.raises(ValueError):
tc.find_ibd(bad_samples)
for not_enough_samples in [[], [0]]:
with pytest.raises(ValueError):
tc.find_ibd(not_enough_samples)
# input array must be 2D
with pytest.raises(ValueError):
tc.find_ibd([[[1], [1]]])
# Input array must be (n, 2)
with pytest.raises(ValueError):
tc.find_ibd([[1, 1, 1]])
for bad_float in ["sdf", None, {}]:
with pytest.raises(TypeError):
tc.find_ibd([(0, 1)], min_length=bad_float)
with pytest.raises(TypeError):
tc.find_ibd([(0, 1)], max_time=bad_float)
with pytest.raises(_tskit.LibraryError):
tc.find_ibd([(0, 1)], max_time=-1)
with pytest.raises(_tskit.LibraryError):
tc.find_ibd([(0, 1)], min_length=-1)
def test_get_output(self):
ts = msprime.simulate(10, random_seed=1)
tc = ts.tables._ll_tables
pairs = [(0, 1), (2, 3)]
result = tc.find_ibd(pairs)
assert isinstance(result, _tskit.IbdResult)
for pair in pairs:
value = result.get(*pair)
assert isinstance(value, dict)
assert len(value) == 3
assert list(value["left"]) == [0]
assert list(value["right"]) == [1]
assert len(value["node"]) == 1
def test_get_bad_args(self):
ts = msprime.simulate(10, random_seed=1)
tc = ts.tables._ll_tables
pairs = [(0, 1), (2, 3)]
result = tc.find_ibd(pairs)
with pytest.raises(TypeError):
result.get()
with pytest.raises(TypeError):
result.get("0", 1)
with pytest.raises(_tskit.LibraryError):
result.get(0, 0)
# TODO this should probably be a KeyError, but let's not
# worry about it for now.
with pytest.raises(_tskit.LibraryError):
result.get(0, 2)
def test_print_state(self):
ts = msprime.simulate(10, random_seed=1)
tc = ts.tables._ll_tables
pairs = [(0, 1), (2, 3)]
result = tc.find_ibd(pairs)
with pytest.raises(TypeError):
result.print_state()
with tempfile.TemporaryFile("w+") as f:
result.print_state(f)
f.seek(0)
output = f.read()
assert len(output) > 0
assert "IBD" in output
def test_direct_instantiation(self):
# Nobody should do this, but just in case
result = _tskit.IbdResult()
assert result.total_segments == 0
with tempfile.TemporaryFile("w+") as f:
result.print_state(f)
f.seek(0)
output = f.read()
assert len(output) > 0
assert "IBD" in output
class TestTableMethods:
"""
Tests for the low-level table methods.
"""
@pytest.mark.parametrize("table_name", tskit.TABLE_NAMES)
def test_table_extend(self, table_name, ts_fixture):
table = getattr(ts_fixture.tables, table_name)
assert len(table) >= 5
ll_table = table.ll_table
table_copy = table.copy()
ll_table.extend(table_copy.ll_table, row_indexes=[])
assert table == table_copy
ll_table.clear()
ll_table.extend(table_copy.ll_table, row_indexes=range(len(table_copy)))
assert table == table_copy
@pytest.mark.parametrize("table_name", tskit.TABLE_NAMES)
@pytest.mark.parametrize(
["row_indexes", "expected_rows"],
[
([0], [0]),
([4] * 1000, [4] * 1000),
([4, 1, 3, 0, 0], [4, 1, 3, 0, 0]),
(np.array([0, 1, 4], dtype=np.uint8), [0, 1, 4]),
(np.array([3, 3, 3], dtype=np.uint16), [3, 3, 3]),
(np.array([4, 2, 1], dtype=np.int8), [4, 2, 1]),
(np.array([4, 2], dtype=np.int16), [4, 2]),
(np.array([0, 1], dtype=np.int32), [0, 1]),
(range(2, -1, -1), [2, 1, 0]),
],
)
def test_table_extend_types(
self, ts_fixture, table_name, row_indexes, expected_rows
):
table = getattr(ts_fixture.tables, table_name)
assert len(table) >= 5
ll_table = table.ll_table
table_copy = table.copy()
ll_table.extend(table_copy.ll_table, row_indexes=row_indexes)
assert len(table) == len(table_copy) + len(expected_rows)
for i, expected_row in enumerate(expected_rows):
assert table[len(table_copy) + i] == table_copy[expected_row]
@pytest.mark.parametrize(
["table_name", "column_name"],
[
(t, c)
for t in tskit.TABLE_NAMES
for c in getattr(tskit, f"{t[:-1].capitalize()}Table").column_names
if c[-7:] != "_offset"
],
)
def test_table_update(self, ts_fixture, table_name, column_name):
table = getattr(ts_fixture.tables, table_name)
copy = table.copy()
ll_table = table.ll_table
# Find the first row where this column differs to get a value to swap in
other_row_index = -1
for i, row in enumerate(table):
if not np.array_equal(
getattr(table[0], column_name), getattr(row, column_name)
):
other_row_index = i
assert other_row_index != -1
# No-op update should not create a change
args = ll_table.get_row(0)
ll_table.update_row(0, *args)
table.assert_equals(copy)
# Modify the column under test in the first row
new_args = list(ll_table.get_row(0))
arg_index = list(inspect.signature(table.add_row).parameters.keys()).index(
column_name
)
new_args[arg_index] = ll_table.get_row(other_row_index)[arg_index]
ll_table.update_row(0, *new_args)
for a, b in zip(ll_table.get_row(0), new_args):
np.array_equal(a, b)
def test_update_defaults(self):
t = tskit.IndividualTable()
assert t.add_row(flags=1, location=[1, 2], parents=[3, 4], metadata=b"FOO") == 0
t.ll_table.update_row(0)
assert t.flags[0] == 0
assert len(t.location) == 0
assert t.location_offset[0] == 0
assert len(t.parents) == 0
assert t.parents_offset[0] == 0
assert len(t.metadata) == 0
assert t.metadata_offset[0] == 0
t = tskit.NodeTable()
assert (
t.add_row(flags=1, time=2, population=3, individual=4, metadata=b"FOO") == 0
)
t.ll_table.update_row(0)
assert t.time[0] == 0
assert t.flags[0] == 0
assert t.population[0] == tskit.NULL
assert t.individual[0] == tskit.NULL
assert len(t.metadata) == 0
assert t.metadata_offset[0] == 0
t = tskit.EdgeTable()
assert t.add_row(1, 2, 3, 4, metadata=b"FOO") == 0
t.ll_table.update_row(0, 1, 2, 3, 4)
assert len(t.metadata) == 0
assert t.metadata_offset[0] == 0
t = tskit.MigrationTable()
assert t.add_row(1, 2, 3, 4, 5, 6, b"FOO") == 0
t.ll_table.update_row(0, 1, 2, 3, 4, 5, 6)
assert len(t.metadata) == 0
assert t.metadata_offset[0] == 0
t = tskit.MutationTable()
assert t.add_row(1, 2, "A", 3, b"FOO", 4) == 0
t.ll_table.update_row(0, 1, 2, "A", 3)
assert len(t.metadata) == 0
assert t.metadata_offset[0] == 0
assert tskit.is_unknown_time(t.time[0])
t = tskit.PopulationTable()
assert t.add_row(b"FOO") == 0
t.ll_table.update_row(0)
assert len(t.metadata) == 0
assert t.metadata_offset[0] == 0
def test_update_bad_data(self):
t = tskit.IndividualTable()
t.add_row()
with pytest.raises(TypeError):
t.ll_table.update_row(0, flags="x")
with pytest.raises(TypeError):
t.ll_table.update_row(0, metadata=123)
with pytest.raises(ValueError):
t.ll_table.update_row(0, location="1234")
with pytest.raises(ValueError):
t.ll_table.update_row(0, parents="forty-two")
t = tskit.NodeTable()
t.add_row()
with pytest.raises(TypeError):
t.ll_table.update_row(0, flags="x")
with pytest.raises(TypeError):
t.ll_table.update_row(0, time="x")
with pytest.raises(TypeError):
t.ll_table.update_row(0, individual="x")
with pytest.raises(TypeError):
t.ll_table.update_row(0, population="x")
with pytest.raises(TypeError):
t.ll_table.update_row(0, metadata=123)
t = tskit.EdgeTable()
t.add_row(1, 2, 3, 4)
with pytest.raises(TypeError):
t.ll_table.update_row(0, left="x", right=0, parent=0, child=0)
with pytest.raises(TypeError):
t.ll_table.update_row(
0,
)
with pytest.raises(TypeError):
t.ll_table.update_row(0, 0, 0, 0, 0, metadata=123)
t = tskit.SiteTable()
t.add_row(0, "A")
with pytest.raises(TypeError):
t.ll_table.update_row(0, "x", "A")
with pytest.raises(TypeError):
t.ll_table.update_row(0, 0, 0)
with pytest.raises(TypeError):
t.ll_table.update_row(0, 0, "A", metadata=[0, 1, 2])
t = tskit.MutationTable()
t.add_row(0, 0, "A")
with pytest.raises(TypeError):
t.ll_table.update_row(0, "0", 0, "A")
with pytest.raises(TypeError):
t.ll_table.update_row(0, 0, "0", "A")
with pytest.raises(TypeError):
t.ll_table.update_row(0, 0, 0, "A", parent=None)
with pytest.raises(TypeError):
t.ll_table.update_row(0, 0, 0, "A", metadata=[0])
with pytest.raises(TypeError):
t.ll_table.update_row(0, 0, 0, "A", time="A")
t = tskit.MigrationTable()
with pytest.raises(TypeError):
t.add_row(left="x", right=0, node=0, source=0, dest=0, time=0)
with pytest.raises(TypeError):
t.ll_table.update_row(
0,
)
with pytest.raises(TypeError):
t.ll_table.update_row(0, 0, 0, 0, 0, 0, 0, metadata=123)
t = tskit.ProvenanceTable()
t.add_row("a", "b")
with pytest.raises(TypeError):
t.ll_table.update_row(0, 0, "b")
with pytest.raises(TypeError):
t.ll_table.update_row(0, "a", 0)
t = tskit.PopulationTable()
t.add_row()
with pytest.raises(TypeError):
t.ll_table.update_row(0, metadata=[0])
class TestTableMethodsErrors:
"""
Tests for the error handling of errors in the low-level tables.
"""
def yield_tables(self, ts):
for table in ts.tables.name_map.values():
yield table.ll_table
@pytest.mark.parametrize(
"table_name",
tskit.TABLE_NAMES,
)
def test_table_extend_bad_args(self, ts_fixture, table_name):
table = getattr(ts_fixture.tables, table_name)
ll_table = table.ll_table
ll_table_copy = table.copy().ll_table
with pytest.raises(
_tskit.LibraryError,
match="Tables can only be extended using rows from a different table",
):
ll_table.extend(ll_table, row_indexes=[])
with pytest.raises(TypeError):
ll_table.extend(None, row_indexes=[])
with pytest.raises(ValueError):
ll_table.extend(ll_table_copy, row_indexes=5)
with pytest.raises(TypeError):
ll_table.extend(ll_table_copy, row_indexes=[None])
with pytest.raises(ValueError, match="object too deep"):
ll_table.extend(ll_table_copy, row_indexes=[[0, 1], [2, 3]])
with pytest.raises(ValueError, match="object too deep"):
ll_table.extend(ll_table_copy, row_indexes=[[0, 1]])
with pytest.raises(_tskit.LibraryError, match="out of bounds"):
ll_table.extend(ll_table_copy, row_indexes=[-1])
with pytest.raises(_tskit.LibraryError, match="out of bounds"):
ll_table.extend(ll_table_copy, row_indexes=[1000])
with pytest.raises(_tskit.LibraryError, match="out of bounds"):
ll_table.extend(ll_table_copy, row_indexes=range(10000000, 10000001))
# Uncastable types
for dtype in [np.uint32, np.int64, np.uint64, np.float32, np.float64]:
with pytest.raises(TypeError, match="Cannot cast"):
ll_table.extend(ll_table_copy, row_indexes=np.array([0], dtype=dtype))
@pytest.mark.parametrize("table_name", tskit.TABLE_NAMES)
def test_update_bad_row_index(self, ts_fixture, table_name):
table = getattr(ts_fixture.tables, table_name)
ll_table = table.ll_table
row_data = ll_table.get_row(0)
with pytest.raises(_tskit.LibraryError, match="out of bounds"):
ll_table.update_row(-1, *row_data)
with pytest.raises(ValueError, match="tskit ids must be"):
ll_table.update_row(-42, *row_data)
with pytest.raises(TypeError):
ll_table.update_row([], *row_data)
with pytest.raises(TypeError):
ll_table.update_row("abc", *row_data)
with pytest.raises(_tskit.LibraryError, match="out of bounds"):
ll_table.update_row(10000, *row_data)
with pytest.raises(OverflowError, match="Value too large for tskit id type"):
ll_table.update_row(2 ** 62, *row_data)
def test_equals_bad_args(self, ts_fixture):
for ll_table in self.yield_tables(ts_fixture):
assert ll_table.equals(ll_table)
with pytest.raises(TypeError):
ll_table.equals(None)
with pytest.raises(TypeError):
ll_table.equals(ll_table, no_such_arg="")
uninit_other = type(ll_table).__new__(type(ll_table))
with pytest.raises(SystemError):
ll_table.equals(uninit_other)
def test_get_row_bad_args(self, ts_fixture):
for ll_table in self.yield_tables(ts_fixture):
assert ll_table.get_row(0) is not None
with pytest.raises(TypeError):
ll_table.get_row(no_such_arg="")
@pytest.mark.parametrize("table", ["nodes", "individuals"])
def test_flag_underflow_overflow(self, table):
tables = _tskit.TableCollection(1)
table = getattr(tables, table)
table.add_row(flags=0)
table.add_row(flags=(1 << 32) - 1)
with pytest.raises(OverflowError, match="unsigned int32 >= than 2\\^32"):
table.add_row(flags=1 << 32)
with pytest.raises(OverflowError, match="int too big to convert"):
table.add_row(flags=1 << 64)
with pytest.raises(OverflowError, match="int too big to convert"):
table.add_row(flags=1 << 256)
with pytest.raises(
ValueError, match="Can't convert negative value to unsigned int"
):
table.add_row(flags=-1)
def test_index(self):
tc = msprime.simulate(10, random_seed=42).tables._ll_tables
assert tc.indexes["edge_insertion_order"].dtype == np.int32
assert tc.indexes["edge_removal_order"].dtype == np.int32
assert np.array_equal(
tc.indexes["edge_insertion_order"], np.arange(18, dtype=np.int32)
)
assert np.array_equal(
tc.indexes["edge_removal_order"], np.arange(18, dtype=np.int32)[::-1]
)
tc.drop_index()
assert tc.indexes == {}
tc.build_index()
assert np.array_equal(
tc.indexes["edge_insertion_order"], np.arange(18, dtype=np.int32)
)
assert np.array_equal(
tc.indexes["edge_removal_order"], np.arange(18, dtype=np.int32)[::-1]
)
modify_indexes = tc.indexes
modify_indexes["edge_insertion_order"] = np.arange(42, 42 + 18, dtype=np.int32)
modify_indexes["edge_removal_order"] = np.arange(
4242, 4242 + 18, dtype=np.int32
)
tc.indexes = modify_indexes
assert np.array_equal(
tc.indexes["edge_insertion_order"], np.arange(42, 42 + 18, dtype=np.int32)
)
assert np.array_equal(
tc.indexes["edge_removal_order"], np.arange(4242, 4242 + 18, dtype=np.int32)
)
def test_no_indexes(self):
tc = msprime.simulate(10, random_seed=42).tables._ll_tables
tc.drop_index()
assert tc.indexes == {}
def test_bad_indexes(self):
tc = msprime.simulate(10, random_seed=42).tables._ll_tables
for col in ("insertion", "removal"):
d = tc.indexes
d[f"edge_{col}_order"] = d[f"edge_{col}_order"][:-1]
with pytest.raises(
ValueError,
match="^edge_insertion_order and"
" edge_removal_order must be the same"
" length$",
):
tc.indexes = d
d = tc.indexes
for col in ("insertion", "removal"):
d[f"edge_{col}_order"] = d[f"edge_{col}_order"][:-1]
with pytest.raises(
ValueError,
match="^edge_insertion_order and edge_removal_order must be"
" the same length as the number of edges$",
):
tc.indexes = d
# Both columns must be provided, if one is
for col in ("insertion", "removal"):
d = tc.indexes
del d[f"edge_{col}_order"]
with pytest.raises(
TypeError,
match="^edge_insertion_order and "
"edge_removal_order must be specified "
"together$",
):
tc.indexes = d
tc = msprime.simulate(
10, recombination_rate=10, random_seed=42
).tables._ll_tables
modify_indexes = tc.indexes
shape = modify_indexes["edge_insertion_order"].shape
modify_indexes["edge_insertion_order"] = np.zeros(shape, dtype=np.int32)
modify_indexes["edge_removal_order"] = np.zeros(shape, dtype=np.int32)
tc.indexes = modify_indexes
ts = _tskit.TreeSequence()
with pytest.raises(
_tskit.LibraryError,
match="^Bad edges: contradictory children for a given"
" parent over an interval$",
):
ts.load_tables(tc, build_indexes=False)
modify_indexes["edge_insertion_order"] = np.full(shape, 2 ** 30, dtype=np.int32)
modify_indexes["edge_removal_order"] = np.full(shape, 2 ** 30, dtype=np.int32)
tc.indexes = modify_indexes
ts = _tskit.TreeSequence()
with pytest.raises(_tskit.LibraryError, match="^Edge out of bounds$"):
ts.load_tables(tc, build_indexes=False)
class TestTreeSequence(LowLevelTestCase, MetadataTestMixin):
"""
Tests for the low-level interface for the TreeSequence.
"""
def setUp(self):
fd, self.temp_file = tempfile.mkstemp(prefix="msp_ll_ts_")
os.close(fd)
def tearDown(self):
os.unlink(self.temp_file)
def test_file_errors(self):
ts1 = self.get_example_tree_sequence()
def loader(*args):
ts2 = _tskit.TreeSequence()
ts2.load(*args)
for func in [ts1.dump, loader]:
with pytest.raises(TypeError):
func()
for bad_type in [None, [], {}]:
with pytest.raises(TypeError):
func(bad_type)
def test_initial_state(self):
# Check the initial state to make sure that it is empty.
ts = _tskit.TreeSequence()
with pytest.raises(ValueError):
ts.get_num_samples()
with pytest.raises(ValueError):
ts.get_sequence_length()
with pytest.raises(ValueError):
ts.get_num_trees()
with pytest.raises(ValueError):
ts.get_num_edges()
with pytest.raises(ValueError):
ts.get_num_mutations()
with pytest.raises(ValueError):
ts.get_num_migrations()
with pytest.raises(ValueError):
ts.get_num_migrations()
with pytest.raises(ValueError):
ts.get_genotype_matrix()
with pytest.raises(ValueError):
ts.dump()
def test_num_nodes(self):
for ts in self.get_example_tree_sequences():
max_node = 0
for j in range(ts.get_num_edges()):
_, _, parent, child, _ = ts.get_edge(j)
for node in [parent, child]:
if node > max_node:
max_node = node
assert max_node + 1 == ts.get_num_nodes()
def test_dump_equality(self, tmp_path):
for ts in self.get_example_tree_sequences():
tables = _tskit.TableCollection(sequence_length=ts.get_sequence_length())
ts.dump_tables(tables)
tables.compute_mutation_times()
ts = _tskit.TreeSequence()
ts.load_tables(tables)
with open(tmp_path / "temp.trees", "wb") as f:
ts.dump(f)
with open(tmp_path / "temp.trees", "rb") as f:
ts2 = _tskit.TreeSequence()
ts2.load(f)
tc = _tskit.TableCollection(ts.get_sequence_length())
ts.dump_tables(tc)
tc2 = _tskit.TableCollection(ts2.get_sequence_length())
ts2.dump_tables(tc2)
assert tc.equals(tc2)
def verify_mutations(self, ts):
mutations = [ts.get_mutation(j) for j in range(ts.get_num_mutations())]
assert ts.get_num_mutations() > 0
assert len(mutations) == ts.get_num_mutations()
# Check the form of the mutations
for j, (position, nodes, index) in enumerate(mutations):
assert j == index
for node in nodes:
assert isinstance(node, int)
assert node >= 0
assert node <= ts.get_num_nodes()
assert isinstance(position, float)
assert position > 0
assert position < ts.get_sequence_length()
# mutations must be sorted by position order.
assert mutations == sorted(mutations)
def test_get_edge_interface(self):
for ts in self.get_example_tree_sequences():
num_edges = ts.get_num_edges()
# We don't accept Python negative indexes here.
with pytest.raises(IndexError):
ts.get_edge(-1)
for j in [0, 10, 10 ** 6]:
with pytest.raises(IndexError):
ts.get_edge(num_edges + j)
for x in [None, "", {}, []]:
with pytest.raises(TypeError):
ts.get_edge(x)
def test_get_node_interface(self):
for ts in self.get_example_tree_sequences():
num_nodes = ts.get_num_nodes()
# We don't accept Python negative indexes here.
with pytest.raises(IndexError):
ts.get_node(-1)
for j in [0, 10, 10 ** 6]:
with pytest.raises(IndexError):
ts.get_node(num_nodes + j)
for x in [None, "", {}, []]:
with pytest.raises(TypeError):
ts.get_node(x)
def test_get_genotype_matrix_interface(self):
for ts in self.get_example_tree_sequences():
num_samples = ts.get_num_samples()
num_sites = ts.get_num_sites()
G = ts.get_genotype_matrix()
assert G.shape == (num_sites, num_samples)
with pytest.raises(TypeError):
ts.get_genotype_matrix(isolated_as_missing=None)
with pytest.raises(TypeError):
ts.get_genotype_matrix(alleles="XYZ")
with pytest.raises(ValueError):
ts.get_genotype_matrix(alleles=tuple())
G = ts.get_genotype_matrix(isolated_as_missing=False)
assert G.shape == (num_sites, num_samples)
def test_get_genotype_matrix_missing_data(self):
tables = _tskit.TableCollection(1)
tables.nodes.add_row(flags=1, time=0)
tables.nodes.add_row(flags=1, time=0)
tables.sites.add_row(0.1, "A")
tables.build_index()
ts = _tskit.TreeSequence(0)
ts.load_tables(tables)
G = ts.get_genotype_matrix(isolated_as_missing=False)
assert np.all(G == 0)
G = ts.get_genotype_matrix(isolated_as_missing=True)
assert np.all(G == -1)
G = ts.get_genotype_matrix()
assert np.all(G == -1)
def test_get_migration_interface(self):
ts = self.get_example_migration_tree_sequence()
for bad_type in ["", None, {}]:
with pytest.raises(TypeError):
ts.get_migration(bad_type)
num_records = ts.get_num_migrations()
# We don't accept Python negative indexes here.
with pytest.raises(IndexError):
ts.get_migration(-1)
for j in [0, 10, 10 ** 6]:
with pytest.raises(IndexError):
ts.get_migration(num_records + j)
def test_get_samples(self):
for ts in self.get_example_tree_sequences():
# get_samples takes no arguments.
with pytest.raises(TypeError):
ts.get_samples(0)
assert np.array_equal(
np.arange(ts.get_num_samples(), dtype=np.int32), ts.get_samples()
)
def test_genealogical_nearest_neighbours(self):
for ts in self.get_example_tree_sequences():
with pytest.raises(TypeError):
ts.genealogical_nearest_neighbours()
with pytest.raises(TypeError):
ts.genealogical_nearest_neighbours(focal=None)
with pytest.raises(TypeError):
ts.genealogical_nearest_neighbours(
focal=ts.get_samples(),
reference_sets={},
)
with pytest.raises(ValueError):
ts.genealogical_nearest_neighbours(
focal=ts.get_samples(),
reference_sets=[],
)
bad_array_values = ["", {}, "x", [[[0], [1, 2]]]]
for bad_array_value in bad_array_values:
with pytest.raises(ValueError):
ts.genealogical_nearest_neighbours(
focal=bad_array_value,
reference_sets=[[0], [1]],
)
with pytest.raises(ValueError):
ts.genealogical_nearest_neighbours(
focal=ts.get_samples(),
reference_sets=[[0], bad_array_value],
)
with pytest.raises(ValueError):
ts.genealogical_nearest_neighbours(
focal=ts.get_samples(),
reference_sets=[bad_array_value],
)
focal = ts.get_samples()
A = ts.genealogical_nearest_neighbours(focal, [focal[2:], focal[:2]])
assert A.shape == (len(focal), 2)
def test_mean_descendants(self):
for ts in self.get_example_tree_sequences():
with pytest.raises(TypeError):
ts.mean_descendants()
with pytest.raises(TypeError):
ts.mean_descendants(reference_sets={})
with pytest.raises(ValueError):
ts.mean_descendants(reference_sets=[])
bad_array_values = ["", {}, "x", [[[0], [1, 2]]]]
for bad_array_value in bad_array_values:
with pytest.raises(ValueError):
ts.mean_descendants(
reference_sets=[[0], bad_array_value],
)
with pytest.raises(ValueError):
ts.mean_descendants(reference_sets=[bad_array_value])
focal = ts.get_samples()
A = ts.mean_descendants([focal[2:], focal[:2]])
assert A.shape == (ts.get_num_nodes(), 2)
def test_metadata_schemas(self):
tables = _tskit.TableCollection(1.0)
# Set the schema
for table_name in self.metadata_tables:
table = getattr(tables, f"{table_name}s")
table.metadata_schema = f"{table_name} test metadata schema"
# Read back via ll tree sequence
tables.build_index()
ts = _tskit.TreeSequence()
ts.load_tables(tables)
schemas = ts.get_table_metadata_schemas()
for table_name in self.metadata_tables:
assert getattr(schemas, table_name) == f"{table_name} test metadata schema"
# Clear and read back again
for table_name in self.metadata_tables:
getattr(tables, f"{table_name}s").metadata_schema = ""
ts = _tskit.TreeSequence()
ts.load_tables(tables)
schemas = ts.get_table_metadata_schemas()
for table_name in self.metadata_tables:
assert getattr(schemas, table_name) == ""
def test_metadata(self):
tables = _tskit.TableCollection(1)
tables.build_index()
ts = _tskit.TreeSequence()
ts.load_tables(tables)
assert ts.get_metadata() == b""
for value in [b"foo", b"", "💩".encode(), b"null char \0 in string"]:
tables.metadata = value
ts = _tskit.TreeSequence()
ts.load_tables(tables)
assert ts.get_metadata() == value
def test_metadata_schema(self):
tables = _tskit.TableCollection(1)
tables.build_index()
ts = _tskit.TreeSequence()
ts.load_tables(tables)
assert ts.get_metadata_schema() == ""
for value in ["foo", "", "💩", "null char \0 in string"]:
tables.metadata_schema = value
ts = _tskit.TreeSequence()
ts.load_tables(tables)
assert ts.get_metadata_schema() == value
def test_kc_distance_errors(self):
ts1 = self.get_example_tree_sequence(10)
with pytest.raises(TypeError):
ts1.get_kc_distance()
with pytest.raises(TypeError):
ts1.get_kc_distance(ts1)
for bad_tree in [None, "tree", 0]:
with pytest.raises(TypeError):
ts1.get_kc_distance(bad_tree, lambda_=0)
for bad_value in ["tree", [], None]:
with pytest.raises(TypeError):
ts1.get_kc_distance(ts1, lambda_=bad_value)
# Different numbers of samples fail.
ts2 = self.get_example_tree_sequence(11)
self.verify_kc_library_error(ts1, ts2)
# Different sequence lengths fail.
ts2 = self.get_example_tree_sequence(10, length=11)
self.verify_kc_library_error(ts1, ts2)
def verify_kc_library_error(self, ts1, ts2):
with pytest.raises(_tskit.LibraryError):
ts1.get_kc_distance(ts2, 0)
def test_kc_distance(self):
ts1 = self.get_example_tree_sequence(10, random_seed=123456)
ts2 = self.get_example_tree_sequence(10, random_seed=1234)
for lambda_ in [-1, 0, 1, 1000, -1e300]:
x1 = ts1.get_kc_distance(ts2, lambda_)
x2 = ts2.get_kc_distance(ts1, lambda_)
assert x1 == x2
def test_load_tables_build_indexes(self):
for ts in self.get_example_tree_sequences():
tables = _tskit.TableCollection(sequence_length=ts.get_sequence_length())
ts.dump_tables(tables)
tables.drop_index()
# Indexes not in the table collection, but rebuilt on load
ts2 = _tskit.TreeSequence()
ts2.load_tables(tables, build_indexes=True)
tables2 = _tskit.TableCollection(sequence_length=ts.get_sequence_length())
ts2.dump_tables(tables2)
assert tables2.has_index()
# Indexes not in the table collection and not rebuilt, so loading errors
ts3 = _tskit.TreeSequence()
with pytest.raises(
_tskit.LibraryError, match="Table collection must be indexed"
):
ts3.load_tables(tables)
# Indexes already in the table collection, not rebuilt
tables.build_index()
ts4 = _tskit.TreeSequence()
ts4.load_tables(tables, build_indexes=False)
tables4 = _tskit.TableCollection(sequence_length=ts.get_sequence_length())
ts4.dump_tables(tables4)
assert tables4.has_index()
def test_clear_table(self, ts_fixture):
tables = _tskit.TableCollection(
sequence_length=ts_fixture.get_sequence_length()
)
ts_fixture.ll_tree_sequence.dump_tables(tables)
tables.clear()
data_tables = [t for t in tskit.TABLE_NAMES if t != "provenances"]
for table in data_tables:
assert getattr(tables, f"{table}").num_rows == 0
assert len(getattr(tables, f"{table}").metadata_schema) != 0
assert tables.provenances.num_rows > 0
assert len(tables.metadata) > 0
assert len(tables.metadata_schema) > 0
tables.clear(clear_provenance=True)
assert tables.provenances.num_rows == 0
for table in data_tables:
assert len(getattr(tables, f"{table}").metadata_schema) != 0
assert len(tables.metadata) > 0
assert len(tables.metadata_schema) > 0
tables.clear(clear_metadata_schemas=True)
for table in data_tables:
assert len(getattr(tables, f"{table}").metadata_schema) == 0
assert len(tables.metadata) > 0
assert len(tables.metadata_schema) > 0
tables.clear(clear_ts_metadata_and_schema=True)
assert len(tables.metadata) == 0
assert len(tables.metadata_schema) == 0
class StatsInterfaceMixin:
"""
Tests for the interface on specific stats.
"""
def test_mode_errors(self):
_, f, params = self.get_example()
for bad_mode in ["", "not a mode", "SITE", "x" * 8192]:
with pytest.raises(ValueError):
f(mode=bad_mode, **params)
for bad_type in [123, {}, None, [[]]]:
with pytest.raises(TypeError):
f(mode=bad_type, **params)
def test_window_errors(self):
ts, f, params = self.get_example()
del params["windows"]
for bad_array in ["asdf", None, [[[[]], [[]]]], np.zeros((10, 3, 4))]:
with pytest.raises(ValueError):
f(windows=bad_array, **params)
for bad_windows in [[], [0]]:
with pytest.raises(ValueError):
f(windows=bad_windows, **params)
L = ts.get_sequence_length()
bad_windows = [
[L, 0],
[0.1, L],
[-1, L],
[0, L + 0.1],
[0, 0.1, 0.1, L],
[0, -1, L],
[0, 0.1, 0.05, 0.2, L],
]
for bad_window in bad_windows:
with pytest.raises(_tskit.LibraryError):
f(windows=bad_window, **params)
def test_windows_output(self):
ts, f, params = self.get_example()
del params["windows"]
for num_windows in range(1, 10):
windows = np.linspace(0, ts.get_sequence_length(), num=num_windows + 1)
assert windows.shape[0] == num_windows + 1
sigma = f(windows=windows, **params)
assert sigma.shape[0] == num_windows
class WeightMixin(StatsInterfaceMixin):
def get_example(self):
ts, method = self.get_method()
params = {
"weights": np.ones((ts.get_num_samples(), 2)),
"windows": [0, ts.get_sequence_length()],
}
return ts, method, params
def test_bad_weights(self):
ts, f, params = self.get_example()
del params["weights"]
n = ts.get_num_samples()
with pytest.raises(_tskit.LibraryError):
f(weights=np.ones((n, 0)), **params)
for bad_weight_shape in [(n - 1, 1), (n + 1, 1), (0, 3)]:
with pytest.raises(ValueError):
f(weights=np.ones(bad_weight_shape), **params)
def test_output_dims(self):
ts, method, params = self.get_example()
weights = params["weights"]
nw = weights.shape[1]
windows = [0, ts.get_sequence_length()]
for mode in ["site", "branch"]:
out = method(weights[:, [0]], windows, mode=mode)
assert out.shape == (1, 1)
out = method(weights, windows, mode=mode)
assert out.shape == (1, nw)
out = method(weights[:, [0, 0, 0]], windows, mode=mode)
assert out.shape == (1, 3)
mode = "node"
N = ts.get_num_nodes()
out = method(weights[:, [0]], windows, mode=mode)
assert out.shape == (1, N, 1)
out = method(weights, windows, mode=mode)
assert out.shape == (1, N, nw)
out = method(weights[:, [0, 0, 0]], windows, mode=mode)
assert out.shape == (1, N, 3)
class WeightCovariateMixin(StatsInterfaceMixin):
def get_example(self):
ts, method = self.get_method()
params = {
"weights": np.ones((ts.get_num_samples(), 2)),
"covariates": np.array(
[np.arange(ts.get_num_samples()), np.arange(ts.get_num_samples()) ** 2]
).T,
"windows": [0, ts.get_sequence_length()],
}
return ts, method, params
def test_output_dims(self):
ts, method, params = self.get_example()
weights = params["weights"]
nw = weights.shape[1]
windows = [0, ts.get_sequence_length()]
for covariates in (params["covariates"], params["covariates"][:, :0]):
for mode in ["site", "branch"]:
out = method(weights[:, [0]], covariates, windows, mode=mode)
assert out.shape == (1, 1)
out = method(weights, covariates, windows, mode=mode)
assert out.shape == (1, nw)
out = method(weights[:, [0, 0, 0]], covariates, windows, mode=mode)
assert out.shape == (1, 3)
mode = "node"
N = ts.get_num_nodes()
out = method(weights[:, [0]], covariates, windows, mode=mode)
assert out.shape == (1, N, 1)
out = method(weights, covariates, windows, mode=mode)
assert out.shape == (1, N, nw)
out = method(weights[:, [0, 0, 0]], covariates, windows, mode=mode)
assert out.shape == (1, N, 3)
class SampleSetMixin(StatsInterfaceMixin):
def test_bad_sample_sets(self):
ts, f, params = self.get_example()
del params["sample_set_sizes"]
del params["sample_sets"]
with pytest.raises(_tskit.LibraryError):
f(sample_sets=[], sample_set_sizes=[], **params)
n = ts.get_num_samples()
samples = ts.get_samples()
for bad_set_sizes in [[], [1], [n - 1], [n + 1], [n - 3, 1, 1], [1, n - 2]]:
with pytest.raises(ValueError):
f(sample_set_sizes=bad_set_sizes, sample_sets=samples, **params)
N = ts.get_num_nodes()
for bad_node in [-1, N, N + 1, -N]:
with pytest.raises(_tskit.LibraryError):
f(sample_set_sizes=[2], sample_sets=[0, bad_node], **params)
for bad_sample in [n, n + 1, N - 1]:
with pytest.raises(_tskit.LibraryError):
f(sample_set_sizes=[2], sample_sets=[0, bad_sample], **params)
class OneWaySampleStatsMixin(SampleSetMixin):
"""
Tests for one-way sample stats.
"""
def get_example(self):
ts, method = self.get_method()
params = {
"sample_set_sizes": [ts.get_num_samples()],
"sample_sets": ts.get_samples(),
"windows": [0, ts.get_sequence_length()],
}
return ts, method, params
def test_basic_example(self):
ts, method = self.get_method()
result = method(
[ts.get_num_samples()], ts.get_samples(), [0, ts.get_sequence_length()]
)
assert result.shape == (1, 1)
result = method(
[ts.get_num_samples()],
ts.get_samples(),
[0, ts.get_sequence_length()],
mode="node",
)
assert result.shape == (1, ts.get_num_nodes(), 1)
result = method(
[ts.get_num_samples()], ts.get_samples(), ts.get_breakpoints(), mode="node"
)
assert result.shape == (ts.get_num_trees(), ts.get_num_nodes(), 1)
def test_output_dims(self):
ts, method = self.get_method()
samples = ts.get_samples()
windows = [0, ts.get_sequence_length()]
n = len(samples)
for mode in ["site", "branch"]:
pi = method([n], samples, windows, mode=mode)
assert pi.shape == (1, 1)
pi = method([2, n - 2], samples, windows, mode=mode)
assert pi.shape == (1, 2)
pi = method([2, 2, n - 4], samples, windows, mode=mode)
assert pi.shape == (1, 3)
pi = method(np.ones(n).astype(np.uint32), samples, windows, mode=mode)
assert pi.shape == (1, n)
mode = "node"
N = ts.get_num_nodes()
pi = method([n], samples, windows, mode=mode)
assert pi.shape == (1, N, 1)
pi = method([2, n - 2], samples, windows, mode=mode)
assert pi.shape == (1, N, 2)
pi = method([2, 2, n - 4], samples, windows, mode=mode)
assert pi.shape == (1, N, 3)
pi = method(np.ones(n).astype(np.uint32), samples, windows, mode=mode)
assert pi.shape == (1, N, n)
def test_polarised(self):
# TODO move this to the top level.
ts, method = self.get_method()
samples = ts.get_samples()
n = len(samples)
windows = [0, ts.get_sequence_length()]
method([n], samples, windows, polarised=True)
method([n], samples, windows, polarised=False)
class TestDiversity(LowLevelTestCase, OneWaySampleStatsMixin):
"""
Tests for the diversity method.
"""
def get_method(self):
ts = self.get_example_tree_sequence()
return ts, ts.diversity
class TestTraitCovariance(LowLevelTestCase, WeightMixin):
"""
Tests for trait covariance.
"""
def get_method(self):
ts = self.get_example_tree_sequence()
return ts, ts.trait_covariance
class TestTraitCorrelation(LowLevelTestCase, WeightMixin):
"""
Tests for trait correlation.
"""
def get_method(self):
ts = self.get_example_tree_sequence()
return ts, ts.trait_correlation
class TestTraitLinearModel(LowLevelTestCase, WeightCovariateMixin):
"""
Tests for the trait linear model.
"""
def get_method(self):
ts = self.get_example_tree_sequence()
return ts, ts.trait_linear_model
class TestSegregatingSites(LowLevelTestCase, OneWaySampleStatsMixin):
"""
Tests for the segregating sites method.
"""
def get_method(self):
ts = self.get_example_tree_sequence()
return ts, ts.segregating_sites
class TestY1(LowLevelTestCase, OneWaySampleStatsMixin):
"""
Tests for the Y1 method.
"""
def get_method(self):
ts = self.get_example_tree_sequence()
return ts, ts.Y1
class TestAlleleFrequencySpectrum(LowLevelTestCase, OneWaySampleStatsMixin):
"""
Tests for the allele frequency spectrum method.
"""
def get_method(self):
ts = self.get_example_tree_sequence()
return ts, ts.allele_frequency_spectrum
def test_basic_example(self):
ts = self.get_example_tree_sequence()
n = ts.get_num_samples()
result = ts.allele_frequency_spectrum(
[n], ts.get_samples(), [0, ts.get_sequence_length()]
)
assert result.shape == (1, n + 1)
result = ts.allele_frequency_spectrum(
[n], ts.get_samples(), [0, ts.get_sequence_length()], polarised=True
)
assert result.shape == (1, n + 1)
def test_output_dims(self):
ts = self.get_example_tree_sequence()
samples = ts.get_samples()
L = ts.get_sequence_length()
n = len(samples)
for mode in ["site", "branch"]:
for s in [[n], [n - 2, 2], [n - 4, 2, 2], [1] * n]:
s = np.array(s, dtype=np.uint32)
windows = [0, L]
for windows in [[0, L], [0, L / 2, L], np.linspace(0, L, num=10)]:
jafs = ts.allele_frequency_spectrum(
s, samples, windows, mode=mode, polarised=True
)
assert jafs.shape == tuple([len(windows) - 1] + list(s + 1))
jafs = ts.allele_frequency_spectrum(
s, samples, windows, mode=mode, polarised=False
)
assert jafs.shape == tuple([len(windows) - 1] + list(s + 1))
def test_node_mode_not_supported(self):
ts = self.get_example_tree_sequence()
with pytest.raises(_tskit.LibraryError):
ts.allele_frequency_spectrum(
[ts.get_num_samples()],
ts.get_samples(),
[0, ts.get_sequence_length()],
mode="node",
)
class TwoWaySampleStatsMixin(SampleSetMixin):
"""
Tests for the two way sample stats.
"""
def get_example(self):
ts, method = self.get_method()
params = {
"sample_set_sizes": [2, ts.get_num_samples() - 2],
"sample_sets": ts.get_samples(),
"indexes": [[0, 1]],
"windows": [0, ts.get_sequence_length()],
}
return ts, method, params
def test_basic_example(self):
ts, method = self.get_method()
div = method(
[2, ts.get_num_samples() - 2],
ts.get_samples(),
[[0, 1]],
windows=[0, ts.get_sequence_length()],
)
assert div.shape == (1, 1)
def test_output_dims(self):
ts, method = self.get_method()
samples = ts.get_samples()
windows = [0, ts.get_sequence_length()]
n = len(samples)
for mode in ["site", "branch"]:
div = method([2, 2, n - 4], samples, [[0, 1]], windows, mode=mode)
assert div.shape == (1, 1)
div = method([2, 2, n - 4], samples, [[0, 1], [1, 2]], windows, mode=mode)
assert div.shape == (1, 2)
div = method(
[2, 2, n - 4], samples, [[0, 1], [1, 2], [0, 1]], windows, mode=mode
)
assert div.shape == (1, 3)
N = ts.get_num_nodes()
mode = "node"
div = method([2, 2, n - 4], samples, [[0, 1]], windows, mode=mode)
assert div.shape == (1, N, 1)
div = method([2, 2, n - 4], samples, [[0, 1], [1, 2]], windows, mode=mode)
assert div.shape == (1, N, 2)
div = method(
[2, 2, n - 4], samples, [[0, 1], [1, 2], [0, 1]], windows, mode=mode
)
assert div.shape == (1, N, 3)
def test_set_index_errors(self):
ts, method = self.get_method()
samples = ts.get_samples()
windows = [0, ts.get_sequence_length()]
n = len(samples)
def f(indexes):
method([2, 2, n - 4], samples, indexes, windows)
for bad_array in ["wer", {}, [[[], []], [[], []]]]:
with pytest.raises(ValueError):
f(bad_array)
for bad_dim in [[[]], [[1], [1]]]:
with pytest.raises(ValueError):
f(bad_dim)
class ThreeWaySampleStatsMixin(SampleSetMixin):
"""
Tests for the three way sample stats.
"""
def get_example(self):
ts, method = self.get_method()
params = {
"sample_set_sizes": [1, 1, ts.get_num_samples() - 2],
"sample_sets": ts.get_samples(),
"indexes": [[0, 1, 2]],
"windows": [0, ts.get_sequence_length()],
}
return ts, method, params
def test_basic_example(self):
ts, method = self.get_method()
div = method(
[1, 1, ts.get_num_samples() - 2],
ts.get_samples(),
[[0, 1, 2]],
windows=[0, ts.get_sequence_length()],
)
assert div.shape == (1, 1)
def test_output_dims(self):
ts, method = self.get_method()
samples = ts.get_samples()
windows = [0, ts.get_sequence_length()]
n = len(samples)
for mode in ["site", "branch"]:
div = method([2, 2, n - 4], samples, [[0, 1, 2]], windows, mode=mode)
assert div.shape == (1, 1)
div = method(
[1, 1, 2, n - 4], samples, [[0, 1, 2], [1, 2, 3]], windows, mode=mode
)
assert div.shape == (1, 2)
div = method(
[1, 1, 2, n - 4],
samples,
[[0, 1, 2], [1, 2, 3], [0, 1, 2]],
windows,
mode=mode,
)
assert div.shape == (1, 3)
N = ts.get_num_nodes()
mode = "node"
div = method([2, 2, n - 4], samples, [[0, 1, 2]], windows, mode=mode)
assert div.shape == (1, N, 1)
div = method(
[1, 1, 2, n - 4], samples, [[0, 1, 2], [1, 2, 3]], windows, mode=mode
)
assert div.shape == (1, N, 2)
div = method(
[1, 1, 2, n - 4],
samples,
[[0, 1, 2], [1, 2, 3], [0, 1, 2]],
windows,
mode=mode,
)
assert div.shape == (1, N, 3)
def test_set_index_errors(self):
ts, method = self.get_method()
samples = ts.get_samples()
windows = [0, ts.get_sequence_length()]
n = len(samples)
def f(indexes):
method([2, 2, n - 4], samples, indexes, windows)
for bad_array in ["wer", {}, [[[], []], [[], []]]]:
with pytest.raises(ValueError):
f(bad_array)
for bad_dim in [[[]], [[1], [1]], [(0, 1)], [(0, 1, 2, 3)]]:
with pytest.raises(ValueError):
f(bad_dim)
class FourWaySampleStatsMixin(SampleSetMixin):
"""
Tests for the four way sample stats.
"""
def get_example(self):
ts, method = self.get_method()
params = {
"sample_set_sizes": [1, 1, 1, ts.get_num_samples() - 3],
"sample_sets": ts.get_samples(),
"indexes": [[0, 1, 2, 3]],
"windows": [0, ts.get_sequence_length()],
}
return ts, method, params
def test_basic_example(self):
ts, method = self.get_method()
div = method(
[1, 1, 1, ts.get_num_samples() - 3],
ts.get_samples(),
[[0, 1, 2, 3]],
windows=[0, ts.get_sequence_length()],
)
assert div.shape == (1, 1)
def test_output_dims(self):
ts, method = self.get_method()
samples = ts.get_samples()
windows = [0, ts.get_sequence_length()]
n = len(samples)
for mode in ["site", "branch"]:
div = method([2, 1, 1, n - 4], samples, [[0, 1, 2, 3]], windows, mode=mode)
assert div.shape == (1, 1)
div = method(
[1, 1, 1, 1, n - 4],
samples,
[[0, 1, 2, 3], [1, 2, 3, 4]],
windows,
mode=mode,
)
assert div.shape == (1, 2)
div = method(
[1, 1, 1, 1, n - 4],
samples,
[[0, 1, 2, 3], [1, 2, 3, 4], [0, 1, 2, 4]],
windows,
mode=mode,
)
assert div.shape == (1, 3)
N = ts.get_num_nodes()
mode = "node"
div = method([2, 1, 1, n - 4], samples, [[0, 1, 2, 3]], windows, mode=mode)
assert div.shape == (1, N, 1)
div = method(
[1, 1, 1, 1, n - 4],
samples,
[[0, 1, 2, 3], [1, 2, 3, 4]],
windows,
mode=mode,
)
assert div.shape == (1, N, 2)
div = method(
[1, 1, 1, 1, n - 4],
samples,
[[0, 1, 2, 3], [1, 2, 3, 4], [0, 1, 2, 4]],
windows,
mode=mode,
)
assert div.shape == (1, N, 3)
def test_set_index_errors(self):
ts, method = self.get_method()
samples = ts.get_samples()
windows = [0, ts.get_sequence_length()]
n = len(samples)
def f(indexes):
method([2, 1, 1, n - 4], samples, indexes, windows)
for bad_array in ["wer", {}, [[[], []], [[], []]]]:
with pytest.raises(ValueError):
f(bad_array)
for bad_dim in [[[]], [[1], [1]], [(0, 1)], [(0, 1, 2, 3, 4)]]:
with pytest.raises(ValueError):
f(bad_dim)
class TestDivergence(LowLevelTestCase, TwoWaySampleStatsMixin):
def get_method(self):
ts = self.get_example_tree_sequence()
return ts, ts.divergence
class TestY2(LowLevelTestCase, TwoWaySampleStatsMixin):
def get_method(self):
ts = self.get_example_tree_sequence()
return ts, ts.Y2
class Testf2(LowLevelTestCase, TwoWaySampleStatsMixin):
def get_method(self):
ts = self.get_example_tree_sequence()
return ts, ts.f2
class TestY3(LowLevelTestCase, ThreeWaySampleStatsMixin):
def get_method(self):
ts = self.get_example_tree_sequence()
return ts, ts.Y3
class Testf3(LowLevelTestCase, ThreeWaySampleStatsMixin):
def get_method(self):
ts = self.get_example_tree_sequence()
return ts, ts.f3
class Testf4(LowLevelTestCase, FourWaySampleStatsMixin):
def get_method(self):
ts = self.get_example_tree_sequence()
return ts, ts.f4
class TestGeneralStatsInterface(LowLevelTestCase, StatsInterfaceMixin):
"""
Tests for the general stats interface.
"""
def get_example(self):
ts = self.get_example_tree_sequence()
W = np.zeros((ts.get_num_samples(), 1))
params = {
"weights": W,
"summary_func": lambda x: np.cumsum(x),
"output_dim": 1,
"windows": ts.get_breakpoints(),
}
return ts, ts.general_stat, params
def test_basic_example(self):
ts = self.get_example_tree_sequence()
W = np.zeros((ts.get_num_samples(), 1))
sigma = ts.general_stat(
W, lambda x: np.cumsum(x), 1, ts.get_breakpoints(), mode="branch"
)
assert sigma.shape == (ts.get_num_trees(), 1)
def test_non_numpy_return(self):
ts = self.get_example_tree_sequence()
W = np.ones((ts.get_num_samples(), 3))
sigma = ts.general_stat(
W, lambda x: [sum(x)], 1, ts.get_breakpoints(), mode="branch"
)
assert sigma.shape == (ts.get_num_trees(), 1)
sigma = ts.general_stat(
W, lambda x: [2, 2], 2, ts.get_breakpoints(), mode="branch"
)
assert sigma.shape == (ts.get_num_trees(), 2)
def test_complicated_numpy_function(self):
ts = self.get_example_tree_sequence(sample_size=20, length=30, random_seed=325)
W = np.zeros((ts.get_num_samples(), 4))
def f(x):
y = np.sum(x * x), np.prod(x + np.arange(x.shape[0]))
return y
sigma = ts.general_stat(W, f, 2, ts.get_breakpoints(), mode="branch")
assert sigma.shape == (ts.get_num_trees(), 2)
def test_input_dims(self):
ts = self.get_example_tree_sequence()
for k in range(1, 20):
W = np.zeros((ts.get_num_samples(), k))
sigma = ts.general_stat(
W, lambda x: np.cumsum(x), k, ts.get_breakpoints(), mode="branch"
)
assert sigma.shape == (ts.get_num_trees(), k)
sigma = ts.general_stat(
W, lambda x: [np.sum(x)], 1, ts.get_breakpoints(), mode="branch"
)
assert sigma.shape == (ts.get_num_trees(), 1)
def test_W_errors(self):
ts = self.get_example_tree_sequence()
n = ts.get_num_samples()
for bad_array in [[], [0, 1], [[[[]], [[]]]], np.zeros((10, 3, 4))]:
with pytest.raises(ValueError):
ts.general_stat(bad_array, lambda x: x, 1, ts.get_breakpoints())
for bad_size in [n - 1, n + 1, 0]:
W = np.zeros((bad_size, 1))
with pytest.raises(ValueError):
ts.general_stat(W, lambda x: x, 1, ts.get_breakpoints())
def test_summary_func_errors(self):
ts = self.get_example_tree_sequence()
W = np.zeros((ts.get_num_samples(), 1))
for bad_type in ["sdf", 1, {}]:
with pytest.raises(TypeError):
ts.general_stat(W, bad_type, 1, ts.get_breakpoints())
# Wrong numbers of arguments to f
with pytest.raises(TypeError):
ts.general_stat(W, lambda: 0, 1, ts.get_breakpoints())
with pytest.raises(TypeError):
ts.general_stat(W, lambda x, y: None, 1, ts.get_breakpoints())
# Exceptions within f are correctly raised.
for exception in [ValueError, TypeError]:
def f(x):
raise exception("test")
with pytest.raises(exception):
ts.general_stat(W, f, 1, ts.get_breakpoints())
# Wrong output dimensions
for bad_array in [[1, 1], range(10)]:
with pytest.raises(ValueError):
ts.general_stat(W, lambda x: bad_array, 1, ts.get_breakpoints())
with pytest.raises(ValueError):
ts.general_stat(W, lambda x: [1], 2, ts.get_breakpoints())
# Bad arrays returned from f
for bad_array in [["sdf"], 0, "w4", None]:
with pytest.raises(ValueError):
ts.general_stat(W, lambda x: bad_array, 1, ts.get_breakpoints())
class TestTreeDiffIterator(LowLevelTestCase):
"""
Tests for the low-level tree diff iterator.
"""
def test_uninitialised_tree_sequence(self):
ts = _tskit.TreeSequence()
with pytest.raises(ValueError):
_tskit.TreeDiffIterator(ts)
def test_constructor(self):
with pytest.raises(TypeError):
_tskit.TreeDiffIterator()
with pytest.raises(TypeError):
_tskit.TreeDiffIterator(None)
ts = self.get_example_tree_sequence()
before = list(_tskit.TreeDiffIterator(ts))
iterator = _tskit.TreeDiffIterator(ts)
del ts
# We should keep a reference to the tree sequence.
after = list(iterator)
assert before == after
def test_iterator(self):
ts = self.get_example_tree_sequence()
self.verify_iterator(_tskit.TreeDiffIterator(ts))
class TestVariantGenerator(LowLevelTestCase):
"""
Tests for the VariantGenerator class.
"""
def test_uninitialised_tree_sequence(self):
ts = _tskit.TreeSequence()
with pytest.raises(ValueError):
_tskit.VariantGenerator(ts)
def test_constructor(self):
with pytest.raises(TypeError):
_tskit.VariantGenerator()
with pytest.raises(TypeError):
_tskit.VariantGenerator(None)
ts = self.get_example_tree_sequence()
with pytest.raises(ValueError):
_tskit.VariantGenerator(ts, samples={})
with pytest.raises(TypeError):
_tskit.VariantGenerator(ts, impute_missing_data=None)
with pytest.raises(_tskit.LibraryError):
_tskit.VariantGenerator(ts, samples=[-1, 2])
with pytest.raises(TypeError):
_tskit.VariantGenerator(ts, alleles=1234)
def test_alleles(self):
ts = self.get_example_tree_sequence()
for bad_type in [["a", "b"], "sdf", 234]:
with pytest.raises(TypeError):
_tskit.VariantGenerator(ts, samples=[1, 2], alleles=bad_type)
with pytest.raises(ValueError):
_tskit.VariantGenerator(ts, samples=[1, 2], alleles=tuple())
for bad_allele_type in [None, 0, b"x", []]:
with pytest.raises(TypeError):
_tskit.VariantGenerator(ts, samples=[1, 2], alleles=(bad_allele_type,))
too_many_alleles = tuple(str(j) for j in range(128))
with pytest.raises(_tskit.LibraryError):
_tskit.VariantGenerator(ts, samples=[1, 2], alleles=too_many_alleles)
def test_iterator(self):
ts = self.get_example_tree_sequence()
self.verify_iterator(_tskit.VariantGenerator(ts))
def test_missing_data(self):
tables = _tskit.TableCollection(1)
tables.nodes.add_row(flags=1, time=0)
tables.nodes.add_row(flags=1, time=0)
tables.sites.add_row(0.1, "A")
tables.build_index()
ts = _tskit.TreeSequence(0)
ts.load_tables(tables)
variant = list(_tskit.VariantGenerator(ts))[0]
_, genotypes, alleles = variant
assert np.all(genotypes == -1)
assert alleles == ("A", None)
class TestLdCalculator(LowLevelTestCase):
"""
Tests for the LdCalculator class.
"""
def test_uninitialised_tree_sequence(self):
ts = _tskit.TreeSequence()
with pytest.raises(ValueError):
_tskit.LdCalculator(ts)
def test_constructor(self):
with pytest.raises(TypeError):
_tskit.LdCalculator()
with pytest.raises(TypeError):
_tskit.LdCalculator(None)
def test_get_r2(self):
ts = self.get_example_tree_sequence()
calc = _tskit.LdCalculator(ts)
n = ts.get_num_sites()
for bad_id in [-1, n, n + 1]:
with pytest.raises(_tskit.LibraryError):
calc.get_r2(0, bad_id)
with pytest.raises(_tskit.LibraryError):
calc.get_r2(bad_id, 0)
def test_get_r2_array(self):
ts = self.get_example_tree_sequence()
calc = _tskit.LdCalculator(ts)
with pytest.raises(TypeError):
calc.get_r2_array()
with pytest.raises(TypeError):
calc.get_r2_array(None)
# Doesn't support the buffer protocol, so raises TypeError
with pytest.raises(TypeError):
calc.get_r2_array(None, 0)
n = ts.get_num_sites()
assert n > 2
with pytest.raises(BufferError):
calc.get_r2_array(bytes(100), 0)
buff = bytearray(1024)
with pytest.raises(ValueError):
calc.get_r2_array(buff, 0, max_distance=-1)
with pytest.raises(ValueError):
calc.get_r2_array(buff, 0, direction=1000)
# TODO this API is poor, we should explicitly catch these negative
# size errors.
for bad_max_mutations in [-2, -3]:
with pytest.raises(BufferError):
calc.get_r2_array(buff, 0, max_mutations=bad_max_mutations)
for bad_start_pos in [-1, n, n + 1]:
with pytest.raises(_tskit.LibraryError):
calc.get_r2_array(buff, bad_start_pos)
class TestLsHmm(LowLevelTestCase):
"""
Tests for the LsHmm class.
"""
def test_uninitialised_tree_sequence(self):
ts = _tskit.TreeSequence()
with pytest.raises(ValueError):
_tskit.LsHmm(ts, None, None)
def test_constructor(self):
ts = self.get_example_tree_sequence()
with pytest.raises(TypeError):
_tskit.LsHmm()
with pytest.raises(TypeError):
_tskit.LsHmm(None)
values = np.zeros(ts.get_num_sites())
for bad_array in ["asdf", [[], []], None]:
with pytest.raises(ValueError):
_tskit.LsHmm(ts, bad_array, values)
with pytest.raises(ValueError):
_tskit.LsHmm(ts, values, bad_array)
def test_bad_rate_arrays(self):
ts = self.get_example_tree_sequence()
m = ts.get_num_sites()
assert m > 0
values = np.zeros(m)
for bad_size in [0, m - 1, m + 1, m + 2]:
bad_array = np.zeros(bad_size)
with pytest.raises(ValueError):
_tskit.LsHmm(ts, bad_array, values)
with pytest.raises(ValueError):
_tskit.LsHmm(ts, values, bad_array)
def test_haplotype_input(self):
ts = self.get_example_tree_sequence()
m = ts.get_num_sites()
fm = _tskit.CompressedMatrix(ts)
vm = _tskit.ViterbiMatrix(ts)
ls_hmm = _tskit.LsHmm(ts, np.zeros(m), np.zeros(m))
for bad_size in [0, m - 1, m + 1, m + 2]:
bad_array = np.zeros(bad_size, dtype=np.int8)
with pytest.raises(ValueError):
ls_hmm.forward_matrix(bad_array, fm)
with pytest.raises(ValueError):
ls_hmm.viterbi_matrix(bad_array, vm)
for bad_array in [[0.002], [[], []], None]:
with pytest.raises(ValueError):
ls_hmm.forward_matrix(bad_array, fm)
with pytest.raises(ValueError):
ls_hmm.viterbi_matrix(bad_array, vm)
def test_output_type_errors(self):
ts = self.get_example_tree_sequence()
m = ts.get_num_sites()
h = np.zeros(m, dtype=np.int8)
ls_hmm = _tskit.LsHmm(ts, np.zeros(m), np.zeros(m))
for bad_type in [ls_hmm, None, m, []]:
with pytest.raises(TypeError):
ls_hmm.forward_matrix(h, bad_type)
with pytest.raises(TypeError):
ls_hmm.viterbi_matrix(h, bad_type)
other_ts = self.get_example_tree_sequence()
output = _tskit.CompressedMatrix(other_ts)
with pytest.raises(_tskit.LibraryError):
ls_hmm.forward_matrix(h, output)
output = _tskit.ViterbiMatrix(other_ts)
with pytest.raises(_tskit.LibraryError):
ls_hmm.viterbi_matrix(h, output)
def test_empty_forward_matrix(self):
for mu in [0, 1]:
ts = self.get_example_tree_sequence(mutation_rate=mu)
m = ts.get_num_sites()
fm = _tskit.CompressedMatrix(ts)
assert fm.num_sites == m
assert np.array_equal(np.zeros(m), fm.normalisation_factor)
assert np.array_equal(np.zeros(m, dtype=np.uint32), fm.num_transitions)
F = fm.decode()
assert np.all(F >= 0)
for j in range(m):
assert fm.get_site(j) == []
def test_empty_viterbi_matrix(self):
for mu in [0, 1]:
ts = self.get_example_tree_sequence(mutation_rate=mu)
m = ts.get_num_sites()
vm = _tskit.ViterbiMatrix(ts)
assert vm.num_sites == m
# TODO we should have the same semantics for 0 sites
if m == 0:
h = vm.traceback()
assert len(h) == 0
else:
with pytest.raises(_tskit.LibraryError):
vm.traceback()
def verify_compressed_matrix(self, ts, output):
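# Sanity checks on a compressed matrix: normalisation factors strictly in
# (0, 1), at least one transition recorded per site, a decoded matrix of shape
# (num_sites, num_samples) with non-negative entries, and per-site lists of
# (node, value) pairs of the advertised length.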
S = output.normalisation_factor
N = output.num_transitions
assert np.all(0 < S)
assert np.all(S < 1)
assert np.all(N > 0)
F = output.decode()
assert F.shape == (ts.get_num_sites(), ts.get_num_samples())
assert np.all(F >= 0)
m = ts.get_num_sites()
for j in range(m):
site_list = output.get_site(j)
assert len(site_list) == N[j]
for item in site_list:
assert len(item) == 2
node, value = item
assert 0 <= node < ts.get_num_nodes()
assert 0 <= value <= 1
for site in [m, m + 1, 2 * m]:
with pytest.raises(ValueError):
output.get_site(site)
def test_forward_matrix(self):
ts = self.get_example_tree_sequence()
m = ts.get_num_sites()
output = _tskit.CompressedMatrix(ts)
ls_hmm = _tskit.LsHmm(ts, np.zeros(m), np.zeros(m))
"""
Author: <NAME>
Copyright: (C) 2019-2020 <http://www.dei.unipd.it/
Department of Information Engineering> (DEI), <http://www.unipd.it/ University of Padua>, Italy
License: <http://www.apache.org/licenses/LICENSE-2.0 Apache License, Version 2.0>
"""
import os
import torch
from model_3 import Model
import tensorflow as tf
import data_utils as du
import numpy as np
import util
PROG_NAME = 'MP_Prob_p_we'
def convert_we_dict_to_emb_matrix(wed, we_size=50):
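# Derive the vocabulary size from the largest key in `wed` and build a
# (max_k + 1, we_size) embedding matrix initialised uniformly in
# [-0.02, 0.02], with a fixed seed so the initialisation is reproducible.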
np.random.seed(0)
max_k = -1
for k, v in wed.items():
if int(k) > max_k:
max_k = int(k)
W_init_embed = np.float32(np.random.uniform(-0.02, 0.02, [max_k + 1, we_size]))
from logging import log
import numpy as np
import pandas as pd
from scipy import interpolate
import matplotlib.pyplot as plt
from matplotlib.backends.backend_tkagg import (FigureCanvasTkAgg, NavigationToolbar2Tk)
from matplotlib.backend_bases import key_press_handler
from matplotlib.figure import Figure
from matplotlib.font_manager import FontProperties
from matplotlib.ticker import ScalarFormatter
from flask import Flask, render_template, request
from tkinter import *
from tkinter import ttk
import sys
import os
import shutil
import random
from matplotlib.ticker import MaxNLocator
from pathlib import Path
import math
import copy
#from decimal import Decimal, ROUND_HALF_UP
def readinput(filename):
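# Parse a CSV with Symbol, Value and Unit columns into two dicts keyed by
# symbol: one with the numeric values, one with the unit strings.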
csv_input = pd.read_csv(filepath_or_buffer=filename, encoding="utf_8", sep=",")
symbol = csv_input['Symbol']
value = csv_input['Value']
unit = csv_input['Unit']
valueDict = {}
unitDict = {}
for i, j, k in zip(symbol, value, unit):
valueDict[i] = float(j)
unitDict[i] = str(k)
return valueDict, unitDict
def CeqLHVFunc(filename,fuelName):
csv_input = pd.read_csv(filepath_or_buffer=filename, encoding="utf_8", sep=",")
fuelType = csv_input['Fuel type']
CeqLHV = csv_input['CeqLHV']
fuelDict = {}
for i, j in zip(fuelType, CeqLHV):
fuelDict[i] = float(j)
return fuelDict[fuelName]
def Cco2Func(filename,fuelName):
csv_input = pd.read_csv(filepath_or_buffer=filename, encoding="utf_8", sep=",")
fuelType = csv_input['Fuel type']
Cco2 = csv_input['Cco2']
Cco2Dict = {}
for i, j in zip(fuelType, Cco2):
Cco2Dict[i] = float(j)
return Cco2Dict[fuelName]
def initialFleetFunc(filename):
csv_input = pd.read_csv(filepath_or_buffer=filename, encoding="utf_8", sep=",")
year = csv_input['Year']
TEU = csv_input['TEU']
iniFleetDict = {}
k = 0
for i, j in zip(year, TEU):
iniFleetDict.setdefault(k,{})
iniFleetDict[k]['year'] = int(i)
iniFleetDict[k]['TEU'] = float(j)
k += 1
return iniFleetDict
def decisionListFunc(filename):
csv_input = pd.read_csv(filepath_or_buffer=filename, encoding="utf_8", sep=",").fillna(0)
Year = csv_input['Year']
Order = csv_input['Order']
fuelType = csv_input['Fuel type']
WPS = csv_input['WPS']
SPS = csv_input['SPS']
CCS = csv_input['CCS']
CAP = csv_input['CAP']
Speed = csv_input['Speed']
Fee = csv_input['Fee']
valueDict = {}
for i, j, k, l, m, n, o, p, q in zip(Year, Order, fuelType, WPS, SPS, CCS, CAP, Speed, Fee):
valueDict.setdefault(int(i),{})
valueDict[int(i)]['Order'] = int(j)
valueDict[int(i)]['fuelType'] = k
valueDict[int(i)]['WPS'] = int(l)
valueDict[int(i)]['SPS'] = int(m)
valueDict[int(i)]['CCS'] = int(n)
valueDict[int(i)]['CAP'] = float(o)
valueDict[int(i)]['Speed'] = float(p)
valueDict[int(i)]['Fee'] = float(q)
return valueDict
def fleetPreparationFunc(fleetAll,initialFleetFile,numCompany,startYear,lastYear,elapsedYear,tOpSch,tbid,valueDict,NShipFleet,parameterFile2,parameterFile12,parameterFile3,parameterFile5):
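# Allocate the per-company aggregate time series (one slot per simulated year)
# and register the pre-existing fleet from the initial-fleet CSV as plain HFO
# ships, back-dating each order by the bidding lead time tbid.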
fleetAll.setdefault(numCompany,{})
fleetAll[numCompany].setdefault('total',{})
fleetAll[numCompany]['total']['sale'] = np.zeros(lastYear-startYear+1)
fleetAll[numCompany]['total']['g'] = np.zeros(lastYear-startYear+1)
fleetAll[numCompany]['total']['gTilde'] = np.zeros(lastYear-startYear+1)
fleetAll[numCompany]['total']['costTilde'] = np.zeros(lastYear-startYear+1)
fleetAll[numCompany]['total']['saleTilde'] = np.zeros(lastYear-startYear+1)
fleetAll[numCompany]['total']['cta'] = np.zeros(lastYear-startYear+1)
fleetAll[numCompany]['total']['overDi'] = np.zeros(lastYear-startYear+1)
fleetAll[numCompany]['total']['costShipBasicHFO'] = np.zeros(lastYear-startYear+1)
fleetAll[numCompany]['total']['costShip'] = np.zeros(lastYear-startYear+1)
fleetAll[numCompany]['total']['costFuel'] = np.zeros(lastYear-startYear+1)
fleetAll[numCompany]['total']['dcostFuel'] = np.zeros(lastYear-startYear+1)
fleetAll[numCompany]['total']['costAdd'] = np.zeros(lastYear-startYear+1)
fleetAll[numCompany]['total']['costAll'] = np.zeros(lastYear-startYear+1)
fleetAll[numCompany]['total']['maxCta'] = np.zeros(lastYear-startYear+1)
fleetAll[numCompany]['total']['rocc'] = np.zeros(lastYear-startYear+1)
fleetAll[numCompany]['total']['costRfrb'] = np.zeros(lastYear-startYear+1)
fleetAll[numCompany]['total']['dcostEco'] = np.zeros(lastYear-startYear+1)
#fleetAll[numCompany]['total']['dCostCnt'] = np.zeros(lastYear-startYear+1)
fleetAll[numCompany]['total']['costCnt'] = np.zeros(lastYear-startYear+1)
fleetAll[numCompany]['total']['nTransCnt'] = np.zeros(lastYear-startYear+1)
fleetAll[numCompany]['total']['atOnce'] = np.zeros(lastYear-startYear+1)
fleetAll[numCompany]['total']['mSubs'] = np.zeros(lastYear-startYear+1)
fleetAll[numCompany]['total']['mTax'] = np.zeros(lastYear-startYear+1)
fleetAll[numCompany]['total']['balance'] = np.zeros(lastYear-startYear+1)
fleetAll[numCompany]['total']['demand'] = np.zeros(lastYear-startYear+1)
fleetAll[numCompany]['total']['profit'] = np.zeros(lastYear-startYear+1)
fleetAll[numCompany]['total']['profitSum'] = np.zeros(lastYear-startYear+1)
fleetAll[numCompany]['total']['gSum'] = np.zeros(lastYear-startYear+1)
fleetAll[numCompany]['total']['Idx'] = np.zeros(lastYear-startYear+1)
fleetAll[numCompany]['total']['lastOrderFuel'] = 'HFO/Diesel'
fleetAll[numCompany]['total']['lastOrderCAP'] = 20000
initialFleets = initialFleetFunc(initialFleetFile)
for i in range(len(initialFleets)):
orderYear = initialFleets[i]['year'] - tbid
iniT = startYear - initialFleets[i]['year']
iniCAPcnt = initialFleets[i]['TEU']
fleetAll = orderShipFunc(fleetAll,numCompany,'HFO',0,0,0,iniCAPcnt,tOpSch,tbid,iniT,orderYear,elapsedYear,valueDict,NShipFleet,True,parameterFile2,parameterFile12,parameterFile3,parameterFile5)
return fleetAll
def unitCostFuelFunc(filename,fuelName,year):
csv_input = pd.read_csv(filepath_or_buffer=filename, encoding="utf_8", sep=",")
measureYear = np.array(csv_input['Year'],dtype='float64')
measureHFO = np.array(csv_input['HFO'],dtype='float64')
measure = np.array(csv_input[fuelName],dtype='float64')
fittedHFO = interpolate.interp1d(measureYear, measureHFO)
fitted = interpolate.interp1d(measureYear, measure)
if year >= 2020:
interp = fitted(year)
interpHFO = fittedHFO(year)
else:
interp = measure[0]
interpHFO = measureHFO[0]
return interp, interpHFO
def rShipBasicFunc(filename,fuelName,CAPcnt):
csv_input = pd.read_csv(filepath_or_buffer=filename, encoding="utf_8", sep=",")
fuelType = csv_input['Fuel type']
rShipBasic = csv_input['rShipBasic']
fuelDict = {}
for i, j in zip(fuelType, rShipBasic):
fuelDict[i] = float(j)
return fuelDict[fuelName]
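# The short functions below evaluate the ship model: deadweight (wDWT) and
# full-load displacement (wFLD) as linear functions of container capacity,
# annual sailing distance d, main-engine and auxiliary fuel consumption
# (optionally reduced by wind propulsion, solar power or CCS), CO2 emissions,
# transport capacity (cta), and the fuel and newbuilding cost terms.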
def wDWTFunc(kDWT1,CAPcnt,kDWT2):
wDWT = kDWT1*CAPcnt+kDWT2
return wDWT
def wFLDFunc(kFLD1,wDWT,kFLD2):
wFLD = kFLD1*wDWT+kFLD2
return wFLD
def dFunc(Dyear,Hday,v,Rrun):
d = Dyear*Hday*v*Rrun
return d
def fShipFunc(kShip1,kShip2,wDWT,wFLD,rocc,CNM2km,v,d,rWPS,windPr,CeqLHV):
fShipORG = (kShip1/1000)*(wFLD-(1-kShip2*rocc)*wDWT)*(wFLD**(-1/3))*((CNM2km*v)**2)*CNM2km*d
if windPr:
fShip = CeqLHV*fShipORG*(1-rWPS)
else:
fShip = CeqLHV*fShipORG
return fShipORG, fShip
def fAuxFunc(Dyear,Hday,Rrun,kAux1,kAux2,wDWT,rSPS,solar,CeqLHV):
fAuxORG = Dyear*Hday*Rrun*(kAux1+kAux2*wDWT)/1000
if solar:
fAux = CeqLHV*fAuxORG*(1-rSPS)
else:
fAux = CeqLHV*fAuxORG
return fAuxORG, fAux
def gFunc(Cco2ship,fShip,Cco2aux,fAux,rCCS,CCS):
gORG = Cco2ship*fShip+Cco2aux*fAux
if CCS:
g = gORG*(1-rCCS)
else:
g = gORG
return gORG, g
def maxCtaFunc(CAPcnt,d):
maxCta = CAPcnt*d
return maxCta
def ctaFunc(CAPcnt,rocc,d):
cta = CAPcnt*rocc*d
return cta
def costFuelFunc(unitCostFuelHFO, unitCostFuel, fShipORG, fAuxORG, fShip, fAux):
costFuelORG = unitCostFuelHFO*(fShipORG+fAuxORG)
costFuel = unitCostFuel*(fShip+fAux)
dcostFuel = costFuel - costFuelORG
return costFuelORG, costFuel, dcostFuel
def costShipFunc(kShipBasic1, CAPcnt, kShipBasic2, rShipBasic, dcostWPS, dcostSPS, dcostCCS, flagWPS, flagSPS, flagCCS):
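# Newbuilding cost: a linear baseline in container capacity, scaled by the
# fuel-dependent factor rShipBasic; note that the elif chain below applies at
# most one retrofit surcharge (WPS, SPS or CCS) even if several flags are set.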
costShipBasicHFO = kShipBasic1 * CAPcnt + kShipBasic2
costShipBasic = rShipBasic * costShipBasicHFO
cAdditionalEquipment = 0
if flagWPS:
cAdditionalEquipment += dcostWPS
elif flagSPS:
cAdditionalEquipment += dcostSPS
elif flagCCS:
cAdditionalEquipment += dcostCCS
costShipAdd = cAdditionalEquipment * costShipBasicHFO
costShip = costShipBasic + costShipAdd
return costShipBasicHFO, costShipBasic, costShipAdd, costShip
def additionalShippingFeeFunc(tOp, tOpSch, dcostFuelAll, costShipAll, costShipBasicHFO):
if tOp <= tOpSch:
dcostShipping = dcostFuelAll + (costShipAll-costShipBasicHFO)/tOpSch
else:
dcostShipping = dcostFuelAll
return dcostShipping
def demandScenarioFunc(year,kDem1,kDem2,kDem3,kDem4):
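# Demand scenario: a quadratic function of the year, converted to the model's
# unit via the 1e9 / kDem4 scaling.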
Di = (kDem1*year**2 + kDem2*year + kDem3)*1000000000/kDem4
return Di
def playOrderFunc(cost,playOrder):
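# Decide the play order from the companies' costs: distinct costs are sorted
# ascending (cheapest plays first); tied companies are shuffled at random.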
unique, counts = np.unique(cost, return_counts=True)
if np.amax(counts) == 1:
playOrderNew = playOrder[np.argsort(cost)]
elif np.amax(counts) == 2:
minCost = np.amin(cost)
maxCost = np.amax(cost)
if minCost == unique[counts == 1]:
playOrderNew = np.zeros(3)
playOrderNew[0] = playOrder[cost == minCost]
playOrderNew[1:3] = np.random.permutation(playOrder[cost!=minCost])
else:
playOrderNew = np.zeros(3)
playOrderNew[2] = playOrder[cost == maxCost]
playOrderNew[0:2] = np.random.permutation(playOrder[cost!=maxCost])
else:
playOrderNew = np.random.permutation(playOrder)
return playOrderNew
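# Standalone sketch (illustrative values, not taken from the input CSVs):
#   cost = np.array([12.0, 9.5, 12.0])
#   playOrder = np.array([1, 2, 3])
#   playOrderFunc(cost, playOrder)  # company 2 first, the tied pair in random order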
def rEEDIreqCurrentFunc(wDWT,rEEDIreq):
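# Pick the required EEDI reduction rate for this ship from the three deadweight
# tiers (>= 200,000, 120,000-200,000, < 120,000).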
if wDWT >= 200000:
rEEDIreqCurrent = rEEDIreq[0]
elif wDWT >= 120000:
rEEDIreqCurrent = rEEDIreq[1]
else:
rEEDIreqCurrent = rEEDIreq[2]
return rEEDIreqCurrent
def EEDIreqFunc(kEEDI1,wDWT,kEEDI2,rEEDIreq):
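# Reference EEDI is a power law in deadweight (kEEDI1 * wDWT**kEEDI2); the
# required EEDI applies the regulator's reduction rate to that reference line.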
EEDIref = kEEDI1*wDWT**kEEDI2
EEDIreq = (1-rEEDIreq)*EEDIref
return EEDIref, EEDIreq
def EEDIattFunc(wDWT,wMCR,kMCR1,kMCR2,kMCR3,kPAE1,kPAE2,rCCS,vDsgn,rWPS,Cco2ship,SfcM,SfcA,rSPS,Cco2aux,EEDIreq,flagWPS,flagSPS,flagCCS):
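# Estimate the attained EEDI for the chosen equipment (WPS/SPS/CCS) and, while
# it exceeds the requirement, reduce the design speed in steps of 1 until it
# complies or the speed reaches zero; returns the engine rating MCRM, auxiliary
# power PA, the attained EEDI and the reduced design speed.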
if wDWT < wMCR:
MCRM = kMCR1*wDWT + kMCR2
else:
MCRM = kMCR3
PA = kPAE1*MCRM+kPAE2
def _EEDIcalc(vDsgnRed):
if flagWPS:
rWPStemp = rWPS
else:
rWPStemp = 0
if flagSPS:
rSPStemp = rSPS
else:
rSPStemp = 0
if flagCCS:
rCCStemp = rCCS
else:
rCCStemp = 0
return ((1-rCCStemp)/(0.7*wDWT*vDsgnRed))*((1-rWPStemp)*Cco2ship*0.75*MCRM*SfcM*(vDsgnRed/vDsgn)**3 + (1-rSPStemp)*Cco2aux*PA*SfcA)
vDsgnRed = vDsgn
EEDIatt = _EEDIcalc(vDsgnRed)
while EEDIatt > EEDIreq:
vDsgnRed -= 1
if vDsgnRed == 0:
break
EEDIatt = _EEDIcalc(vDsgnRed)
return MCRM, PA, EEDIatt, vDsgnRed
def regPreFunc(nDec):
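# Pre-allocate the regulator's decision arrays (EEDI reduction rate per
# deadweight tier, subsidy rate, carbon tax) and seed the first period with
# 50 / 45 / 35 % reduction rates.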
regDec = {}
regDec['rEEDIreq'] = np.zeros((nDec,3))
regDec['Subsidy'] = np.zeros(nDec)
regDec['Ctax'] = np.zeros(nDec)
regDec['rEEDIreq'][0,0] = 0.5
regDec['rEEDIreq'][0,1] = 0.45
regDec['rEEDIreq'][0,2] = 0.35
return regDec
def regDecFunc(regDec,nReg,currentYear):
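# Collect the regulator's decisions for this round through two Tkinter dialogs:
# first the EEXI/EEDI reduction rates per deadweight tier, then the subsidy
# rate and the carbon tax.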
def _regDecGui1(regDec,nReg,currentYear):
def _buttonCommand(regDec,nReg,root):
if float(v1.get()) <= 100 and float(v2.get()) <= 100 and float(v3.get()) <= 100 and float(v1.get()) >= 0 and float(v2.get()) >= 0 and float(v3.get()) >= 0:
regDec['rEEDIreq'][nReg,0] = float(v1.get()) / 100
regDec['rEEDIreq'][nReg,1] = float(v2.get()) / 100
regDec['rEEDIreq'][nReg,2] = float(v3.get()) / 100
root.quit()
root.destroy()
else:
button['state'] = 'disabled'
def _buttonCommandCheck():
if float(v1.get()) <= 100 and float(v2.get()) <= 100 and float(v3.get()) <= 100 and float(v1.get()) >= 0 and float(v2.get()) >= 0 and float(v3.get()) >= 0:
button['state'] = 'normal'
else:
button['state'] = 'disabled'
root = Tk()
root.title('Regulator : Reduction Rate for EEXI / EEDI in '+str(currentYear))
width = 600
height = 300
placeX = root.winfo_screenwidth()/2 - width/2
placeY = root.winfo_screenheight()/2 - height/2
widgetSize = str(width)+'x'+str(height)+'+'+str(int(placeX))+'+'+str(int(placeY))
root.geometry(widgetSize)
root['bg'] = '#a3d6cc'
style = ttk.Style()
style.theme_use('default')
style.configure('new.TFrame', foreground='black', background='#a3d6cc')
style.configure('new.TLabel', foreground='black', background='#a3d6cc')
style.configure('new.TButton', foreground='black', background='#a3d6cc')
style.configure('new.TCheckbutton', foreground='black', background='#a3d6cc')
style.configure('new.TEntry', foreground='black', background='#a3d6cc')
# Frame
frame = ttk.Frame(root, style='new.TFrame', padding=20)
frame.pack()
# Checkbutton
v1 = StringVar()
if nReg == 0:
v1.set('0') # initialize
else:
v1.set(str(100*regDec['rEEDIreq'][nReg-1,0])) # initialize
cb1 = ttk.Entry(frame, style='new.TEntry', textvariable=v1)
label1 = ttk.Label(frame, style='new.TLabel',text='wDWT >= 200,000', padding=(5, 2))
label11 = ttk.Label(frame, style='new.TLabel',text='% <= 100%', padding=(5, 2))
label111 = ttk.Label(frame, style='new.TLabel',text='0% <=', padding=(5, 2))
labelExpl = ttk.Label(frame, style='new.TLabel', text='Guide: Input reduction rate for EEXI / EEDI, and then click "Check" & "Next".', padding=(5, 2))
# Checkbutton
v2 = StringVar()
if nReg == 0:
v2.set('0') # initialize
else:
v2.set(str(100*regDec['rEEDIreq'][nReg-1,1])) # initialize
cb2 = ttk.Entry(frame, style='new.TEntry', textvariable=v2)
label2 = ttk.Label(frame, style='new.TLabel',text='120,000 <= wDWT < 200,000', padding=(5, 2))
label22 = ttk.Label(frame, style='new.TLabel',text='% <= 100%', padding=(5, 2))
label222 = ttk.Label(frame, style='new.TLabel',text='0% <=', padding=(5, 2))
# Checkbutton
v3 = StringVar()
if nReg == 0:
v3.set('0') # initialize
else:
v3.set(str(100*regDec['rEEDIreq'][nReg-1,2])) # initialize
cb3 = ttk.Entry(frame, style='new.TEntry', textvariable=v3)
label3 = ttk.Label(frame, style='new.TLabel',text='wDWT < 120,000', padding=(5, 2))
label33 = ttk.Label(frame, style='new.TLabel',text='% <= 100%', padding=(5, 2))
label333 = ttk.Label(frame, style='new.TLabel',text='0% <=', padding=(5, 2))
# Button
button = ttk.Button(frame, style='new.TButton',text='Next', state='disabled', command=lambda: _buttonCommand(regDec,nReg,root))
button1 = ttk.Button(frame, style='new.TButton',text='Check', command=lambda: _buttonCommandCheck())
# Layout
label11.grid(row=0, column=3)
cb1.grid(row=0, column=2)
label111.grid(row=0, column=1)
label1.grid(row=0, column=0)
label22.grid(row=1, column=3)
cb2.grid(row=1, column=2)
label222.grid(row=1, column=1)
label2.grid(row=1, column=0)
label33.grid(row=2, column=3)
cb3.grid(row=2, column=2)
label333.grid(row=2, column=1)
label3.grid(row=2, column=0)
button.grid(row=3, column=3)
button1.grid(row=3, column=2)
labelExpl.grid(row=5, column=0, columnspan=3)
root.deiconify()
root.mainloop()
return regDec
def _regDecGui2(regDec,nReg,currentYear):
def _buttonCommand(regDec,nReg,root):
if float(v1.get()) <= 100 and float(v1.get()) >= 0 and float(v2.get()) >= 0:
regDec['Subsidy'][nReg] = float(v1.get()) / 100
regDec['Ctax'][nReg] = float(v2.get())
root.quit()
root.destroy()
else:
button['state'] = 'disabled'
def _buttonCommandCheck():
if float(v1.get()) <= 100 and float(v1.get()) >= 0 and float(v2.get()) >= 0:
button['state'] = 'normal'
else:
button['state'] = 'disabled'
root = Tk()
root.title('Regulator : Subsidy & Carbon tax in '+str(currentYear))
width = 800
height = 300
placeX = root.winfo_screenwidth()/2 - width/2
placeY = root.winfo_screenheight()/2 - height/2
widgetSize = str(width)+'x'+str(height)+'+'+str(int(placeX))+'+'+str(int(placeY))
root.geometry(widgetSize)
root['bg'] = '#a3d6cc'
style = ttk.Style()
style.theme_use('default')
style.configure('new.TFrame', foreground='black', background='#a3d6cc')
style.configure('new.TLabel', foreground='black', background='#a3d6cc')
style.configure('new.TButton', foreground='black', background='#a3d6cc')
style.configure('new.TCheckbutton', foreground='black', background='#a3d6cc')
style.configure('new.TEntry', foreground='black', background='#a3d6cc')
# Frame
frame = ttk.Frame(root, style='new.TFrame', padding=20)
frame.pack()
# Checkbutton
v1 = StringVar()
if nReg == 0:
v1.set('0') # initialize
else:
v1.set(str(int(100*regDec['Subsidy'][nReg-1]))) # initialize
cb1 = ttk.Entry(frame, style='new.TEntry', textvariable=v1)
label1 = ttk.Label(frame, style='new.TLabel', text='Subsidy rate', padding=(5, 2))
label11 = ttk.Label(frame, style='new.TLabel', text='% <= 100%', padding=(5, 2))
label111 = ttk.Label(frame, style='new.TLabel', text='0% <=', padding=(5, 2))
labelExpl = ttk.Label(frame, style='new.TLabel', text='Guide: Input subsidy and carbon tax, and then click "Check" & "Next".', padding=(5, 2))
# Checkbutton
v2 = StringVar()
if nReg == 0:
v2.set('0') # initialize
else:
v2.set(str(int(regDec['Ctax'][nReg-1]))) # initialize
cb2 = ttk.Entry(frame, style='new.TEntry', textvariable=v2)
label2 = ttk.Label(frame, style='new.TLabel', text='Carbon tax [$/ton]', padding=(5, 2))
#label22 = ttk.Label(frame, style='new.TLabel', text='% <= 100%', padding=(5, 2))
label222 = ttk.Label(frame, style='new.TLabel', text='0 <=', padding=(5, 2))
# Button
button = ttk.Button(frame, style='new.TButton', text='Next', state='disabled', command=lambda: _buttonCommand(regDec,nReg,root))
button1 = ttk.Button(frame, style='new.TButton', text='Check', command=lambda: _buttonCommandCheck())
# Layout
label11.grid(row=0, column=3)
cb1.grid(row=0, column=2)
label111.grid(row=0, column=1)
label1.grid(row=0, column=0)
#label22.grid(row=1, column=3)
cb2.grid(row=1, column=2)
label222.grid(row=1, column=1)
label2.grid(row=1, column=0)
button.grid(row=3, column=3)
button1.grid(row=3, column=2)
labelExpl.grid(row=5, column=0, columnspan=3)
root.deiconify()
root.mainloop()
return regDec
regDec = _regDecGui1(regDec,nReg,currentYear)
regDec = _regDecGui2(regDec,nReg,currentYear)
return regDec
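# scrapRefurbishFunc: yearly refit/scrap phase for one company. It recalculates the
# required and attained EEDI for every fleet in operation, opens a dialog where the
# player can retrofit WPS/SPS/CCS or scrap fleets, and books the refurbishment cost
# (costRfrb) for any equipment added this year.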
def scrapRefurbishFunc(fleetAll,numCompany,elapsedYear,currentYear,valueDict,tOpSch,rEEDIreq):
def _scrapOrRefurbishGui(fleetAll,numCompany,tOpSch,valueDict,currentYear,rEEDIreq):
def _buttonCommandCheckButton(fleetAll,valueDict,rEEDIreq,v,Sys):
NumFleet = len(fleetAll[numCompany])
numAlive = 0
for keyFleet in range(1,NumFleet):
if fleetAll[numCompany][keyFleet]['delivery'] <= currentYear and fleetAll[numCompany][keyFleet]['tOp'] < tOpSch:
tOpTemp = fleetAll[numCompany][keyFleet]['tOp']
fleetAll[numCompany][keyFleet][Sys] = int(v[numAlive].get())
rEEDIreqCurrent = rEEDIreqCurrentFunc(fleetAll[numCompany][keyFleet]['wDWT'],rEEDIreq)
fleetAll[numCompany][keyFleet]['EEDIref'][tOpTemp], fleetAll[numCompany][keyFleet]['EEDIreq'][tOpTemp] = EEDIreqFunc(valueDict['kEEDI1'],fleetAll[numCompany][keyFleet]['wDWT'],valueDict['kEEDI2'],rEEDIreqCurrent)
fleetAll[numCompany][keyFleet]['MCRM'][tOpTemp], fleetAll[numCompany][keyFleet]['PA'][tOpTemp], fleetAll[numCompany][keyFleet]['EEDIatt'][tOpTemp], fleetAll[numCompany][keyFleet]['vDsgnRed'][tOpTemp] = EEDIattFunc(fleetAll[numCompany][keyFleet]['wDWT'],valueDict['wMCR'],valueDict['kMCR1'],valueDict['kMCR2'],valueDict['kMCR3'],valueDict['kPAE1'],valueDict['kPAE2'],valueDict['rCCS'],valueDict['vDsgn'],valueDict['rWPS'],fleetAll[numCompany][keyFleet]['Cco2ship'],valueDict['SfcM'],valueDict['SfcA'],valueDict['rSPS'],fleetAll[numCompany][keyFleet]['Cco2aux'],fleetAll[numCompany][keyFleet]['EEDIreq'][tOpTemp],fleetAll[numCompany][keyFleet]['WPS'],fleetAll[numCompany][keyFleet]['SPS'],fleetAll[numCompany][keyFleet]['CCS'])
label14[numAlive]['text'] = str(int(fleetAll[numCompany][keyFleet]['vDsgnRed'][tOpTemp]))
label15[numAlive]['text'] = str('{:.3g}'.format(fleetAll[numCompany][keyFleet]['EEDIreq'][tOpTemp]))
label16[numAlive]['text'] = str('{:.3g}'.format(fleetAll[numCompany][keyFleet]['EEDIatt'][tOpTemp]))
if valueDict['vMin'] < fleetAll[numCompany][keyFleet]['vDsgnRed'][tOpTemp]:
button2['state'] = 'normal'
numAlive += 1
#fleetAll[numCompany][keyFleet] = fleetAll[numCompany][keyFleet]
def _buttonCommandNext(root,fleetAll,numCompany,tOpSch):
NumFleet = len(fleetAll[numCompany])
j = 0
goAhead = True
for keyFleet in range(1,NumFleet):
if fleetAll[numCompany][keyFleet]['delivery'] <= currentYear and fleetAll[numCompany][keyFleet]['tOp'] < tOpSch:
tOpTemp = fleetAll[numCompany][keyFleet]['tOp']
if valueDict['vMin'] > fleetAll[numCompany][keyFleet]['vDsgnRed'][tOpTemp] and v4[j].get() != '1':
goAhead = False
j += 1
if goAhead:
j = 0
for keyFleet in range(1,NumFleet):
if fleetAll[numCompany][keyFleet]['delivery'] <= currentYear and fleetAll[numCompany][keyFleet]['tOp'] < tOpSch:
if v4[j].get() == '1':
fleetAll[numCompany][keyFleet]['tOp'] = tOpSch
j += 1
root.quit()
root.destroy()
else:
button2['state'] = 'disabled'
def _buttonCommandCheck(fleetAll,valueDict,rEEDIreq):
NumFleet = len(fleetAll[numCompany])
numAlive = 0
goAhead = True
for keyFleet in range(1,NumFleet):
if fleetAll[numCompany][keyFleet]['delivery'] <= currentYear and fleetAll[numCompany][keyFleet]['tOp'] < tOpSch:
tOpTemp = fleetAll[numCompany][keyFleet]['tOp']
fleetAll[numCompany][keyFleet]['WPS'] = int(v1[numAlive].get())
fleetAll[numCompany][keyFleet]['SPS'] = int(v2[numAlive].get())
fleetAll[numCompany][keyFleet]['CCS'] = int(v3[numAlive].get())
rEEDIreqCurrent = rEEDIreqCurrentFunc(fleetAll[numCompany][keyFleet]['wDWT'],rEEDIreq)
fleetAll[numCompany][keyFleet]['EEDIref'][tOpTemp], fleetAll[numCompany][keyFleet]['EEDIreq'][tOpTemp] = EEDIreqFunc(valueDict['kEEDI1'],fleetAll[numCompany][keyFleet]['wDWT'],valueDict['kEEDI2'],rEEDIreqCurrent)
fleetAll[numCompany][keyFleet]['MCRM'][tOpTemp], fleetAll[numCompany][keyFleet]['PA'][tOpTemp], fleetAll[numCompany][keyFleet]['EEDIatt'][tOpTemp], fleetAll[numCompany][keyFleet]['vDsgnRed'][tOpTemp] = EEDIattFunc(fleetAll[numCompany][keyFleet]['wDWT'],valueDict['wMCR'],valueDict['kMCR1'],valueDict['kMCR2'],valueDict['kMCR3'],valueDict['kPAE1'],valueDict['kPAE2'],valueDict['rCCS'],valueDict['vDsgn'],valueDict['rWPS'],fleetAll[numCompany][keyFleet]['Cco2ship'],valueDict['SfcM'],valueDict['SfcA'],valueDict['rSPS'],fleetAll[numCompany][keyFleet]['Cco2aux'],fleetAll[numCompany][keyFleet]['EEDIreq'][tOpTemp],fleetAll[numCompany][keyFleet]['WPS'],fleetAll[numCompany][keyFleet]['SPS'],fleetAll[numCompany][keyFleet]['CCS'])
if valueDict['vMin'] > fleetAll[numCompany][keyFleet]['vDsgnRed'][tOpTemp] and v4[numAlive].get() != '1':
goAhead = False
numAlive += 1
if goAhead:
button2['state'] = 'normal'
def _buttonCommandNext2(root):
root.quit()
root.destroy()
def _buttonCommandAtOnce(Sys):
NumFleet = len(fleetAll[numCompany])
j = 0
for keyFleet in range(1,NumFleet):
if fleetAll[numCompany][keyFleet]['delivery'] <= currentYear and fleetAll[numCompany][keyFleet]['tOp'] < tOpSch:
if Sys == 'WPS':
if label10[j].state() != ('disabled', 'selected'):
if v1[j].get() == '1':
v1[j].set('0')
elif v1[j].get() == '0':
v1[j].set('1')
fleetAll[numCompany][keyFleet][Sys] = int(v1[j].get())
elif Sys == 'SPS':
if label11[j].state() != ('disabled', 'selected'):
if v2[j].get() == '1':
v2[j].set('0')
elif v2[j].get() == '0':
v2[j].set('1')
fleetAll[numCompany][keyFleet][Sys] = int(v2[j].get())
elif Sys == 'CCS':
if label12[j].state() != ('disabled', 'selected') and label12[j].state() != ('disabled',):
if v3[j].get() == '1':
v3[j].set('0')
elif v3[j].get() == '0':
v3[j].set('1')
fleetAll[numCompany][keyFleet][Sys] = int(v3[j].get())
tOpTemp = fleetAll[numCompany][keyFleet]['tOp']
rEEDIreqCurrent = rEEDIreqCurrentFunc(fleetAll[numCompany][keyFleet]['wDWT'],rEEDIreq)
fleetAll[numCompany][keyFleet]['EEDIref'][tOpTemp], fleetAll[numCompany][keyFleet]['EEDIreq'][tOpTemp] = EEDIreqFunc(valueDict['kEEDI1'],fleetAll[numCompany][keyFleet]['wDWT'],valueDict['kEEDI2'],rEEDIreqCurrent)
fleetAll[numCompany][keyFleet]['MCRM'][tOpTemp], fleetAll[numCompany][keyFleet]['PA'][tOpTemp], fleetAll[numCompany][keyFleet]['EEDIatt'][tOpTemp], fleetAll[numCompany][keyFleet]['vDsgnRed'][tOpTemp] = EEDIattFunc(fleetAll[numCompany][keyFleet]['wDWT'],valueDict['wMCR'],valueDict['kMCR1'],valueDict['kMCR2'],valueDict['kMCR3'],valueDict['kPAE1'],valueDict['kPAE2'],valueDict['rCCS'],valueDict['vDsgn'],valueDict['rWPS'],fleetAll[numCompany][keyFleet]['Cco2ship'],valueDict['SfcM'],valueDict['SfcA'],valueDict['rSPS'],fleetAll[numCompany][keyFleet]['Cco2aux'],fleetAll[numCompany][keyFleet]['EEDIreq'][tOpTemp],fleetAll[numCompany][keyFleet]['WPS'],fleetAll[numCompany][keyFleet]['SPS'],fleetAll[numCompany][keyFleet]['CCS'])
label14[j]['text'] = str(int(fleetAll[numCompany][keyFleet]['vDsgnRed'][tOpTemp]))
label15[j]['text'] = str('{:.3g}'.format(fleetAll[numCompany][keyFleet]['EEDIreq'][tOpTemp]))
label16[j]['text'] = str('{:.3g}'.format(fleetAll[numCompany][keyFleet]['EEDIatt'][tOpTemp]))
if valueDict['vMin'] < fleetAll[numCompany][keyFleet]['vDsgnRed'][tOpTemp]:
button2['state'] = 'normal'
j += 1
root = Tk()
root.title('Company '+str(numCompany)+' : Scrap or Refurbish in '+str(currentYear))
width = 1000
height = 500
placeX = root.winfo_screenwidth()/2 - width/2
placeY = root.winfo_screenheight()/2 - height/2
widgetSize = str(width)+'x'+str(height)+'+'+str(int(placeX))+'+'+str(int(placeY))
root.geometry(widgetSize)
canvas = Canvas(root, width=width, height=height)
# Frame
style = ttk.Style()
style.theme_use('default')
if numCompany == 1:
color = '#ffcccc'
elif numCompany == 2:
color = '#ffedab'
elif numCompany == 3:
color = '#a4a8d4'
root['bg'] = color
style.configure('new.TFrame', foreground='black', background=color)
style.configure('new.TLabel', foreground='black', background=color)
style.configure('new.TButton', foreground='black', background=color)
style.configure('new.TCheckbutton', foreground='black', background=color)
frame = ttk.Frame(root, style='new.TFrame', padding=20)
frame.pack()
frame.bind("<Configure>", lambda e: canvas.configure(scrollregion=canvas.bbox("all")))
vbar = Scrollbar(root, orient="vertical")
vbar.config(command=canvas.yview)
vbar.pack(side=RIGHT,fill="y")
canvas['bg'] = color
canvas.create_window((placeX, placeY), window=frame, anchor=CENTER)
canvas.pack()
canvas.update_idletasks()
canvas.configure(yscrollcommand=vbar.set)
canvas.yview_moveto(0)
# Label
label0 = ttk.Label(frame, style='new.TLabel', text='No.', padding=(5, 2))
labelDeli = ttk.Label(frame, style='new.TLabel',text='Delivery year', padding=(5, 2))
label1 = ttk.Label(frame, style='new.TLabel',text='Fuel type', padding=(5, 2))
label2 = ttk.Label(frame, style='new.TLabel',text='Capacity [TEU]', padding=(5, 2))
label3 = ttk.Label(frame, style='new.TLabel',text='WPS', padding=(5, 2))
label4 = ttk.Label(frame, style='new.TLabel',text='SPS', padding=(5, 2))
label5 = ttk.Label(frame, style='new.TLabel',text='CCS', padding=(5, 2))
label7 = ttk.Label(frame, style='new.TLabel',text='Maximum speed [kt]', padding=(5, 2))
label152 = ttk.Label(frame, style='new.TLabel',text='EEXIreq [g/(ton*NM)]', padding=(5, 2))
label162 = ttk.Label(frame, style='new.TLabel',text='EEXIatt [g/(ton*NM)]', padding=(5, 2))
labelScrap = ttk.Label(frame, style='new.TLabel',text='Scrap', padding=(5, 2))
label00 = []
labelDeli1 = []
label8 = []
label9 = []
label10 = []
label11 = []
label12 = []
label14 = []
label15 = []
label16 = []
buttonScrap = []
v1 = []
v2 = []
v3 = []
v4 = []
NumFleet = len(fleetAll[numCompany])
for keyFleet in range(1,NumFleet):
if fleetAll[numCompany][keyFleet]['delivery'] <= currentYear and fleetAll[numCompany][keyFleet]['tOp'] < tOpSch:
labelDeli1.append(ttk.Label(frame, style='new.TLabel',text=str(fleetAll[numCompany][keyFleet]['delivery']), padding=(5, 2)))
label00.append(ttk.Label(frame, style='new.TLabel',text=str(keyFleet), padding=(5, 2)))
tOpTemp = fleetAll[numCompany][keyFleet]['tOp']
if fleetAll[numCompany][keyFleet]['fuelName'] == 'HFO':
label8.append(ttk.Label(frame, style='new.TLabel',text='HFO/Diesel', padding=(5, 2)))
else:
label8.append(ttk.Label(frame, style='new.TLabel',text=fleetAll[numCompany][keyFleet]['fuelName'], padding=(5, 2)))
label9.append(ttk.Label(frame, style='new.TLabel',text=str(int(fleetAll[numCompany][keyFleet]['CAPcnt'])), padding=(5, 2)))
v1.append(StringVar())
if fleetAll[numCompany][keyFleet]['WPS']:
v1[-1].set('1')
label10.append(ttk.Checkbutton(frame, style='new.TCheckbutton', padding=(10), state='disable', command=lambda: _buttonCommandCheckButton(fleetAll,valueDict,rEEDIreq,v1,'WPS'),variable=v1[-1]))
else:
v1[-1].set('0')
label10.append(ttk.Checkbutton(frame, style='new.TCheckbutton',padding=(10), command=lambda: _buttonCommandCheckButton(fleetAll,valueDict,rEEDIreq,v1,'WPS'),variable=v1[-1]))
v2.append(StringVar())
if fleetAll[numCompany][keyFleet]['SPS']:
v2[-1].set('1')
label11.append(ttk.Checkbutton(frame, style='new.TCheckbutton',padding=(10), state='disable', command=lambda: _buttonCommandCheckButton(fleetAll,valueDict,rEEDIreq,v2,'SPS'),variable=v2[-1]))
else:
v2[-1].set('0')
label11.append(ttk.Checkbutton(frame, style='new.TCheckbutton',padding=(10), command=lambda: _buttonCommandCheckButton(fleetAll,valueDict,rEEDIreq,v2,'SPS'),variable=v2[-1]))
v3.append(StringVar())
if fleetAll[numCompany][keyFleet]['CCS']:
v3[-1].set('1')
label12.append(ttk.Checkbutton(frame, style='new.TCheckbutton',padding=(10), state='disable', command=lambda: _buttonCommandCheckButton(fleetAll,valueDict,rEEDIreq,v3,'CCS'),variable=v3[-1]))
elif currentYear < valueDict['addSysYear']+2:
v3[-1].set('0')
label12.append(ttk.Checkbutton(frame, style='new.TCheckbutton', padding=(10), state='disable', command=lambda: _buttonCommandCheckButton(fleetAll,valueDict,rEEDIreq,v3,'CCS'),variable=v3[-1]))
else:
v3[-1].set('0')
label12.append(ttk.Checkbutton(frame, style='new.TCheckbutton',padding=(10), command=lambda: _buttonCommandCheckButton(fleetAll,valueDict,rEEDIreq,v3,'CCS'),variable=v3[-1]))
label14.append(ttk.Label(frame, style='new.TLabel',text=str(int(fleetAll[numCompany][keyFleet]['vDsgnRed'][tOpTemp])), padding=(5, 2)))
label15.append(ttk.Label(frame, style='new.TLabel',text='{:.3g}'.format(fleetAll[numCompany][keyFleet]['EEDIreq'][tOpTemp]), padding=(5, 2)))
label16.append(ttk.Label(frame, style='new.TLabel',text='{:.3g}'.format(fleetAll[numCompany][keyFleet]['EEDIatt'][tOpTemp]), padding=(5, 2)))
v4.append(StringVar())
buttonScrap.append(ttk.Checkbutton(frame, style='new.TCheckbutton',padding=(10), variable=v4[-1]))
        labelExpl = ttk.Label(frame, style='new.TLabel', text='Guide: Check the additional systems and the scrap button if you want, and then click "Check" & "Next". You can check all the buttons in a column at once with the "Check all ... at once" buttons.', padding=(5, 2))
labelExpl2 = ttk.Label(frame, style='new.TLabel', text='Guide: You have no fleet. Click "Next".', padding=(5, 2))
# Button
button1 = ttk.Button(frame, style='new.TButton', text='Check', command=lambda: _buttonCommandCheck(fleetAll,valueDict,rEEDIreq))
button2 = ttk.Button(frame, style='new.TButton', text='Next', state='disabled', command=lambda: _buttonCommandNext(root,fleetAll,numCompany,tOpSch))
buttonWPS = ttk.Button(frame, style='new.TButton', text='Check all WPS at once', command=lambda: _buttonCommandAtOnce('WPS'))
buttonSPS = ttk.Button(frame, style='new.TButton', text='Check all SPS at once', command=lambda: _buttonCommandAtOnce('SPS'))
buttonCCS = ttk.Button(frame, style='new.TButton', text='Check all CCS at once', command=lambda: _buttonCommandAtOnce('CCS'))
button22 = ttk.Button(frame, style='new.TButton',text='Next', command=lambda: _buttonCommandNext2(root))
# Layout
if len(label8) > 0:
label0.grid(row=0, column=0)
labelDeli.grid(row=0, column=1)
label1.grid(row=0, column=2)
label2.grid(row=0, column=3)
label3.grid(row=0, column=4)
label4.grid(row=0, column=5)
label5.grid(row=0, column=6)
label7.grid(row=0, column=7)
label152.grid(row=0, column=8)
label162.grid(row=0, column=9)
labelScrap.grid(row=0, column=10)
for i, j in enumerate(label8):
labelDeli1[i].grid(row=i+1, column=1, pady=0)
label00[i].grid(row=i+1, column=0, pady=0)
label8[i].grid(row=i+1, column=2, pady=0)
label9[i].grid(row=i+1, column=3, pady=0)
label10[i].grid(row=i+1, column=4, pady=0)
label11[i].grid(row=i+1, column=5, pady=0)
label12[i].grid(row=i+1, column=6, pady=0)
label14[i].grid(row=i+1, column=7, pady=0)
label15[i].grid(row=i+1, column=8, pady=0)
label16[i].grid(row=i+1, column=9, pady=0)
buttonScrap[i].grid(row=i+1, column=10, pady=0)
button1.grid(row=i+2, column=9)
button2.grid(row=i+2, column=10)
buttonWPS.grid(row=i+2, column=1)
buttonSPS.grid(row=i+2, column=2)
buttonCCS.grid(row=i+2, column=3)
labelExpl.grid(row=i+3, column=0, columnspan=10)
else:
labelExpl2.grid(row=0, column=0)
button22.grid(row=0, column=1)
root.deiconify()
root.mainloop()
return fleetAll
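    # _dcostCntGui: dialog for an additional shipping fee per container, stored in
    # fleetAll[numCompany]['total']['dcostCnt']. Currently unused; the call at the
    # end of scrapRefurbishFunc is commented out.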
def _dcostCntGui(fleetAll,numCompany,elapsedYear):
def _buttonCommand(fleetAll,numCompany,elapsedYear,root,v):
fleetAll[numCompany]['total']['dcostCnt'][elapsedYear] = v.get()
root.destroy()
root.quit()
root = Tk()
root.title('Company '+str(numCompany)+' : Additional Shipping Fee Per Container in '+str(currentYear))
width = 500
height = 200
placeX = root.winfo_screenwidth()/2 - width/2
placeY = root.winfo_screenheight()/2 - height/2
widgetSize = str(width)+'x'+str(height)+'+'+str(int(placeX))+'+'+str(int(placeY))
root.geometry(widgetSize)
# Frame
style = ttk.Style()
style.theme_use('default')
if numCompany == 1:
color = '#ffcccc'
elif numCompany == 2:
color = '#ffedab'
elif numCompany == 3:
color = '#a4a8d4'
root['bg'] = color
style.configure('new.TFrame', foreground='black', background=color)
style.configure('new.TLabel', foreground='black', background=color)
style.configure('new.TButton', foreground='black', background=color)
style.configure('new.TCheckbutton', foreground='black', background=color)
style.configure('new.TEntry', foreground='black', background=color)
frame = ttk.Frame(root, style='new.TFrame', padding=20)
frame.pack()
v1 = StringVar()
if elapsedYear == 0:
v1.set('0')
else:
v1.set(str(int(fleetAll[numCompany]['total']['dcostCnt'][elapsedYear-1])))
cb1 = ttk.Entry(frame, style='new.TEntry', textvariable=v1)
label1 = ttk.Label(frame, style='new.TLabel', text='Additional container fee dC (-1000 <= dC <= 1000)', padding=(5, 2))
label2 = ttk.Label(frame, style='new.TLabel', text='Nominal shipping cost: 1500 $/container', padding=(5, 2))
label3 = ttk.Label(frame, style='new.TLabel', text='$', padding=(5, 2))
labelExpl = ttk.Label(frame, style='new.TLabel', text='Guide: Input additional shipping fee per container, and then click "Complete".', padding=(5, 2))
button = ttk.Button(frame, style='new.TButton', text='Complete', command=lambda: _buttonCommand(fleetAll,numCompany,elapsedYear,root,v1))
label1.grid(row=1, column=0)
label2.grid(row=0, column=0)
label3.grid(row=1, column=2)
cb1.grid(row=1, column=1)
button.grid(row=2, column=1)
labelExpl.grid(row=3, column=0,columnspan=5)
root.deiconify()
root.mainloop()
return fleetAll
# calculate EEDI
NumFleet = len(fleetAll[numCompany])
for keyFleet in range(1,NumFleet):
tOpTemp = fleetAll[numCompany][keyFleet]['tOp']
if fleetAll[numCompany][keyFleet]['delivery'] <= currentYear and fleetAll[numCompany][keyFleet]['tOp'] < tOpSch:
rEEDIreqCurrent = rEEDIreqCurrentFunc(fleetAll[numCompany][keyFleet]['wDWT'],rEEDIreq)
fleetAll[numCompany][keyFleet]['EEDIref'][tOpTemp], fleetAll[numCompany][keyFleet]['EEDIreq'][tOpTemp] = EEDIreqFunc(valueDict['kEEDI1'],fleetAll[numCompany][keyFleet]['wDWT'],valueDict['kEEDI2'],rEEDIreqCurrent)
fleetAll[numCompany][keyFleet]['MCRM'][tOpTemp], fleetAll[numCompany][keyFleet]['PA'][tOpTemp], fleetAll[numCompany][keyFleet]['EEDIatt'][tOpTemp], fleetAll[numCompany][keyFleet]['vDsgnRed'][tOpTemp] = EEDIattFunc(fleetAll[numCompany][keyFleet]['wDWT'],valueDict['wMCR'],valueDict['kMCR1'],valueDict['kMCR2'],valueDict['kMCR3'],valueDict['kPAE1'],valueDict['kPAE2'],valueDict['rCCS'],valueDict['vDsgn'],valueDict['rWPS'],fleetAll[numCompany][keyFleet]['Cco2ship'],valueDict['SfcM'],valueDict['SfcA'],valueDict['rSPS'],fleetAll[numCompany][keyFleet]['Cco2aux'],fleetAll[numCompany][keyFleet]['EEDIreq'][tOpTemp],fleetAll[numCompany][keyFleet]['WPS'],fleetAll[numCompany][keyFleet]['SPS'],fleetAll[numCompany][keyFleet]['CCS'])
# decide to scrap or refurbish currently alive fleet
fleetAll = _scrapOrRefurbishGui(fleetAll,numCompany,tOpSch,valueDict,currentYear,rEEDIreq)
for keyFleet in range(1,NumFleet):
tOpTemp = fleetAll[numCompany][keyFleet]['tOp']
if fleetAll[numCompany][keyFleet]['delivery'] <= currentYear and fleetAll[numCompany][keyFleet]['tOp'] < tOpSch:
cAdditionalEquipment = 0
if fleetAll[numCompany][keyFleet]['lastWPS'] != fleetAll[numCompany][keyFleet]['WPS'] and fleetAll[numCompany][keyFleet]['WPS']:
cAdditionalEquipment += valueDict['dcostWPS']
elif fleetAll[numCompany][keyFleet]['lastSPS'] != fleetAll[numCompany][keyFleet]['SPS'] and fleetAll[numCompany][keyFleet]['SPS']:
cAdditionalEquipment += valueDict['dcostSPS']
elif fleetAll[numCompany][keyFleet]['lastCCS'] != fleetAll[numCompany][keyFleet]['CCS'] and fleetAll[numCompany][keyFleet]['CCS']:
cAdditionalEquipment += valueDict['dcostCCS']
fleetAll[numCompany][keyFleet]['lastWPS'] = fleetAll[numCompany][keyFleet]['WPS']
fleetAll[numCompany][keyFleet]['lastSPS'] = fleetAll[numCompany][keyFleet]['SPS']
fleetAll[numCompany][keyFleet]['lastCCS'] = fleetAll[numCompany][keyFleet]['CCS']
fleetAll[numCompany][keyFleet]['costRfrb'][tOpTemp] = cAdditionalEquipment * fleetAll[numCompany][keyFleet]['costShipBasicHFO']
# decide additional shipping fee per container
#_dcostCntGui(fleetAll,numCompany,elapsedYear)
return fleetAll
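# orderShipFunc: registers a newly ordered fleet for a company. It stores the chosen fuel,
# additional systems (WPS/SPS/CCS) and capacity, derives DWT, fuel properties and building
# cost, sets the delivery year to currentYear+tbid, and allocates the per-year arrays
# (speed, distance, fuel, CO2, EEDI, ...) of length tOpSch.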
def orderShipFunc(fleetAll,numCompany,fuelName,WPS,SPS,CCS,CAPcnt,tOpSch,tbid,iniT,currentYear,elapsedYear,valueDict,NShipFleet,ifIni,parameterFile2,parameterFile12,parameterFile3,parameterFile5):
NumFleet = len(fleetAll[numCompany])
fleetAll[numCompany].setdefault(NumFleet,{})
fleetAll[numCompany][NumFleet]['fuelName'] = fuelName
fleetAll[numCompany][NumFleet]['WPS'] = WPS
fleetAll[numCompany][NumFleet]['SPS'] = SPS
fleetAll[numCompany][NumFleet]['CCS'] = CCS
fleetAll[numCompany][NumFleet]['lastWPS'] = WPS
fleetAll[numCompany][NumFleet]['lastSPS'] = SPS
fleetAll[numCompany][NumFleet]['lastCCS'] = CCS
fleetAll[numCompany][NumFleet]['CAPcnt'] = float(CAPcnt)
fleetAll[numCompany][NumFleet]['wDWT'] = wDWTFunc(valueDict["kDWT1"],fleetAll[numCompany][NumFleet]['CAPcnt'],valueDict["kDWT2"])
fleetAll[numCompany][NumFleet]['wFLD'] = wFLDFunc(valueDict["kFLD1"],fleetAll[numCompany][NumFleet]['wDWT'],valueDict["kFLD2"])
fleetAll[numCompany][NumFleet]['CeqLHVship'] = CeqLHVFunc(parameterFile2,fleetAll[numCompany][NumFleet]['fuelName'])
fleetAll[numCompany][NumFleet]['CeqLHVaux'] = CeqLHVFunc(parameterFile12,fleetAll[numCompany][NumFleet]['fuelName'])
fleetAll[numCompany][NumFleet]['Cco2ship'] = Cco2Func(parameterFile3,fleetAll[numCompany][NumFleet]['fuelName'])
if fuelName == 'HFO':
fleetAll[numCompany][NumFleet]['Cco2aux'] = Cco2Func(parameterFile3,'Diesel')
else:
fleetAll[numCompany][NumFleet]['Cco2aux'] = Cco2Func(parameterFile3,fleetAll[numCompany][NumFleet]['fuelName'])
fleetAll[numCompany][NumFleet]['rShipBasic'] = rShipBasicFunc(parameterFile5,fleetAll[numCompany][NumFleet]['fuelName'],fleetAll[numCompany][NumFleet]['CAPcnt'])
fleetAll[numCompany][NumFleet]['delivery'] = currentYear+tbid
fleetAll[numCompany][NumFleet]['tOp'] = iniT
fleetAll[numCompany][NumFleet]['costShipBasicHFO'], fleetAll[numCompany][NumFleet]['costShipBasic'], fleetAll[numCompany][NumFleet]['costShipAdd'], fleetAll[numCompany][NumFleet]['costShip'] = costShipFunc(valueDict["kShipBasic1"], fleetAll[numCompany][NumFleet]["CAPcnt"], valueDict["kShipBasic2"], fleetAll[numCompany][NumFleet]['rShipBasic'], valueDict["dcostWPS"], valueDict["dcostSPS"], valueDict["dcostCCS"], fleetAll[numCompany][NumFleet]['WPS'], fleetAll[numCompany][NumFleet]['SPS'], fleetAll[numCompany][NumFleet]['CCS'])
if iniT == 0 and not ifIni:
fleetAll[numCompany]['total']['costShip'][elapsedYear+2] += NShipFleet * fleetAll[numCompany][NumFleet]['costShip']
fleetAll[numCompany]['total']['costShipBasicHFO'][elapsedYear+2] += NShipFleet * fleetAll[numCompany][NumFleet]['costShipBasicHFO']
fleetAll[numCompany][NumFleet]['v'] = np.zeros(tOpSch)
fleetAll[numCompany][NumFleet]['d'] = np.zeros(tOpSch)
fleetAll[numCompany][NumFleet]['fShipORG'] = np.zeros(tOpSch)
fleetAll[numCompany][NumFleet]['fAuxORG'] = np.zeros(tOpSch)
fleetAll[numCompany][NumFleet]['gORG'] = np.zeros(tOpSch)
fleetAll[numCompany][NumFleet]['costFuelORG'] = np.zeros(tOpSch)
fleetAll[numCompany][NumFleet]['costFuel'] = np.zeros(tOpSch)
fleetAll[numCompany][NumFleet]['dcostFuel'] = np.zeros(tOpSch)
fleetAll[numCompany][NumFleet]['fShip'] = np.zeros(tOpSch)
fleetAll[numCompany][NumFleet]['fAux'] = np.zeros(tOpSch)
fleetAll[numCompany][NumFleet]['g'] = np.zeros(tOpSch)
fleetAll[numCompany][NumFleet]['cta'] = np.zeros(tOpSch)
fleetAll[numCompany][NumFleet]['dcostShipping'] = np.zeros(tOpSch)
fleetAll[numCompany][NumFleet]['gTilde'] = np.zeros(tOpSch)
fleetAll[numCompany][NumFleet]['costRfrb'] = np.zeros(tOpSch)
fleetAll[numCompany][NumFleet]['EEDIref'] = np.zeros(tOpSch)
fleetAll[numCompany][NumFleet]['EEDIreq'] = np.zeros(tOpSch)
fleetAll[numCompany][NumFleet]['EEDIatt'] = np.zeros(tOpSch)
fleetAll[numCompany][NumFleet]['vDsgnRed'] = np.zeros(tOpSch)
fleetAll[numCompany][NumFleet]['MCRM'] = np.zeros(tOpSch)
fleetAll[numCompany][NumFleet]['PA'] = np.zeros(tOpSch)
fleetAll[numCompany][NumFleet]['year'] = np.zeros(tOpSch)
return fleetAll
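# orderPhaseFunc: yearly ordering phase. Opens a dialog where the player picks fuel type,
# capacity (8000-24000 TEU) and optional WPS/SPS/CCS, checks the resulting EEDI and maximum
# speed, and places zero or more orders via orderShipFunc ("Another fleet" keeps the dialog
# open, "Complete" or "No order" closes it).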
def orderPhaseFunc(fleetAll,numCompany,valueDict,elapsedYear,tOpSch,tbid,currentYear,rEEDIreq,NShipFleet,parameterFile2,parameterFile12,parameterFile3,parameterFile5):
def _orderShipGui(fleetAll,numCompany,valueDict,elapsedYear,tOpSch,tbid,currentYear,rEEDIreq,NShipFleet,parameterFile2,parameterFile12,parameterFile3,parameterFile5):
def _EEDIcalc(rEEDIreq,parameterFile3,valueDict):
fuelType = v1.get()
CAP = float(v2.get())
WPS = int(v3.get())
SPS = int(v4.get())
CCS = int(v5.get())
wDWT = wDWTFunc(valueDict['kDWT1'],CAP,valueDict['kDWT2'])
rEEDIreqCurrent = rEEDIreqCurrentFunc(wDWT,rEEDIreq)
if fuelType == 'HFO/Diesel':
Cco2ship = Cco2Func(parameterFile3,'HFO')
Cco2aux = Cco2Func(parameterFile3,'Diesel')
else:
Cco2ship = Cco2Func(parameterFile3,fuelType)
Cco2aux = Cco2Func(parameterFile3,fuelType)
_, EEDIreq = EEDIreqFunc(valueDict['kEEDI1'],wDWT,valueDict['kEEDI2'],rEEDIreqCurrent)
_, _, EEDIatt, vDsgnRed = EEDIattFunc(wDWT,valueDict['wMCR'],valueDict['kMCR1'],valueDict['kMCR2'],valueDict['kMCR3'],valueDict['kPAE1'],valueDict['kPAE2'],valueDict['rCCS'],valueDict['vDsgn'],valueDict['rWPS'],Cco2ship,valueDict['SfcM'],valueDict['SfcA'],valueDict['rSPS'],Cco2aux,EEDIreq,WPS,SPS,CCS)
return CAP, vDsgnRed, EEDIreq, EEDIatt
def _buttonCommandAnother(fleetAll,numCompany,tOpSch,tbid,currentYear,elapsedYear,NShipFleet,parameterFile2,parameterFile12,parameterFile3,parameterFile5):
CAP, vDsgnRed, EEDIreq, EEDIatt = _EEDIcalc(rEEDIreq,parameterFile3,valueDict)
if valueDict['vMin'] <= vDsgnRed and CAP >= 8000 and CAP <= 24000:
if v1.get() == 'HFO/Diesel':
fuelName = 'HFO'
else:
fuelName = v1.get()
fleetAll = orderShipFunc(fleetAll,numCompany,fuelName,int(v3.get()),int(v4.get()),int(v5.get()),float(v2.get()),tOpSch,tbid,0,currentYear,elapsedYear,valueDict,NShipFleet,False,parameterFile2,parameterFile12,parameterFile3,parameterFile5)
fleetAll[numCompany]['total']['lastOrderFuel'] = v1.get()
fleetAll[numCompany]['total']['lastOrderCAP'] = v2.get()
cb1.delete(0,"end")
cb1.insert(0, fleetAll[numCompany]['total']['lastOrderCAP'])
v3.set('0')
v4.set('0')
v5.set('0')
cb2.var = v3
cb3.var = v4
cb4.var = v5
label6['text'] = 'None'
label7['text'] = 'None'
label8['text'] = 'None'
button1['state'] = 'disabled'
button2['state'] = 'disabled'
else:
button1['state'] = 'disabled'
button2['state'] = 'disabled'
def _buttonCommandComplete(root,fleetAll,numCompany,tOpSch,tbid,currentYear,elapsedYear,NShipFleet,parameterFile2,parameterFile12,parameterFile3,parameterFile5):
CAP, vDsgnRed, EEDIreq, EEDIatt = _EEDIcalc(rEEDIreq,parameterFile3,valueDict)
if valueDict['vMin'] <= vDsgnRed and CAP >= 8000 and CAP <= 24000:
if v1.get() == 'HFO/Diesel':
fuelName = 'HFO'
else:
fuelName = v1.get()
fleetAll = orderShipFunc(fleetAll,numCompany,fuelName,int(v3.get()),int(v4.get()),int(v5.get()),float(v2.get()),tOpSch,tbid,0,currentYear,elapsedYear,valueDict,NShipFleet,False,parameterFile2,parameterFile12,parameterFile3,parameterFile5)
fleetAll[numCompany]['total']['lastOrderFuel'] = v1.get()
fleetAll[numCompany]['total']['lastOrderCAP'] = v2.get()
root.quit()
root.destroy()
else:
button1['state'] = 'disabled'
button2['state'] = 'disabled'
        def _buttonCommandCheck(valueDict,parameterFile3,rEEDIreq):
            CAP, vDsgnRed, EEDIreq, EEDIatt = _EEDIcalc(rEEDIreq,parameterFile3,valueDict)
            label6['text'] = str(int(vDsgnRed))
            label7['text'] = '{:.3g}'.format(EEDIreq)
            label8['text'] = '{:.3g}'.format(EEDIatt)
            # enable the order buttons only when both the reduced design speed and the capacity are within limits
            if valueDict['vMin'] < vDsgnRed and CAP >= 8000 and CAP <= 24000:
                button1['state'] = 'normal'
                button2['state'] = 'normal'
            else:
                button1['state'] = 'disabled'
                button2['state'] = 'disabled'
def _buttonCommandNoOrder(root):
root.quit()
root.destroy()
root = Tk()
root.title('Company '+str(numCompany)+' : Order Ship in '+str(currentYear))
width = 1000
height = 300
placeX = root.winfo_screenwidth()/2 - width/2
placeY = root.winfo_screenheight()/2 - height/2
widgetSize = str(width)+'x'+str(height)+'+'+str(int(placeX))+'+'+str(int(placeY))
root.geometry(widgetSize)
# Frame
style = ttk.Style()
style.theme_use('default')
if numCompany == 1:
color = '#ffcccc'
elif numCompany == 2:
color = '#ffedab'
elif numCompany == 3:
color = '#a4a8d4'
root['bg'] = color
style.configure('new.TFrame', foreground='black', background=color)
style.configure('new.TLabel', foreground='black', background=color)
style.configure('new.TButton', foreground='black', background=color)
style.configure('new.TCheckbutton', foreground='black', background=color)
style.configure('new.TEntry', foreground='black', background=color)
style.configure('new.TCombobox', foreground='black', background=color)
frame = ttk.Frame(root, style='new.TFrame', padding=20)
frame.pack()
# Label
label1 = ttk.Label(frame, style='new.TLabel', text='Fuel type', padding=(5, 2))
label2 = ttk.Label(frame, style='new.TLabel', text='Capacity (8000<=Capacity<=24000) [TEU]', padding=(5, 2))
label3 = ttk.Label(frame, style='new.TLabel', text='Maximum speed [kt]', padding=(5, 2))
label4 = ttk.Label(frame, style='new.TLabel', text='EEDIreq [g/(ton*NM)]', padding=(5, 2))
label5 = ttk.Label(frame, style='new.TLabel', text='EEDIatt [g/(ton*NM)]', padding=(5, 2))
label6 = ttk.Label(frame, style='new.TLabel', text='None', padding=(5, 2))
label7 = ttk.Label(frame, style='new.TLabel', text='None', padding=(5, 2))
label8 = ttk.Label(frame, style='new.TLabel', text='None', padding=(5, 2))
label9 = ttk.Label(frame, style='new.TLabel', text='WPS', padding=(5, 2))
label10 = ttk.Label(frame, style='new.TLabel', text='SPS', padding=(5, 2))
label11 = ttk.Label(frame, style='new.TLabel', text='CCS', padding=(5, 2))
        labelExpl = ttk.Label(frame, style='new.TLabel', text='Guide: To order a fleet, select the settings and click "Another fleet" or "Complete". Otherwise, click "No order".', padding=(5, 2))
# List box
if currentYear < valueDict['addSysYear']:
fuelTypeList = ['HFO/Diesel','LNG']
else:
fuelTypeList = ['HFO/Diesel','LNG','NH3','H2']
v1 = StringVar()
lb = ttk.Combobox(frame, style='new.TCombobox', textvariable=v1,values=fuelTypeList)
if elapsedYear == 0:
lb.set('HFO/Diesel')
else:
lb.set(fleetAll[numCompany]['total']['lastOrderFuel'])
# Entry
v2 = StringVar()
if elapsedYear == 0:
v2.set('20000')
else:
v2.set(str(fleetAll[numCompany]['total']['lastOrderCAP']))
cb1 = ttk.Entry(frame, style='new.TEntry', textvariable=v2)
# Checkbutton
v3 = StringVar()
        v3.set('0') # initialize
cb2 = ttk.Checkbutton(frame, style='new.TCheckbutton', padding=(10), text='WPS', variable=v3)
# Checkbutton
v4 = StringVar()
        v4.set('0') # initialize
cb3 = ttk.Checkbutton(frame, style='new.TCheckbutton', padding=(10), text='SPS', variable=v4)
# Checkbutton
v5 = StringVar()
        if currentYear >= valueDict['addSysYear']:
            v5.set('0') # initialize
            cb4 = ttk.Checkbutton(frame, style='new.TCheckbutton', padding=(10), text='CCS', variable=v5)
        else:
            v5.set('0') # initialize
            cb4 = ttk.Checkbutton(frame, state='disable', style='new.TCheckbutton', padding=(10), text='CCS', variable=v5)
# Button
button1 = ttk.Button(frame, style='new.TButton', text='Another fleet', state='disabled', command=lambda: _buttonCommandAnother(fleetAll,numCompany,tOpSch,tbid,currentYear,elapsedYear,NShipFleet,parameterFile2,parameterFile12,parameterFile3,parameterFile5))
button2 = ttk.Button(frame, style='new.TButton', text='Complete', state='disabled', command=lambda: _buttonCommandComplete(root,fleetAll,numCompany,tOpSch,tbid,currentYear,elapsedYear,NShipFleet,parameterFile2,parameterFile12,parameterFile3,parameterFile5))
button3 = ttk.Button(frame, style='new.TButton', text='EEDI check', command=lambda: _buttonCommandCheck(valueDict,parameterFile3,rEEDIreq))
button4 = ttk.Button(frame, style='new.TButton', text='No order', command=lambda: _buttonCommandNoOrder(root))
# Layout
label1.grid(row=0, column=0)
label2.grid(row=0, column=1)
label3.grid(row=2, column=1)
label4.grid(row=2, column=2)
label5.grid(row=2, column=3)
label6.grid(row=3, column=1)
label7.grid(row=3, column=2)
label8.grid(row=3, column=3)
label9.grid(row=0, column=2)
label10.grid(row=0, column=3)
label11.grid(row=0, column=4)
cb1.grid(row=1, column=1)
cb2.grid(row=1, column=2)
cb3.grid(row=1, column=3)
cb4.grid(row=1, column=4)
lb.grid(row=1, column=0)
button1.grid(row=4, column=2)
button2.grid(row=4, column=4)
button3.grid(row=4, column=1)
button4.grid(row=4, column=0)
labelExpl.grid(row=5, column=0, columnspan=5)
root.deiconify()
root.mainloop()
return fleetAll
fleetAll = _orderShipGui(fleetAll,numCompany,valueDict,elapsedYear,tOpSch,tbid,currentYear,rEEDIreq,NShipFleet,parameterFile2,parameterFile12,parameterFile3,parameterFile5)
return fleetAll
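# yearlyCtaFunc: reads the service speed entered for each operating fleet, computes its
# yearly sailing distance, and sums the maximum yearly transport capacity (maxCta) of the
# company's fleets for the current year.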
def yearlyCtaFunc(fleetAll,numCompany,startYear,elapsedYear,NShipFleet,tOpSch,v,valueDict):
NumFleet = len(fleetAll[numCompany])
j = 0
maxCta = 0
currentYear = startYear+elapsedYear
for keyFleet in range(1,NumFleet):
if fleetAll[numCompany][keyFleet]['delivery'] <= currentYear and fleetAll[numCompany][keyFleet]['tOp'] < tOpSch:
tOpTemp = fleetAll[numCompany][keyFleet]['tOp']
fleetAll[numCompany][keyFleet]['v'][tOpTemp] = float(v[j].get()) # input for each fleet
fleetAll[numCompany][keyFleet]['d'][tOpTemp] = dFunc(valueDict["Dyear"],valueDict["Hday"],fleetAll[numCompany][keyFleet]['v'][tOpTemp],valueDict["Rrun"])
maxCta += NShipFleet * maxCtaFunc(fleetAll[numCompany][keyFleet]['CAPcnt'],fleetAll[numCompany][keyFleet]['d'][tOpTemp])
j += 1
fleetAll[numCompany]['total']['maxCta'][elapsedYear] = maxCta
return fleetAll
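# yearlyOperationFunc: evaluates one year of operation for a company. For every fleet in
# service it computes transport work (cta), fuel consumption, CO2 emissions and fuel cost,
# then aggregates company totals and derives sales, subsidy, carbon tax, balance, profit
# and the cumulative profit-per-CO2 index (Idx).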
def yearlyOperationFunc(fleetAll,numCompany,startYear,elapsedYear,NShipFleet,tOpSch,valueDict,rSubs,rTax,parameterFile4):
NumFleet = len(fleetAll[numCompany])
currentYear = startYear+elapsedYear
fleetAll[numCompany]['total']['costRfrb'][elapsedYear] = 0
fleetAll[numCompany]['total']['g'][elapsedYear] = 0
fleetAll[numCompany]['total']['cta'][elapsedYear] = 0
fleetAll[numCompany]['total']['costFuel'][elapsedYear] = 0
for keyFleet in range(1,NumFleet):
if fleetAll[numCompany][keyFleet]['delivery'] <= currentYear and fleetAll[numCompany][keyFleet]['tOp'] < tOpSch:
tOpTemp = fleetAll[numCompany][keyFleet]['tOp']
unitCostFuel, unitCostFuelHFO = unitCostFuelFunc(parameterFile4,fleetAll[numCompany][keyFleet]['fuelName'],currentYear)
fleetAll[numCompany][keyFleet]['cta'][tOpTemp] = ctaFunc(fleetAll[numCompany][keyFleet]['CAPcnt'],fleetAll[numCompany]['total']['rocc'][elapsedYear],fleetAll[numCompany][keyFleet]['d'][tOpTemp])
fleetAll[numCompany][keyFleet]['fShipORG'][tOpTemp], fleetAll[numCompany][keyFleet]['fShip'][tOpTemp] = fShipFunc(valueDict["kShip1"],valueDict["kShip2"],fleetAll[numCompany][keyFleet]['wDWT'],fleetAll[numCompany][keyFleet]['wFLD'],fleetAll[numCompany]['total']['rocc'][elapsedYear],valueDict["CNM2km"],fleetAll[numCompany][keyFleet]['v'][tOpTemp],fleetAll[numCompany][keyFleet]['d'][tOpTemp],valueDict["rWPS"],fleetAll[numCompany][keyFleet]['WPS'],fleetAll[numCompany][keyFleet]['CeqLHVship'])
fleetAll[numCompany][keyFleet]['fAuxORG'][tOpTemp], fleetAll[numCompany][keyFleet]['fAux'][tOpTemp] = fAuxFunc(valueDict["Dyear"],valueDict["Hday"],valueDict["Rrun"],valueDict["kAux1"],valueDict["kAux2"],fleetAll[numCompany][keyFleet]['wDWT'],valueDict["rSPS"],fleetAll[numCompany][keyFleet]['SPS'],fleetAll[numCompany][keyFleet]['CeqLHVaux'])
fleetAll[numCompany][keyFleet]['gORG'][tOpTemp], fleetAll[numCompany][keyFleet]['g'][tOpTemp] = gFunc(fleetAll[numCompany][keyFleet]['Cco2ship'],fleetAll[numCompany][keyFleet]['fShip'][tOpTemp],fleetAll[numCompany][keyFleet]['Cco2aux'],fleetAll[numCompany][keyFleet]['fAux'][tOpTemp],valueDict["rCCS"],fleetAll[numCompany][keyFleet]['CCS'])
fleetAll[numCompany][keyFleet]['costFuelORG'][tOpTemp], fleetAll[numCompany][keyFleet]['costFuel'][tOpTemp], fleetAll[numCompany][keyFleet]['dcostFuel'][tOpTemp] = costFuelFunc(unitCostFuelHFO, unitCostFuel, fleetAll[numCompany][keyFleet]['fShipORG'][tOpTemp], fleetAll[numCompany][keyFleet]['fAuxORG'][tOpTemp], fleetAll[numCompany][keyFleet]['fShip'][tOpTemp], fleetAll[numCompany][keyFleet]['fAux'][tOpTemp])
fleetAll[numCompany][keyFleet]['dcostShipping'][tOpTemp] = additionalShippingFeeFunc(tOpTemp, tOpSch, fleetAll[numCompany][keyFleet]['dcostFuel'][tOpTemp], fleetAll[numCompany][keyFleet]['costShip'], fleetAll[numCompany][keyFleet]['costShipBasicHFO'])
fleetAll[numCompany][keyFleet]['gTilde'][tOpTemp] = fleetAll[numCompany][keyFleet]['g'][tOpTemp] / fleetAll[numCompany][keyFleet]['cta'][tOpTemp]
fleetAll[numCompany]['total']['costRfrb'][elapsedYear] += NShipFleet * fleetAll[numCompany][keyFleet]['costRfrb'][tOpTemp]
fleetAll[numCompany]['total']['g'][elapsedYear] += NShipFleet * fleetAll[numCompany][keyFleet]['g'][tOpTemp]
fleetAll[numCompany]['total']['cta'][elapsedYear] += NShipFleet * fleetAll[numCompany][keyFleet]['cta'][tOpTemp]
fleetAll[numCompany]['total']['costFuel'][elapsedYear] += NShipFleet * fleetAll[numCompany][keyFleet]['costFuel'][tOpTemp]
fleetAll[numCompany]['total']['dcostFuel'][elapsedYear] += NShipFleet * fleetAll[numCompany][keyFleet]['dcostFuel'][tOpTemp]
for keyFleet in range(1,NumFleet):
if fleetAll[numCompany][keyFleet]['delivery'] <= currentYear and fleetAll[numCompany][keyFleet]['tOp'] < tOpSch:
fleetAll[numCompany][keyFleet]['year'][fleetAll[numCompany][keyFleet]['tOp']] = currentYear
fleetAll[numCompany][keyFleet]['tOp'] += 1
fleetAll[numCompany]['total']['costAll'][elapsedYear] = fleetAll[numCompany]['total']['costFuel'][elapsedYear] + fleetAll[numCompany]['total']['costShip'][elapsedYear] + fleetAll[numCompany]['total']['costRfrb'][elapsedYear]
fleetAll[numCompany]['total']['dcostEco'][elapsedYear] = fleetAll[numCompany]['total']['dcostFuel'][elapsedYear] + fleetAll[numCompany]['total']['costShip'][elapsedYear]-fleetAll[numCompany]['total']['costShipBasicHFO'][elapsedYear] + fleetAll[numCompany]['total']['costRfrb'][elapsedYear]
fleetAll[numCompany]['total']['nTransCnt'][elapsedYear] = fleetAll[numCompany]['total']['cta'][elapsedYear] / valueDict['dJPNA']
fleetAll[numCompany]['total']['costCnt'][elapsedYear] = (valueDict['costCntMax']-valueDict['costCntMin']) / (1+math.e**(-valueDict['aSgmd']*(fleetAll[numCompany]['total']['rocc'][elapsedYear]-valueDict['roccNom']))) + valueDict['costCntMin']
fleetAll[numCompany]['total']['sale'][elapsedYear] = fleetAll[numCompany]['total']['nTransCnt'][elapsedYear] * fleetAll[numCompany]['total']['costCnt'][elapsedYear]
fleetAll[numCompany]['total']['gTilde'][elapsedYear] = fleetAll[numCompany]['total']['g'][elapsedYear] / fleetAll[numCompany]['total']['cta'][elapsedYear]
fleetAll[numCompany]['total']['costTilde'][elapsedYear] = fleetAll[numCompany]['total']['costAll'][elapsedYear] / fleetAll[numCompany]['total']['cta'][elapsedYear]
fleetAll[numCompany]['total']['saleTilde'][elapsedYear] = fleetAll[numCompany]['total']['sale'][elapsedYear] / fleetAll[numCompany]['total']['cta'][elapsedYear]
fleetAll[numCompany]['total']['mSubs'][elapsedYear] = rSubs * fleetAll[numCompany]['total']['dcostEco'][elapsedYear]
fleetAll[numCompany]['total']['mTax'][elapsedYear] = rTax * fleetAll[numCompany]['total']['g'][elapsedYear]
fleetAll[numCompany]['total']['balance'][elapsedYear] = fleetAll[numCompany]['total']['mTax'][elapsedYear] - fleetAll[numCompany]['total']['mSubs'][elapsedYear]
fleetAll[numCompany]['total']['profit'][elapsedYear] = fleetAll[numCompany]['total']['sale'][elapsedYear] - fleetAll[numCompany]['total']['costAll'][elapsedYear] - fleetAll[numCompany]['total']['balance'][elapsedYear]
fleetAll[numCompany]['total']['profitSum'][elapsedYear] += fleetAll[numCompany]['total']['profit'][elapsedYear]
fleetAll[numCompany]['total']['gSum'][elapsedYear] += fleetAll[numCompany]['total']['g'][elapsedYear]
fleetAll[numCompany]['total']['Idx'][elapsedYear] = fleetAll[numCompany]['total']['profitSum'][elapsedYear] / fleetAll[numCompany]['total']['gSum'][elapsedYear]
return fleetAll
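# decideSpeedFunc: yearly speed-decision phase. A dialog lists every operating fleet with
# its maximum (reduced design) speed; the player enters service speeds (>= 12 kt and not
# above the maximum), which are validated and passed to yearlyCtaFunc via "Check"/"Next".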
def decideSpeedFunc(fleetAll,numCompany,startYear,elapsedYear,NShipFleet,tOpSch,valueDict):
def _surviceSpeedGui(fleetAll,numCompany,startYear,elapsedYear,NShipFleet,tOpSch,valueDict):
def _buttonCommandNext(root,fleetAll,numCompany,startYear,elapsedYear,NShipFleet,tOpSch,valueDict):
NumFleet = len(fleetAll[numCompany])
j = 0
goAhead = True
for keyFleet in range(1,NumFleet):
if fleetAll[numCompany][keyFleet]['delivery'] <= currentYear and fleetAll[numCompany][keyFleet]['tOp'] < tOpSch:
tOpTemp = fleetAll[numCompany][keyFleet]['tOp']
if float(v13[j].get()) < 12 or float(v13[j].get()) > fleetAll[numCompany][keyFleet]['vDsgnRed'][tOpTemp]:
goAhead = False
j += 1
if goAhead:
fleetAll = yearlyCtaFunc(fleetAll,numCompany,startYear,elapsedYear,NShipFleet,tOpSch,v13,valueDict)
fleetAll[numCompany]['total']['atOnce'][elapsedYear] = float(vAtOnce.get())
root.quit()
root.destroy()
else:
button2['state'] = 'disabled'
def _buttonCommandNext2(root):
root.quit()
root.destroy()
def _buttonCommandCalc(fleetAll,numCompany,startYear,elapsedYear,NShipFleet,tOpSch,valueDict):
fleetAll = yearlyCtaFunc(fleetAll,numCompany,startYear,elapsedYear,NShipFleet,tOpSch,v13,valueDict)
#labelRes4['text'] = str('{:.3g}'.format(fleetAll[numCompany]['total']['cta'][elapsedYear]))
#labelRes6['text'] = str('{:.4g}'.format(fleetAll[numCompany]['total']['rocc'][elapsedYear]))
#labelRes8['text'] = str('{:.3g}'.format(fleetAll[numCompany]['total']['costFuel'][elapsedYear]))
#labelRes10['text'] = str('{:.3g}'.format(fleetAll[numCompany]['total']['g'][elapsedYear]))
button2['state'] = 'normal'
NumFleet = len(fleetAll[numCompany])
j = 0
for keyFleet in range(1,NumFleet):
if fleetAll[numCompany][keyFleet]['delivery'] <= currentYear and fleetAll[numCompany][keyFleet]['tOp'] < tOpSch:
tOpTemp = fleetAll[numCompany][keyFleet]['tOp']
if float(v13[j].get()) < 12 or float(v13[j].get()) > fleetAll[numCompany][keyFleet]['vDsgnRed'][tOpTemp]:
button2['state'] = 'disabled'
j += 1
def _buttonCommandAtOnce():
NumFleet = len(fleetAll[numCompany])
j = 0
for keyFleet in range(1,NumFleet):
if fleetAll[numCompany][keyFleet]['delivery'] <= currentYear and fleetAll[numCompany][keyFleet]['tOp'] < tOpSch:
tOpTemp = fleetAll[numCompany][keyFleet]['tOp']
#if fleetAll[numCompany][keyFleet]['v'][tOpTemp-1] == 0:
# v13[j].set(str(int(min([float(vAtOnce.get()),fleetAll[numCompany][keyFleet]['vDsgnRed'][tOpTemp]]))))
#else:
# v13[j].set(str(int(min([float(vAtOnce.get()),fleetAll[numCompany][keyFleet]['v'][tOpTemp-1]]))))
v13[j].set(str(int(min([float(vAtOnce.get()),fleetAll[numCompany][keyFleet]['vDsgnRed'][tOpTemp]]))))
j += 1
button1['state'] = 'normal'
root = Tk()
root.title('Company '+str(numCompany)+' : Service Speed in '+str(startYear+elapsedYear))
width = 1100
height = 400
placeX = root.winfo_screenwidth()/2 - width/2
placeY = root.winfo_screenheight()/2 - height/2
widgetSize = str(width)+'x'+str(height)+'+'+str(int(placeX))+'+'+str(int(placeY))
root.geometry(widgetSize)
canvas = Canvas(root, width=width, height=height)
# Frame
style = ttk.Style()
style.theme_use('default')
if numCompany == 1:
color = '#ffcccc'
elif numCompany == 2:
color = '#ffedab'
elif numCompany == 3:
color = '#a4a8d4'
root['bg'] = color
style.configure('new.TFrame', foreground='black', background=color)
style.configure('new.TLabel', foreground='black', background=color)
style.configure('new.TButton', foreground='black', background=color)
style.configure('new.TCheckbutton', foreground='black', background=color)
style.configure('new.TEntry', foreground='black', background=color)
frame = ttk.Frame(root, style='new.TFrame', padding=20)
frame.pack()
frame.bind("<Configure>", lambda e: canvas.configure(scrollregion=canvas.bbox("all")))
vbar = Scrollbar(root, orient="vertical")
vbar.config(command=canvas.yview)
vbar.pack(side=RIGHT,fill="y")
canvas['bg'] = color
canvas.create_window((placeX, placeY), window=frame, anchor=CENTER)
canvas.pack()
canvas.update_idletasks()
canvas.configure(yscrollcommand=vbar.set)
canvas.yview_moveto(0)
# Label
labelAtOnce = ttk.Label(frame, style='new.TLabel', text='Input all service speeds at once (12<=) [kt]:', padding=(5, 2))
vAtOnce = StringVar()
if elapsedYear == 0:
vAtOnce.set('18')
else:
vAtOnce.set(str(int(fleetAll[numCompany]['total']['atOnce'][elapsedYear-1])))
labelAtOnce2 = ttk.Entry(frame, style='new.TEntry', textvariable=vAtOnce)
#labelRes1 = ttk.Label(frame, style='new.TLabel',text='Assigned demand [TEU*NM]:', padding=(5, 2))
#labelRes2 = ttk.Label(frame, style='new.TLabel',text=str('{:.3g}'.format(Di)), padding=(5, 2))
#labelRes3 = ttk.Label(frame, style='new.TLabel',text='Cargo trasnsport amount [TEU*NM]:', padding=(5, 2))
#labelRes4 = ttk.Label(frame, style='new.TLabel',text='None', padding=(5, 2))
#labelRes5 = ttk.Label(frame, style='new.TLabel',text='Occupancy rate [%]:', padding=(5, 2))
#labelRes6 = ttk.Label(frame, style='new.TLabel',text='None', padding=(5, 2))
#labelRes7 = ttk.Label(frame, style='new.TLabel',text='Fuel cost [$]:', padding=(5, 2))
#labelRes8 = ttk.Label(frame, style='new.TLabel',text='None', padding=(5, 2))
#labelRes9 = ttk.Label(frame, style='new.TLabel',text='CO2 [ton]:', padding=(5, 2))
#labelRes10 = ttk.Label(frame, style='new.TLabel',text='None', padding=(5, 2))
label0 = ttk.Label(frame, style='new.TLabel',text='No.', padding=(5, 2))
label1 = ttk.Label(frame, style='new.TLabel',text='Fuel type', padding=(5, 2))
label2 = ttk.Label(frame, style='new.TLabel',text='Capacity [TEU]', padding=(5, 2))
label3 = ttk.Label(frame, style='new.TLabel',text='WPS', padding=(5, 2))
label4 = ttk.Label(frame, style='new.TLabel',text='SPS', padding=(5, 2))
label5 = ttk.Label(frame, style='new.TLabel',text='CCS', padding=(5, 2))
label6 = ttk.Label(frame, style='new.TLabel',text='Service speed (12<=) [kt]', padding=(5, 2))
label7 = ttk.Label(frame, style='new.TLabel',text='Maximum speed [kt]', padding=(5, 2))
label00 = []
label8 = []
label9 = []
label10 = []
label11 = []
label12 = []
label13 = []
label14 = []
v13 = []
currentYear = startYear+elapsedYear
NumFleet = len(fleetAll[numCompany])
for keyFleet in range(1,NumFleet):
if fleetAll[numCompany][keyFleet]['delivery'] <= currentYear and fleetAll[numCompany][keyFleet]['tOp'] < tOpSch:
label00.append(ttk.Label(frame, style='new.TLabel',text=str(keyFleet), padding=(5, 2)))
tOpTemp = fleetAll[numCompany][keyFleet]['tOp']
if fleetAll[numCompany][keyFleet]['fuelName'] == 'HFO':
label8.append(ttk.Label(frame, style='new.TLabel',text='HFO/Diesel', padding=(5, 2)))
else:
label8.append(ttk.Label(frame, style='new.TLabel',text=fleetAll[numCompany][keyFleet]['fuelName'], padding=(5, 2)))
label9.append(ttk.Label(frame, style='new.TLabel',text=str(int(fleetAll[numCompany][keyFleet]['CAPcnt'])), padding=(5, 2)))
if fleetAll[numCompany][keyFleet]['WPS']:
label10.append(ttk.Label(frame, style='new.TLabel',text='Yes', padding=(5, 2)))
else:
label10.append(ttk.Label(frame, style='new.TLabel',text='No', padding=(5, 2)))
if fleetAll[numCompany][keyFleet]['SPS']:
label11.append(ttk.Label(frame, style='new.TLabel',text='Yes', padding=(5, 2)))
else:
label11.append(ttk.Label(frame, style='new.TLabel',text='No', padding=(5, 2)))
if fleetAll[numCompany][keyFleet]['CCS']:
label12.append(ttk.Label(frame, style='new.TLabel',text='Yes', padding=(5, 2)))
else:
label12.append(ttk.Label(frame, style='new.TLabel',text='No', padding=(5, 2)))
tOpTemp = fleetAll[numCompany][keyFleet]['tOp']
v13.append(StringVar())
if fleetAll[numCompany][keyFleet]['v'][tOpTemp-1] == 0:
#v13[-1].set(str(int(fleetAll[numCompany][keyFleet]['vDsgnRed'][tOpTemp])))
v13[-1].set(str(int(18)))
else:
v13[-1].set(str(int(fleetAll[numCompany][keyFleet]['v'][tOpTemp-1])))
#v13[-1].set('None')
label13.append(ttk.Entry(frame, style='new.TEntry',textvariable=v13[-1]))
label14.append(ttk.Label(frame, style='new.TLabel',text=str(int(fleetAll[numCompany][keyFleet]['vDsgnRed'][tOpTemp])), padding=(5, 2)))
        labelExpl = ttk.Label(frame, style='new.TLabel', text='Guide: Enter a service speed for all fleets first and click "Input", then adjust individual speeds if you want. After entering all values, click "Check" and then "Next".', padding=(5, 2))
labelExpl2 = ttk.Label(frame, style='new.TLabel', text='Guide: You have no fleet. Click "Next".', padding=(5, 2))
# Button
button1 = ttk.Button(frame, style='new.TButton',text='Check', state='disabled', command=lambda: _buttonCommandCalc(fleetAll,numCompany,startYear,elapsedYear,NShipFleet,tOpSch,valueDict))
button2 = ttk.Button(frame, style='new.TButton',text='Next', state='disabled', command=lambda: _buttonCommandNext(root,fleetAll,numCompany,startYear,elapsedYear,NShipFleet,tOpSch,valueDict))
button22 = ttk.Button(frame, style='new.TButton',text='Next', command=lambda: _buttonCommandNext2(root))
button3 = ttk.Button(frame, style='new.TButton',text='Input', command=lambda: _buttonCommandAtOnce())
# Layout
if len(label8) > 0:
#labelRes1.grid(row=0, column=1)
#labelRes2.grid(row=0, column=2)
#labelRes3.grid(row=0, column=1)
#labelRes4.grid(row=0, column=2)
#labelRes5.grid(row=1, column=1)
#labelRes6.grid(row=1, column=2)
#labelRes7.grid(row=1, column=4)
#labelRes8.grid(row=1, column=5)
#labelRes9.grid(row=2, column=1)
#labelRes10.grid(row=2, column=2)
label0.grid(row=3, column=0)
label1.grid(row=3, column=1)
label2.grid(row=3, column=2)
label3.grid(row=3, column=3)
label4.grid(row=3, column=4)
label5.grid(row=3, column=5)
label6.grid(row=3, column=6)
label7.grid(row=3, column=7)
for i, j in enumerate(label8):
label00[i].grid(row=i+4, column=0)
label8[i].grid(row=i+4, column=1)
label9[i].grid(row=i+4, column=2)
label10[i].grid(row=i+4, column=3)
label11[i].grid(row=i+4, column=4)
label12[i].grid(row=i+4, column=5)
label13[i].grid(row=i+4, column=6)
label14[i].grid(row=i+4, column=7)
labelAtOnce.grid(row=i+5, column=1)
labelAtOnce2.grid(row=i+5, column=2)
button3.grid(row=i+5, column=3)
button1.grid(row=i+5, column=6)
button2.grid(row=i+5, column=7)
labelExpl.grid(row=i+6, column=0,columnspan=8)
else:
labelExpl2.grid(row=0, column=0)
button22.grid(row=0, column=1)
root.deiconify()
root.mainloop()
return fleetAll
fleetAll = _surviceSpeedGui(fleetAll,numCompany,startYear,elapsedYear,NShipFleet,tOpSch,valueDict)
return fleetAll
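# outputGuiFunc: simple result viewer. Builds one matplotlib figure per time-series entry
# in fleetAll[...]['total'] and shows them in a Tk window with a combobox to switch plots
# and a "Next Year" button to close the window.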
def outputGuiFunc(fleetAll,startYear,elapsedYear,lastYear,tOpSch,unitDict):
def _eachFrame(frame,fig,keyi,keyList,root):
'''
def _on_key_press(event):
#print("you pressed {}".format(event.key))
key_press_handler(event, canvas, toolbar)
'''
def _buttonCommandNext(root,fig):
for keyi in keyList:
if type(fleetAll[1]['total'][keyi]) is np.ndarray:
fig[keyi].clf()
plt.close(fig[keyi])
root.quit() # stops mainloop
            root.destroy() # this is necessary on Windows to prevent "Fatal Python Error: PyEval_RestoreThread: NULL tstate"
def _buttonCommandShow(frameShow):
frameShow.tkraise()
frameEach = frame[keyi]
frameEach.grid(row=0, column=0, sticky="nsew")
# Canvas
canvas = FigureCanvasTkAgg(fig[keyi], master=frameEach)
canvas.draw()
canvas.get_tk_widget().place(relx=0.03, rely=0.1)
'''
toolbar = NavigationToolbar2Tk(canvas, root)
toolbar.update()
canvas.get_tk_widget().grid(row=1, column=0)
canvas.mpl_connect("key_press_event", _on_key_press)
'''
# Button
button1 = Button(master=frameEach, text="Next Year", command=lambda: _buttonCommandNext(root,fig))
button1.place(relx=0.22, rely=0.9)
button2 = Button(master=frameEach, text="Show", command=lambda: _buttonCommandShow(frame[v.get()]))
button2.place(relx=0.59, rely=0.9)
# List box
v = StringVar()
lb = ttk.Combobox(frameEach,textvariable=v,values=keyList)
lb.set(keyi)
lb.place(relx=0.66, rely=0.9)
# Tkinter Class
root = Tk()
root.title('Result in '+str(startYear+elapsedYear))
width = 800
height = 600
placeX = root.winfo_screenwidth()/2 - width/2
placeY = root.winfo_screenheight()/2 - height/2
widgetSize = str(width)+'x'+str(height)+'+'+str(int(placeX))+'+'+str(int(placeY))
root.geometry(widgetSize)
fig = {}
frame = {}
keyList = list(fleetAll[1]['total'].keys())
for keyi in keyList:
if type(fleetAll[1]['total'][keyi]) is np.ndarray:
fig[keyi] = outputAllCompany2Func(fleetAll,startYear,elapsedYear,keyi,unitDict)
frame[keyi] = ttk.Frame(root, height=height, width=width)
for keyi in keyList:
if type(fleetAll[1]['total'][keyi]) is np.ndarray:
_eachFrame(frame,fig,keyi,keyList,root)
frame[keyList[0]].tkraise()
# root
mainloop()
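# outputGui2Func: extended result viewer shown after each year. The window combines fixed
# panels (CO2, profit, index) with a selectable panel, each switchable between per-company
# and total figures, plus a "Next Year" button that closes the window.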
def outputGui2Func(fleetAll,valueDict,startYear,elapsedYear,lastYear,tOpSch,unitDict):
def _eachFrameCO2(frame,fig,keyi,ifTotal):
def _buttonCommandShow(totalOrTilde):
if totalOrTilde == 'Total':
frameTotal[keyi].tkraise()
else:
frameComp[keyi].tkraise()
frameEach = frame[keyi]
frameEach.grid(row=0, column=1, pady=0,sticky="nsew")
# Canvas
canvas = FigureCanvasTkAgg(fig[keyi], master=frameEach)
canvas.draw()
canvas.get_tk_widget().place(relx=0.03, rely=0.1)
# Button
v1 = StringVar()
button1 = ttk.Combobox(frameEach,textvariable=v1,values=['Total','Each company'])
if ifTotal:
button1.set('Total')
else:
button1.set('Each company')
button1.place(relx=0.45, rely=0.9)
button2 = Button(master=frameEach, text="Show", command=lambda: _buttonCommandShow(v1.get()))
button2.place(relx=0.8, rely=0.9)
def _eachFrameProfit(frame,fig,keyi):
frameEach = frame[keyi]
frameEach.grid(row=0, column=0, pady=0,sticky="nsew")
# Canvas
canvas = FigureCanvasTkAgg(fig[keyi], master=frameEach)
canvas.draw()
canvas.get_tk_widget().place(relx=0.03, rely=0.1)
def _eachFrameIndex(frame,fig,keyi):
frameEach = frame[keyi]
frameEach.grid(row=1, column=0, pady=0,sticky="nsew")
# Canvas
canvas = FigureCanvasTkAgg(fig[keyi], master=frameEach)
canvas.draw()
canvas.get_tk_widget().place(relx=0.03, rely=0.1)
def _eachFrameSel(frame,fig,keyi,keyList,ifSelTotal):
def _buttonCommandShow(keyi,ifTotal):
if ifTotal == 'Total':
frameSelTotal[keyi].tkraise()
else:
frameSel[keyi].tkraise()
def _buttonCommandNext(root,fig):
for keyi in keyList:
if type(fleetAll[1]['total'][keyi]) is np.ndarray:
fig[keyi].clf()
figTotal[keyi].clf()
plt.close(fig[keyi])
root.quit() # stops mainloop
            root.destroy() # this is necessary on Windows to prevent "Fatal Python Error: PyEval_RestoreThread: NULL tstate"
frameEach = frame[keyi]
frameEach.grid(row=1, column=1, pady=0, sticky="nsew")
# Canvas
canvas = FigureCanvasTkAgg(fig[keyi], master=frameEach)
canvas.draw()
canvas.get_tk_widget().place(relx=0.03, rely=0.1)
# List box
v1 = StringVar()
lb = ttk.Combobox(frameEach,textvariable=v1,values=keyList)
lb.set(keyi)
lb.place(relx=0.45, rely=0.9)
# Button
v2 = StringVar()
button1 = ttk.Combobox(frameEach,textvariable=v2,values=['Total','Each company'])
if ifSelTotal:
button1.set('Total')
else:
button1.set('Each company')
button1.place(relx=0.02, rely=0.9)
button2 = Button(master=frameEach, text="Show", command=lambda: _buttonCommandShow(v1.get(),v2.get()))
button2.place(relx=0.8, rely=0.9)
buttonNext = Button(master=root, text="Next Year", command=lambda: _buttonCommandNext(root,fig))
buttonNext.place(relx=0.9, rely=0.9)
# Tkinter Class
root = Tk()
root.title('Result in '+str(startYear+elapsedYear))
width = root.winfo_screenwidth()-400
height = root.winfo_screenheight()-80
placeX = 0
placeY = 0
widgetSize = str(int(width))+'x'+str(int(height))+'+'+str(int(placeX))+'+'+str(int(placeY))
root.geometry(widgetSize)
fig = {}
frameComp = {}
frameTotal = {}
frameSel = {}
frameSelTotal = {}
figTotal = {}
removeList = []
keyList = list(fleetAll[1]['total'].keys())
figWidth,figHeight = width/2-50, height/2
for keyi in keyList:
if type(fleetAll[1]['total'][keyi]) is np.ndarray:
fig[keyi] = outputAllCompany2Func(fleetAll,valueDict,startYear,elapsedYear,keyi,unitDict,figWidth/100-1,figHeight/100-1)
figTotal[keyi] = outputAllCompanyTotalFunc(fleetAll,valueDict,startYear,elapsedYear,keyi,unitDict,figWidth/100-1,figHeight/100-1)
frameComp[keyi] = ttk.Frame(root, height=figHeight, width=figWidth)
frameTotal[keyi] = ttk.Frame(root, height=figHeight, width=figWidth)
frameSel[keyi] = ttk.Frame(root, height=figHeight, width=figWidth)
frameSelTotal[keyi] = ttk.Frame(root, height=figHeight, width=figWidth)
else:
removeList.append(keyi)
for keyi in removeList:
keyList.remove(keyi)
_eachFrameCO2(frameComp,fig,'g',False)
_eachFrameCO2(frameTotal,figTotal,'g',True)
_eachFrameProfit(frameComp,fig,'profit')
_eachFrameIndex(frameComp,fig,'Idx')
for keyi in keyList:
if type(fleetAll[1]['total'][keyi]) is np.ndarray:
_eachFrameSel(frameSel,fig,keyi,keyList,False)
_eachFrameSel(frameSelTotal,figTotal,keyi,keyList,True)
#frame[keyList[0]].tkraise()
# root
mainloop()
return fleetAll
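# outputEachCompanyFunc: 2x2 summary plot for a single company (S, g/cta, g and
# dCshipping/cta over the elapsed years). Shows the figure on Windows and saves the figure
# plus CSV dumps of the plotted series on POSIX systems.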
def outputEachCompanyFunc(fleetAll,numCompany,startYear,elapsedYear,lastYear,tOpSch,decisionListName):
fig, ax = plt.subplots(2, 2, figsize=(10.0, 10.0))
plt.subplots_adjust(wspace=0.4, hspace=0.6)
SPlot = fleetAll[numCompany]['total']['S'][:elapsedYear+1]
ax[0,0].plot(fleetAll['year'][:elapsedYear+1],fleetAll[numCompany]['total']['S'][:elapsedYear+1])
ax[0,0].set_title(r"$ ( \Delta C_{shipping} - \alpha g) \ / \ cta$")
ax[0,0].set_xlabel('Year')
ax[0,0].yaxis.set_major_formatter(ScalarFormatter(useMathText=True))
ax[0,0].ticklabel_format(style="sci", axis="y",scilimits=(0,0))
#ax[0].set_ylabel('Year')
gTildePlot = fleetAll[numCompany]['total']['gTilde'][:elapsedYear+1]*1000000
ax[1,0].plot(fleetAll['year'][:elapsedYear+1],fleetAll[numCompany]['total']['gTilde'][:elapsedYear+1]*1000000)
ax[1,0].set_title("g / cta")
ax[1,0].set_xlabel('Year')
    ax[1,0].set_ylabel(r'g / (TEU $\cdot$ NM)')
#ax[1,0].yaxis.set_major_formatter(ScalarFormatter(useMathText=True))
ax[1,0].ticklabel_format(style="sci", axis="y",scilimits=(0,0))
gPlot = fleetAll[numCompany]['total']['g'][:elapsedYear+1]/1000000
ax[0,1].plot(fleetAll['year'][:elapsedYear+1],fleetAll[numCompany]['total']['g'][:elapsedYear+1]/1000000)
ax[0,1].set_title("g")
ax[0,1].set_xlabel('Year')
ax[0,1].set_ylabel('Millions ton')
ax[0,1].yaxis.set_major_formatter(ScalarFormatter(useMathText=True))
ax[0,1].ticklabel_format(style="sci", axis="y",scilimits=(0,0))
dcostShippingTildePlot = fleetAll[numCompany]['total']['dcostShippingTilde'][:elapsedYear+1]
ax[1,1].plot(fleetAll['year'][:elapsedYear+1],fleetAll[numCompany]['total']['dcostShippingTilde'][:elapsedYear+1])
    ax[1,1].set_title(r"$\Delta C_{shipping} \ / \ cta$")
    ax[1,1].set_xlabel('Year')
    ax[1,1].set_ylabel(r'\$ / (TEU $\cdot$ NM)')
ax[1,1].yaxis.set_major_formatter(ScalarFormatter(useMathText=True))
ax[1,1].ticklabel_format(style="sci", axis="y",scilimits=(0,0))
#if i == 1:
# ax2.bar(fleetAll['year'][:elapsedYear+1], simu)
#else:
# ax2.bar(fleetAll['year'][:elapsedYear+1], simu, bottom=simuSum)
#fig.tight_layout()
if os.name == 'nt':
plt.show()
elif os.name == 'posix':
plt.savefig("Company"+str(numCompany)+decisionListName+".jpg")
np.savetxt("Company"+str(numCompany)+decisionListName+'_S.csv',SPlot)
np.savetxt("Company"+str(numCompany)+decisionListName+'_gTilde.csv',gTildePlot)
np.savetxt("Company"+str(numCompany)+decisionListName+'_g.csv',gPlot)
np.savetxt("Company"+str(numCompany)+decisionListName+'_dcostShippingTilde.csv',dcostShippingTildePlot)
def outputAllCompanyFunc(fleetAll,startYear,elapsedYear,lastYear,tOpSch,unitDict):
currentYear = startYear+elapsedYear
if elapsedYear > 0:
year = fleetAll['year'][:elapsedYear+1]
fig, axes = plt.subplots(3, 6, figsize=(16.0, 9.0))
plt.subplots_adjust(wspace=0.4, hspace=0.6)
for numCompany in range(1,4):
for ax, keyi in zip(fig.axes, fleetAll[numCompany]['total'].keys()):
ax.plot(year,fleetAll[numCompany]['total'][keyi][:elapsedYear+1],label="Company"+str(numCompany))
ax.set_title(keyi)
ax.set_xlabel('Year')
ax.legend()
#ax.ticklabel_format(style="sci", axis="y",scilimits=(0,0))
ax.set_ylabel(unitDict[keyi])
ax.title.set_size(10)
ax.xaxis.label.set_size(10)
#ax.get_xaxis().get_major_formatter().set_useOffset(False)
#ax.get_xaxis().set_major_locator(MaxNLocator(integer=True))
ax.set_xticks(year)
ax.yaxis.label.set_size(10)
else:
fig, axes = plt.subplots(3, 6, figsize=(16.0, 9.0))
plt.subplots_adjust(wspace=0.4, hspace=0.6)
for numCompany in range(1,4):
for ax, keyi in zip(fig.axes, fleetAll[numCompany]['total'].keys()):
ax.scatter(startYear,fleetAll[numCompany]['total'][keyi][0],label="Company"+str(numCompany))
ax.set_title(keyi)
ax.set_xlabel('Year')
ax.legend()
#ax.ticklabel_format(style="sci", axis="y",scilimits=(0,0))
ax.set_ylabel(unitDict[keyi])
ax.title.set_size(10)
ax.xaxis.label.set_size(10)
#ax.set_xticks(np.array([startYear-1,startYear,startYear+1]))
ax.set_xticks(np.array([startYear]))
ax.yaxis.label.set_size(10)
'''
if os.name == 'nt':
plt.show()
elif os.name == 'posix':
plt.savefig("TotalValues.jpg")
for j, listName in enumerate(decisionListNameList,1):
valueName = []
outputList = []
for i, keyi in enumerate(fleetAll[j]['total'].keys(),1):
valueName.append(keyi)
outputList.append(fleetAll[j]['total'][keyi][:elapsedYear+1])
outputData = np.stack(outputList,1)
outputDf = pd.DataFrame(data=outputData, index=year, columns=valueName, dtype='float')
outputDf.to_csv("Company"+str(j)+'_'+listName+'.csv')
'''
'''
figDict = {}
for j, listName in enumerate(decisionListNameList,1):
for keyFleet in fleetAll[j].keys():
valueName = []
outputList = []
if type(keyFleet) is int:
for keyValue in fleetAll[j][keyFleet].keys():
if type(fleetAll[j][keyFleet][keyValue]) is np.ndarray:
valueName.append(keyValue)
#if keyFleet == 1 and j == 1:
# fig, ax = plt.subplots(1, 1, figsize=(12.0, 8.0))
# figDict.setdefault(keyValue,ax)
plotArr = np.zeros(lastYear-startYear+1)
if fleetAll[j][keyFleet]['delivery'] >= startYear:
plotArr[fleetAll[j][keyFleet]['delivery']-startYear:fleetAll[j][keyFleet]['delivery']-startYear+fleetAll[j][keyFleet]['tOp']] = fleetAll[j][keyFleet][keyValue][:fleetAll[j][keyFleet]['tOp']]
else:
plotArr[:tOpSch-startYear+fleetAll[j][keyFleet]['delivery']] = fleetAll[j][keyFleet][keyValue][startYear-fleetAll[j][keyFleet]['delivery']:fleetAll[j][keyFleet]['tOp']]
outputList.append(plotArr)
#figDict[keyValue].plot(year,plotArr,label="Fleet"+str(keyFleet))
#figDict[keyValue].set_title(keyValue)
#figDict[keyValue].set_xlabel('Year')
#figDict[keyValue].legend()
#figDict[keyValue].ticklabel_format(style="sci", axis="y",scilimits=(0,0))
#figDict[keyValue].set_ylabel(unitDict[keyValue])
#if j == len(decisionListNameList) and keyFleet == len(list(fleetAll[j].keys()))-1 and os.name == 'nt':
# plt.show()
#elif j == len(decisionListNameList) and keyFleet == len(list(fleetAll[j].keys()))-1 and os.name == 'posix':
# plt.savefig(str(keyValue)+".jpg")
if os.name == 'posix':
outputData = np.stack(outputList,1)
outputDf = pd.DataFrame(data=outputData, index=year, columns=valueName, dtype='float')
outputDf.to_csv("Company"+str(j)+'_'+listName+'_'+'Fleet'+str(keyFleet)+'.csv')
'''
return fig
def outputAllCompany2Func(fleetAll,valueDict,startYear,elapsedYear,keyi,unitDict,figWidth,figHeight):
plt.rcParams.update({'figure.max_open_warning': 0})
currentYear = startYear+elapsedYear
#fig, ax = plt.subplots(1, 1, figsize=(figWidth, figHeight))
fig = Figure(figsize=(figWidth, figHeight))
ax = fig.add_subplot(1,1,1)
#plt.subplots_adjust(wspace=0.4, hspace=0.6)
ticArr = np.array([2020,2025,2030,2035,2040,2045,2050])
if elapsedYear > 0:
year = fleetAll['year'][:elapsedYear+1]
for numCompany in range(1,4):
if numCompany == 1:
color = 'tomato'
elif numCompany == 2:
color = 'gold'
elif numCompany == 3:
color = 'royalblue'
ax.plot(year,fleetAll[numCompany]['total'][keyi][:elapsedYear+1],color=color, marker=".",label="Company"+str(numCompany))
ax.set_title(keyi)
ax.set_xlabel('Year')
ax.ticklabel_format(style="sci", axis="y",scilimits=(0,0))
ax.set_ylabel(unitDict[keyi])
#ax.title.set_size(10)
#ax.xaxis.label.set_size(10)
#ax.get_xaxis().get_major_formatter().set_useOffset(False)
#ax.get_xaxis().set_major_locator(MaxNLocator(integer=True))
ax.set_xticks(ticArr)
#ax.yaxis.label.set_size(10)
else:
for numCompany in range(1,4):
if numCompany == 1:
color = 'tomato'
elif numCompany == 2:
color = 'gold'
elif numCompany == 3:
color = 'royalblue'
ax.scatter(startYear,fleetAll[numCompany]['total'][keyi][0],color=color,label="Company"+str(numCompany))
ax.set_title(keyi)
ax.set_xlabel('Year')
ax.ticklabel_format(style="sci", axis="y",scilimits=(0,0))
ax.set_ylabel(unitDict[keyi])
#ax.title.set_size(10)
#ax.xaxis.label.set_size(10)
#ax.set_xticks(np.array([startYear-1,startYear,startYear+1]))
ax.set_xticks(np.array([startYear]))
#ax.yaxis.label.set_size(10)
if keyi == 'g':
IMOgoal = np.full(ticArr.shape,valueDict['IMOgoal']/3)
color = 'olivedrab'
ax.plot(ticArr,IMOgoal,color=color, marker=".",label="IMO goal")
y_min, y_max = ax.get_ylim()
ax.set_ylim(0, y_max)
ax.legend()
return fig
def outputAllCompanyTotalFunc(fleetAll,valueDict,startYear,elapsedYear,keyi,unitDict,figWidth,figHeight):
plt.rcParams.update({'figure.max_open_warning': 0})
currentYear = startYear+elapsedYear
#fig, ax = plt.subplots(1, 1, figsize=(figWidth, figHeight))
fig = Figure(figsize=(figWidth, figHeight))
ax = fig.add_subplot(1,1,1)
#plt.subplots_adjust(wspace=0.4, hspace=0.6)
    ticArr = np.array([2020,2025,2030,2035,2040,2045,2050])
import math
import random
import numpy
# ALERT! number of human / non-human feature vectors must be the same!
class NeuralNetwork:
def __init__(self, human_feature_vectors, nonhuman_feature_vectors, hidden_layer_neurons):
self.epochs = 22
self.learning_rate = 0.1
self.human_feature_vectors = human_feature_vectors
print('human feature vectors dimensions: ' + str(len(self.human_feature_vectors)) + ' x ' + str(len(self.human_feature_vectors[0])))
self.nonhuman_feature_vectors = nonhuman_feature_vectors
# hidden_layer_neurons should be 200 or 400
self.hidden_layer_neurons = hidden_layer_neurons
# hidden layer weights should be 7524 x 200
self.hidden_layer_weights = numpy.array(self.create_random_matrix(self.hidden_layer_neurons, len(self.human_feature_vectors[0])))
# hidden layer bias should be 1 x 200
self.hidden_layer_bias = numpy.array([[-1.0] * self.hidden_layer_neurons] * 1)
# output layer weights should be 200 x 1
self.output_layer_weights = self.create_random_matrix(1, self.hidden_layer_neurons)
self.output_layer_bias = [[-1.0]]
# dummy value for hidden_layer_output
self.hidden_layer_output = numpy.array([0.0])
# dummy value for predicted output
self.predicted_output = numpy.array([0.0])
# FIXME dummy value for output layer delta - needs to be 1 x 10
        self.output_layer_delta = numpy.array([0.0])
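        # Illustrative sketch of the forward pass these matrices support,
        # taking the shapes stated in the comments above (7524 x 200 hidden
        # weights, 200 x 1 output weights) and assuming sigmoid activations;
        # the class's own train/predict methods are not shown in this excerpt:
        #     hidden = 1.0 / (1.0 + numpy.exp(-(x.dot(self.hidden_layer_weights) + self.hidden_layer_bias)))
        #     output = 1.0 / (1.0 + numpy.exp(-(hidden.dot(self.output_layer_weights) + self.output_layer_bias)))
        # with x a 1 x 7524 feature vector, hidden 1 x hidden_layer_neurons and
        # output a single probability of the window containing a human.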
# coding: utf-8
# pylint: disable=R0914
"""tests aero cards"""
import os
from collections import defaultdict
import unittest
from io import StringIO
from typing import Tuple, Optional, Any
import numpy as np
from cpylog import SimpleLogger
import pyNastran
from pyNastran.bdf.bdf import BDF, CORD2R, BDFCard, SET1, read_bdf
from pyNastran.bdf.test.test_bdf import run_bdf
from pyNastran.bdf.cards.aero.aero import (
AEFACT, AELIST, AEPARM,
CAERO1, CAERO2, CAERO3, CAERO4, #CAERO5,
PAERO1, PAERO2, PAERO4, #PAERO3, PAERO5,
AESURF, AESURFS,
AELINK, AECOMP,
SPLINE1, SPLINE2, #, SPLINE3, SPLINE4, SPLINE5
build_caero_paneling
)
from pyNastran.bdf.cards.aero.dynamic_loads import AERO, FLFACT, FLUTTER, GUST, MKAERO1, MKAERO2
from pyNastran.bdf.cards.aero.static_loads import AESTAT, AEROS, CSSCHD, TRIM, TRIM2, DIVERG
from pyNastran.bdf.cards.test.utils import save_load_deck
IS_MATPLOTLIB = False
if IS_MATPLOTLIB:
import matplotlib.pyplot as plt
ROOTPATH = pyNastran.__path__[0]
MODEL_PATH = os.path.join(ROOTPATH, '..', 'models')
#test_path = os.path.join(ROOTPATH, 'bdf', 'cards', 'test')
COMMENT_BAD = 'this is a bad comment'
COMMENT_GOOD = 'this is a good comment\n'
class TestAero(unittest.TestCase):
"""
The Aero cards are:
* AEFACT
* AELINK
* AELIST
* AEPARM
* AESTAT
* AESURF / AESURFS
* AERO / AEROS
* CSSCHD
* CAERO1 / CAERO2 / CAERO3 / CAERO4 / CAERO5
* FLFACT
* FLUTTER
* GUST
* MKAERO1 / MKAERO2
* PAERO1 / PAERO2 / PAERO3
* SPLINE1 / SPLINE2 / SPLINE4 / SPLINE5
"""
def test_aestat_1(self):
log = SimpleLogger(level='warning')
model = BDF(log=log)
lines = ['AESTAT 502 PITCH']
card = model._process_card(lines)
card = BDFCard(card)
size = 8
card = AESTAT.add_card(card)
card.write_card(size, 'dummy')
card.raw_fields()
def test_aecomp_1(self):
"""checks the AECOMP card"""
#sid = 10
#aesid = 0
#lalpha = None
#lmach = None
#lschd = None
#sid = 5
#aesid = 50
#lalpha = 12
#lmach = 15
name = 'WING'
list_type = 'AELIST' # or SET1, CAEROx
aelist_ids = [75, 76]
card = ['AECOMP', name, list_type] + aelist_ids
bdf_card = BDFCard(card, has_none=True)
aecomp1 = AECOMP.add_card(bdf_card, comment='aecomp card')
aecomp1.validate()
aecomp1.write_card()
#label = 'ELEV'
#cid1 = 0
#alid1 = 37
#aesurf = AESURF(aesid, label, cid1, alid1)
#aefact_sid = alid1
#Di = [0., 0.5, 1.]
#aefact_elev = AEFACT(aefact_sid, Di)
#aefact_sid = lalpha
#Di = [0., 5., 10.]
#aefact_alpha = AEFACT(aefact_sid, Di)
#aefact_sid = lmach
#Di = [0., 0.7, 0.8]
#aefact_mach = AEFACT(aefact_sid, Di)
#aefact_sid = lschd
#Di = [0., 15., 30., 45.]
#aefact_delta = AEFACT(aefact_sid, Di)
log = SimpleLogger(level='warning')
model = BDF(log=log)
data = ['AELIST', 75, 1001, 'THRU', 1075, 1101, 'THRU', 1109, 1201, 1202]
model.add_card(data, data[0], COMMENT_BAD, is_list=True)
data = ['AELIST', 76, 2000, 'THRU', 2010]
model.add_card(data, data[0], COMMENT_BAD, is_list=True)
#model.add_aesurf(aesurf)
#model.add_aefact(aefact_elev)
#model.add_aefact(aefact_alpha)
#model.add_aefact(aefact_mach)
#model.add_aefact(aefact_delta)
aecomp1.safe_cross_reference(model)
aecomp1.uncross_reference()
aecomp1.cross_reference(model)
aecomp1.write_card()
aecomp1.uncross_reference()
aecomp1.write_card()
model.validate()
save_load_deck(model)
#-----------
aecomp2 = AECOMP(name, list_type, aelist_ids, comment='cssch card')
aecomp2.validate()
aecomp2.write_card()
list_type = 'INVALID'
aecomp3 = AECOMP(name, list_type, aelist_ids, comment='cssch card')
with self.assertRaises(RuntimeError):
aecomp3.validate()
name = 'MYCOMP'
list_type = 'AELIST'
lists = 10
model.add_aecomp(name, list_type, lists)
lists = 42.0
with self.assertRaises(TypeError):
AECOMP(name, list_type, lists)
def test_aefact_1(self):
"""checks the AEFACT card"""
data = ['AEFACT', 97, .3, 0.7, 1.0]
log = SimpleLogger(level='warning')
model = BDF(log=log)
model.add_card(data, data[0], COMMENT_BAD, is_list=True)
data = ['AEFACT', 97, .3, 0.7, 1.0]
model.add_card(data, data[0], COMMENT_BAD, is_list=True)
data = ['AEFACT', '98', '.3', '0.7', '1.0']
model.add_card(data, data[0], COMMENT_GOOD, is_list=True)
msg = '$this is a bad comment\nAEFACT 97 .3 .7 1.\n'
aefact97 = model.aefacts[97]
aefact98 = model.aefacts[98]
self.assertTrue(all(aefact97.fractions == [.3, .7, 1.0]))
self.assertTrue(all(aefact98.fractions == [.3, .7, 1.0]))
out = aefact97.write_card(8, None)
self.assertEqual(msg, out)
msg = '$this is a good comment\nAEFACT 98 .3 .7 1.\n'
out = aefact98.write_card(8, None)
self.assertEqual(msg, out)
#data = ['AEFACT', 99, .3, 0.7, 1.0, None, 'cat']
#with self.assertRaises(SyntaxError):
#model.add_card(data, data[0], comment_good, is_list=True)
#data = ['AEFACT', 100, .3, 0.7, 1.0, 'cat']
#with self.assertRaises(SyntaxError):
#model.add_card(data, data[0], comment_good, is_list=True)
#data = ['AEFACT', 101, .3, 0.7, 1.0, 2]
#with self.assertRaises(SyntaxError):
#model.add_card(data, data[0], comment_good, is_list=True)
fractions = [1., 2., 3.]
aefact = AEFACT(200, fractions, comment='')
aefact.validate()
aefact.write_card()
#model = BDF()
#aefact.cross_reference(model)
#aefact.write_card()
#aefact.uncross_reference()
#aefact.write_card()
def test_aelink_1(self):
log = SimpleLogger(level='warning')
model = BDF(log=log)
idi = 10
label = 'CS'
independent_labels = ['A', 'B', 'C']
linking_coefficients = [1.0, 2.0]
aelink = AELINK(idi, label, independent_labels, linking_coefficients, comment='')
assert aelink.aelink_id == idi
with self.assertRaises(RuntimeError):
aelink.validate()
str(aelink)
aelink.write_card()
card = ['AELINK', idi, label, independent_labels[0], linking_coefficients[0],
independent_labels[1], linking_coefficients[1], independent_labels[2]]
with self.assertRaises(AssertionError):
model.add_card(card, 'AELINK')
card = ['AELINK', idi, label, independent_labels[0], linking_coefficients[0],
independent_labels[1], linking_coefficients[1]]
model.add_card(card, 'AELINK', comment='cat')
#print(model.aelinks[idi])
assert model.aelinks[idi][0].comment == '$cat\n', 'comment=%r' % str(model.aelinks[idi][0].comment)
#-------------------------------
idi = 11
label = 'LABEL'
independent_labels = ['pig', 'frog', 'dog']
linking_coefficients = []
aelink2 = model.add_aelink(idi, label, independent_labels, linking_coefficients)
with self.assertRaises(RuntimeError):
model.validate()
aelink2.linking_coefficients = [1.0, 2.0, 3.0]
assert aelink2.linking_coefficients == [1., 2., 3.]
#-------------------------------
idi = 'ALWAYS'
label = 'LABEL'
independent_labels = ['pig', 'frog', 'dog']
linking_coefficients = [1.0, 2.0, 3.0]
model.add_aelink(idi, label, independent_labels, linking_coefficients)
model.validate()
model.cross_reference()
def test_aelink_2(self):
log = SimpleLogger(level='warning')
model = BDF(log=log)
idi = 31
label = 'LABEL'
independent_labels = ['pig', 'frog', 'dog']
linking_coefficients = [1.0, 2.0, 3.0]
model.add_aelink(idi, label, independent_labels, linking_coefficients)
save_load_deck(model, run_renumber=False)
def test_aelist_1(self):
"""checks the AELIST card"""
log = SimpleLogger(level='warning')
model = BDF(log=log)
data = ['AELIST', 75, 1001, 'THRU', 1075, 1101, 'THRU', 1109, 1201, 1202]
model.add_card(data, data[0], COMMENT_BAD, is_list=True)
elements = list(range(1001, 1076)) + list(range(1101, 1110)) + [1201, 1202]
aelist = AELIST(74, elements)
aelist.validate()
aelist.write_card()
aelist75 = model.aelists[75]
#print(aelist.elements)
#print(elements)
self.assertTrue(elements == aelist75.elements)
elements = list(range(1001, 1076)) + list(range(1101, 1110)) + [1108, 1202]
data = ['AELIST', 76, 1001, 'THRU', 1075, 1101, 'THRU', 1109, 1108, 1202]
model.add_card(data, data[0], COMMENT_BAD, is_list=True)
aelist76 = model.aelists[76]
#print(aelist76 .elements)
#print(elements)
self.assertFalse(elements == aelist76.elements)
elements = list(set(elements))
elements.sort()
self.assertTrue(elements == aelist76.elements)
elements = [1000, 1000, 1000, 2000, 1000, 2000]
aelist = AELIST(75, elements)
aelist.clean_ids()
str(aelist.write_card())
elements = 42
AELIST(76, elements)
elements = 42.0
with self.assertRaises(TypeError):
AELIST(77, elements)
def test_aeparm_1(self):
"""checks the AEPARM card"""
aeparm_id = 100
aeparm = AEPARM.add_card(BDFCard(['AEPARM', aeparm_id, 'THRUST', 'lb']),
comment='aeparm_comment')
model = BDF(debug=False)
aeparm = model.add_aeparm(aeparm_id, 'THRUST', 'lb', comment='aeparm_comment')
assert aeparm.aeparm_id == aeparm_id
aeparm.validate()
aeparm.cross_reference(None)
aeparm.uncross_reference()
aeparm.safe_cross_reference(None)
aeparm.write_card()
save_load_deck(model)
# def test_aestat_1(self):
# def test_aesurf_1(self):
def test_aesurfs_1(self):
"""checks the AESURFS cards"""
aesid = 6001
label = 'ELEV'
list1 = 6002
list2 = 6003
card = ['AESURFS', aesid, label, None, list1, None, list2]
bdf_card = BDFCard(card, has_none=True)
log = SimpleLogger(level='warning')
model = BDF(log=log)
model.add_card(bdf_card, 'AESURFS', comment='aesurfs',
is_list=True, has_none=True)
aesurfs = AESURFS(aesid, label, list1, list2, comment='aesurfs')
str(aesurfs)
aesurfs.write_card()
model.add_set1(6002, [1, 2, 3])
model.add_grid(1, [0., 0., 0.])
model.add_grid(2, [0., 0., 0.])
model.add_grid(3, [0., 0., 0.])
model.validate()
save_load_deck(model)
def test_aero_1(self):
"""checks the AERO card"""
acsid = 0.
velocity = None
cref = 1.0
rho_ref = 1.0
aero = AERO(velocity, cref, rho_ref, acsid=acsid, sym_xz=0, sym_xy=0,
comment='aero card')
with self.assertRaises(TypeError):
aero.validate()
assert aero.is_symmetric_xy is False
assert aero.is_symmetric_xz is False
assert aero.is_anti_symmetric_xy is False
assert aero.is_anti_symmetric_xz is False
#aero.set_ground_effect(True)
#assert aero.is_symmetric_xy is False
#assert aero.is_symmetric_xz is False
#assert aero.is_anti_symmetric_xy is True
#assert aero.is_anti_symmetric_xz is False
#aero.set_ground_effect(False)
#assert aero.is_symmetric_xy is False
#assert aero.is_symmetric_xz is False
#assert aero.is_anti_symmetric_xy is False
#assert aero.is_anti_symmetric_xz is False
aero = AERO(velocity, cref, rho_ref, acsid=acsid, sym_xz=1, sym_xy=1,
comment='aero card')
assert aero.is_symmetric_xy is True
assert aero.is_symmetric_xz is True
assert aero.is_anti_symmetric_xy is False
assert aero.is_anti_symmetric_xz is False
aero = AERO(velocity, cref, rho_ref, acsid=acsid, sym_xz=-1, sym_xy=-1,
comment='aero card')
assert aero.is_symmetric_xy is False
assert aero.is_symmetric_xz is False
assert aero.is_anti_symmetric_xy is True
assert aero.is_anti_symmetric_xz is True
aero.set_ground_effect(True)
def test_aero_2(self):
"""checks the AERO card"""
acsid = 0
velocity = None
cref = 1.0
rho_ref = 1.0
aero = AERO(velocity, cref, rho_ref, acsid=acsid, sym_xz=0., sym_xy=0,
comment='aero card')
with self.assertRaises(TypeError):
aero.validate()
aero = AERO(velocity, cref, rho_ref, acsid=acsid, sym_xz=0, sym_xy=0.,
comment='aero card')
with self.assertRaises(TypeError):
aero.validate()
aero = AERO(velocity, cref, rho_ref, acsid=acsid, sym_xz=0, sym_xy=0.,
comment='aero card')
with self.assertRaises(TypeError):
aero.validate()
aero = AERO(velocity, cref, rho_ref, acsid=None, sym_xz=0, sym_xy=0,
comment='aero card')
aero.validate()
aero.write_card()
aero.raw_fields()
model = BDF()
aero.cross_reference(model)
aero.write_card()
aero.raw_fields()
aero.uncross_reference()
aero.write_card()
aero.raw_fields()
def test_aeros_1(self):
"""checks the AEROS card"""
#acsid = 0.
#velocity = None
cref = 1.0
bref = 2.0
sref = 100.
acsid = 0
rcsid = 0
aeros = AEROS.add_card(BDFCard(['AERO', acsid, rcsid, cref, bref, sref]))
aeros = AEROS(cref, bref, sref, acsid, rcsid, sym_xz=0, sym_xy=0,
comment='aeros card')
aeros.validate()
aeros.write_card()
aeros.raw_fields()
acsid = None
rcsid = None
sym_xz = None
sym_xy = None
aeros = AEROS(cref, bref, sref, acsid, rcsid, sym_xz=sym_xz, sym_xy=sym_xy,
comment='aeros card')
aeros.validate()
aeros.write_card()
aeros.raw_fields()
cref = 1
bref = 2
sref = 3
acsid = 42.
rcsid = 43.
sym_xz = 44.
sym_xy = 45.
aeros = AEROS(cref, bref, sref, acsid, rcsid, sym_xz=sym_xz, sym_xy=sym_xy)
with self.assertRaises(TypeError):
aeros.validate()
def test_caero1_paneling_nspan_nchord_1(self):
"""checks the CAERO1/PAERO1/AEFACT card"""
log = SimpleLogger(level='warning')
model = BDF(log=log)
cref = 1.0
bref = 1.0
sref = 1.0
model.add_aeros(cref, bref, sref, acsid=0, rcsid=0, sym_xz=0, sym_xy=0, comment='')
pid = 1
igroup = 1
p1 = [0., 0., 0.]
p4 = [1., 15., 0.]
x12 = 1.
x43 = 1.
model.add_paero1(pid, caero_body_ids=None, comment='')
eid = 10000000
caero = model.add_caero1(eid, pid, igroup, p1, x12, p4, x43,
cp=0, nspan=3, lspan=0, nchord=2, lchord=0, comment='')
npoints, nelements = caero.get_npanel_points_elements()
npoints_expected = 12 # 4*3
nelements_expected = 6 # 2*3
x, y = caero.xy
chord_expected = np.array([0., 0.5, 1.])
span_expected = np.array([0., 1 / 3, 2 / 3, 1.])
assert np.allclose(x, chord_expected)
assert np.allclose(y, span_expected)
assert npoints_expected == npoints
assert nelements_expected == nelements
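        # Worked check of the expected counts above (sketch): a CAERO1 panel
        # with nchord chordwise boxes and nspan spanwise boxes has
        #     npoints   = (nchord + 1) * (nspan + 1)  -> (2 + 1) * (3 + 1) = 12
        #     nelements = nchord * nspan              -> 2 * 3 = 6
        # which is where the 12 point / 6 element expectations come from.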
def test_caero1_paneling_nspan_lchord(self):
"""checks the CAERO1/PAERO1/AEFACT card"""
fig, ax = _setup_aero_plot()
log = SimpleLogger(level='warning')
model = BDF(log=log)
cref = 1.0
bref = 1.0
sref = 1.0
model.add_aeros(cref, bref, sref, acsid=0, rcsid=0, sym_xz=0, sym_xy=0, comment='')
pid = 1
igroup = 1
p1 = [0., 0., 0.]
p4 = [1., 15., 0.]
x12 = 1.
x43 = 1.
model.add_paero1(pid, caero_body_ids=None, comment='')
eid = 10000000
chord_aefact_id = 10000
model.add_aefact(chord_aefact_id, [0., 0.5, 1.0])
caero = model.add_caero1(eid, pid, igroup, p1, x12, p4, x43,
cp=0,
nspan=3, lspan=0,
nchord=0, lchord=chord_aefact_id, comment='')
model.cross_reference()
npoints, nelements = caero.get_npanel_points_elements()
npoints_expected = 12 # 4*3
nelements_expected = 6 # 2*3
assert npoints_expected == npoints
assert nelements_expected == nelements
del model.caeros[eid]
del model.aefacts[chord_aefact_id]
points, elements = caero.panel_points_elements()
x, y = caero.xy
chord_expected = np.array([0., 0.5, 1.])
span_expected = np.array([0., 1 / 3, 2 / 3, 1.])
assert np.allclose(x, chord_expected)
assert np.allclose(y, span_expected)
if IS_MATPLOTLIB:
caero.plot(ax)
fig.show()
def test_caero1_paneling_transpose(self):
fig, ax = _setup_aero_plot()
log = SimpleLogger(level='warning')
model = BDF(log=log)
cref = 1.0
bref = 1.0
sref = 1.0
model.add_aeros(cref, bref, sref, acsid=0, rcsid=0, sym_xz=0, sym_xy=0, comment='')
#['CAERO1', '2000', '2000', '0', '15', '10', '1', '0', None, '7.314386', '0.', '-0.18288', '1.463854', '8.222755', '1.573341', '-0.18288', '0.365963']
#card_lines = [
#'CAERO1,2000,2000,0,15,10,1,0,1',
#'+,7.314386,0.,-0.18288,1.463854,8.222755,1.573341,-0.18288,0.365963',
#]
#model.add_card(card_lines, 'CAERO1', comment='', ifile=None, is_list=False, has_none=True)
eid = 2000
#caero = model.caeros[eid]
#print(caero.get_stats())
pid = 1
igroup = 1
p1 = [7.3, 0., 0.]
p4 = [8.2, 1.6, 0.]
x12 = 1.4
x43 = 0.3
model.add_paero1(pid, caero_body_ids=None, comment='')
caero = model.add_caero1(
eid, pid, igroup, p1, x12, p4, x43,
cp=0, nspan=5, lspan=0, nchord=2, lchord=0, comment='')
caero.validate()
x, y = caero.xy
x_expected = np.array([0., 0.5, 1.])
y_expected = np.array([0., 0.2, 0.4, 0.6, 0.8, 1.])
assert np.allclose(x, x_expected)
assert np.allclose(y, y_expected)
#print(caero.get_stats())
caero.cross_reference(model)
all_control_surface_name, caero_control_surfaces, out = build_caero_paneling(model)
box_id_to_caero_element_map_expected = {
2000: np.array([0, 3, 4, 1]),
2001: np.array([1, 4, 5, 2]),
2002: np.array([3, 6, 7, 4]),
2003: np.array([4, 7, 8, 5]),
2004: np.array([ 6, 9, 10, 7]),
2005: np.array([ 7, 10, 11, 8]),
2006: np.array([ 9, 12, 13, 10]),
            2007: np.array([10, 13, 14, 11]),
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
# CODE NAME HERE
# CODE DESCRIPTION HERE
Created on 2019-05-13 at 11:28
@author: cook
"""
import numpy as np
import warnings
import os
from scipy.ndimage import filters
from apero import core
from apero.core import constants
from apero import lang
from apero.core import math as mp
from apero.core.core import drs_log
from apero.core.core import drs_file
from apero.core.core import drs_database
from apero.io import drs_fits
from apero.io import drs_data
# =============================================================================
# Define variables
# =============================================================================
__NAME__ = 'science.calib.badpix.py'
__INSTRUMENT__ = 'None'
# Get constants
Constants = constants.load(__INSTRUMENT__)
# Get version and author
__version__ = Constants['DRS_VERSION']
__author__ = Constants['AUTHORS']
__date__ = Constants['DRS_DATE']
__release__ = Constants['DRS_RELEASE']
# get param dict
ParamDict = constants.ParamDict
DrsFitsFile = drs_file.DrsFitsFile
# Get Logging function
WLOG = drs_log.wlog
# Get the text types
TextEntry = lang.drs_text.TextEntry
TextDict = lang.drs_text.TextDict
# alias pcheck
pcheck = core.pcheck
# =============================================================================
# Define functions
# =============================================================================
def normalise_median_flat(params, image, method='new', **kwargs):
"""
Applies a median filter and normalises. Median filter is applied with width
"wmed" or p["BADPIX_FLAT_MED_WID"] if wmed is None) and then normalising by
the 90th percentile
:param params: parameter dictionary, ParamDict containing constants
Must contain at least:
BADPIX_FLAT_MED_WID: float, the median image in the x
dimension over a boxcar of this width
BADPIX_NORM_PERCENTILE: float, the percentile to normalise
to when normalising and median
filtering image
log_opt: string, log option, normally the program name
    :param image: numpy array (2D), the image to median filter and normalise
:param method: string, "new" or "old" if "new" uses np.nanpercentile else
sorts the flattened image and takes the "percentile" (i.e.
90th) pixel value to normalise
:param kwargs: keyword arguments
:keyword wmed: float or None, if not None defines the median filter width
                   if None uses p["BADPIX_FLAT_MED_WID"], see
scipy.ndimage.filters.median_filter "size" for more details
    :keyword percentile: float or None, if not None defines the percentile to
                         normalise the image at, if None uses
p["BADPIX_NORM_PERCENTILE"]
:return norm_med_image: numpy array (2D), the median filtered and normalised
image
:return norm_image: numpy array (2D), the normalised image
"""
func_name = __NAME__ + '.normalise_median_flat()'
# log that we are normalising the flat
WLOG(params, '', TextEntry('40-012-00001'))
# get used percentile
percentile = pcheck(params, 'BADPIX_NORM_PERCENTILE', 'percentile', kwargs,
func_name)
# wmed: We construct a "simili" flat by taking the running median of the
    # flat in the x dimension over a boxcar width of wmed (suggested
# value of ~7 pix). This assumes that the flux level varies only by
# a small amount over wmed pixels and that the badpixels are
# isolated enough that the median along that box will be representative
# of the flux they should have if they were not bad
wmed = pcheck(params, 'BADPIX_FLAT_MED_WID', 'wmed', kwargs, func_name)
# create storage for median-filtered flat image
image_med = np.zeros_like(image)
# loop around x axis
for i_it in range(image.shape[1]):
# x-spatial filtering and insert filtering into image_med array
image_med[i_it, :] = filters.median_filter(image[i_it, :], wmed)
if method == 'new':
# get the 90th percentile of median image
norm = np.nanpercentile(image_med[np.isfinite(image_med)], percentile)
else:
v = image_med.reshape(np.product(image.shape))
v = np.sort(v)
norm = v[int(np.product(image.shape) * percentile/100.0)]
# apply to flat_med and flat_ref
return image_med/norm, image/norm
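# Minimal usage sketch for normalise_median_flat (illustrative; `params` and
# `flat_image` are assumed to come from the usual badpix recipe inputs):
#     fmed_norm, fimage_norm = normalise_median_flat(params, flat_image)
#     # fmed_norm   = median-filtered flat divided by its high percentile
#     # fimage_norm = the raw flat divided by the same normalisation factor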
def locate_bad_pixels(params, fimage, fmed, dimage, **kwargs):
"""
Locate the bad pixels in the flat image and the dark image
:param params: parameter dictionary, ParamDict containing constants
Must contain at least:
log_opt: string, log option, normally the program name
BADPIX_FLAT_MED_WID: float, the median image in the x
dimension over a boxcar of this width
BADPIX_FLAT_CUT_RATIO: float, the maximum differential pixel
cut ratio
BADPIX_ILLUM_CUT: float, the illumination cut parameter
BADPIX_MAX_HOTPIX: float, the maximum flux in ADU/s to be
considered too hot to be used
:param fimage: numpy array (2D), the flat normalised image
:param fmed: numpy array (2D), the flat median normalised image
:param dimage: numpy array (2D), the dark image
:param wmed: float or None, if not None defines the median filter width
                 if None uses p["BADPIX_FLAT_MED_WID"], see
scipy.ndimage.filters.median_filter "size" for more details
:return bad_pix_mask: numpy array (2D), the bad pixel mask image
:return badpix_stats: list of floats, the statistics array:
Fraction of hot pixels from dark [%]
Fraction of bad pixels from flat [%]
Fraction of NaN pixels in dark [%]
Fraction of NaN pixels in flat [%]
Fraction of bad pixels with all criteria [%]
"""
func_name = __NAME__ + '.locate_bad_pixels()'
# log that we are looking for bad pixels
WLOG(params, '', TextEntry('40-012-00005'))
# -------------------------------------------------------------------------
# wmed: We construct a "simili" flat by taking the running median of the
    # flat in the x dimension over a boxcar width of wmed (suggested
# value of ~7 pix). This assumes that the flux level varies only by
# a small amount over wmed pixels and that the badpixels are
# isolated enough that the median along that box will be representative
# of the flux they should have if they were not bad
wmed = pcheck(params, 'BADPIX_FLAT_MED_WID', 'wmed', kwargs, func_name)
    # max differential pixel response relative to the expected value
cut_ratio = pcheck(params, 'BADPIX_FLAT_CUT_RATIO', 'cut_ratio', kwargs,
func_name)
# illumination cut parameter. If we only cut the pixels that
    # fractionally deviate by more than a certain amount, we are going
    # to have lots of bad pixels in unilluminated regions of the array.
    # We therefore need to set a threshold below which a pixel is
# considered unilluminated. First of all, the flat field image is
# normalized by its 90th percentile. This sets the brighter orders
# to about 1. We then set an illumination threshold below which
# only the dark current will be a relevant parameter to decide that
# a pixel is "bad"
illum_cut = pcheck(params, 'BADPIX_ILLUM_CUT', 'illum_cut', kwargs,
func_name)
# hotpix. Max flux in ADU/s to be considered too hot to be used
max_hotpix = pcheck(params, 'BADPIX_MAX_HOTPIX', 'max_hotpix', kwargs,
func_name)
# -------------------------------------------------------------------------
# create storage for ratio of flat_ref to flat_med
fratio = np.zeros_like(fimage)
# create storage for bad dark pixels
badpix_dark = np.zeros_like(dimage, dtype=bool)
# -------------------------------------------------------------------------
# complain if the flat image and dark image do not have the same dimensions
if dimage.shape != fimage.shape:
eargs = [fimage.shape, dimage.shape, func_name]
WLOG(params, 'error', TextEntry('09-012-00002', args=eargs))
# -------------------------------------------------------------------------
# as there may be a small level of scattered light and thermal
# background in the dark we subtract the running median to look
    # only for isolated hot pixels
for i_it in range(fimage.shape[1]):
dimage[i_it, :] -= filters.median_filter(dimage[i_it, :], wmed)
# work out how much do flat pixels deviate compared to expected value
zmask = fmed != 0
fratio[zmask] = fimage[zmask] / fmed[zmask]
# catch the warnings
with warnings.catch_warnings(record=True) as _:
# if illumination is low, then consider pixel valid for this criterion
fratio[fmed < illum_cut] = 1
# catch the warnings
with warnings.catch_warnings(record=True) as _:
# where do pixels deviate too much
badpix_flat = (np.abs(fratio - 1)) > cut_ratio
# -------------------------------------------------------------------------
# get finite flat pixels
valid_flat = np.isfinite(fimage)
# -------------------------------------------------------------------------
# get finite dark pixels
valid_dark = np.isfinite(dimage)
# -------------------------------------------------------------------------
# select pixels that are hot
badpix_dark[valid_dark] = dimage[valid_dark] > max_hotpix
# -------------------------------------------------------------------------
# construct the bad pixel mask
badpix_map = badpix_flat | badpix_dark | ~valid_flat | ~valid_dark
# -------------------------------------------------------------------------
# log results
badpix_stats = [(np.sum(badpix_dark) / np.array(badpix_dark).size) * 100,
(np.sum(badpix_flat) / np.array(badpix_flat).size) * 100,
(np.sum(~valid_dark) / np.array(valid_dark).size) * 100,
(np.sum(~valid_flat) / np.array(valid_flat).size) * 100,
(np.sum(badpix_map) / np.array(badpix_map).size) * 100]
WLOG(params, '', TextEntry('40-012-00006', args=badpix_stats))
# -------------------------------------------------------------------------
# return bad pixel map
return badpix_map, badpix_stats
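# Minimal usage sketch (illustrative): the two arrays returned by
# normalise_median_flat feed straight into locate_bad_pixels together with a
# dark frame of the same shape:
#     fmed_norm, fimage_norm = normalise_median_flat(params, flat_image)
#     badpix_map, badpix_stats = locate_bad_pixels(params, fimage_norm, fmed_norm, dark_image)
#     # badpix_map is boolean (True = bad) and badpix_stats holds the five
#     # percentages listed in the docstring above.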
def locate_bad_pixels_full(params, image, **kwargs):
"""
Locate the bad pixels identified from the full engineering flat image
(location defined from p['BADPIX_FULL_FLAT'])
    :param params: parameter dictionary, ParamDict containing constants
Must contain at least:
IC_IMAGE_TYPE: string, the detector type (this step is only for
H4RG)
LOG_OPT: string, log option, normally the program name
BADPIX_FULL_FLAT: string, the full engineering flat filename
BADPIX_FULL_THRESHOLD: float, the threshold on the engineering
above which the data is good
:param image: numpy array (2D), the image to correct (for size only)
:return newimage: numpy array (2D), the mask of the bad pixels
:return stats: float, the fraction of un-illuminated pixels (percentage)
"""
func_name = __NAME__ + '.locate_bad_pixels_full()'
# log that we are looking for bad pixels
WLOG(params, '', TextEntry('40-012-00002'))
# get parameters from params/kwargs
threshold = pcheck(params, 'BADPIX_FULL_THRESHOLD', 'threshold', kwargs,
func_name)
rotnum = pcheck(params, 'RAW_TO_PP_ROTATION', 'rotnum', kwargs, func_name)
# get full flat
mdata = drs_data.load_full_flat_badpix(params, **kwargs)
# check if the shape of the image and the full flat match
if image.shape != mdata.shape:
eargs = [mdata.shape, image.shape, func_name]
WLOG(params, 'error', TextEntry('09-012-00001', args=eargs))
# apply threshold
mask = np.abs(mp.rot8(mdata, rotnum) - 1) > threshold
# -------------------------------------------------------------------------
# log results
badpix_stats = (np.sum(mask) / np.array(mask).size) * 100
WLOG(params, '', TextEntry('40-012-00004', args=[badpix_stats]))
# return mask
return mask, badpix_stats
def correction(params, image=None, header=None, return_map=False, **kwargs):
"""
Corrects "image" for "BADPIX" using calibDB file (header must contain
value of p['ACQTIME_KEY'] as a keyword) - sets all bad pixels to zeros
    :param params: parameter dictionary, ParamDict containing constants
Must contain at least:
calibDB: dictionary, the calibration database dictionary
(if not in "p" we construct it and need "max_time_unix"
max_time_unix: float, the unix time to use as the time of
reference (used only if calibDB is not defined)
log_opt: string, log option, normally the program name
DRS_CALIB_DB: string, the directory that the calibration
files should be saved to/read from
:param image: numpy array (2D), the image
:param header: dictionary, the header dictionary created by
spirouFITS.ReadImage
:param return_map: bool, if True returns bad pixel map else returns
corrected image
:returns: numpy array (2D), the corrected image where all bad pixels are
set to zeros or the bad pixel map (if return_map = True)
"""
    func_name = __NAME__ + '.correction()'
# check for filename in kwargs
filename = kwargs.get('filename', None)
# deal with no header
if header is None:
WLOG(params, 'error', TextEntry('00-012-00002', args=[func_name]))
# deal with no image (when return map is False)
if (not return_map) and (image is None):
WLOG(params, 'error', TextEntry('00-012-00003', args=[func_name]))
# get loco file instance
badinst = core.get_file_definition('BADPIX', params['INSTRUMENT'],
kind='red')
# get calibration key
badkey = badinst.get_dbkey(func=func_name)
# -------------------------------------------------------------------------
# get filename
if filename is not None:
badpixfile = filename
else:
# get calibDB
cdb = drs_database.get_full_database(params, 'calibration')
# get filename col
filecol = cdb.file_col
# get the badpix entries
badpixentries = drs_database.get_key_from_db(params, badkey, cdb,
header, n_ent=1)
# get badpix filename
badpixfilename = badpixentries[filecol][0]
badpixfile = os.path.join(params['DRS_CALIB_DB'], badpixfilename)
# -------------------------------------------------------------------------
# get bad pixel file
badpiximage = drs_fits.readfits(params, badpixfile)
# create mask from badpixmask
mask = np.array(badpiximage, dtype=bool)
# -------------------------------------------------------------------------
# if return map just return the bad pixel map
if return_map:
return badpixfile, mask
# else put NaNs into the image
else:
# log that we are setting background pixels to NaN
WLOG(params, '', TextEntry('40-012-00008', args=[badpixfile]))
# correct image (set bad pixels to zero)
        corrected_image = np.array(image)
#support_study.py
#Results of nnet-survival and baseline models (Cox prop. hazards model, cox-nnet) on
#SUPPORT study data (publicly available courtesy of Vanderbilt Dep't of Biostatistics)
#Prospective study survival data on 9105 hospitalized patients
#Data: http://biostat.mc.vanderbilt.edu/wiki/pub/Main/DataSets/support2csv.zip
#Description: http://biostat.mc.vanderbilt.edu/wiki/Main/SupportDesc
#The data have been cleaned and missing values have been imputed.
#Author: <NAME>, Stanford University, <EMAIL>
#Tested with Python version 3.6, Keras version 2 (using TensorFlow backend)
running_time_test = 0
if running_time_test: #disable GPU, set Keras to use only 1 CPU core
import os
os.environ['CUDA_VISIBLE_DEVICES'] = '-1'
import tensorflow as tf
import keras.backend as K
config = tf.ConfigProto(intra_op_parallelism_threads=1,\
inter_op_parallelism_threads=1, allow_soft_placement=True,\
device_count = {'CPU' : 1, 'GPU' : 0})
session = tf.Session(config=config)
K.set_session(session)
else:
import keras.backend as K
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib
from keras.preprocessing import sequence
from keras.models import Sequential, Model
from keras.layers import Input, Dense, Dropout, Activation, LSTM, GRU, Embedding, Concatenate, Conv1D, GlobalMaxPooling1D, MaxPooling1D, GlobalAveragePooling1D, BatchNormalization, TimeDistributed
from keras import optimizers, layers, regularizers
from keras.callbacks import EarlyStopping, ModelCheckpoint
from keras.models import load_model
import math
from lifelines import KaplanMeierFitter
from lifelines import CoxPHFitter
from lifelines.utils import concordance_index
from sklearn.preprocessing import StandardScaler
from scipy import stats
import time
import nnet_survival
import other_code.cox_nnet as cox_nnet #for cox-nnet baseline model
CB_color_cycle = ['#377eb8', '#ff7f00', '#4daf4a',
'#f781bf', '#a65628', '#984ea3',
'#999999', '#e41a1c', '#dede00']
def cox_basehaz(lp, time, dead):
#Find baseline hazard for Cox model using Breslow method
#Adapted from https://stats.stackexchange.com/questions/46532/cox-baseline-hazard
#Inputs are Numpy arrays.
#lp=Cox model linear predictor values
#time=vector of failure/censoring times
#dead=boolean, did patient fail/die
#
#Returns:
#1: unique failure times
#2: baseline hazard function at these times
time=pd.Series(time)
dead=pd.Series(dead)
prediction = np.expand_dims(np.exp(lp),1)
failed_times = time.loc[dead==1]
y = failed_times.value_counts().sort_index().index.values #ordered distinct event times
d = failed_times.value_counts().sort_index().values #number of events
h0 = np.zeros(len(y))
for l in range(len(y)):
h0[l] = d[l] / np.sum(prediction[time >= y[l]])
H0 = np.cumsum(h0)
#surv_baseline = np.exp(-H0)
return (y, H0)
def cox_pred_surv(lp, H0):
#Predicted survival curves from Cox model
#Inputs are Numpy arrays.
#lp=Cox model linear predictor values
    #H0=baseline hazard function
#
#Returns: predicted survival rate at each follow-up time
prediction = np.expand_dims(np.exp(lp),1)
return np.exp(-np.matmul(prediction, np.expand_dims(H0,0)))
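#Illustrative sketch of chaining the two helpers above (synthetic call, not
#part of the study pipeline): the Breslow baseline cumulative hazard is
#estimated once on the training set, then reused for any new linear predictors.
#    times, H0 = cox_basehaz(lp_train, time_train, dead_train)
#    surv_test = cox_pred_surv(lp_test, H0)  #shape (n_test, len(times))
#Each row of surv_test is exp(-exp(lp)*H0) evaluated at the distinct failure
#times returned in `times`.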
def calib_plot(fu_time, n_bins, pred_surv, time, dead, color, label, error_bars=0,alpha=1., markersize=1., markertype='o'):
cuts = np.concatenate((np.array([-1e6]),np.percentile(pred_surv, np.arange(100/n_bins,100,100/n_bins)),np.array([1e6])))
bin = pd.cut(pred_surv,cuts,labels=False)
kmf = KaplanMeierFitter()
est = []
ci_upper = []
ci_lower = []
mean_pred_surv = []
for which_bin in range(max(bin)+1):
kmf.fit(time[bin==which_bin], event_observed=dead[bin==which_bin])
est.append(np.interp(fu_time, kmf.survival_function_.index.values, kmf.survival_function_.KM_estimate))
ci_upper.append(np.interp(fu_time, kmf.survival_function_.index.values, kmf.confidence_interval_.loc[:,'KM_estimate_upper_0.95']))
ci_lower.append(np.interp(fu_time, kmf.survival_function_.index.values, kmf.confidence_interval_.loc[:,'KM_estimate_lower_0.95']))
mean_pred_surv.append(np.mean(pred_surv[bin==which_bin]))
est = np.array(est)
ci_upper = np.array(ci_upper)
ci_lower = np.array(ci_lower)
if error_bars:
plt.errorbar(mean_pred_surv, est, yerr = np.transpose(np.column_stack((est-ci_lower,ci_upper-est))), fmt='o',c=color,label=label)
else:
plt.plot(mean_pred_surv, est, markertype, c=color,label=label, alpha=alpha, markersize=markersize)
return (mean_pred_surv, est)
data_support = pd.read_csv('data/support_parsed.csv')
train_prop = 0.7 #proportion of patients to place in training set
np.random.seed(0)
train_indices = np.random.choice(len(data_support),int(train_prop*len(data_support)),replace=False)
test_indices = np.setdiff1d(np.arange(len(data_support)), train_indices)
data_train = data_support.iloc[train_indices]
data_test = data_support.iloc[test_indices]
x_train = data_train.drop(["time", "dead"], axis=1).as_matrix()
x_test = data_test.drop(["time", "dead"], axis=1).as_matrix()
scaler = StandardScaler().fit(x_train)
x_train = scaler.transform(x_train) #Standardize each predictor variable
x_test = scaler.transform(x_test)
########################################
#Standard Cox proportional hazards model
from lifelines import CoxPHFitter
cph = CoxPHFitter()
cph.fit(data_train, duration_col='time', event_col='dead')
#cph.print_summary()
#Cox model discrimination train set
prediction = cph.predict_partial_hazard(data_train)
print(concordance_index(data_train.time,-prediction,data_train.dead)) #0.735
#Cox model discrimination test set
prediction = cph.predict_partial_hazard(data_test)
print(concordance_index(data_test.time,-prediction,data_test.dead)) #0.735
################################
#Nnet-survival / Our model (flexible version to
#allow non-proportional hazards)
halflife=365.*1.4
breaks=-np.log(1-np.arange(0.0,0.96,0.05))*halflife/np.log(2)
#breaks=-np.log(1-np.arange(0.0,1,0.099))*halflife/np.log(2)
n_intervals=len(breaks)-1
timegap = breaks[1:] - breaks[:-1]
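#How the interval breaks above are chosen (sketch): assuming exponential
#survival with the stated half-life, S(t) = exp(-t*log(2)/halflife). Solving
#S(t) = 1-p for t gives t = -log(1-p)*halflife/log(2), so the breaks are the
#times at which the cumulative event probability reaches 0, 0.05, ..., 0.95,
#i.e. each follow-up interval covers roughly the same share of events under
#that assumption.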
y_train = nnet_survival.make_surv_array(data_train.time.values,data_train.dead.values,breaks)
y_test = nnet_survival.make_surv_array(data_test.time.values,data_test.dead.values,breaks)
hidden_layers_sizes = 7 #Using single hidden layer, with this many neurons
##############################################################
#Our model cross-validation to pick L2 regularization strength
from sklearn.model_selection import KFold
n_folds = 10
kf=KFold(n_splits=n_folds, shuffle=True, random_state=0)
early_stopping = EarlyStopping(monitor='loss', patience=20)
#l2_array = np.concatenate(([0.],np.power(10.,np.arange(-6,-2))))
l2_array = np.power(10.,np.arange(-4,1))
grid_search_train = np.zeros((len(l2_array),n_folds))
grid_search_test = np.zeros((len(l2_array),n_folds))
for i in range(len(l2_array)):
print(str(i+1) + '/' + str(len(l2_array)))
j=0
cv_folds = kf.split(x_train)
for traincv, testcv in cv_folds:
x_train_cv = x_train[traincv]
y_train_cv = y_train[traincv]
x_test_cv = x_train[testcv]
y_test_cv = y_train[testcv]
model = Sequential()
#model.add(Dense(n_intervals,input_dim=x_train.shape[1],bias_initializer='zeros',kernel_regularizer=regularizers.l2(l2_array[i])))
model.add(Dense(hidden_layers_sizes, input_dim=x_train.shape[1],bias_initializer='zeros', activation='relu', kernel_regularizer=regularizers.l2(l2_array[i])))
model.add(Dense(n_intervals))
model.add(Activation('sigmoid'))
model.compile(loss=nnet_survival.surv_likelihood(n_intervals), optimizer=optimizers.RMSprop()) #lr=0.0001))
history=model.fit(x_train_cv, y_train_cv, batch_size=256, epochs=100000, callbacks=[early_stopping],verbose=0)
grid_search_train[i,j] = model.evaluate(x_train_cv,y_train_cv,verbose=0)
grid_search_test[i,j] = model.evaluate(x_test_cv,y_test_cv,verbose=0)
j=j+1
print(np.average(grid_search_train,axis=1))
print(np.average(grid_search_test,axis=1))
l2_final = l2_array[np.argmax(-np.average(grid_search_test,axis=1))]
############################
#Our model: train final model
l2_final=0.1
from numpy.random import seed
seed(1)
from tensorflow import set_random_seed
set_random_seed(2)
model = Sequential()
model.add(Dense(hidden_layers_sizes, input_dim=x_train.shape[1],bias_initializer='zeros', kernel_regularizer=regularizers.l2(l2_final)))
model.add(Activation('relu'))
model.add(Dense(n_intervals))
model.add(Activation('sigmoid'))
model.compile(loss=nnet_survival.surv_likelihood(n_intervals), optimizer=optimizers.RMSprop())
early_stopping = EarlyStopping(monitor='loss', patience=20)
history=model.fit(x_train, y_train, batch_size=256, epochs=100000, callbacks=[early_stopping],verbose=0)
#Discrimination performance
y_pred=model.predict_proba(x_train,verbose=0)
oneyr_surv=np.cumprod(y_pred[:,0:np.nonzero(breaks>365)[0][0]], axis=1)[:,-1]
print(concordance_index(data_train.time,oneyr_surv,data_train.dead)) #0.723
y_pred=model.predict_proba(x_test,verbose=0)
oneyr_surv=np.cumprod(y_pred[:,0:np.nonzero(breaks>365)[0][0]], axis=1)[:,-1]
print(concordance_index(data_test.time,oneyr_surv,data_test.dead)) #0.723
#########
#cox-nnet
#https://github.com/traversc/cox-nnet/
#cross validation on training set to pick L2 regularization strength
model_params = dict(node_map = None, input_split = None)
search_params = dict(method = "nesterov", learning_rate=0.01, momentum=0.9,
max_iter=10000, stop_threshold=0.995, patience=1000, patience_incr=2,
rand_seed = 123, eval_step=23, lr_decay = 0.9, lr_growth = 1.0)
cv_params = dict(L2_range = np.arange(-6,2.1))
likelihoods, L2_reg_params, mean_cvpl = cox_nnet.L2CVProfile(x_train,data_train.time.as_matrix(),data_train.dead.as_matrix(),
model_params, search_params, cv_params, verbose=False)
L2_reg = L2_reg_params[np.argmax(mean_cvpl)] #Best L2_reg is -5
#train final model
L2_reg = -5.
model_params = dict(node_map = None, input_split = None, L2_reg=np.exp(L2_reg))
cox_nnet_model, cox_nnet_cost_iter = cox_nnet.trainCoxMlp(x_train, data_train.time.as_matrix(),data_train.dead.as_matrix(), model_params, search_params, verbose=False)
cox_nnet_theta_train = cox_nnet_model.predictNewData(x_train)
cox_nnet_theta_test = cox_nnet_model.predictNewData(x_test)
#discrimination on train, test sets
print(concordance_index(data_train.time,-cox_nnet_theta_train,data_train.dead))
print(concordance_index(data_test.time,-cox_nnet_theta_test,data_test.dead))
#######################################
#Calibration plot comparing all methods
n_bins = 10
my_alpha = 0.7
my_markersize = 5.
fu_time_array = np.array([0.5, 1, 3])*365.
fu_time_label_array = ['6 months', '1 year', '3 years']
#mse_array = np.zeros((4,len(fu_time_array)))
for fu_time_i in range(len(fu_time_array)):
fu_time = fu_time_array[fu_time_i]
plt.subplot(3, 1, 1+fu_time_i)
#plt.figure()
plt.plot([0,1], [0,1], ls="--", c=".7")
    pred_surv = nnet_survival.nnet_pred_surv(model.predict_proba(x_test,verbose=0), breaks, fu_time)
(pred, actual)=calib_plot(fu_time, n_bins, pred_surv,data_test.time.as_matrix(), data_test.dead.as_matrix(),
CB_color_cycle[1],'Nnet-survival', alpha=my_alpha, markersize=my_markersize, markertype='o')
#mse_array[0, fu_time_i] = ((pred-actual)**2).mean()
times, H0 = cox_basehaz(cox_nnet_theta_train, data_train.time.values, data_train.dead.values)
y_pred = cox_pred_surv(cox_nnet_theta_test, H0)
pred_surv = []
for i in range(y_pred.shape[0]):
pred_surv.append(np.interp(fu_time,times,y_pred[i,:]))
pred_surv = np.array(pred_surv)
(pred, actual)=calib_plot(fu_time, n_bins, pred_surv, data_test.time.as_matrix(), data_test.dead.as_matrix(),
CB_color_cycle[0],'Cox-nnet', alpha=my_alpha, markersize=my_markersize, markertype='^')
#mse_array[1, fu_time_i] = ((pred-actual)**2).mean()
deepsurv_lp_train = np.genfromtxt('results/deepsurv_train_prediction.txt')
    deepsurv_lp_test = np.genfromtxt('results/deepsurv_test_prediction.txt')
import os
import sys
import glob
import multiprocessing as mp
import numpy as np
import matplotlib.pyplot as plt
from ctypes import c_float
import math
import util
import util_feature_IO
import util_batch
import util_geometry
import util_meta
import util_amira
import constants
import calc_features_mp
def getEmptyStats():
return {
"overlapping": 0,
"connected": 0
}
def processBatch(batchIndex, results, dscFolder, nids):
stats = getEmptyStats()
for i in range(0, len(nids)):
if(i % 50 == 0):
print("batch", batchIndex, dscFolder, i, "of", len(nids))
nid = nids[i]
filename = os.path.join(dscFolder, "{}_DSC.csv".format(nid))
if(os.path.exists(filename)):
            dsc = np.loadtxt(filename, skiprows=1, delimiter=",", usecols=1)
# -*- coding: iso-8859-1 -*-
"""
This code plots the scattering limit test of our code.
"""
########################
###Import useful libraries
########################
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import pdb
import pickle
deg2rad=np.pi/180.
def cm2inch(cm): #function to convert cm to inches; useful for complying with Astrobiology size guidelines
return cm/2.54
########################
###A=0
########################
###z=0
fnetdict=pickle.load(open('./TwoStreamOutput/rugheimer_earth_epoch0_w0=1-1e-12_a=0_z=0.p','rb'))
F_net=fnetdict['F_net'] #net flux in each layer, 0th layer is TOA, erg/s/cm2/nm
wav_leftedges=fnetdict['wav_leftedges'] #nm
wav_rightedges=fnetdict['wav_rightedges'] #nm
wav_centers=fnetdict['wav_centers'] #nm
z_lower=fnetdict['z_lower'] #cm, 0th layer is TOA
z_upper=fnetdict['z_upper'] #cm
z_center=fnetdict['z_center'] #cm
flux_toa=fnetdict['flux_toa'] #TOA "flux" (really intensity) in erg/s/cm2/nm (cgs)
solarzenithangle=fnetdict['solarzenithangle'] #radians
N_wavelengths=np.size(wav_centers) ###NOTE: We assume wavelength structure is the same for all of these (!!!)
direct_flux_toa=np.cos(solarzenithangle)*flux_toa #true TOA flux
F_net_deviation=np.zeros(np.shape(F_net))
F_net_deviation_max=np.zeros(N_wavelengths)
F_net_deviation_stddevs=np.zeros(N_wavelengths)
F_net_deviation_median=np.zeros(N_wavelengths)
for ind in range(0, N_wavelengths):
median_val=np.median(F_net[:,ind])
F_net_deviation_median[ind]=median_val
F_net_deviation[:,ind]=F_net[:,ind]-median_val
F_net_deviation_max[ind]=np.max(np.abs(F_net_deviation[:,ind]))
F_net_deviation_stddevs[ind]=np.std(F_net[:,ind])
F_net_deviation_max_normalized_0_0=F_net_deviation_max/(direct_flux_toa)
F_net_deviation_stddevs_normalized_0_0=F_net_deviation_stddevs/(direct_flux_toa)
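#Vectorised equivalent of the per-wavelength loop above (sketch, for reference):
#    F_net_deviation_median=np.median(F_net, axis=0)
#    F_net_deviation=F_net-F_net_deviation_median
#    F_net_deviation_max=np.max(np.abs(F_net_deviation), axis=0)
#    F_net_deviation_stddevs=np.std(F_net, axis=0)
#Normalising the maximum deviation by the direct TOA flux gives a measure of
#how well the net flux is conserved with altitude in this near-conservative
#(w0 close to 1) scattering test.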
###z=60
fnetdict=pickle.load(open('./TwoStreamOutput/rugheimer_earth_epoch0_w0=1-1e-12_a=0_z=60.p','rb'))
F_net=fnetdict['F_net'] #net flux in each layer, 0th layer is TOA, erg/s/cm2/nm
wav_leftedges=fnetdict['wav_leftedges'] #nm
wav_rightedges=fnetdict['wav_rightedges'] #nm
wav_centers=fnetdict['wav_centers'] #nm
z_lower=fnetdict['z_lower'] #cm, 0th layer is TOA
z_upper=fnetdict['z_upper'] #cm
z_center=fnetdict['z_center'] #cm
flux_toa=fnetdict['flux_toa'] #TOA "flux" (really intensity) in erg/s/cm2/nm (cgs)
solarzenithangle=fnetdict['solarzenithangle'] #radians
direct_flux_toa=np.cos(solarzenithangle)*flux_toa #true TOA flux
F_net_deviation=np.zeros(np.shape(F_net))
import os
import numpy as np
from PIL import Image
from itertools import product
from collections import defaultdict
import matplotlib.pyplot as plt
from matplotlib import cm
from matplotlib import colors
import pandas as pd
import torch
import cv2
from mpl_toolkits.axes_grid1 import make_axes_locatable
from sklearn.cluster import DBSCAN
import matplotlib.patches as patches
from sklearn.neighbors import NearestNeighbors
class head():
def __init__(self, left_eye=None,right_eye=None, distance=400):
self.l = left_eye
self.r = right_eye
self.distance = distance
def toVect(self, landmark):
out = np.array([[landmark[0][self.l]], [landmark[1][self.r]] ])
return out
def clusterHead(left_eyes, right_eyes, fullHeads=False):
#We use NN to cluster head objects: eyes and nose, assuming there is at least one pair of eyes
if not left_eyes or not right_eyes :
heads = {}
if fullHeads:
for headsita in list(range(len(left_eyes))):
newHead = head(left_eye = headsita)
heads[headsita] = newHead
for headsita in list(range(len(right_eyes))):
newHead = head(right_eye = headsita)
heads[headsita] = newHead
elif len(left_eyes)>1:
neigh = NearestNeighbors(n_neighbors=2)
neigh.fit(left_eyes)
distances, from_right_to_left =neigh.kneighbors(right_eyes)
index_taken = {} #[inr, distances[inr][0]]
queue = list(range(len(right_eyes)))
heads = {}
j = -1
# we examine the terms and correct previous choices
while queue:
index_right_eye = queue[0]
queue = queue[1:]
# we grab the closest left eye to the inr
index_left_eye = from_right_to_left[index_right_eye][0]
if (index_left_eye)==[] and fullHeads:
                # if the point is isolated
newHead = head( right_eye=index_right_eye)
heads[j] = newHead
j = j-1
elif index_left_eye not in index_taken:
#new index
newHead = head(left_eye = index_left_eye, right_eye=index_right_eye, distance = distances[index_right_eye][0])
heads[index_left_eye] = newHead
index_taken[index_left_eye] = [index_right_eye, distances[index_right_eye][0]]
else:
# we need to compare distances
newdist = distances[index_right_eye][0]
olddist = index_taken[index_left_eye][1]
if olddist<newdist:
# wrong left eye
index_left_eye = from_right_to_left[index_right_eye][1]
newdist = distances[index_right_eye][1]
olddist = index_taken.get(index_left_eye, [[],None])[1]
if index_left_eye not in index_taken:
newHead = head(left_eye = index_left_eye, right_eye=index_right_eye, distance = distances[index_right_eye][1])
heads[index_left_eye] = newHead
index_taken[index_left_eye] = [index_right_eye, distances[index_right_eye][1]]
elif olddist < newdist and fullHeads: # olddist<newdist
newHead = head( right_eye=index_right_eye)
heads[j] = newHead
j = j-1
else:
queue = queue+[index_taken[index_left_eye][0]]
newHead = head(left_eye = index_left_eye, right_eye=index_right_eye, distance = newdist)
heads[index_left_eye] = newHead
index_taken[index_left_eye] = [index_right_eye, distances[index_right_eye][1]]
else:
# correct left eye already taken
queue = queue+[index_taken[index_left_eye][0]]
newHead = head(left_eye = index_left_eye, right_eye=index_right_eye, distance = newdist)
heads[index_left_eye] = newHead
index_taken[index_left_eye] = [index_right_eye, newdist]
if fullHeads:
missingheads = set(list(range(len(right_eyes)))).difference(index_taken)
else:
missingheads = []
for headsita in missingheads:
newHead = head(left_eye = headsita)
heads[headsita] = newHead
else:
neigh = NearestNeighbors(n_neighbors=1)
neigh.fit(right_eyes)
distances, from_right_to_left = neigh.kneighbors(left_eyes)
newHead = head(left_eye = 0, right_eye = from_right_to_left[0][0])
heads = {0:newHead}
return heads
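# Illustrative usage sketch (not part of the original pipeline): eye detections are
# [x, y] pixel coordinates; the values below are made up for demonstration.
def _demo_cluster_heads():
    left_eyes = [[100, 120], [300, 118]]
    right_eyes = [[140, 121], [346, 119]]
    heads_found = clusterHead(left_eyes, right_eyes)
    for idx, h in heads_found.items():
        print(idx, h.l, h.r, h.distance)
    return heads_found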
def show_sample(sample, ax=None, color_labels=False, is_tensor=False, **kwargs):
"""Shows a sample with landmarks"""
if not ax:
ax = plt.gca()
color_list = cm.Set1.colors[: len(sample["landmarks"])]
label_color = color_list if color_labels else "r"
if is_tensor:
ax.imshow(sample["image"].permute(1, 2, 0))
else:
ax.imshow(sample["image"])
ax.scatter(
sample["landmarks"][:, 0],
sample["landmarks"][:, 1],
s=20,
marker=".",
c=label_color,
)
ax.axis("off")
# ax.set_title(f'Sample #{sample["index"]}')
return ax
def show_sample_with_mask(sample, color_labels=False, is_tensor=False, **kwargs):
"""Shows a sample with landmarks and mask"""
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(10, 5))
color_list = cm.Set1.colors[: len(sample["landmarks"])]
label_color = color_list if color_labels else "r"
if is_tensor:
ax1.imshow(sample["image"].permute(1, 2, 0))
else:
ax1.imshow(sample["image"])
ax1.scatter(
sample["landmarks"][:, 0],
sample["landmarks"][:, 1],
s=20,
marker=".",
c=label_color,
)
ax1.axis("off")
ax2.imshow(sample["mask"], cmap="gray")
ax2.axis("off")
# ax.set_title(f'Sample #{sample["index"]}')
return fig, (ax1, ax2)
def show_multiple_samples(samples, **kwargs):
"""Shows multiple samples with landmarks"""
n = len(samples)
n_cols = 4 if n > 4 else n
n_rows = int(np.ceil(n / 4))
fig, axs = plt.subplots(n_rows, n_cols, figsize=(n_cols * 5, n_rows * 5))
for i, ax in enumerate(axs.flatten()):
if i < n:
ax = show_sample(samples[i], ax=ax, **kwargs)
else:
ax.axis("off")
return fig, axs
def show_random_sample(dataset, n_samples=4, seed=None, **kwargs):
"""Shows a random sample of images with landmarks."""
if seed:
rng = np.random.RandomState(seed)
else:
rng = np.random.RandomState()
index_list = rng.randint(0, len(dataset), size=n_samples)
fig, axs = show_multiple_samples([dataset[i] for i in index_list], **kwargs)
return fig, axs
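# Illustrative usage (hypothetical dataset name; any dataset whose items are dicts with
# "image" and "landmarks" keys works with the plotting helpers above):
#   fig, axs = show_random_sample(face_dataset, n_samples=8, seed=0, color_labels=True)
#   fig.savefig("random_samples.png", bbox_inches="tight")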
def multi_neighborhood_mask(image, landmarks):
"""
Creates a mask in a 3 by 3 neighborhood of each landmark with a unique label
"""
w, h = image.size
mask = np.zeros((w, h))
for mask_index, (x, y) in enumerate(landmarks):
for i, j in product([-1, 0, 1], [-1, 0, 1]):
mask[int(x + i), int(y + j)] = mask_index + 1
return mask.T
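# Small sanity-check sketch (added for illustration only): each landmark should receive
# its own integer label in a 3x3 neighborhood of a blank PIL image.
def _demo_multi_neighborhood_mask():
    img = Image.new("L", (32, 32))
    demo_landmarks = np.array([[10, 10], [20, 5]])
    mask = multi_neighborhood_mask(img, demo_landmarks)
    assert mask.shape == (32, 32)
    assert set(np.unique(mask)) == {0.0, 1.0, 2.0}
    return mask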
def gen_all_masks(samples, root_dir, mask_dir, path_sub):
"""Generate all the masks
Args:
samples: The dataset object. Note it must be the cropped version
root_dir: Location of root data dir
mask_dir: A dir to store the masks. Note it must have the same name as the image dir
with path_sub[0] replaced
path_sub: A list of tuples which is used for replacement of image path.
"""
if not os.path.exists(root_dir + mask_dir):
os.mkdir(root_dir + mask_dir)
for i in range(len(samples)):
h, w = samples.landmark_frame.iloc[i, 1:3]
# multi_neighborhood_mask expects an image object; build a blank PIL image of the recorded size
mask = multi_neighborhood_mask(Image.new("L", (int(w), int(h))), samples[i]["landmarks"])
mask_path = samples.img_paths[i].replace(*path_sub[0]).replace(*path_sub[1])
folder, file = os.path.split(mask_path)
if not os.path.exists(folder):
os.mkdir(folder)
np.save(mask_path, mask)
def samples_to_dataframe(samples, landmark_names):
"""Creates a dataframe with the landmarks data and image size.
Note: this function loops over and opens every image and
thus it takes a while to run. The dataframe only needs to be created
once. In the future, much faster operations can be performed
on the dataframe rather than looping over each sample. This will
improve development.
Also this code depends on the ordering of height and width returned
by skimage defined in the dataset creation step. I only bring this
up because PIL and skimage are opposite.
(width, height) for PIL and (height, width) for skimage.
"""
df = pd.DataFrame(
index=range(len(samples)),
columns=["image_name", "height", "width", *landmark_names],
)
for i in range(len(samples)):
record = {}
record["image_name"] = os.path.split(samples.img_paths[i])[-1]
record["height"] = samples[i]["image"].shape[0]
record["width"] = samples[i]["image"].shape[1]
for key, value in zip(landmark_names, samples[i]["landmarks"].ravel()):
record[key] = value
df.iloc[i] = record
return df
def crop_landmarks(df):
"""
Input: landmark dataframe
Output: cropped landmark dataframe
"""
cropped_df = df.copy(deep=True)
for i, row in df.iterrows():
w, h = row["width"], row["height"]
landmarks = np.array(row[3:])
from .preprocessing import (
preprocess_timed_token_sequences,
)
from collections.abc import Iterable
from .base_cooccurrence_vectorizer import BaseCooccurrenceVectorizer
from .preprocessing import preprocess_timed_token_sequences
from .coo_utils import (
coo_append,
coo_sum_duplicates,
CooArray,
merge_all_sum_duplicates,
em_update_matrix,
)
import numpy as np
import numba
from ._window_kernels import (
_TIMED_KERNEL_FUNCTIONS,
window_at_index,
)
@numba.njit(nogil=True)
def numba_build_skip_grams(
token_sequences,
window_size_array,
window_reversals,
kernel_functions,
kernel_args,
mix_weights,
normalize_windows,
n_unique_tokens,
array_lengths,
):
"""Generate a matrix of (weighted) counts of co-occurrences of tokens within
windows in a set of sequences of tokens. Each sequence in the collection of
sequences provides an effective boundary over which skip-grams may not pass
(such as sentence boundaries in an NLP context). This is done for a collection
of different window and kernel types simultaneously.
Parameters
----------
token_sequences: Iterable of Iterables
The collection of (token, time_stamp) sequences to generate skip-gram data for.
n_unique_tokens: int
The number of unique tokens in the token_dictionary.
window_size_array: numpy.ndarray(float, size = (n_windows, n_unique_tokens))
A collection of window sizes per vocabulary index per window function
window_reversals: numpy.array(bool, size = (n_windows,))
Array indicating whether each window is reversed (looks backward) or not.
kernel_functions: tuple
The n-tuple of kernel functions
kernel_args: tuple of tuples
Arguments to pass through to the kernel functions per function
mix_weights: numpy.array(bool, size = (n_windows,))
The scalars values used to combine the values of the kernel functions
normalize_windows: bool
Indicates whether or not to L_1 normalize the kernel values per window occurrence
array_lengths: numpy.array(int, size = (n_windows,))
The lengths of the arrays per window used to store the coo matrix triples.
Returns
-------
cooccurrence_matrix: CooArray
Weight counts of values (kernel weighted counts) that token_head[i] cooccurred with token_tail[i]
"""
n_windows = window_size_array.shape[0]
array_mul = n_windows * n_unique_tokens + 1
coo_data = [
CooArray(
np.zeros(array_lengths[i], dtype=np.int32),
np.zeros(array_lengths[i], dtype=np.int32),
np.zeros(array_lengths[i], dtype=np.float32),
np.zeros(array_lengths[i], dtype=np.int64),
np.zeros(1, dtype=np.int64),
np.zeros(2 * np.int64(np.ceil(np.log2(array_lengths[i]))), dtype=np.int64),
np.zeros(1, dtype=np.int64),
)
for i in range(n_windows)
]
for d_i, seq in enumerate(token_sequences):
for w_i, target_pair in enumerate(seq):
windows = []
kernels = []
target_word = np.int32(target_pair[0])
target_time = target_pair[1]
for i in range(n_windows):
win = window_at_index(
seq,
window_size_array[i, target_word],
w_i,
reverse=window_reversals[i],
)
this_window = np.array([w[0] for w in win], dtype=np.int32)
time_deltas = np.array(
[np.abs(w[1] - target_time) for w in win], dtype=np.float32
)
this_kernel = mix_weights[i] * kernel_functions[i](
this_window, time_deltas, *kernel_args[i]
)
windows.append(this_window)
kernels.append(this_kernel)
total = 0
if normalize_windows:
sums = np.array([np.sum(ker) for ker in kernels])
total = np.sum(sums)
if total <= 0:
total = 1
for i, window in enumerate(windows):
this_ker = kernels[i]
for j, context in enumerate(window):
val = np.float32(this_ker[j] / total)
#!/usr/bin/env python3
import matplotlib
matplotlib.use('pdf')
import scrublet as scr
import scipy.io
import scipy.sparse
import numpy
import numpy.ma
from PIL import Image, ImageDraw, ImageFont
import os
import sys
import re
import warnings
import traceback
import argparse
#
# Notes:
# o apply umi_cutoff in filter_counts_matrix() that is consistent with the
# umi_cutoff value used in reduce_dimensions.R in order to produce consistent
# numbers of cells (columns).
# o I have the impression that scrublet is vulnerable to internal
#   errors resulting from things like division by zero and sqrt of x < 0.
# o it appears that python issues a runtime warning for some of
#   these errors rather than stopping, so I am raising an exception
#   in these cases
#
def handle_warning(message, category, filename, lineno, file=None, line=None):
print( 'Scrublet: stop on warning \'%s\' in %s at line %s' % ( message, filename, lineno ), file=sys.stderr )
raise ValueError(message)
return( 0 )
def read_col_file(col_file):
col = []
with open(col_file, 'r') as fp:
for line in fp:
col.append(line.rstrip())
return(col)
def filter_counts_matrix(mat_in, outlier_filter, umi_cutoff, col_names_file):
print('run_scrublet.py: filter_counts_matrix: begin')
# start with COO format matrix
if(not scipy.sparse.isspmatrix_coo(mat_in)):
mat_in = mat_in.tocoo()
# read column names
col_in = read_col_file(col_names_file)
# binarize matrix directly using the (non-zero) mat_in.data attribute
# (from snapATAC)
cutoff = numpy.percentile(a=mat_in.data, q=100 - outlier_filter, axis=None)
mat_in.data[mat_in.data > cutoff] = 0
mat_in.data[mat_in.data > 1] = 1
# find cells with no more than umi_cutoff counts
colsum = mat_in.sum(0)[0, :]
keep_col = colsum > umi_cutoff
# subset matrix and column names
mat_out = mat_in.tocsc()[:, keep_col.tolist()[0]]
col_out = [i for (i, v) in zip(col_in, keep_col.tolist()[0]) if v]
print('run_scrublet.py: filter_counts_matrix: end')
return(mat_out, col_out)
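# Illustrative sketch only (not called by the pipeline): exercise filter_counts_matrix
# on a small random peak-by-cell matrix, writing temporary column names to disk because
# the function reads barcodes from a file.
def _demo_filter_counts_matrix():
    import tempfile
    mat = scipy.sparse.random(50, 20, density=0.2, format='coo')
    col_names = ['cell%d' % i for i in range(20)]
    with tempfile.NamedTemporaryFile('w', suffix='.txt', delete=False) as fp:
        fp.write('\n'.join(col_names) + '\n')
        col_file = fp.name
    return filter_counts_matrix(mat, outlier_filter=0.5, umi_cutoff=1, col_names_file=col_file)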
def run_scrublet(sample_name, counts_matrix):
print('run_scrublet.py: run_scrublet: begin')
warnings.showwarning = handle_warning
if(numpy.size(counts_matrix, 0) == 0 or numpy.size(counts_matrix, 1) == 0):
filename = args.sample_name + "-scrublet_hist.png"
image = Image.new(mode = "RGB", size = (800,600), color = "white")
draw = ImageDraw.Draw(image)
draw.text((50,50), "Scrublet failed. This is generally because there aren't enough cells with sufficient reads.\n", fill = "black")
image.save(filename)
return(-1)
if(not scipy.sparse.isspmatrix_csc(counts_matrix)):
counts_matrix = counts_matrix.T.tocsc()
else:
counts_matrix = counts_matrix.T
# count_matrix
# rows: cells
# cols: genes
scrub = scr.Scrublet(counts_matrix)
try:
doublet_scores, predicted_doublets = scrub.scrub_doublets()
scrub.plot_histogram()[0].savefig(args.sample_name + "-scrublet_hist.png")
all_scores = numpy.vstack((doublet_scores, predicted_doublets))
all_scores = numpy.transpose(all_scores)
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.tf.scatter_nd."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import numpy as np
from tensorflow.python.client import session
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
GRADIENT_TESTS_DTYPES = (dtypes.float16, dtypes.float32, dtypes.float64)
def _AsType(v, vtype):
return v.astype(vtype) if isinstance(v, np.ndarray) else vtype(v)
def _FlatInnerDims(tensor, ndims=2):
shape = list(tensor.shape)
return tensor.reshape([
functools.reduce(lambda x, y: x * y, shape[:-ndims + 1], 1)
] + shape[-ndims + 1:])
def _FlatOuterDims(tensor, ndims=2):
shape = list(tensor.shape)
return tensor.reshape(shape[:ndims - 1] + [
functools.reduce(lambda x, y: x * y, shape[ndims - 1:], 1)
])
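# Quick illustration (not from the original test file): _FlatInnerDims collapses all but
# the last (ndims - 1) axes, while _FlatOuterDims collapses everything from axis
# (ndims - 1) onward.
#   x = np.zeros((2, 3, 4))
#   _FlatInnerDims(x).shape  # -> (6, 4)
#   _FlatOuterDims(x).shape  # -> (2, 12)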
def _NumpyScatterNd(ref, indices, updates, op):
ixdim = indices.shape[-1]
num_updates = indices.size // ixdim
total_nd = len(ref.shape)
slice_size = 1
for i in range(ixdim, total_nd):
slice_size *= ref.shape[i]
flat_indices = _FlatInnerDims(indices)
flat_updates = updates.reshape((num_updates, slice_size))
output_flat = _FlatOuterDims(ref, ixdim + 1)
for ix_updates, ix_output in enumerate(flat_indices):
ix_output = tuple(ix_output)
output_flat[ix_output] = op(output_flat[ix_output],
flat_updates[ix_updates])
return output_flat.reshape(ref.shape)
def _NumpyUpdate(ref, indices, updates):
return _NumpyScatterNd(ref, indices, updates, lambda p, u: u)
def _NumpyAdd(ref, indices, updates):
return _NumpyScatterNd(ref, indices, updates, lambda p, u: p + u)
def _NumpySub(ref, indices, updates):
return _NumpyScatterNd(ref, indices, updates, lambda p, u: p - u)
def _NumpyMul(ref, indices, updates):
return _NumpyScatterNd(ref, indices, updates, lambda p, u: p * u)
def _NumpyDiv(ref, indices, updates):
return _NumpyScatterNd(ref, indices, updates, lambda p, u: p / u)
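def _demo_numpy_scatter_nd_update():
  # Illustrative sanity check (not part of the TensorFlow test suite): scatter two row
  # updates into a 4x2 zero array using the numpy reference implementation above.
  ref = np.zeros((4, 2), dtype=np.float32)
  indices = np.array([[1], [3]], dtype=np.int32)
  updates = np.array([[10., 11.], [12., 13.]], dtype=np.float32)
  # Row 1 becomes [10, 11] and row 3 becomes [12, 13]; all other rows stay zero.
  return _NumpyUpdate(ref, indices, updates)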
class StatefulScatterNdTest(test.TestCase):
def _VariableRankTest(self,
np_scatter,
tf_scatter,
vtype,
itype,
repeat_indices=False):
np.random.seed(8)
ref_shapes = [(3, 6), (3, 6), (3, 6, 9), (3, 6, 9), (3, 6, 9), (3, 6, 9)]
indices_shapes = [(2,), (2, 2), (2,), (2, 2), (2, 3), (2, 3, 3)]
with self.cached_session(use_gpu=True):
for ref_shape, indices_shape in zip(ref_shapes, indices_shapes):
num_updates = indices_shape[0]
ixdim = indices_shape[-1]
indexable_area_shape = ()
for i in range(ixdim):
indexable_area_shape += (ref_shape[i],)
all_indices = [
list(coord)
for coord, _ in np.ndenumerate(
np.empty(indexable_area_shape, vtype))
]
np.random.shuffle(all_indices)
indices = np.array(all_indices[:num_updates])
if num_updates > 1 and repeat_indices:
indices = indices[:num_updates // 2]
for _ in range(num_updates - num_updates // 2):
indices = np.append(
indices, [indices[np.random.randint(num_updates // 2)]], axis=0)
np.random.shuffle(indices)
indices = _AsType(indices[:num_updates], itype)
updates_shape = (num_updates,)
for i in range(ixdim, len(ref_shape)):
updates_shape += (ref_shape[i],)
updates = _AsType(np.random.randn(*(updates_shape)), vtype)
ref = _AsType(np.random.randn(*(ref_shape)), vtype)
# Scatter via numpy
new = ref.copy()
np_scatter(new, indices, updates)
# Scatter via tensorflow
ref_var = variables.VariableV1(ref)
ref_var.initializer.run()
tf_scatter(ref_var, indices, updates).eval()
tol = 1e-03 if repeat_indices and vtype == np.float16 else 1e-06
# Compare
self.assertAllClose(new, self.evaluate(ref_var), atol=tol, rtol=tol)
def _VariableRankTests(self, np_scatter, tf_scatter):
for vtype in (np.int32, np.float16, np.float32, np.float64, np.complex64,
np.complex128):
for itype in (np.int32, np.int64):
self._VariableRankTest(np_scatter, tf_scatter, vtype, itype)
def testSimple(self):
indices = constant_op.constant([[4], [3], [1], [7]], dtype=dtypes.int32)
updates = constant_op.constant([9, 10, 11, 12], dtype=dtypes.float32)
ref = variables.Variable([0, 0, 0, 0, 0, 0, 0, 0], dtype=dtypes.float32)
expected = np.array([0, 11, 0, 10, 9, 0, 0, 12])
scatter = state_ops.scatter_nd_update(ref, indices, updates)
init = variables.global_variables_initializer()
with self.session(use_gpu=True) as sess:
self.evaluate(init)
result = self.evaluate(scatter)
self.assertAllClose(result, expected)
@test_util.run_deprecated_v1
def testSimpleResource(self):
indices = constant_op.constant([[4], [3], [1], [7]], dtype=dtypes.int32)
updates = constant_op.constant([9, 10, 11, 12], dtype=dtypes.float32)
ref = resource_variable_ops.ResourceVariable(
[0, 0, 0, 0, 0, 0, 0, 0], dtype=dtypes.float32)
expected = np.array([0, 11, 0, 10, 9, 0, 0, 12])
scatter = state_ops.scatter_nd_update(ref, indices, updates)
init = variables.global_variables_initializer()
with self.session(use_gpu=True) as sess:
self.evaluate(init)
self.evaluate(scatter)
self.assertAllClose(ref.eval(), expected)
def testSimple2(self):
indices = constant_op.constant([[1, 0], [1, 1]], dtype=dtypes.int32)
updates = constant_op.constant([11., 12.], dtype=dtypes.float32)
ref = variables.Variable(
[[0., 0.], [0., 0.], [0., 0.]], dtype=dtypes.float32)
expected = np.array([[0., 0.], [11., 12.], [0., 0.]])
scatter = state_ops.scatter_nd_update(ref, indices, updates)
init = variables.global_variables_initializer()
with self.session(use_gpu=True) as sess:
self.evaluate(init)
result = self.evaluate(scatter)
self.assertAllClose(result, expected)
def testSimple3(self):
indices = constant_op.constant([[1]], dtype=dtypes.int32)
updates = constant_op.constant([[11., 12.]], dtype=dtypes.float32)
ref = variables.Variable(
[[0., 0.], [0., 0.], [0., 0.]], dtype=dtypes.float32)
expected = np.array([[0., 0.], [11., 12.], [0., 0.]])
scatter = state_ops.scatter_nd_update(ref, indices, updates)
init = variables.global_variables_initializer()
with self.session(use_gpu=True) as sess:
self.evaluate(init)
result = self.evaluate(scatter)
self.assertAllClose(result, expected)
@test_util.run_deprecated_v1
def testVariableRankUpdate(self):
self._VariableRankTests(_NumpyUpdate, state_ops.scatter_nd_update)
@test_util.run_deprecated_v1
def testVariableRankAdd(self):
self._VariableRankTests(_NumpyAdd, state_ops.scatter_nd_add)
@test_util.run_deprecated_v1
def testVariableRankSub(self):
self._VariableRankTests(_NumpySub, state_ops.scatter_nd_sub)
# TODO(ebrevdo): Re-enable when we need ScatterNdMul.
# def testVariableRankMul(self):
# self._VariableRankTests(_NumpyMul, state_ops.scatter_nd_mul)
# TODO(ebrevdo): Re-enable when we need ScatterNdDiv.
# def testVariableRankDiv(self):
# self._VariableRankTests(_NumpyDiv, state_ops.scatter_nd_div)
def _ScatterRepeatIndicesTest(self, np_scatter, tf_scatter):
for vtype in (np.int32, np.float16, np.float32, np.float64):
for itype in (np.int32, np.int64):
self._VariableRankTest(
np_scatter, tf_scatter, vtype, itype, repeat_indices=True)
@test_util.run_v1_only("b/120545219")
def testScatterRepeatIndices(self):
"""This tests scatter_add using indices that repeat."""
self._ScatterRepeatIndicesTest(_NumpyAdd, state_ops.scatter_nd_add)
self._ScatterRepeatIndicesTest(_NumpySub, state_ops.scatter_nd_sub)
# TODO(ebrevdo): Re-enable when we need ScatterNdMul and ScatterNdDiv.
# self._ScatterRepeatIndicesTest(_NumpyMul, state_ops.scatter_nd_mul)
# self._ScatterRepeatIndicesTest(_NumpyDiv, state_ops.scatter_nd_div)
# TODO(simister): Re-enable once binary size increase due to
# extra templating is back under control and this op is re-enabled
# def testBooleanScatterUpdate(self):
# with self.session(use_gpu=False) as session:
# var = tf.Variable([True, False])
# update0 = tf.compat.v1.scatter_nd_update(var, [[1]], [True])
# update1 = tf.compat.v1.scatter_nd_update(
# var, tf.constant(
# [[0]], dtype=tf.int64), [False])
# var.initializer.run()
# session.run([update0, update1])
# self.assertAllEqual([False, True], self.evaluate(var))
@test_util.run_v1_only("b/120545219")
def testScatterOutOfRangeCpu(self):
# TODO(simister): Re-enable once binary size increase due to
# scatter_nd ops is under control.
# tf.scatter_nd_mul, tf.scatter_nd_div,
for op in (state_ops.scatter_nd_add, state_ops.scatter_nd_sub,
state_ops.scatter_nd_update):
params = np.array([1, 2, 3, 4, 5, 6]).astype(np.float32)
updates = np.array([-3, -4, -5]).astype(np.float32)
with self.cached_session(use_gpu=False):
ref = variables.VariableV1(params)
ref.initializer.run()
# Indices all in range, no problem.
indices = np.array([[2], [0], [5]])
op(ref, indices, updates).eval()
# Test some out of range errors.
indices = np.array([[-1], [0], [5]])
with self.assertRaisesOpError(
r"indices\[0\] = \[-1\] does not index into shape \[6\]"):
op(ref, indices, updates).eval()
indices = np.array([[2], [0], [6]])
with self.assertRaisesOpError(
r"indices\[2\] = \[6\] does not index into shape \[6\]"):
op(ref, indices, updates).eval()
def testRank3ValidShape(self):
indices = array_ops.zeros([2, 2, 2], dtypes.int32)
updates = array_ops.zeros([2, 2, 2], dtypes.int32)
shape = np.array([2, 2, 2])
ref = variables.Variable(array_ops.zeros(shape, dtypes.int32))
self.assertAllEqual(
state_ops.scatter_nd_update(ref, indices,
updates).get_shape().as_list(), shape)
@test_util.run_v1_only("b/120545219")
@test_util.disable_xla("b/123337890") # Error messages differ
def testResVarInvalidOutputShape(self):
res = variables.Variable(
initial_value=lambda: array_ops.zeros(shape=[], dtype=dtypes.float32),
dtype=dtypes.float32)
with self.cached_session():
res.initializer.run()
with self.assertRaisesOpError("Output must be at least 1-D"):
state_ops.scatter_nd_update(res, [[0]], [0.22]).eval()
@test_util.run_deprecated_v1
def testExtraIndicesDimensions(self):
indices = array_ops.zeros([1, 1, 2], dtypes.int32)
updates = array_ops.zeros([1, 1], dtypes.int32)
shape = np.array([2, 2])
ref = variables.Variable(array_ops.zeros(shape, dtypes.int32))
scatter_update = state_ops.scatter_nd_update(ref, indices, updates)
self.assertAllEqual(scatter_update.get_shape().as_list(), shape)
expected_result = np.zeros([2, 2], dtype=np.int32)
with self.cached_session():
ref.initializer.run()
self.assertAllEqual(expected_result, self.evaluate(scatter_update))
@test_util.run_deprecated_v1
def testRank3InvalidShape1(self):
indices = array_ops.zeros([3, 2, 2], dtypes.int32)
updates = array_ops.zeros([2, 2, 2], dtypes.int32)
shape = np.array([2, 2, 2])
ref = variables.Variable(array_ops.zeros(shape, dtypes.int32))
with self.assertRaisesWithPredicateMatch(
ValueError, r"The outer \d+ dimensions of indices\.shape="):
state_ops.scatter_nd_update(ref, indices, updates)
@test_util.run_deprecated_v1
def testRank3InvalidShape2(self):
indices = array_ops.zeros([2, 2, 1], dtypes.int32)
updates = array_ops.zeros([2, 2], dtypes.int32)
shape = np.array([2, 2, 2])
ref = variables.Variable(array_ops.zeros(shape, dtypes.int32))
with self.assertRaisesWithPredicateMatch(
ValueError, r"The inner \d+ dimensions of input\.shape="):
state_ops.scatter_nd_update(ref, indices, updates)
@test_util.run_deprecated_v1
def testConcurrentUpdates(self):
num_updates = 10000
update_values = np.random.rand(num_updates)
ref = variables.Variable(np.zeros([2, 2]), dtype=dtypes.float64)
indices = constant_op.constant([[0, 1]] * num_updates, dtype=dtypes.int32)
updates = constant_op.constant(update_values, dtype=dtypes.float64)
expected_result = np.zeros([2, 2], dtype=np.float64)
expected_result[0, 1] = np.sum(update_values)
scatter = state_ops.scatter_nd_add(ref, indices, updates)
init = variables.global_variables_initializer()
with session.Session() as sess:
self.evaluate(init)
result = self.evaluate(scatter)
assert np.allclose(result, expected_result)
# TODO(fpmc): Re-enable this test when gpu_pip test actually runs on a GPU.
def _disabledTestScatterOutOfRangeGpu(self):
if not test.IsBuiltWithCuda():
return
# TODO(simister): Re-enable once binary size increase due to
# scatter_nd ops is under control.
# tf.scatter_nd_mul, tf.scatter_nd_div,
for op in (state_ops.scatter_nd_add, state_ops.scatter_nd_sub,
state_ops.scatter_nd_update):
params = np.array([1, 2, 3, 4, 5, 6]).astype(np.float32)
updates = np.array([-3, -4, -5]).astype(np.float32)
# With GPU, the code ignores indices that are out of range.
# We don't test the implementation; just test there's no failures.
with self.cached_session(force_gpu=True):
ref = variables.Variable(params)
ref.initializer.run()
# Indices all in range, no problem.
indices = np.array([2, 0, 5])
op(ref, indices, updates).eval()
# Indices out of range should not fail.
indices = np.array([-1, 0, 5])
op(ref, indices, updates).eval()
indices = np.array([2, 0, 6])
op(ref, indices, updates).eval()
class ScatterNdTest(test.TestCase):
non_aliasing_add_test = False
def scatter_nd(self, indices, updates, shape, input_=None):
del input_ # input_ is not used in scatter_nd
return array_ops.scatter_nd(indices, updates, shape)
@test_util.run_in_graph_and_eager_modes
def testBool(self):
indices = constant_op.constant(
[[4], [3], [1], [7]], dtype=dtypes.int32)
updates = constant_op.constant(
[False, True, False, True], dtype=dtypes.bool)
expected = np.array(
[False, False, False, True, False, False, False, True])
scatter = self.scatter_nd(indices, updates, shape=(8,))
result = self.evaluate(scatter)
self.assertAllEqual(expected, result)
# Same index is updated twice by the same value.
indices = constant_op.constant(
[[4], [3], [3], [7]], dtype=dtypes.int32)
updates = constant_op.constant(
[False, True, True, True], dtype=dtypes.bool)
expected = np.array([
False, False, False, True, False, False, False, True])
scatter = self.scatter_nd(indices, updates, shape=(8,))
result = self.evaluate(scatter)
self.assertAllEqual(expected, result)
@test_util.run_in_graph_and_eager_modes
def testInvalidShape(self):
# TODO(apassos) figure out how to unify these errors
with self.assertRaises(errors.InvalidArgumentError
if context.executing_eagerly() else ValueError):
array_ops.scatter_nd(indices=[0], # this should be indices=[[0]]
updates=[0.0],
shape=[1])
def testString(self):
indices = constant_op.constant([[4], [3], [1], [7]],
dtype=dtypes.int32)
updates = constant_op.constant(["four", "three", "one", "seven"],
dtype=dtypes.string)
expected = np.array([b"", b"one", b"", b"three", b"four",
b"", b"", b"seven"])
scatter = self.scatter_nd(indices, updates, shape=(8,))
with self.cached_session() as sess:
result = self.evaluate(scatter)
self.assertAllEqual(expected, result)
# Same index is updated twice by the same value.
indices = constant_op.constant([[4], [3], [3], [7]],
dtype=dtypes.int32)
updates = constant_op.constant(["a", "b", "b", "c"],
dtype=dtypes.string)
expected = np.array([b"", b"", b"", b"bb", b"a", b"", b"", b"c"])
scatter = self.scatter_nd(indices, updates, shape=(8,))
with self.cached_session() as sess:
result = self.evaluate(scatter)
self.assertAllEqual(expected, result)
# Same index is updated twice by different values.
indices = constant_op.constant([[4], [3], [3], [7]],
dtype=dtypes.int32)
updates = constant_op.constant(["a", "b", "c", "d"],
dtype=dtypes.string)
expected = [np.array([b"", b"", b"", b"bc", b"a", b"", b"", b"d"]),
np.array([b"", b"", b"", b"cb", b"a", b"", b"", b"d"])]
scatter = self.scatter_nd(indices, updates, shape=(8,))
with self.cached_session() as sess:
result = self.evaluate(scatter)
self.assertTrue(np.array_equal(result, expected[0]) or
np.array_equal(result, expected[1]))
def testRank3ValidShape(self):
indices = array_ops.zeros([2, 2, 2], dtypes.int32)
updates = array_ops.zeros([2, 2, 2], dtypes.int32)
shape = np.array([2, 2, 2])
self.assertAllEqual(
self.scatter_nd(indices, updates, shape).get_shape().as_list(), shape)
@test_util.run_deprecated_v1
def testExtraIndicesDimensions(self):
indices = array_ops.zeros([1, 1, 2], dtypes.int32)
updates = array_ops.zeros([1, 1], dtypes.int32)
shape = np.array([2, 2])
scatter = self.scatter_nd(indices, updates, shape)
self.assertAllEqual(scatter.get_shape().as_list(), shape)
expected_result = np.zeros([2, 2], dtype=np.int32)
with self.cached_session():
self.assertAllEqual(expected_result, self.evaluate(scatter))
@test_util.run_deprecated_v1
def testUndefinedIndicesShape(self):
indices = array_ops.placeholder(dtypes.int32, shape=None)
updates = array_ops.placeholder(dtypes.int32, shape=[2, 2, 2])
shape = constant_op.constant([2, 2, 2], dtypes.int32)
self.scatter_nd(indices, updates, shape)
@test_util.run_deprecated_v1
def testUndefinedUpdatesShape(self):
indices = array_ops.placeholder(dtypes.int32, shape=[2, 2, 2])
updates = array_ops.placeholder(dtypes.int32, shape=None)
shape = constant_op.constant([2, 2, 2], dtypes.int32)
self.scatter_nd(indices, updates, shape)
@test_util.run_deprecated_v1
def testUndefinedOutputShape(self):
indices = array_ops.placeholder(dtypes.int32, shape=[2, 2, 2])
updates = array_ops.placeholder(dtypes.int32, shape=[2, 2, 2])
shape = array_ops.placeholder(dtypes.int32, shape=[None])
self.scatter_nd(indices, updates, shape)
@test_util.run_deprecated_v1
def testEmptyOutputShape1(self):
indices = array_ops.zeros([2, 2, 2], dtypes.int32)
updates = array_ops.zeros([2, 2, 2], dtypes.int32)
shape = constant_op.constant([0, 3, 2], dtypes.int32)
with self.assertRaisesWithPredicateMatch(
ValueError, "Indices and updates specified for empty output shape"):
self.scatter_nd(indices, updates, shape)
@test_util.run_v1_only("b/120545219")
def testEmptyOutputShape2(self):
indices = array_ops.placeholder(dtypes.int32, shape=None)
updates = array_ops.placeholder(dtypes.int32, shape=None)
shape = constant_op.constant([0, 3, 2], dtypes.int32)
with self.cached_session():
with self.assertRaisesOpError(
"Indices and updates specified for empty output"):
self.scatter_nd(indices, updates, shape).eval(feed_dict={
indices: np.zeros([2, 2, 2], dtype=np.int32),
updates: np.zeros([2, 2, 2], dtype=np.int32)
})
@test_util.run_deprecated_v1
def testEmptyOutputShape3(self):
indices = array_ops.zeros([0], dtypes.int32)
updates = array_ops.zeros([0], dtypes.int32)
shape = constant_op.constant([0], dtypes.int32)
scatter = self.scatter_nd(indices, updates, shape)
with self.cached_session():
self.assertEqual(scatter.eval().size, 0)
@test_util.run_deprecated_v1
def testRank3InvalidShape1(self):
indices = array_ops.zeros([3, 2, 2], dtypes.int32)
updates = array_ops.zeros([2, 2, 2], dtypes.int32)
shape = np.array([2, 2, 2])
with self.assertRaisesWithPredicateMatch(
ValueError, r"The outer \d+ dimensions of indices\.shape="):
self.scatter_nd(indices, updates, shape)
@test_util.run_deprecated_v1
def testRank3InvalidShape2(self):
indices = array_ops.zeros([2, 2, 1], dtypes.int32)
updates = array_ops.zeros([2, 2], dtypes.int32)
shape = np.array([2, 2, 2])
with self.assertRaisesWithPredicateMatch(
ValueError, r"The inner \d+ dimensions of (input|output)\.shape="):
self.scatter_nd(indices, updates, shape)
@test_util.run_deprecated_v1
def testGradientsRank2ElementUpdate(self):
for dtype in GRADIENT_TESTS_DTYPES:
indices = constant_op.constant([[0, 0], [1, 1]], dtype=dtypes.int32)
updates = constant_op.constant([1, 4], dtype=dtype)
shape = constant_op.constant([2, 2], dtype=dtypes.int32)
input_ = array_ops.zeros(shape, dtype=dtype)
outputs = self.scatter_nd(indices, updates, shape, input_)
grad_vals = constant_op.constant([[1, 2], [3, 4]], dtype=dtype)
updates_grad, input_grad = gradients_impl.gradients(
[outputs], [updates, input_], [grad_vals])
expected_updates_grad = np.array([1, 4], dtype=dtype.as_numpy_dtype())
expected_input_grad = np.array([[1, 2], [3, 4]],
dtype=dtype.as_numpy_dtype())
with self.cached_session():
self.assertAllEqual(expected_updates_grad, self.evaluate(updates_grad))
if self.non_aliasing_add_test:
self.assertAllEqual(expected_input_grad, self.evaluate(input_grad))
@test_util.run_deprecated_v1
def testGradientsRank2SliceUpdate(self):
for dtype in GRADIENT_TESTS_DTYPES:
indices = constant_op.constant([[1], [0]], dtype=dtypes.int32)
updates = constant_op.constant([[3, 4], [1, 2]], dtype=dtype)
shape = constant_op.constant([2, 2], dtype=dtypes.int32)
input_ = array_ops.zeros(shape, dtype=dtype)
outputs = self.scatter_nd(indices, updates, shape, input_)
grad_vals = constant_op.constant([[3, 4], [1, 2]], dtype=dtype)
updates_grad, input_grad = gradients_impl.gradients(
[outputs], [updates, input_], [grad_vals])
expected_updates_grad = np.array([[1, 2], [3, 4]],
dtype=dtype.as_numpy_dtype())
expected_input_grad = np.array([[3, 4], [1, 2]],
dtype=dtype.as_numpy_dtype())
with self.cached_session():
self.assertAllEqual(expected_updates_grad, self.evaluate(updates_grad))
if self.non_aliasing_add_test:
self.assertAllEqual(expected_input_grad, self.evaluate(input_grad))
@test_util.run_deprecated_v1
def testGradientsRank3SliceUpdate(self):
for dtype in GRADIENT_TESTS_DTYPES:
indices = constant_op.constant([[[0, 1], [1, 0]], [[0, 0], [1, 1]]],
dtype=dtypes.int32)
updates = constant_op.constant([[[5, 7], [2, 4]], [[1, 3], [6, 8]]],
dtype=dtype)
shape = constant_op.constant([2, 2, 2], dtype=dtypes.int32)
input_ = array_ops.zeros(shape, dtype=dtype)
outputs = self.scatter_nd(indices, updates, shape, input_)
grad_vals = constant_op.constant([[[1, 2], [3, 4]], [[5, 6], [7, 8]]],
dtype=dtype)
updates_grad, input_grad = gradients_impl.gradients(
[outputs], [updates, input_], [grad_vals])
expected_updates_grad = np.array([[[3, 4], [5, 6]], [[1, 2], [7, 8]]],
dtype=dtype.as_numpy_dtype())
expected_input_grad = np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]],
dtype=dtype.as_numpy_dtype())
with self.cached_session():
self.assertAllEqual(expected_updates_grad, self.evaluate(updates_grad))
if self.non_aliasing_add_test:
self.assertAllEqual(expected_input_grad, self.evaluate(input_grad))
@test_util.run_deprecated_v1
def testGradientsRank7SliceUpdate(self):
for dtype in GRADIENT_TESTS_DTYPES:
indices = constant_op.constant(
[[[[[[[0, 0, 0, 0, 0, 1], [0, 0, 1, 0, 0, 0]]]],
[[[[0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 1]]]]]]],
dtype=dtypes.int32)
updates = constant_op.constant(
[[[[[[[5, 6], [2, 4]]]], [[[[1, 3], [6, 8]]]]]]], dtype=dtype)
shape = constant_op.constant([1, 1, 2, 1, 1, 2, 2], dtype=dtypes.int32)
input_ = array_ops.zeros(shape, dtype=dtype)
outputs = self.scatter_nd(indices, updates, shape, input_)
grad_vals = constant_op.constant(
[[[[[[[1, 2], [3, 4]]]], [[[[5, 6], [7, 8]]]]]]], dtype=dtype)
updates_grad, input_grad = gradients_impl.gradients(
[outputs], [updates, input_], [grad_vals])
expected_updates_grad = np.array(
[[[[[[[3, 4], [5, 6]]]], [[[[1, 2], [7, 8]]]]]]],
dtype=dtype.as_numpy_dtype())
expected_input_grad = np.array(
[[[[[[[1, 2], [3, 4]]]], [[[[5, 6], [7, 8]]]]]]],
dtype=dtype.as_numpy_dtype())
with self.cached_session():
self.assertAllEqual(expected_updates_grad, self.evaluate(updates_grad))
if self.non_aliasing_add_test:
self.assertAllEqual(expected_input_grad, self.evaluate(input_grad))
@test_util.run_deprecated_v1
def testScatterNdRepatedIndicesAdd(self):
indices = array_ops.zeros([100000, 1], dtypes.int32)
values = np.random.randn(100000)
# %%
from multiprocessing import Pool
import time
import numpy as np
from scipy.stats import mvn
import os
import pickle
import copy
import matplotlib.pyplot as plt
from scipy import interpolate
from scipy.stats import norm
# %%
exec(open('../../env_vars.py').read())
dir_picklejar = os.environ['dir_picklejar']
filename = os.path.join(os.path.realpath(dir_picklejar), 'data_day_limits')
infile = open(filename,'rb')
data_day_limits = pickle.load(infile)
infile.close()
filename = os.path.join(os.path.realpath(dir_picklejar), 'init_latent_data_small')
infile = open(filename,'rb')
init_dict_latent_data = pickle.load(infile) # Initialization of the latent smoking times
infile.close()
filename = os.path.join(os.path.realpath(dir_picklejar), 'observed_dict_eod_survey')
infile = open(filename,'rb')
init_dict_observed_eod_survey = pickle.load(infile)
infile.close()
filename = os.path.join(os.path.realpath(dir_picklejar), 'observed_dict_all_ema')
infile = open(filename,'rb')
init_dict_observed_ema = pickle.load(infile)
infile.close()
# %%
def grow_tree(depth):
if depth==1:
current_data = list([0,1])
return current_data
elif depth > 1:
current_data = list([0,1])
curr_level = 2
while curr_level <= depth:
# Sweep through all leaves at the current level
list_curr_level = list(np.repeat(np.nan, repeats=2**curr_level))
for i in range(0, len(current_data)):
left_leaf = np.append(np.array(current_data[i]), 0)
right_leaf = np.append(np.array(current_data[i]), 1)
list_curr_level[2*i] = list(left_leaf)
list_curr_level[2*i + 1] = list(right_leaf)
# Go one level below
current_data = list_curr_level
curr_level += 1
return current_data
else:
return 0
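# Illustrative check (not in the original script): grow_tree enumerates all 2**depth
# binary inclusion patterns, which EODSurvey.calc_loglik later uses as limits of
# integration, e.g.
#   grow_tree(1) -> [0, 1]
#   grow_tree(2) -> [[0, 0], [0, 1], [1, 0], [1, 1]]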
# %%
class Latent:
'''
A collection of objects and methods related to latent process subcomponent
'''
def __init__(self, participant = None, day = None, latent_data = None, params = None, index = None):
self.participant = participant
self.day = day
self.latent_data = copy.deepcopy(latent_data)
self.params = copy.deepcopy(params)
self.index = index
def update_params(self, new_params):
'''
Update parameters
'''
self.params = copy.deepcopy(new_params)
def calc_loglik(self):
'''
Calculate loglikelihood for latent process subcomponent
'''
smoking_times = self.latent_data['hours_since_start_day']
day_length = self.latent_data['day_length']
lambda_prequit = self.params['lambda_prequit']
lambda_postquit = self.params['lambda_postquit']
# Calculate the total number of latent smoking times in the current iteration
m = len(smoking_times)
# lambda_prequit: number of events per hour during prequit period
# lambda_postquit: number of events per hour during postquit period
# day_length: total number of hours between wakeup time to sleep time on a given participant day
if self.day <4:
lik = np.exp(-lambda_prequit*day_length) * ((lambda_prequit*day_length) ** m) / np.math.factorial(m)
loglik = np.log(lik)
else:
lik = np.exp(-lambda_postquit*day_length) * ((lambda_postquit*day_length) ** m) / np.math.factorial(m)
loglik = np.log(lik)
return loglik
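# Minimal usage sketch (participant, day, data, and parameter values below are made up
# for illustration only):
def _demo_latent_loglik():
    fake_day = {'hours_since_start_day': np.array([2.5, 7.0, 11.25]), 'day_length': 16.0}
    obj = Latent(participant=0, day=1, latent_data=fake_day,
                 params={'lambda_prequit': 0.3, 'lambda_postquit': 0.1})
    return obj.calc_loglik()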
# %%
class EODSurvey:
'''
A collection of objects and methods related to end-of-day survey subcomponent
'''
def __init__(self, participant = None, day = None, latent_data = None, observed_data = None, params = None, index = None):
self.participant = participant
self.day = day
self.latent_data = copy.deepcopy(latent_data)
self.observed_data = copy.deepcopy(observed_data)
self.params = copy.deepcopy(params)
self.index = index
def update_params(self, new_params):
'''
Update parameters
'''
self.params = copy.deepcopy(new_params)
def calc_loglik(self):
'''
Calculate loglikelihood corresponding to end-of-day EMA subcomponent
'''
# Inputs to be checked ----------------------------------------------------------------------------
any_eod_ema = len(self.observed_data['assessment_begin'])
if any_eod_ema > 0:
# Begin after checks on inputs have been passed ---------------------------------------------------
# Go through each box one by one
collect_box_probs = np.array([])
arr_ticked = self.observed_data['ticked_box_raw'] # which boxes were ticked?
m = len(self.latent_data['hours_since_start_day']) # are there any latent smoking events?
all_boxes = np.array([8,9,10,11,12,13,14,15,16,17,18,19,20])
if (m == 0) and (len(arr_ticked) == 0):
collect_box_probs = np.repeat(1, len(all_boxes))
elif (m == 0) and (len(arr_ticked) > 0):
collect_box_probs = np.repeat(0, len(all_boxes))
else:
start_day = 0
end_day = 24
# Rescale time to be within 24 hour clock
all_true_smoke_times = self.latent_data['hours_since_start_day'] + self.observed_data['start_time_hour_of_day']
for k in range(0, len(all_boxes)):
curr_box = all_boxes[k] # lower limit of Box k; setting curr_lk and curr_box to be separate variables in case change of scale is needed for curr_lk
curr_lk = all_boxes[k] # lower limit of Box k
curr_uk = curr_lk + 1 # upper limit of Box k; add one hour to lower limit
recall_epsilon = self.params['recall_epsilon'] # in hours
num_points_to_sample = self.params['budget']
if len(all_true_smoke_times) <= num_points_to_sample:
true_smoke_times = all_true_smoke_times
else:
true_smoke_times = all_true_smoke_times[(all_true_smoke_times > curr_lk - recall_epsilon) * (all_true_smoke_times < curr_uk + recall_epsilon)]
if len(true_smoke_times) > num_points_to_sample:
true_smoke_times = np.random.choice(a = true_smoke_times, size = num_points_to_sample, replace = False)
# At this point, the length of true_smoke_times will always be at most num_points_to_sample
if len(true_smoke_times) > 0:
# Specify covariance matrix based on an exchangeable correlation matrix
rho = self.params['rho']
use_cormat = np.eye(len(true_smoke_times)) + rho*(np.ones((len(true_smoke_times),1)) * np.ones((1,len(true_smoke_times))) - np.eye(len(true_smoke_times)))
use_sd = self.params['sd']
use_covmat = (use_sd**2) * use_cormat
# Calculate total possible probability
total_possible_prob, error_code_total_possible_prob = mvn.mvnun(lower = np.repeat(start_day, len(true_smoke_times)),
upper = np.repeat(end_day, len(true_smoke_times)),
means = true_smoke_times,
covar = use_covmat)
# Begin calculating edge probabilities
collect_edge_probabilities = np.array([])
limits_of_integration = grow_tree(depth=len(true_smoke_times))
for j in range(0, len(limits_of_integration)):
curr_limits = np.array(limits_of_integration[j])
curr_lower_limits = np.where(curr_limits==0, start_day, curr_uk)
curr_upper_limits = np.where(curr_limits==0, curr_lk, end_day)
edge_probabilities, error_code_edge_probabilities = mvn.mvnun(lower = curr_lower_limits,
upper = curr_upper_limits,
means = true_smoke_times,
covar = use_covmat)
collect_edge_probabilities = np.append(collect_edge_probabilities, edge_probabilities)
total_edge_probabilities = np.sum(collect_edge_probabilities)
prob_none_recalled_within_current_box = total_edge_probabilities/total_possible_prob
# prob_none_recalled_within_current_box may be slightly above 1, e.g., 1.000000XXXXX
if (prob_none_recalled_within_current_box-1) > 0:
prob_none_recalled_within_current_box = 1
prob_at_least_one_recalled_within_box = 1-prob_none_recalled_within_current_box
else:
prob_none_recalled_within_current_box = 1
prob_at_least_one_recalled_within_box = 1-prob_none_recalled_within_current_box
# Exit the first IF-ELSE statement
if curr_box in arr_ticked:
collect_box_probs = np.append(collect_box_probs, prob_at_least_one_recalled_within_box)
else:
collect_box_probs = np.append(collect_box_probs, prob_none_recalled_within_current_box)
# Exit if-else statement
prob_observed_box_checking_pattern = np.prod(collect_box_probs)
loglik = np.log(prob_observed_box_checking_pattern)
self.observed_data['prob_bk'] = collect_box_probs
self.observed_data['product_prob_bk'] = prob_observed_box_checking_pattern
self.observed_data['log_product_prob_bk'] = loglik
else:
# If participant did not complete EOD survey, then this measurement type should NOT contribute to the loglikelihood
loglik = 0
return loglik
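# For reference (illustration only): the recall model above builds an exchangeable
# covariance matrix from rho and sd; this standalone helper mirrors that construction.
def _demo_exchangeable_covariance(n_times=3, rho=0.8, sd=0.5):
    cormat = np.eye(n_times) + rho * (np.ones((n_times, 1)) * np.ones((1, n_times)) - np.eye(n_times))
    return (sd ** 2) * cormat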
# %%
class SelfReport:
def __init__(self, participant = None, day = None, latent_data = None, observed_data = None, params = None, index = None):
self.participant = participant
self.day = day
self.latent_data = copy.deepcopy(latent_data)
self.observed_data = copy.deepcopy(observed_data)
self.params = copy.deepcopy(params)
self.index = index
def update_params(self, new_params):
'''
Update parameters
'''
self.params = copy.deepcopy(new_params)
def match(self):
'''
Matches each EMA with one latent smoking time occurring before the Self Report EMA
After a latent smoking time is matched, it is removed
'''
# Inputs to be checked --------------------------------------------
all_latent_times = self.latent_data['hours_since_start_day']
tot_ema = len(self.observed_data['assessment_type'])
if tot_ema > 0:
self.observed_data['matched_latent_time'] = np.repeat(np.nan, tot_ema)
remaining_latent_times = copy.deepcopy(all_latent_times)
remaining_latent_times = np.sort(remaining_latent_times)
for i in range(0, tot_ema):
current_lb = self.observed_data['assessment_begin_shifted'][i]
current_ub = self.observed_data['assessment_begin'][i]
#current_assessment_type = self.observed_data['assessment_type'][i]
which_within = (remaining_latent_times >= 0) & (remaining_latent_times < current_ub)
if np.sum(which_within)>0:
which_idx = np.where(which_within)
matched_idx = np.max(which_idx)
matched_latent_time = remaining_latent_times[matched_idx]
self.observed_data['matched_latent_time'][i] = matched_latent_time
remaining_latent_times = np.delete(remaining_latent_times, matched_idx)
remaining_latent_times = np.sort(remaining_latent_times)
else:
# This case can occur when between time 0 and time t there is no
# latent smoking time, but a self-report occurred between time 0 and time t
# This case may happen after a dumb death move
self.observed_data['matched_latent_time'][i] = np.nan
else:
self.observed_data['matched_latent_time'] = np.array([])
def calc_loglik(self):
'''
Call the method calc_loglik after the method match has been called
Calculate loglikelihood corresponding to self report EMA subcomponent
'''
# Inputs to be checked --------------------------------------------
all_latent_times = np.sort(self.latent_data['hours_since_start_day'])
tot_latent_events = len(all_latent_times)
if len(self.observed_data['assessment_type']) == 0:
tot_sr = 0
else:
# Total number of Self-Report
tot_sr = np.sum(self.observed_data['assessment_type']=='selfreport')
# Specify parameter values ----------------------------------------
lambda_delay = self.params['lambda_delay']
use_scale = self.params['sd']
prob_reporting_when_any = self.params['prob_reporting_when_any']
prob_reporting_when_none = self.params['prob_reporting_when_none']
if tot_latent_events == 0 and tot_sr > 0 :
# Note: in this case, any Self-Report EMA cannot be matched to a latent smoking time
# This case could happen if, for example, previous move might have been a 'death'
# but participant initiated at least one self-report.
# Assume that participant can lie/misremember when they Self-Report
total_lik = prob_reporting_when_none**tot_sr
total_loglik = np.log(total_lik)
elif tot_latent_events > 0 and tot_sr == 0:
# Note: in this case, latent smoking times exist but they were not reported in a Self Report EMA
# This case could happen if, for example, previous move might have been a 'birth'
# but there was no self-report observed.
# Assume that participant does not lie when they Self-Report
# However, participant may neglect to Self-Report a smoking incident
# for example, due to burden
total_lik = (1 - prob_reporting_when_any)**tot_latent_events
total_loglik = np.log(total_lik)
elif tot_latent_events > 0 and tot_sr > 0:
total_loglik = 0
# Subcomponent due to delay ---------------------------------------
self.observed_data['delay'] = self.observed_data['assessment_begin'] - self.observed_data['matched_latent_time']
total_loglik += tot_sr * np.log(lambda_delay) - lambda_delay * np.nansum(self.observed_data['delay'])
# Subcomponent due to recall --------------------------------------
tot_ema = len(self.observed_data['assessment_order'])
self.observed_data['prob_bk'] = np.repeat(np.nan, tot_ema)
self.observed_data['log_prob_bk'] = np.repeat(np.nan, tot_ema)
tot_sr_with_matched = 0
for i in range(0, tot_ema):
if self.observed_data['assessment_type'][i]=='selfreport':
current_lb = self.observed_data['assessment_begin_shifted'][i]
current_ub = self.observed_data['assessment_begin'][i]
curr_matched_time = self.observed_data['matched_latent_time'][i]
# Check: Is current Self-Report EMA matched to any latent smoking time?
if np.isnan(curr_matched_time):
# Current Self-Report EMA is NOT matched to any latent smoking time
self.observed_data['prob_bk'][i] = prob_reporting_when_none
self.observed_data['log_prob_bk'][i] = np.log(self.observed_data['prob_bk'][i])
else:
# Current Self-Report EMA is matched to a latent smoking time
tot_sr_with_matched += 1 # update counter
# Calculate numerator of bk
windowtag = self.observed_data['windowtag'][i]
# Note: each value of windowtag corresponds to a response option in hours
# use_this_window_max will be based on time when previous EMA was delivered
use_this_window_min = {1: 0/60, 2: 5/60, 3: 15/60, 4: 30/60}
use_this_window_max = {1: 5/60, 2: 15/60, 3: 30/60, 4: np.nan}
# upper limit of integration
current_uk = self.observed_data['assessment_begin'][i] - use_this_window_min[windowtag]
if windowtag == 4:
if self.observed_data['assessment_begin_shifted'][i] > current_uk:
current_lk = self.observed_data['assessment_begin_shifted'][i] - 24 # subtract 24 hours
else:
current_lk = self.observed_data['assessment_begin_shifted'][i]
else:
current_lk = self.observed_data['assessment_begin'][i] - use_this_window_max[windowtag]
# Calculate denominator of bk
if current_lk <= current_lb:
total_prob_constrained_lb = norm.cdf(x = current_lk, loc = curr_matched_time, scale = use_scale)
else:
total_prob_constrained_lb = norm.cdf(x = current_lb, loc = curr_matched_time, scale = use_scale)
total_prob_constrained_ub = norm.cdf(x = current_ub, loc = curr_matched_time, scale = use_scale)
tot_prob_constrained = total_prob_constrained_ub - total_prob_constrained_lb
prob_constrained_lk = norm.cdf(x = current_lk, loc = curr_matched_time, scale = use_scale)
prob_constrained_uk = norm.cdf(x = current_uk, loc = curr_matched_time, scale = use_scale)
if (prob_constrained_uk - prob_constrained_lk) == tot_prob_constrained:
self.observed_data['prob_bk'][i] = (current_uk - current_lk)/(current_ub - current_lb)
self.observed_data['log_prob_bk'][i] = np.log(self.observed_data['prob_bk'][i])
else:
self.observed_data['prob_bk'][i] = (prob_constrained_uk - prob_constrained_lk)/tot_prob_constrained
self.observed_data['log_prob_bk'][i] = np.log(self.observed_data['prob_bk'][i])
# We have already exited the for loop
total_loglik += np.nansum(self.observed_data['log_prob_bk'])
# Subcomponent due to propensity to self-report
total_loglik += tot_sr_with_matched * np.log(prob_reporting_when_any) + (tot_latent_events - tot_sr_with_matched) * np.log(1-prob_reporting_when_any)
else: #tot_latent_events == 0 and tot_sr == 0:
total_lik = 1
total_loglik = np.log(total_lik)
return total_loglik
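# Sketch (not part of the original model code) of the recall term used above: the
# probability that a recalled time falls in [lk, uk], conditional on lying in [lb, ub],
# under a normal centered at the matched latent smoking time.
def _demo_recall_probability(matched_time=2.0, lb=0.0, ub=4.0, lk=1.5, uk=2.5, sd=0.75):
    denom = norm.cdf(ub, loc=matched_time, scale=sd) - norm.cdf(lb, loc=matched_time, scale=sd)
    numer = norm.cdf(uk, loc=matched_time, scale=sd) - norm.cdf(lk, loc=matched_time, scale=sd)
    return numer / denom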
# %%
class RandomEMA:
def __init__(self, participant = None, day = None, latent_data = None, observed_data = None, params = None, index = None):
self.participant = participant
self.day = day
self.latent_data = copy.deepcopy(latent_data)
self.observed_data = copy.deepcopy(observed_data)
self.params = copy.deepcopy(params)
self.index = index
def update_params(self, new_params):
'''
Update parameters
'''
self.params = copy.deepcopy(new_params)
def match(self):
'''
Matches each EMA with one latent smoking time occurring before the Random EMA
After a latent smoking time is matched, it is removed
'''
# Inputs to be checked --------------------------------------------
all_latent_times = self.latent_data['hours_since_start_day']
tot_ema = len(self.observed_data['assessment_type'])
if tot_ema > 0:
self.observed_data['matched_latent_time'] = np.repeat(np.nan, tot_ema)
remaining_latent_times = copy.deepcopy(all_latent_times)
remaining_latent_times = np.sort(remaining_latent_times)
for i in range(0, tot_ema):
current_lb = self.observed_data['assessment_begin_shifted'][i]
current_ub = self.observed_data['assessment_begin'][i]
#current_assessment_type = self.observed_data['assessment_type'][i]
which_within = (remaining_latent_times >= 0) & (remaining_latent_times < current_ub)
if np.sum(which_within)>0:
which_idx = np.where(which_within)
matched_idx = np.max(which_idx)
matched_latent_time = remaining_latent_times[matched_idx]
self.observed_data['matched_latent_time'][i] = matched_latent_time
remaining_latent_times = np.delete(remaining_latent_times, matched_idx)
remaining_latent_times = np.sort(remaining_latent_times)
else:
# This case can occur when between time 0 and time t there is no
# latent smoking time, but a self-report occurred between time 0 and time t
# This case may happen after a dumb death move
self.observed_data['matched_latent_time'][i] = np.nan
else:
self.observed_data['matched_latent_time'] = np.array([])
def calc_loglik(self):
'''
Call the method calc_loglik after the method match has been called
Calculate loglikelihood corresponding to Random EMA subcomponent
'''
use_scale = self.params['sd']
prob_reporting_when_any = self.params['prob_reporting_when_any']
prob_reporting_when_none = self.params['prob_reporting_when_none']
all_latent_times = np.sort(self.latent_data['hours_since_start_day'])
tot_latent_events = len(all_latent_times)
tot_ema = len(self.observed_data['assessment_type'])
if tot_ema == 0:
tot_random_ema = 0
else:
tot_random_ema = np.sum(self.observed_data['assessment_type']=='random_ema')
self.observed_data['prob_bk'] = np.repeat(np.nan, tot_ema)
self.observed_data['log_prob_bk'] = np.repeat(np.nan, tot_ema)
if tot_random_ema > 0:
total_loglik = 0
# Note: each value of windowtag corresponds to a response option in hours
# use_this_window_max will be based on time when prevous EMA was delivered
use_this_window_min = {1: 0/60, 2: 20/60, 3: 40/60, 4: 60/60, 5: 80/60, 6: 100/60}
use_this_window_max = {1: 20/60, 2: 40/60, 3: 60/60, 4: 80/60, 5: 100/60, 6: np.nan}
for i in range(0, tot_ema):
if (self.observed_data['assessment_type'][i]=='random_ema') and (self.observed_data['smoke'][i]=='Yes'):
curr_matched_time = self.observed_data['matched_latent_time'][i]
if np.isnan(curr_matched_time):
self.observed_data['prob_bk'][i] = prob_reporting_when_none # i.e., prob of reporting when no latent smoking time can be matched
self.observed_data['log_prob_bk'][i] = np.log(self.observed_data['prob_bk'][i])
total_loglik += self.observed_data['log_prob_bk'][i]
else:
current_lb = self.observed_data['assessment_begin_shifted'][i]
current_ub = self.observed_data['assessment_begin'][i]
windowtag = self.observed_data['windowtag'][i]
# upper limit of integration
current_uk = self.observed_data['assessment_begin'][i] - use_this_window_min[windowtag]
# lower limit of integration
if windowtag == 6:
if self.observed_data['assessment_begin_shifted'][i] > current_uk:
current_lk = self.observed_data['assessment_begin_shifted'][i] - 24 # subtract 24 hours
else:
current_lk = self.observed_data['assessment_begin_shifted'][i]
else:
current_lk = self.observed_data['assessment_begin'][i] - use_this_window_max[windowtag]
if (current_lk <= current_lb and current_uk <= current_lb):
# i.e., the upper bound and lower bound of the recalled smoking time both come before current_lb
# adding a point to this region should be a very unlikely occurrence
total_prob_constrained_lb = norm.cdf(x = current_lk, loc = curr_matched_time, scale = use_scale) # note that x = current_lk
total_prob_constrained_ub = norm.cdf(x = current_ub, loc = curr_matched_time, scale = use_scale)
tot_prob_constrained = total_prob_constrained_ub - total_prob_constrained_lb
prob_constrained_lk = norm.cdf(x = current_lk, loc = curr_matched_time, scale = use_scale)
prob_constrained_uk = norm.cdf(x = current_uk, loc = curr_matched_time, scale = use_scale)
if (prob_constrained_uk - prob_constrained_lk) == tot_prob_constrained:
self.observed_data['prob_bk'][i] = (current_uk - current_lk)/(current_ub - current_lb)
self.observed_data['log_prob_bk'][i] = np.log(self.observed_data['prob_bk'][i])
total_loglik += self.observed_data['log_prob_bk'][i]
total_loglik += np.log(prob_reporting_when_any)
else:
self.observed_data['prob_bk'][i] = (prob_constrained_uk - prob_constrained_lk)/tot_prob_constrained
self.observed_data['log_prob_bk'][i] = np.log(self.observed_data['prob_bk'][i])
total_loglik += self.observed_data['log_prob_bk'][i]
total_loglik += np.log(prob_reporting_when_any)
elif (current_lk <= current_lb and current_uk > current_lb):
# i.e., the lower bound of the recalled smoking time come before current_lb
# but the upper bound comes after current_lb
total_prob_constrained_lb = norm.cdf(x = current_lk, loc = curr_matched_time, scale = use_scale) # note that x = current_lk
total_prob_constrained_ub = norm.cdf(x = current_ub, loc = curr_matched_time, scale = use_scale)
tot_prob_constrained = total_prob_constrained_ub - total_prob_constrained_lb
prob_constrained_lk = norm.cdf(x = current_lk, loc = curr_matched_time, scale = use_scale)
prob_constrained_uk = norm.cdf(x = current_uk, loc = curr_matched_time, scale = use_scale)
if (prob_constrained_uk - prob_constrained_lk) == tot_prob_constrained:
self.observed_data['prob_bk'][i] = (current_uk - current_lk)/(current_ub - current_lb)
self.observed_data['log_prob_bk'][i] = np.log(self.observed_data['prob_bk'][i])
total_loglik += self.observed_data['log_prob_bk'][i]
total_loglik += np.log(prob_reporting_when_any)
else:
self.observed_data['prob_bk'][i] = (prob_constrained_uk - prob_constrained_lk)/tot_prob_constrained
self.observed_data['log_prob_bk'][i] = np.log(self.observed_data['prob_bk'][i])
total_loglik += self.observed_data['log_prob_bk'][i]
total_loglik += np.log(prob_reporting_when_any)
elif (current_lk >= current_lb and current_uk >= current_lb):
total_prob_constrained_lb = norm.cdf(x = current_lb, loc = curr_matched_time, scale = use_scale)
total_prob_constrained_ub = norm.cdf(x = current_ub, loc = curr_matched_time, scale = use_scale)
tot_prob_constrained = total_prob_constrained_ub - total_prob_constrained_lb
prob_constrained_lk = norm.cdf(x = current_lk, loc = curr_matched_time, scale = use_scale)
prob_constrained_uk = norm.cdf(x = current_uk, loc = curr_matched_time, scale = use_scale)
if (prob_constrained_uk - prob_constrained_lk) == tot_prob_constrained:
self.observed_data['prob_bk'][i] = (current_uk - current_lk)/(current_ub - current_lb)
self.observed_data['log_prob_bk'][i] = np.log(self.observed_data['prob_bk'][i])
total_loglik += self.observed_data['log_prob_bk'][i]
total_loglik += np.log(prob_reporting_when_any)
else:
self.observed_data['prob_bk'][i] = (prob_constrained_uk - prob_constrained_lk)/tot_prob_constrained
self.observed_data['log_prob_bk'][i] = | np.log(self.observed_data['prob_bk'][i]) | numpy.log |
import numpy as np
from timeit import default_timer as timer
import time
import math
import transformations as tf
import cv2
import threading
# limit for averaging
ALLOW_LIMIT = 12
class Markers():
def __init__(self, MARKER_SIDE, getCoords_event):
        # initialise arrays
self.ids = []
self.tvec_origin = []
self.rvec_origin = []
self.dRot = []
self.angle_origin = np.zeros((1,3))
self.height_origin = 0
# for filtering values
self.allow_use = []
self.tvec_max = []
self.tvec_min = []
        # the first marker's orientation
self.orientation = np.zeros((2,3))
# for logging
self.OpenedFile=False
# for data collection
self.getCoords_event = getCoords_event
    # Append marker to the list of stored markers
def appendMarker(self, seen_id_list, tvec, rvec, angles, tof):
for n_id in seen_id_list:
n_index = seen_id_list.index(n_id)
if n_id == 1 and len(self.ids) == 0:
# add the first marker as origin
self.ids.append(n_id)
self.tvec_origin.append(np.array([[0,0,0]]))
self.rvec_origin.append(rvec[n_index])
self.dRot.append(np.array([[1,0,0],[0,1,0],[0,0,1]]))
self.allow_use.append(ALLOW_LIMIT)
self.tvec_max.append(0)
self.tvec_min.append(10000)
self.angle_origin = angles
self.height_origin = abs(tof)
rvec_Euler = tf.rotationVectorToEulerAngles(rvec[n_index])*180/math.pi
# determine orientation
orig_type = "?"
if abs(rvec_Euler[0][0]) <= 150: # horizontal
self.orientation = np.array([[1, 1, 1],[0, 1, 2]])
orig_type = "Horizontal"
elif abs(rvec_Euler[0][0]) > 150: # vertical
self.orientation = np.array([[1, -1, 1],[0, 2, 1]])
orig_type = "Vertical"
#print(self.orientation)
print(orig_type + " origin set")
elif n_id not in self.ids and len(self.ids) > 0 and len(seen_id_list) >= 2:
# append new marker to lists with dummy values
self.ids.append(n_id)
self.tvec_origin.append(np.array([[0, 0, 0]]))
self.rvec_origin.append(np.array([[0, 0, 0]]))
self.dRot.append(np.array([[0,0,0],[0,0,0],[0,0,0]]))
self.allow_use.append(0)
self.tvec_max.append(0)
self.tvec_min.append(10000)
for m_id in seen_id_list:
if m_id in self.ids and m_id != n_id and self.allow_use[self.ids.index(m_id)]==ALLOW_LIMIT:
# n is to be added, m is already in list
m_index = seen_id_list.index(m_id)
m_index_list = self.ids.index(m_id)
n_index_list = self.ids.index(n_id)
# calculate needed matrix transformations
t, R, r, a, ma, mi = tf.getTransformations(n_id, tvec[m_index], tvec[n_index], rvec[m_index], rvec[n_index],
self.tvec_origin[m_index_list], self.tvec_origin[n_index_list],
self.rvec_origin[m_index_list], self.rvec_origin[n_index_list],
self.dRot[m_index_list], self.dRot[n_index_list],
self.allow_use[n_index_list], ALLOW_LIMIT,
self.tvec_max[n_index_list], self.tvec_min[n_index_list])
self.tvec_origin[n_index_list] = t
self.dRot[n_index_list] = R
self.rvec_origin[n_index_list] = r
self.allow_use[n_index_list] = a
self.tvec_max[n_index_list] = ma
self.tvec_min[n_index_list] = mi
break
elif n_id in self.ids and self.allow_use[self.ids.index(n_id)]<ALLOW_LIMIT:
# marker can be used only after ALLOW_LIMIT has been reached
for m_id in seen_id_list:
if m_id in self.ids and m_id != n_id and self.allow_use[self.ids.index(m_id)]==ALLOW_LIMIT:
# n is to be added, m is already in list
m_index = seen_id_list.index(m_id)
m_index_list = self.ids.index(m_id)
n_index_list = self.ids.index(n_id)
# calculate needed matrix transformations
t, R, r, a, ma, mi = tf.getTransformations(n_id, tvec[m_index], tvec[n_index], rvec[m_index], rvec[n_index],
self.tvec_origin[m_index_list], self.tvec_origin[n_index_list],
self.rvec_origin[m_index_list], self.rvec_origin[n_index_list],
self.dRot[m_index_list], self.dRot[n_index_list],
self.allow_use[n_index_list], ALLOW_LIMIT,
self.tvec_max[n_index_list], self.tvec_min[n_index_list])
self.tvec_origin[n_index_list] = t
self.dRot[n_index_list] = R
self.rvec_origin[n_index_list] = r
self.allow_use[n_index_list] = a
self.tvec_max[n_index_list] = ma
self.tvec_min[n_index_list] = mi
break
# Calculate camera pose from seen markers
def getCoords(self, seen_id_list, tvecs, rvecs, angles):
length = len(seen_id_list)
len_diff = 0
dtv = | np.zeros((1,3)) | numpy.zeros |
"""
jsaavedr, 2020
This is a simple version of train.py.
To use train.py, you will need to set the following parameters:
* -config : A configuration file where a set of parameters for data construction and training is defined.
* -name: The section name in the configuration file.
* -mode: [train, test, predict] for training, testing, or prediction. By default this is set to 'train'
* -save: Set true for saving the model
"""
import pathlib
import sys
sys.path.append('/home/jsaavedr/Research/git/tensorflow-2/convnet2')
sys.path.append(str(pathlib.Path().absolute()))
import tensorflow as tf
from models import vae
import datasets.data as data
import utils.configuration as conf
import utils_vae.losses as losses
import utils_vae.parsers as parsers
import utils.imgproc as imgproc
import numpy as np
import argparse
import os
import matplotlib.pyplot as plt
if __name__ == '__main__' :
parser = argparse.ArgumentParser(description = "Train a simple mnist model")
parser.add_argument("-config", type = str, help = "<str> configuration file", required = True)
parser.add_argument("-name", type=str, help=" name of section in the configuration file", required = True)
parser.add_argument("-mode", type=str, choices=['train', 'test', 'predict'], help=" train or test", required = False, default = 'train')
parser.add_argument("-save", type=lambda x: (str(x).lower() == 'true'), help=" True to save the model", required = False, default = False)
pargs = parser.parse_args()
configuration_file = pargs.config
configuration = conf.ConfigurationFile(configuration_file, pargs.name)
if pargs.mode == 'train' :
tfr_train_file = os.path.join(configuration.get_data_dir(), "train.tfrecords")
if pargs.mode == 'train' or pargs.mode == 'test':
tfr_test_file = os.path.join(configuration.get_data_dir(), "test.tfrecords")
if configuration.use_multithreads() :
if pargs.mode == 'train' :
tfr_train_file=[os.path.join(configuration.get_data_dir(), "train_{}.tfrecords".format(idx)) for idx in range(configuration.get_num_threads())]
if pargs.mode == 'train' or pargs.mode == 'test':
tfr_test_file=[os.path.join(configuration.get_data_dir(), "test_{}.tfrecords".format(idx)) for idx in range(configuration.get_num_threads())]
sys.stdout.flush()
mean_file = os.path.join(configuration.get_data_dir(), "mean.dat")
shape_file = os.path.join(configuration.get_data_dir(),"shape.dat")
#
input_shape = np.fromfile(shape_file, dtype=np.int32)
print(input_shape)
mean_image = | np.fromfile(mean_file, dtype=np.float32) | numpy.fromfile |
""" Represent a triangulated surface using a 3D boolean grid"""
import logging
import numpy as np
from rpl.tools.ray_tracing.bsp_tree_poly import BSP_Element
from rpl.tools.geometry import geom_utils
import data_io
class BSP_Grid(object):
def __init__(self, node_array, tris, allocate_step=100000):
"""
Store the triangles with an enumeration so that even when they are subdivided their
identity is not lost.
"""
tri_nums = np.arange(len(tris), dtype=np.int32).reshape((len(tris), 1))
minus_ones = -np.ones((len(tris), 6), dtype=np.int32)
self.tris = np.hstack((tris, minus_ones, tri_nums))
self.allocate_step = allocate_step
self.node_array = node_array # Reference to the full list of nodes
self._resize()
self.next_free = len(node_array)
self.split_cache = np.zeros(len(self.tris), dtype=np.int32)
def _resize(self):
"""
Increase node array size by the allocate_step amount.
"""
self.array_size = len(self.node_array) + self.allocate_step
self.node_array = np.concatenate((self.node_array, np.zeros((self.allocate_step, 3))))
def add_node(self, node):
"""
Adds a new node to the end of the node array (expanding if required). Returns the index of
the newly added node.
"""
if self.next_free == self.array_size:
self._resize()
self.node_array[self.next_free] = node
self.next_free += 1
return self.next_free - 1
def prepare_add(self, num_add_nodes):
"""
Make sure that ``num_add_nodes`` can be added later without needing a resize.
Useful if adding nodes from within cython where resizing is tricky.
"""
if self.next_free + num_add_nodes >= self.array_size:
self._resize()
return self.next_free
def make_grid(veh_surfs, settings):
"""
Make coordinates of voxelated grid based on overall list of vehicle surfaces
"""
## Find overall bounding box
x_min, x_max = 1e30, -1e30
y_min, y_max = 1e30, -1e30
z_min, z_max = 1e30, -1e30
for key, veh_surf in veh_surfs.items():
x_min, x_max = min(x_min, np.min(veh_surf["x"])), max(x_max, np.max(veh_surf["x"]))
y_min, y_max = min(y_min, np.min(veh_surf["y"])), max(y_max, np.max(veh_surf["y"]))
z_min, z_max = min(z_min, np.min(veh_surf["z"])), max(z_max, np.max(veh_surf["z"]))
x_min, x_max = x_min - settings["voxel_size"], x_max + settings["voxel_size"]
y_min, y_max = y_min - settings["voxel_size"], y_max + settings["voxel_size"]
z_min, z_max = z_min - settings["voxel_size"], z_max + settings["voxel_size"]
###########################################
# Create the uniformly spaced grid points
x_grid = np.arange(x_min, x_max + settings["voxel_size"], settings["voxel_size"])
y_grid = np.arange(y_min, y_max + settings["voxel_size"], settings["voxel_size"])
z_grid = np.arange(z_min, z_max + settings["voxel_size"], settings["voxel_size"])
return x_grid, y_grid, z_grid
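# Illustrative example (hypothetical numbers): with voxel_size = 10 and vehicle
# geometry spanning x in [0, 95], the padded range becomes [-10, 105] and
# np.arange yields grid points -10, 0, ..., 110, fully enclosing the part.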
def convert_geom(veh_surf, tr_mat):
"""
Rotate nodes using provided transformation matrix; convert xyz node dict to nodes array
"""
veh_surf["nodes"] = np.vstack((veh_surf["x"], veh_surf["y"], veh_surf["z"])).T
veh_surf['nodes'] = np.dot(veh_surf['nodes'], tr_mat[:3, :3])
veh_surf["x"] = veh_surf['nodes'][:, 0]
veh_surf["y"] = veh_surf['nodes'][:, 1]
veh_surf["z"] = veh_surf['nodes'][:, 2]
return veh_surf
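# Note: only the upper-left 3x3 block of ``tr_mat`` is applied, so a homogeneous
# 4x4 identity transform (np.eye(4)) leaves the node coordinates unchanged.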
def find_occupied_voxels(surf, surf_mask, voxel_data):
"""
    Voxels containing any triangle from ``surf`` are considered occupied and or'ed with ``surf_mask``.
    If the supplied occupied-voxel array is None, a new voxel array is created and returned.
"""
nodes = surf["nodes"]
tris = surf["tris"]
x_pts, y_pts, z_pts = [voxel_data[k] for k in ("x_grid", "y_grid", "z_grid")]
vox_size = voxel_data["vox_size"]
## Find the local extents of this part
min_x, max_x = np.min(surf["x"]) - vox_size, np.max(surf["x"]) + vox_size
min_y, max_y = np.min(surf["y"]) - vox_size, np.max(surf["y"]) + vox_size
min_z, max_z = np.min(surf["z"]) - vox_size, | np.max(surf["z"]) | numpy.max |
import numpy as np
from scipy.optimize import minimize
import warnings
import properties
from ....utils.code_utils import deprecate_class
from ....utils import mkvc, sdiag, Zero
from ...base import BaseEMSimulation
from ....data import Data
from .survey import Survey
from .fields_2d import Fields2D, Fields2DCellCentered, Fields2DNodal
from .fields import FieldsDC, Fields3DCellCentered, Fields3DNodal
from .utils import _mini_pole_pole
from scipy.special import k0e, k1e, k0
from discretize.utils import make_boundary_bool
class BaseDCSimulation2D(BaseEMSimulation):
"""
Base 2.5D DC problem
"""
survey = properties.Instance("a DC survey object", Survey, required=True)
storeJ = properties.Bool("store the sensitivity matrix?", default=False)
nky = properties.Integer(
"Number of kys to use in wavenumber space", required=False, default=11
)
fieldsPair = Fields2D # SimPEG.EM.Static.Fields_2D
fieldsPair_fwd = FieldsDC
    # there are actually nT+1 fields, so we don't need to store the last one
_Jmatrix = None
fix_Jmatrix = False
_mini_survey = None
def __init__(self, *args, **kwargs):
miniaturize = kwargs.pop("miniaturize", False)
do_trap = kwargs.pop("do_trap", False)
super().__init__(*args, **kwargs)
if not do_trap:
# try to find an optimal set of quadrature points and weights
def get_phi(r):
e = np.ones_like(r)
def phi(k):
# use log10 transform to enforce positivity
k = 10 ** k
A = r[:, None] * k0(r[:, None] * k)
v_i = A @ np.linalg.solve(A.T @ A, A.T @ e)
dv = (e - v_i) / len(r)
return np.linalg.norm(dv)
def g(k):
A = r[:, None] * k0(r[:, None] * k)
return np.linalg.solve(A.T @ A, A.T @ e)
return phi, g
# find the minimum cell spacing, and the maximum side of the mesh
min_r = min(*[np.min(h) for h in self.mesh.h])
max_r = max(*[np.sum(h) for h in self.mesh.h])
# generate test points log spaced between these two end members
rs = np.logspace(np.log10(min_r / 4), np.log10(max_r * 4), 100)
min_rinv = -np.log10(rs).max()
max_rinv = -np.log10(rs).min()
# a decent initial guess of the k_i's for the optimization = 1/rs
k_i = np.linspace(min_rinv, max_rinv, self.nky)
# these functions depend on r, so grab them
func, g_func = get_phi(rs)
# just use scipy's minimize for ease
out = minimize(func, k_i)
if self.verbose:
print(f"optimized ks converged? : {out['success']}")
print(f"Estimated transform Error: {out['fun']}")
# transform the solution back to normal points
points = 10 ** out["x"]
# transform has a 2/pi and we want 1/pi, so divide by 2
weights = g_func(points) / 2
if not out["success"]:
warnings.warn(
"Falling back to trapezoidal for integration. "
"You may need to change nky."
)
do_trap = True
if do_trap:
if self.verbose:
print("doing trap")
y = 0.0
points = np.logspace(-4, 1, self.nky)
dky = np.diff(points) / 2
weights = np.r_[dky, 0] + np.r_[0, dky]
weights *= np.cos(points * y) # *(1.0/np.pi)
# assume constant value at 0 frequency?
weights[0] += points[0] / 2 * (1.0 + np.cos(points[0] * y))
weights /= np.pi
self._quad_weights = weights
self._quad_points = points
self.Ainv = [None for i in range(self.nky)]
self.nT = self.nky - 1 # Only for using TimeFields
# Do stuff to simplify the forward and JTvec operation if number of dipole
# sources is greater than the number of unique pole sources
if miniaturize:
self._dipoles, self._invs, self._mini_survey = _mini_pole_pole(self.survey)
def fields(self, m):
if self.verbose:
print(">> Compute fields")
if m is not None:
self.model = m
if self.Ainv[0] is not None:
for i in range(self.nky):
self.Ainv[i].clean()
f = self.fieldsPair(self)
kys = self._quad_points
f._quad_weights = self._quad_weights
for iky, ky in enumerate(kys):
A = self.getA(ky)
if self.Ainv[iky] is not None:
self.Ainv[iky].clean()
self.Ainv[iky] = self.solver(A, **self.solver_opts)
RHS = self.getRHS(ky)
u = self.Ainv[iky] * RHS
f[:, self._solutionType, iky] = u
return f
def fields_to_space(self, f, y=0.0):
f_fwd = self.fieldsPair_fwd(self)
phi = f[:, self._solutionType, :].dot(self._quad_weights)
f_fwd[:, self._solutionType] = phi
return f_fwd
def dpred(self, m=None, f=None):
"""
Project fields to receiver locations
        :param Fields f: fields object
:rtype: numpy.ndarray
:return: data
"""
if f is None:
if m is None:
m = self.model
f = self.fields(m)
weights = self._quad_weights
if self._mini_survey is not None:
survey = self._mini_survey
else:
survey = self.survey
temp = np.empty(survey.nD)
count = 0
for src in survey.source_list:
for rx in src.receiver_list:
d = rx.eval(src, self.mesh, f).dot(weights)
temp[count : count + len(d)] = d
count += len(d)
return self._mini_survey_data(temp)
def getJ(self, m, f=None):
"""
Generate Full sensitivity matrix
"""
if self._Jmatrix is not None:
return self._Jmatrix
else:
if self.verbose:
print("Calculating J and storing")
self.model = m
if f is None:
f = self.fields(m)
self._Jmatrix = (self._Jtvec(m, v=None, f=f)).T
return self._Jmatrix
def Jvec(self, m, v, f=None):
"""
Compute sensitivity matrix (J) and vector (v) product.
"""
if self.storeJ:
J = self.getJ(m, f=f)
Jv = mkvc(np.dot(J, v))
return Jv
self.model = m
if f is None:
f = self.fields(m)
if self._mini_survey is not None:
survey = self._mini_survey
else:
survey = self.survey
kys = self._quad_points
weights = self._quad_weights
Jv = np.zeros(survey.nD)
# Assume y=0.
        # This needs some thought to implement in general when src is a dipole
        # TODO: this loop is pretty slow .. (parallelize)
for iky, ky in enumerate(kys):
u_ky = f[:, self._solutionType, iky]
count = 0
for i_src, src in enumerate(survey.source_list):
u_src = u_ky[:, i_src]
dA_dm_v = self.getADeriv(ky, u_src, v, adjoint=False)
# dRHS_dm_v = self.getRHSDeriv(ky, src, v) = 0
du_dm_v = self.Ainv[iky] * (-dA_dm_v) # + dRHS_dm_v)
for rx in src.receiver_list:
df_dmFun = getattr(f, "_{0!s}Deriv".format(rx.projField), None)
df_dm_v = df_dmFun(iky, src, du_dm_v, v, adjoint=False)
Jv1_temp = rx.evalDeriv(src, self.mesh, f, df_dm_v)
                    # Trapezoidal integration
Jv[count : count + len(Jv1_temp)] += weights[iky] * Jv1_temp
count += len(Jv1_temp)
return self._mini_survey_data(Jv)
def Jtvec(self, m, v, f=None):
"""
Compute adjoint sensitivity matrix (J^T) and vector (v) product.
"""
if self.storeJ:
J = self.getJ(m, f=f)
Jtv = mkvc(np.dot(J.T, v))
return Jtv
self.model = m
if f is None:
f = self.fields(m)
return self._Jtvec(m, v=v, f=f)
def _Jtvec(self, m, v=None, f=None):
"""
Compute adjoint sensitivity matrix (J^T) and vector (v) product.
        Full J matrix can be computed by inputting v=None
"""
kys = self._quad_points
weights = self._quad_weights
if self._mini_survey is not None:
survey = self._mini_survey
else:
survey = self.survey
if v is not None:
# Ensure v is a data object.
if isinstance(v, Data):
v = v.dobs
v = self._mini_survey_dataT(v)
Jtv = np.zeros(m.size, dtype=float)
for iky, ky in enumerate(kys):
u_ky = f[:, self._solutionType, iky]
count = 0
for i_src, src in enumerate(survey.source_list):
u_src = u_ky[:, i_src]
df_duT_sum = 0
df_dmT_sum = 0
for rx in src.receiver_list:
my_v = v[count : count + rx.nD]
count += rx.nD
# wrt f, need possibility wrt m
PTv = rx.evalDeriv(src, self.mesh, f, my_v, adjoint=True)
df_duTFun = getattr(f, "_{0!s}Deriv".format(rx.projField), None)
df_duT, df_dmT = df_duTFun(iky, src, None, PTv, adjoint=True)
df_duT_sum += df_duT
df_dmT_sum += df_dmT
ATinvdf_duT = self.Ainv[iky] * df_duT_sum
dA_dmT = self.getADeriv(ky, u_src, ATinvdf_duT, adjoint=True)
# dRHS_dmT = self.getRHSDeriv(ky, src, ATinvdf_duT,
# adjoint=True)
du_dmT = -dA_dmT # + dRHS_dmT=0
Jtv += weights[iky] * (df_dmT + du_dmT).astype(float)
return mkvc(Jtv)
else:
# This is for forming full sensitivity matrix
Jt = np.zeros((self.model.size, survey.nD), order="F")
for iky, ky in enumerate(kys):
u_ky = f[:, self._solutionType, iky]
istrt = 0
for i_src, src in enumerate(survey.source_list):
u_src = u_ky[:, i_src]
for rx in src.receiver_list:
# wrt f, need possibility wrt m
PT = rx.evalDeriv(src, self.mesh, f).toarray().T
ATinvdf_duT = self.Ainv[iky] * PT
dA_dmT = self.getADeriv(ky, u_src, ATinvdf_duT, adjoint=True)
Jtv = -weights[iky] * dA_dmT # RHS=0
iend = istrt + rx.nD
if rx.nD == 1:
Jt[:, istrt] += Jtv
else:
Jt[:, istrt:iend] += Jtv
istrt += rx.nD
return (self._mini_survey_data(Jt.T)).T
def getSourceTerm(self, ky):
"""
takes concept of source and turns it into a matrix
"""
"""
Evaluates the sources, and puts them in matrix form
:rtype: (numpy.ndarray, numpy.ndarray)
:return: q (nC or nN, nSrc)
"""
if self._mini_survey is not None:
Srcs = self._mini_survey.source_list
else:
Srcs = self.survey.source_list
if self._formulation == "EB":
n = self.mesh.nN
# return NotImplementedError
elif self._formulation == "HJ":
n = self.mesh.nC
q = np.zeros((n, len(Srcs)), order="F")
for i, src in enumerate(Srcs):
q[:, i] = src.eval(self)
return q
@property
def deleteTheseOnModelUpdate(self):
toDelete = super(BaseDCSimulation2D, self).deleteTheseOnModelUpdate
if self.sigmaMap is not None:
toDelete += ["_MnSigma", "_MnSigmaDerivMat", "_MccRhoi", "_MccRhoiDerivMat"]
if self.fix_Jmatrix:
return toDelete
if self._Jmatrix is not None:
toDelete += ["_Jmatrix"]
return toDelete
def _mini_survey_data(self, d_mini):
if self._mini_survey is not None:
out = d_mini[self._invs[0]] # AM
out[self._dipoles[0]] -= d_mini[self._invs[1]] # AN
out[self._dipoles[1]] -= d_mini[self._invs[2]] # BM
out[self._dipoles[0] & self._dipoles[1]] += d_mini[self._invs[3]] # BN
else:
out = d_mini
return out
def _mini_survey_dataT(self, v):
if self._mini_survey is not None:
out = np.zeros(self._mini_survey.nD)
# Need to use ufunc.at because there could be repeated indices
# That need to be properly handled.
np.add.at(out, self._invs[0], v) # AM
np.subtract.at(out, self._invs[1], v[self._dipoles[0]]) # AN
np.subtract.at(out, self._invs[2], v[self._dipoles[1]]) # BM
np.add.at(out, self._invs[3], v[self._dipoles[0] & self._dipoles[1]]) # BN
return out
else:
out = v
return out
####################################################
# Mass Matrices
####################################################
@property
def MnSigma(self):
"""
Node inner product matrix for \\(\\sigma\\). Used in the E-B
formulation
"""
# TODO: only works isotropic sigma
if getattr(self, "_MnSigma", None) is None:
sigma = self.sigma
vol = self.mesh.vol
self._MnSigma = sdiag(self.mesh.aveN2CC.T * (vol * sigma))
return self._MnSigma
@property
def MnSigmaDerivMat(self):
"""
Derivative of MnSigma with respect to the model
"""
if getattr(self, "_MnSigmaDerivMat", None) is None:
vol = self.mesh.vol
self._MnSigmaDerivMat = self.mesh.aveN2CC.T * sdiag(vol) * self.sigmaDeriv
return self._MnSigmaDerivMat
def MnSigmaDeriv(self, u, v, adjoint=False):
"""
Derivative of MnSigma with respect to the model times a vector (u)
"""
if v.ndim > 1:
u = u[:, None]
if self.storeInnerProduct:
if adjoint:
return self.MnSigmaDerivMat.T * (u * v)
else:
return u * (self.MnSigmaDerivMat * v)
else:
vol = self.mesh.vol
if v.ndim > 1:
vol = vol[:, None]
if adjoint:
return self.sigmaDeriv.T * (vol * (self.mesh.aveN2CC * (u * v)))
else:
dsig_dm_v = self.sigmaDeriv * v
return u * (self.mesh.aveN2CC.T * (vol * dsig_dm_v))
@property
def MccRhoi(self):
"""
Cell inner product matrix for \\(\\rho^{-1}\\). Used in the H-J
formulation
"""
# TODO: only works isotropic rho
if getattr(self, "_MccRhoi", None) is None:
self._MccRhoi = sdiag(self.mesh.vol / self.rho)
return self._MccRhoi
@property
def MccRhoiDerivMat(self):
"""
Derivative of MccRho with respect to the model
"""
if getattr(self, "_MccRhoiDerivMat", None) is None:
rho = self.rho
vol = self.mesh.vol
self._MccRhoiDerivMat = sdiag(vol * (-1.0 / rho ** 2)) * self.rhoDeriv
return self._MccRhoiDerivMat
def MccRhoiDeriv(self, u, v, adjoint=False):
"""
Derivative of :code:`MccRhoi` with respect to the model.
"""
if self.rhoMap is None:
return Zero()
if len(self.rho.shape) > 1:
if self.rho.shape[1] > self.mesh.dim:
raise NotImplementedError(
"Full anisotropy is not implemented for MccRhoiDeriv."
)
if self.storeInnerProduct:
if adjoint:
return self.MccRhoiDerivMat.T * (sdiag(u) * v)
else:
return sdiag(u) * (self.MccRhoiDerivMat * v)
else:
vol = self.mesh.vol
rho = self.rho
if adjoint:
return self.rhoDeriv.T * (sdiag(u * vol * (-1.0 / rho ** 2)) * v)
else:
return (sdiag(u * vol * (-1.0 / rho ** 2))) * (self.rhoDeriv * v)
class Simulation2DCellCentered(BaseDCSimulation2D):
"""
2.5D cell centered DC problem
"""
_solutionType = "phiSolution"
_formulation = "HJ" # CC potentials means J is on faces
fieldsPair = Fields2DCellCentered
fieldsPair_fwd = Fields3DCellCentered
bc_type = properties.StringChoice(
"Type of boundary condition to use for simulation. Note that Robin and Mixed "
"are equivalent.",
choices=["Dirichlet", "Neumann", "Robin", "Mixed"],
default="Robin",
)
def __init__(self, mesh, **kwargs):
BaseDCSimulation2D.__init__(self, mesh, **kwargs)
V = sdiag(self.mesh.cell_volumes)
self.Div = V @ self.mesh.face_divergence
self.Grad = self.Div.T
def getA(self, ky):
"""
Make the A matrix for the cell centered DC resistivity problem
A = D MfRhoI G
"""
# To handle Mixed boundary condition
self.setBC(ky=ky)
D = self.Div
G = self.Grad
if self.bc_type != "Dirichlet":
G = G - self._MBC[ky]
MfRhoI = self.MfRhoI
# Get resistivity rho
A = D * MfRhoI * G + ky ** 2 * self.MccRhoi
if self.bc_type == "Neumann":
A[0, 0] = A[0, 0] + 1.0
return A
def getADeriv(self, ky, u, v, adjoint=False):
D = self.Div
G = self.Grad
if self.bc_type != "Dirichlet":
G = G - self._MBC[ky]
if adjoint:
return self.MfRhoIDeriv(
G * u.flatten(), D.T * v, adjoint=adjoint
) + ky ** 2 * self.MccRhoiDeriv(u.flatten(), v, adjoint=adjoint)
else:
return D * self.MfRhoIDeriv(
G * u.flatten(), v, adjoint=adjoint
) + ky ** 2 * self.MccRhoiDeriv(u.flatten(), v, adjoint=adjoint)
def getRHS(self, ky):
"""
RHS for the DC problem
q
"""
RHS = self.getSourceTerm(ky)
return RHS
def getRHSDeriv(self, ky, src, v, adjoint=False):
"""
Derivative of the right hand side with respect to the model
"""
# TODO: add qDeriv for RHS depending on m
# qDeriv = src.evalDeriv(self, ky, adjoint=adjoint)
# return qDeriv
return Zero()
def setBC(self, ky=None):
if self.bc_type == "Dirichlet":
return
if getattr(self, "_MBC", None) is None:
self._MBC = {}
if ky in self._MBC:
# I have already created the BC matrix for this wavenumber
return
if self.bc_type == "Neumann":
alpha, beta, gamma = 0, 1, 0
else:
mesh = self.mesh
boundary_faces = mesh.boundary_faces
boundary_normals = mesh.boundary_face_outward_normals
n_bf = len(boundary_faces)
# Top gets 0 Neumann
alpha = np.zeros(n_bf)
beta = np.ones(n_bf)
gamma = 0
# assume a source point at the middle of the top of the mesh
middle = np.median(mesh.nodes, axis=0)
top_v = np.max(mesh.nodes[:, -1])
source_point = np.r_[middle[:-1], top_v]
r_vec = boundary_faces - source_point
r = np.linalg.norm(r_vec, axis=-1)
r_hat = r_vec / r[:, None]
r_dot_n = np.einsum("ij,ij->i", r_hat, boundary_normals)
# determine faces that are on the sides and bottom of the mesh...
if mesh._meshType.lower() == "tree":
not_top = boundary_faces[:, -1] != top_v
else:
# mesh faces are ordered, faces_x, faces_y, faces_z so...
is_b = make_boundary_bool(mesh.shape_faces_y)
is_t = | np.zeros(mesh.shape_faces_y, dtype=bool, order="F") | numpy.zeros |
# Copyright 2020 <NAME>
# Redistribution and use in source and binary forms, with or
# without modification, are permitted provided that the following
# conditions are met:
# 1. Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
# CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
# USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
# AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
# IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
# THE POSSIBILITY OF SUCH DAMAGE.
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
# The SEIHRD class simulates the SEIHRD model
# It can either run the simulation as a differential equation
# or the simulation as a stochastic process.
# For the differential equation it allows computing gradients
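# Minimal usage sketch (illustrative, default parameters assumed):
#   model = SEIHRD(num_sim=1)
#   for _ in range(int(10 / model.delta_t)):  # integrate 10 time units
#       model.update_mf()                     # forward Euler step of the ODE
#   print(model.mf)                           # compartment sizes [S, E, I, H, R, D]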
class SEIHRD:
def __init__(self, N=7.6E6, delta_t=0.1, num_sim=1,
beta=0.87, alpha=0.192, b=0.87,
gamma_0=0.189, lambda_0=0.0264, delta_0=0.002,
delta_1=0.031, gamma_1=0.1,
Sigma_0=[7.6E6 - 10, 0, 10, 0, 0, 0], k=100,
c=3500, d=1000000, mu=0.01,
o_1=0, w_0=0):
        # Parameters:
self.N = N # Total population size
self.delta_t = delta_t # Time step
self.num_sim = num_sim # Number of simulations
# (for running as a stochastic process)
self.beta = beta # Infection rate (this is the control parameter)
self.alpha = alpha # Exposed to infected rate
        self.gamma_0 = gamma_0 # Infected to recovered rate
        self.lambda_0 = lambda_0 # Infected to hospitalized rate
self.gamma_1 = gamma_1 # Hospitalized to recovered rate
self.delta_0 = delta_0 # Infected to dead rate
self.delta_1 = delta_1 # Hospitalized to dead rate
self.k = k # Coefficient for the control cost function
self.d = d # Coefficient for the death cost
self.c = c # Coefficient for hospitalization cost
self.mu = mu # Numerical coefficient for terminal state constraint
self.b = b # Base infection rate
self.w_0 = w_0 # Vaccination discovery rate
self.o_1 = o_1 # Vaccination dispense rate
# Initialize the empirical measure for stochastic processes
self.m = np.asarray(Sigma_0).reshape((6, 1)) \
* np.ones((1, self.num_sim))
# Initialize the mean field approximation for the differential equation
self.mf = np.asarray(Sigma_0)
# Initialize the costate
# (for computing gradients of the differential equation)
self.P = np.zeros(6)
# Initialize the cost
self.J = np.zeros(num_sim)
# Initialize the vaccine variable
self.u = np.zeros(num_sim)
# update_mf runs a single forward Euler update
# for the differential equation
def update_mf(self):
self.J = self.J + self.delta_t * self.cost()
self.mf = self.mf + self.delta_t * self.f()
# update_sim runs a single step to simulate
# as a stochastic process
def update_sim(self):
# We simulate each transition by a Poisson random variable
a = np.zeros((9, self.num_sim))
a[0] = np.random.poisson(
self.delta_t * self.beta * self.m[0] * self.m[2] / self.N,
self.num_sim) # I to E
a[1] = np.random.poisson(
self.delta_t * self.alpha * self.m[1], self.num_sim) # E to I
a[2] = np.random.poisson(
self.delta_t * self.lambda_0 * self.m[2], self.num_sim) # I to H
a[3] = np.random.poisson(
self.delta_t * self.gamma_0 * (self.m[2]), self.num_sim) # I to R
a[4] = np.random.poisson(
self.delta_t * self.delta_0 * (self.m[2]), self.num_sim) # I to D
a[5] = np.random.poisson(
self.delta_t * self.gamma_1 * self.m[3], self.num_sim) # H to R
a[6] = np.random.poisson(
self.delta_t * self.delta_1 * (self.m[3]), self.num_sim) # H to D
a[7] = np.random.poisson(
self.delta_t * self.w_0 * (1 - self.u), self.num_sim)
a[8] = np.random.poisson(
self.delta_t * self. N * self.o_1 * self.u, self.num_sim)
        # We make sure we never transition more than the population available
a[0] = np.minimum(a[0], self.m[0])
a[1] = np.minimum(a[1], self.m[1])
a[2] = np.minimum(a[2], self.m[2])
a[3] = np.minimum(a[3], self.m[2] - a[2])
a[4] = np.minimum(a[4], self.m[2] - a[2] - a[3])
a[5] = np.minimum(a[5], self.m[3])
a[6] = np.minimum(a[6], self.m[3] - a[5])
a[7] = np.minimum(a[7], 1)
a[8] = np.minimum(a[8], self.m[0] - a[0])
rhs = np.zeros((6, self.num_sim))
rhs[0] = -a[0] - a[8]
rhs[1] = a[0] - a[1]
rhs[2] = a[1] - a[2] - a[3] - a[4]
rhs[3] = a[2] - a[5] - a[6]
rhs[4] = a[3] + a[5]
rhs[5] = a[4] + a[6]
self.J = self.J + self.delta_t * self.cost_sim()
self.m = self.m + rhs
self.u = self.u + a[7]
# update_P computes one backward step of the discretized costate equation
def update_P(self):
self.P = self.P + self.delta_t * self.grad_H_state()
# f is the rhs of the differential equation
def f(self):
f = np.zeros(6)
f[0] = -self.beta * self.mf[0] * self.mf[2] / self.N
f[1] = self.beta * self.mf[0] * self.mf[2] / self.N \
- self.alpha * self.mf[1]
f[2] = self.alpha * self.mf[1] - \
(self.gamma_0 + self.lambda_0 + self.delta_0) * self.mf[2]
f[3] = self.lambda_0 * self.mf[2] - \
(self.gamma_1 + self.delta_1) * self.mf[3]
f[4] = self.gamma_0 * self.mf[2] + self.gamma_1 * self.mf[3]
f[5] = self.delta_0 * self.mf[2] + self.delta_1 * self.mf[3]
return f
# compute the gradient with respect to the control variable
def grad_f_control(self):
grad_f = np.zeros(6)
grad_f[0] = - self.mf[0] \
* self.mf[2] / self.N
grad_f[1] = self.mf[0] \
* self.mf[2] / self.N
return grad_f
# compute the Jacobian gradient with respect to the state variables
def grad_f_state(self):
grad_f = np.zeros((6, 6))
grad_f[0, 0] = -self.beta * self.mf[2] / self.N
grad_f[0, 2] = -self.beta * self.mf[0] / self.N
grad_f[1, 0] = self.beta * self.mf[2] / self.N
grad_f[1, 1] = - self.alpha
grad_f[1, 2] = self.beta * self.mf[0] / self.N
grad_f[2, 1] = self.alpha
grad_f[2, 2] = - (self.gamma_0 + self.lambda_0 + self.delta_0)
grad_f[3, 2] = self.lambda_0
grad_f[3, 3] = - (self.gamma_1 + self.delta_1)
grad_f[4, 2] = self.gamma_0
grad_f[4, 3] = self.gamma_1
grad_f[5, 2] = self.delta_0
grad_f[5, 3] = self.delta_1
return grad_f
# compute the cost for the differential equation
def cost(self):
cost = - self.k * self.N * \
(np.log(self.beta / self.b) - (
self.beta - self.b) / self.b) \
+ self.c * \
(self.mf[3] + 0.5 * self.mf[3] * self.mf[3] / self.N)
return cost
# compute the cost for the stochastic process
def cost_sim(self):
cost = - self.k * self.N * \
( | np.log(self.beta / self.b) | numpy.log |
#!/usr/bin/env python
from __future__ import division
__author__ = "<NAME>"
__version__ = "2.0"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__date__ = "August 2, 2013"
import unittest
import numpy as np
from pyhull.halfspace import Halfspace, HalfspaceIntersection
class HalfspaceTest(unittest.TestCase):
def test_halfspace(self):
h1 = Halfspace.from_hyperplane([[0,1,0], [1,0,0]], [1,1,-100], [2,2,2], True)
self.assertTrue(all(h1.normal == [0,0,-1]))
self.assertEqual(h1.offset, -100)
h2 = Halfspace.from_hyperplane([[0,1,0], [1,0,0]], [1,1,-100], [2,2,2], False)
self.assertEqual(h2.offset, 100)
def test_intersection(self):
h1 = Halfspace.from_hyperplane([[1,0,0], [0,1,0]], [1,1,1], [0.9, 0.9, 0.9], True)
h2 = Halfspace.from_hyperplane([[0,1,0], [0,0,1]], [1,1,1], [0.9, 0.9, 0.9], True)
h3 = Halfspace.from_hyperplane([[0,0,1], [1,0,0]], [1,1,1], [0.9, 0.9, 0.9], True)
h4 = Halfspace.from_hyperplane([[-1,0,1], [0,-1,1]], [1,1,0], [0.9, 0.9, 0.9], True)
hi = HalfspaceIntersection([h1, h2, h3, h4], [0.9, 0.9, 0.9])
self.assertTrue(np.allclose(np.sum(hi.vertices, axis = 0), [3,3,3]))
h5 = Halfspace.from_hyperplane([[1,0,0], [0,1,0]], [1,2,2], [0.9, 0.9, 0.9], True)
hi = HalfspaceIntersection([h5, h2, h3, h4], [0.9, 0.9, 0.9])
self.assertTrue(np.allclose(np.sum(hi.vertices, axis = 0), [2,2,6]))
for h, vs in zip(hi.halfspaces, hi.facets_by_halfspace):
for v in vs:
self.assertAlmostEqual(np.dot(h.normal, hi.vertices[v]) + h.offset, 0)
for v, hss in zip(hi.vertices, hi.facets_by_vertex):
for i in hss:
hs = hi.halfspaces[i]
self.assertAlmostEqual(np.dot(hs.normal, v) + hs.offset, 0)
def test_intersection2d(self):
h1 = Halfspace([1,0], -1)
h2 = Halfspace([0,1], 1)
h3 = Halfspace([-2,-1], 0)
h_redundant = Halfspace([1,0], -2)
hi = HalfspaceIntersection([h1, h2, h_redundant, h3], [0.9,-1.1])
for h, vs in zip(hi.halfspaces, hi.facets_by_halfspace):
for v in vs:
self.assertAlmostEqual(np.dot(h.normal, hi.vertices[v]) + h.offset, 0)
for v, hss in zip(hi.vertices, hi.facets_by_vertex):
for i in hss:
hs = hi.halfspaces[i]
self.assertAlmostEqual(np.dot(hs.normal, v) + hs.offset, 0)
#redundant halfspace should have no vertices
self.assertEqual(len(hi.facets_by_halfspace[2]), 0)
self.assertTrue(np.any(np.all(hi.vertices == np.array([1,-2]), axis=1)))
self.assertTrue(np.any(np.all(hi.vertices == | np.array([1,-1]) | numpy.array |
import StandardBody
import numpy as np
#File for defining approximate body part filters on the template body
#Each one of these filters takes as input a collection of points in millimeters
#and filters them down to only those points which lie within the specified body
#part boundary
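# Usage sketch (illustrative): for an (N, 3) array of points in millimeters,
#   mask = pixelSpaceBodyMask(maskLeftInnerArm, points)
# returns a boolean array over the points; the mask functions below flag points
# whose standard-body coordinates fall outside the given part's bounding region.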
def pixelSpaceBodyMask(maskFunc, points):
#First, convert the list of points into our
#stupid imaginary standard-body pixel-derived coords
x_t = StandardBody.xmin
y_t = StandardBody.ymin
p_t = np.array([x_t, y_t, 0.0])
x_c = StandardBody.xspread / (296.0)
y_c = StandardBody.yspread / (430.0)
p_c = np.array([x_c, y_c, 1.0])
standardPoints = (np.copy(points) - p_t) / p_c
filteredInds = maskFunc(standardPoints)
return filteredInds
def maskLeftInnerArm(reshaped):
relevant = reshaped[:, 0] > 240
relevant_two = reshaped[:, 0] < 220
relevant_three = reshaped[:, 1] > 132
relevant = np.logical_or(relevant, relevant_two)
relevant = np.logical_or(relevant, relevant_three)
return relevant
def maskRightInnerArm(reshaped):
relevant = reshaped[:, 0] > 70
relevant_two = reshaped[:, 0] < 50
relevant_three = reshaped[:, 1] > 132
relevant = np.logical_or(relevant, relevant_two)
relevant = np.logical_or(relevant, relevant_three)
return relevant
def maskLeftOuterArm(reshaped):
relevant = reshaped[:, 1] > 75
relevant_two = reshaped[:, 1] < 62
relevant_three = reshaped[:, 0] < 245
relevant = np.logical_or(relevant, relevant_two)
relevant = np.logical_or(relevant, relevant_three)
return relevant
def maskRightOuterArm(reshaped):
relevant = reshaped[:, 1] > 75
relevant_two = reshaped[:, 1] < 62
relevant_three = reshaped[:, 0] > 40
relevant = np.logical_or(relevant, relevant_two)
relevant = np.logical_or(relevant, relevant_three)
return relevant
def maskLeftUpperLeg(reshaped):
relevant = reshaped[:, 0] > 142
relevant_two = reshaped[:, 1] < 290
relevant_three = reshaped[:, 1] > 310
relevant = np.logical_or(relevant, relevant_two)
relevant = | np.logical_or(relevant, relevant_three) | numpy.logical_or |
from keras.layers import Dense
from keras.layers import Reshape, Conv2D, MaxPooling2D, UpSampling2D, Flatten
from keras.models import Sequential
from keras.datasets import mnist
import numpy as np
from dehydrated_vae import build_vae
#preprocess mnist dataset
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train = x_train.astype('float32') / 255.
x_test = x_test.astype('float32') / 255.
x_train = x_train.reshape((len(x_train), np.prod(x_train.shape[1:])))
x_test = x_test.reshape((len(x_test), np.prod(x_test.shape[1:])))
#create encoder and decoder
#NOTE: the encoder does not contain the latent mean/stddev layers
latent_size = 2
acti = "tanh"
encoder = Sequential([
Dense(256, input_shape=[28 * 28], activation=acti),
Dense(128, activation=acti)
])
decoder = Sequential([
Dense(256, input_shape=[latent_size]),
Dense(128, activation=acti),
Dense(28 * 28, activation="sigmoid")
])
#create the VAE
#the encoder will be wrapped in a new model containing the latent mean layer
vae, encoder, decoder, loss = \
build_vae(encoder, decoder, latent_size, kl_scale=1/np.prod(x_train.shape[1:]))
vae.compile(optimizer="adam", loss=loss)
vae.summary()
vae.fit(x_train, x_train, epochs=10)
#####
import matplotlib.pyplot as plt
n = 20
digit_size = 28
figure = | np.zeros((digit_size * n, digit_size * n)) | numpy.zeros |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Copyright (c) 2019 by Inria
Authored by <NAME> (<EMAIL>)
License agreement in LICENSE.txt
"""
import numpy as np
import torch
import torch.nn as nn
#%% The following implements the MCEM algorithm for audio-only VAE
class MCEM_algo:
def __init__(self, X=None, W=None, H=None, Z=None, v=None, decoder=None,
niter_MCEM=100, niter_MH=40, burnin=30, var_MH=0.01):
self.X = X # Mixture STFT, shape (F,N)
self.W = W # NMF dictionary matrix, shape (F, K)
self.H = H # NMF activation matrix, shape (K, N)
self.V = self.W @ self.H # Noise variance, shape (F, N)
self.Z = Z # Last draw of the latent variables, shape (D, N)
        self.decoder = decoder # VAE decoder, PyTorch model
self.niter_MCEM = niter_MCEM # Maximum number of MCEM iterations
self.niter_MH = niter_MH # Number of iterations for the MH algorithm
# of the E-step
self.burnin = burnin # Burn-in period for the MH algorithm of the
# E-step
self.var_MH = var_MH # Variance of the proposal distribution of the MH
# algorithm
self.a = np.ones((1,self.X.shape[1])) # gain parameters, shape (1,N)
# output of the decoder with self.Z as input, shape (F, N)
#removed transpose from line below
if torch.cuda.is_available(): self.Z_mapped_decoder = self.torch2num_cuda(self.decoder(self.num2torch_cuda(self.Z.T))).T
else: self.Z_mapped_decoder = self.torch2num(self.decoder(self.num2torch(self.Z.T))).T
self.speech_var = (self.Z_mapped_decoder*self.a) # apply gain
def num2torch(self, x):
y = torch.from_numpy(x.astype(np.float32))
return y
def torch2num(self, x):
y = x.detach().numpy()
return y
def num2torch_cuda(self, x):
y = torch.from_numpy(x.astype(np.float32))
return y.cuda()
def torch2num_cuda(self, x):
y = x.cpu().detach().numpy()
return y
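    # E-step sampler: random-walk Metropolis-Hastings over the latent codes Z.
    # Proposals are Gaussian perturbations of the current Z; the log acceptance
    # ratio compares the likelihood of |X|^2 under the old and proposed speech
    # variances and includes the standard-normal prior on Z.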
def metropolis_hastings(self, niter_MH=None, burnin=None):
if niter_MH==None:
niter_MH = self.niter_MH
if burnin==None:
burnin = self.burnin
F, N = self.X.shape # 258, 124 - power spec dim
D = self.Z.shape[0] # 32 - latent dim
Z_sampled = np.zeros((D, N, niter_MH - burnin)) # (32, 124, 10)
cpt = 0
for n in np.arange(niter_MH):
# self.Z - (32, 124)
# self.Z_prime - (32, 124)
#breakpoint()
Z_prime = self.Z + np.sqrt(self.var_MH)*np.random.randn(D,N)
if torch.cuda.is_available(): Z_prime_mapped_decoder = self.torch2num_cuda(self.decoder(self.num2torch_cuda(Z_prime.T))).T #(513, 124) #513 - input_dim
else: Z_prime_mapped_decoder = self.torch2num(self.decoder(self.num2torch(Z_prime.T))).T #(513, 124) #513 - input_dim
# shape (F, N)
speech_var_prime = (Z_prime_mapped_decoder*self.a) # apply gain #(513, 124)
# self.V and self.speech_var should be of same shape
#import pdb; pdb.set_trace()
acc_prob = ( np.sum( np.log(self.V + self.speech_var)
- np.log(self.V + speech_var_prime)
+ ( 1/(self.V + self.speech_var)
- 1/(self.V + speech_var_prime) )
* np.abs(self.X)**2, axis=0)
+ .5*np.sum( self.Z**2 - Z_prime**2 , axis=0) )
#import pdb; pdb.set_trace()
is_acc = np.log(np.random.rand(1,N)) < acc_prob
is_acc = is_acc.reshape((is_acc.shape[1],))
self.Z[:,is_acc] = Z_prime[:,is_acc]
if torch.cuda.is_available(): self.Z_mapped_decoder = self.torch2num_cuda(self.decoder(self.num2torch_cuda(self.Z.T))).T
else: self.Z_mapped_decoder = self.torch2num(self.decoder(self.num2torch(self.Z.T))).T
self.speech_var = self.Z_mapped_decoder*self.a
if n > burnin - 1:
Z_sampled[:,:,cpt] = self.Z
cpt += 1
return Z_sampled
def run(self, hop, wlen, win, tol=1e-4):
F, N = self.X.shape
X_abs_2 = np.abs(self.X)**2
cost_after_M_step = np.zeros((self.niter_MCEM, 1))
for n in np.arange(self.niter_MCEM):
# MC-Step
# print('Metropolis-Hastings')
Z_sampled = self.metropolis_hastings(self.niter_MH, self.burnin)
Z_sampled_mapped_decoder = | np.zeros((F, N, self.niter_MH-self.burnin)) | numpy.zeros |
import pytest
import numpy as np
from numpy.testing import assert_allclose, assert_equal
from sklearn.exceptions import NotFittedError
from graspy.cluster.gclust import GaussianCluster
from graspy.embed.ase import AdjacencySpectralEmbed
from graspy.simulations.simulations import sbm
def test_inputs():
# Generate random data
X = np.random.normal(0, 1, size=(100, 3))
# empty constructor
with pytest.raises(TypeError):
gclust = GaussianCluster()
# min_components < 1
with pytest.raises(ValueError):
gclust = GaussianCluster(min_components=0)
# min_components integer
with pytest.raises(TypeError):
gclust = GaussianCluster(min_components="1")
# max_components < min_components
with pytest.raises(ValueError):
gclust = GaussianCluster(min_components=1, max_components=0)
# max_components integer
with pytest.raises(TypeError):
gclust = GaussianCluster(min_components=1, max_components="1")
# covariance type is not an array, string or list
with pytest.raises(TypeError):
gclust = GaussianCluster(min_components=1, covariance_type=1)
# covariance type is not in ['spherical', 'diag', 'tied', 'full']
with pytest.raises(ValueError):
gclust = GaussianCluster(min_components=1, covariance_type="graspy")
# min_cluster > n_samples when max_cluster is None
with pytest.raises(ValueError):
gclust = GaussianCluster(1000)
gclust.fit(X)
with pytest.raises(ValueError):
gclust = GaussianCluster(1000)
gclust.fit_predict(X)
# max_cluster > n_samples when max_cluster is not None
with pytest.raises(ValueError):
gclust = GaussianCluster(10, 1001)
gclust.fit(X)
with pytest.raises(ValueError):
gclust = GaussianCluster(10, 1001)
gclust.fit_predict(X)
# min_cluster > n_samples when max_cluster is None
with pytest.raises(ValueError):
gclust = GaussianCluster(1000)
gclust.fit(X)
with pytest.raises(ValueError):
gclust = GaussianCluster(10, 1001)
gclust.fit_predict(X)
# min_cluster > n_samples when max_cluster is not None
with pytest.raises(ValueError):
gclust = GaussianCluster(1000, 1001)
gclust.fit(X)
with pytest.raises(ValueError):
gclust = GaussianCluster(1000, 1001)
gclust.fit_predict(X)
def test_predict_without_fit():
# Generate random data
X = np.random.normal(0, 1, size=(100, 3))
with pytest.raises(NotFittedError):
gclust = GaussianCluster(min_components=2)
gclust.predict(X)
def test_no_y():
np.random.seed(2)
n = 100
d = 3
X1 = np.random.normal(2, 0.5, size=(n, d))
X2 = np.random.normal(-2, 0.5, size=(n, d))
X = np.vstack((X1, X2))
gclust = GaussianCluster(min_components=5)
gclust.fit(X)
bics = gclust.bic_
assert_equal(bics.iloc[:, 0].values.argmin(), 1)
def test_outputs():
"""
Easily separable two gaussian problem.
"""
np.random.seed(2)
n = 100
d = 3
num_sims = 10
for _ in range(num_sims):
X1 = np.random.normal(2, 0.5, size=(n, d))
X2 = np.random.normal(-2, 0.5, size=(n, d))
X = np.vstack((X1, X2))
y = np.repeat([0, 1], n)
gclust = GaussianCluster(min_components=5)
gclust.fit(X, y)
bics = gclust.bic_
aris = gclust.ari_
bic_argmin = bics.iloc[:, 0].values.argmin()
# Assert that the two cluster model is the best
assert_equal(bic_argmin, 1)
# The plus one is to adjust the index by min_components
assert_allclose(aris.iloc[:, 0][bic_argmin + 1], 1)
def test_bic():
"""
Expect 3 clusters from a 3 block model
"""
np.random.seed(3)
num_sims = 10
# Generate adjacency and labels
n = 50
n_communites = [n, n, n]
p = np.array([[0.8, 0.3, 0.2], [0.3, 0.8, 0.3], [0.2, 0.3, 0.8]])
y = np.repeat([1, 2, 3], repeats=n)
for _ in range(num_sims):
A = sbm(n=n_communites, p=p)
# Embed to get latent positions
ase = AdjacencySpectralEmbed(n_components=5)
X_hat = ase.fit_transform(A)
# Compute clusters
gclust = GaussianCluster(min_components=10)
gclust.fit(X_hat, y)
bics = gclust.bic_
aris = gclust.ari_
bic_argmin = bics.iloc[:, 0].values.argmin()
assert_equal(2, bic_argmin)
# The plus one is to adjust the index by min_components
assert_allclose(1, aris.iloc[:, 0][bic_argmin + 1])
def test_covariances():
"""
Easily separable two gaussian problem.
"""
| np.random.seed(2) | numpy.random.seed |