import numpy as np
import torch
def create_data(raw_data=None, raw_labels=None, permutation=None, ratio=None, preprocess=None, max_length=None, augmented=None, padding='two'):
## Input:
# raw_data: waveform data np.ndarray
# raw_labels: label data np.ndarray
# permutation: fixed permutation when splitting train and valid data
# ratio: splitting ratio when splitting train and valid data
# An Integer: 19 for 90% train and 10% valid
# preprocess: 'zero' for zero padding; 'reflection' for reflection padding and simple normalization after it
# max_length: max number of length you want to keep or pad to for an individual waveform datum
# augmented: Boolean, augment train data to make it balance or not
# padding: 'two' for two sides padding; 'one' for one side padding
## Output:
# train_data(1*LENGTH torch.FloatTensor), train_label(LENGTH torch.LongTensor), val_data, val_label
if preprocess == 'reflection':
max_length = 10100
data = np.zeros((len(raw_data), max_length))
if preprocess == 'zero':
for i in range(len(raw_data)):
if len(raw_data[i]) >= max_length:
data[i] = raw_data[i][:max_length]
else:
remainder = max_length - len(raw_data[i])
if padding == 'two':
data[i] = np.pad(raw_data[i], (int(remainder / 2), remainder - int(remainder / 2)), 'constant', constant_values=0)
elif padding == 'one':
data[i] = np.pad(raw_data[i], (0, remainder), 'constant',
constant_values=0)
else:
for i in range(len(raw_data)):
if len(raw_data[i]) >= max_length:
data[i] = raw_data[i][:max_length]
else:
b = raw_data[i][0:(max_length - len(raw_data[i]))]
goal = np.hstack((raw_data[i], b))
while len(goal) != max_length:
b = raw_data[i][0:(max_length - len(goal))]
goal = np.hstack((goal, b))
data[i] = goal
data = (data - data.mean())/(data.std())
data = data[permutation]
labels = raw_labels[permutation]
if ratio == 19:
mid = int(len(raw_data)*0.9)
else:
mid = int(len(raw_data)*0.7)
train_data = data[:mid]
val_data = data[mid:]
train_label = labels[:mid]
val_label = labels[mid:]
if augmented == True:
# replicate noisy class 5 times
temp_data = np.tile(train_data[train_label == 3], (5,1))
temp_label = np.tile(train_label[train_label == 3], 5)
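# --- Illustrative aside (not part of the original snippet): a minimal, self-contained
# sketch of the np.tile-based class balancing used above, on toy arrays. All names here
# are hypothetical; class 3 stands in for the under-represented "noisy" class.
_toy_data = np.arange(12, dtype=float).reshape(6, 2)        # 6 samples, 2 features
_toy_label = np.array([0, 3, 1, 3, 2, 0])                   # class 3 is rare
_extra_data = np.tile(_toy_data[_toy_label == 3], (5, 1))   # replicate rare rows 5x
_extra_label = np.tile(_toy_label[_toy_label == 3], 5)
_balanced_data = np.concatenate([_toy_data, _extra_data], axis=0)
_balanced_label = np.concatenate([_toy_label, _extra_label])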
from PIL import ImageFont
import cv2
import numpy as np
def draw_text(img, text, pos, font_size, color):
if img is None or not text:
print('<< No image array or text >>')
return
if type(text) is not str:
text = str(text)
x = pos[0]
y = pos[1]
img_height, img_width = img.shape[0], img.shape[1]
# if img.ndim == 3:
# img_height, img_width, _ = img.shape
# else : # grayscale
# img_height, img_width = img.shape
#check range
if not 0 <= x < img_width or not 0 <= y < img_height:
print('<< Text position out of range >>')
return
# get font bitmap
font = ImageFont.truetype("msjh.ttc", font_size, encoding="utf-8")
font_bitmap = font.getmask(text)
font_width, font_height = font_bitmap.size
#print("font: ", font_width, font_height)
font_img = np.asarray(font_bitmap, np.uint8)
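# --- Illustrative aside (not from the original snippet, whose body is truncated above):
# one common way to blend a PIL font bitmap onto an OpenCV BGR image. This is only a
# sketch of the general technique; it assumes `font_img` has been reshaped to a 2-D
# uint8 coverage mask of shape (height, width) and that `color` is a (B, G, R) tuple.
def _blend_font_bitmap(img, font_img, x, y, color):
    h, w = font_img.shape[:2]
    h = min(h, img.shape[0] - y)                             # clip text to image bounds
    w = min(w, img.shape[1] - x)
    mask = font_img[:h, :w].astype(np.float32) / 255.0       # per-pixel coverage in 0..1
    roi = img[y:y + h, x:x + w].astype(np.float32)
    for c in range(3):                                       # alpha-blend each channel
        roi[:, :, c] = roi[:, :, c] * (1.0 - mask) + float(color[c]) * mask
    img[y:y + h, x:x + w] = roi.astype(np.uint8)
    return img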
# -*- mode: python; coding: utf-8 -*-
# Copyright (c) 2019 Radio Astronomy Software Group
# Licensed under the 2-clause BSD License
from __future__ import division
import unittest
import nose.tools as nt
import os
import numpy as np
import pyuvdata.tests as uvtest
from pyuvdata import UVData
from pyuvdata import UVCal
from pyuvdata.data import DATA_PATH
from pyuvdata import UVFlag
from pyuvdata.uvflag import lst_from_uv
from pyuvdata.uvflag import flags2waterfall
from pyuvdata.uvflag import and_rows_cols
from pyuvdata import version as uvversion
import shutil
import copy
import six
test_d_file = os.path.join(DATA_PATH, 'zen.2457698.40355.xx.HH.uvcAA')
test_c_file = os.path.join(DATA_PATH, 'zen.2457555.42443.HH.uvcA.omni.calfits')
test_f_file = test_d_file + '.testuvflag.h5'
test_outfile = os.path.join(DATA_PATH, 'test', 'outtest_uvflag.h5')
pyuvdata_version_str = uvversion.version + '.'
if uvversion.git_hash != '':
pyuvdata_version_str += (' Git origin: ' + uvversion.git_origin
+ '. Git hash: ' + uvversion.git_hash
+ '. Git branch: ' + uvversion.git_branch
+ '. Git description: ' + uvversion.git_description + '.')
def test_init_UVData():
uv = UVData()
uv.read_miriad(test_d_file)
uvf = UVFlag(uv, history='I made a UVFlag object', label='test')
nt.assert_true(uvf.metric_array.shape == uv.flag_array.shape)
nt.assert_true(np.all(uvf.metric_array == 0))
nt.assert_true(uvf.weights_array.shape == uv.flag_array.shape)
nt.assert_true(np.all(uvf.weights_array == 1))
nt.assert_true(uvf.type == 'baseline')
nt.assert_true(uvf.mode == 'metric')
nt.assert_true(np.all(uvf.time_array == uv.time_array))
nt.assert_true(np.all(uvf.lst_array == uv.lst_array))
nt.assert_true(np.all(uvf.freq_array == uv.freq_array[0]))
nt.assert_true(np.all(uvf.polarization_array == uv.polarization_array))
nt.assert_true(np.all(uvf.baseline_array == uv.baseline_array))
nt.assert_true(np.all(uvf.ant_1_array == uv.ant_1_array))
nt.assert_true(np.all(uvf.ant_2_array == uv.ant_2_array))
nt.assert_true('I made a UVFlag object' in uvf.history)
nt.assert_true('Flag object with type "baseline"' in uvf.history)
nt.assert_true(pyuvdata_version_str in uvf.history)
nt.assert_true(uvf.label == 'test')
def test_init_UVData_copy_flags():
uv = UVData()
uv.read_miriad(test_d_file)
uvf = uvtest.checkWarnings(UVFlag, [uv], {'copy_flags': True, 'mode': 'metric'},
nwarnings=1, message='Copying flags to type=="baseline"')
nt.assert_false(hasattr(uvf, 'metric_array')) # Should be flag due to copy flags
nt.assert_true(np.array_equal(uvf.flag_array, uv.flag_array))
nt.assert_true(uvf.weights_array.shape == uv.flag_array.shape)
nt.assert_true(np.all(uvf.weights_array == 1))
nt.assert_true(uvf.type == 'baseline')
nt.assert_true(uvf.mode == 'flag')
nt.assert_true(np.all(uvf.time_array == uv.time_array))
nt.assert_true(np.all(uvf.lst_array == uv.lst_array))
nt.assert_true(np.all(uvf.freq_array == uv.freq_array[0]))
nt.assert_true(np.all(uvf.polarization_array == uv.polarization_array))
nt.assert_true(np.all(uvf.baseline_array == uv.baseline_array))
nt.assert_true(np.all(uvf.ant_1_array == uv.ant_1_array))
nt.assert_true(np.all(uvf.ant_2_array == uv.ant_2_array))
nt.assert_true('Flag object with type "baseline"' in uvf.history)
nt.assert_true(pyuvdata_version_str in uvf.history)
def test_init_UVCal():
uvc = UVCal()
uvc.read_calfits(test_c_file)
uvf = UVFlag(uvc)
nt.assert_true(uvf.metric_array.shape == uvc.flag_array.shape)
nt.assert_true(np.all(uvf.metric_array == 0))
nt.assert_true(uvf.weights_array.shape == uvc.flag_array.shape)
nt.assert_true(np.all(uvf.weights_array == 1))
nt.assert_true(uvf.type == 'antenna')
nt.assert_true(uvf.mode == 'metric')
nt.assert_true(np.all(uvf.time_array == uvc.time_array))
lst = lst_from_uv(uvc)
nt.assert_true(np.all(uvf.lst_array == lst))
nt.assert_true(np.all(uvf.freq_array == uvc.freq_array[0]))
nt.assert_true(np.all(uvf.polarization_array == uvc.jones_array))
nt.assert_true(np.all(uvf.ant_array == uvc.ant_array))
nt.assert_true('Flag object with type "antenna"' in uvf.history)
nt.assert_true(pyuvdata_version_str in uvf.history)
def test_init_cal_copy_flags():
uv = UVCal()
uv.read_calfits(test_c_file)
uvf = uvtest.checkWarnings(UVFlag, [uv], {'copy_flags': True, 'mode': 'metric'},
nwarnings=1, message='Copying flags to type=="antenna"')
nt.assert_false(hasattr(uvf, 'metric_array')) # Should be flag due to copy flags
nt.assert_true(np.array_equal(uvf.flag_array, uv.flag_array))
nt.assert_true(uvf.weights_array.shape == uv.flag_array.shape)
nt.assert_true(uvf.type == 'antenna')
nt.assert_true(uvf.mode == 'flag')
nt.assert_true(np.all(uvf.time_array == np.unique(uv.time_array)))
nt.assert_true(np.all(uvf.freq_array == uv.freq_array[0]))
nt.assert_true(np.all(uvf.polarization_array == uv.jones_array))
nt.assert_true(pyuvdata_version_str in uvf.history)
def test_init_waterfall_uvd():
uv = UVData()
uv.read_miriad(test_d_file)
uvf = UVFlag(uv, waterfall=True)
nt.assert_true(uvf.metric_array.shape == (uv.Ntimes, uv.Nfreqs, uv.Npols))
nt.assert_true(np.all(uvf.metric_array == 0))
nt.assert_true(uvf.weights_array.shape == (uv.Ntimes, uv.Nfreqs, uv.Npols))
nt.assert_true(np.all(uvf.weights_array == 1))
nt.assert_true(uvf.type == 'waterfall')
nt.assert_true(uvf.mode == 'metric')
nt.assert_true(np.all(uvf.time_array == np.unique(uv.time_array)))
nt.assert_true(np.all(uvf.lst_array == np.unique(uv.lst_array)))
nt.assert_true(np.all(uvf.freq_array == uv.freq_array[0]))
nt.assert_true(np.all(uvf.polarization_array == uv.polarization_array))
nt.assert_true('Flag object with type "waterfall"' in uvf.history)
nt.assert_true(pyuvdata_version_str in uvf.history)
def test_init_waterfall_uvc():
uv = UVCal()
uv.read_calfits(test_c_file)
uvf = UVFlag(uv, waterfall=True)
nt.assert_true(uvf.metric_array.shape == (uv.Ntimes, uv.Nfreqs, uv.Njones))
nt.assert_true(np.all(uvf.metric_array == 0))
nt.assert_true(uvf.weights_array.shape == (uv.Ntimes, uv.Nfreqs, uv.Njones))
nt.assert_true(np.all(uvf.weights_array == 1))
nt.assert_true(uvf.type == 'waterfall')
nt.assert_true(uvf.mode == 'metric')
nt.assert_true(np.all(uvf.time_array == np.unique(uv.time_array)))
nt.assert_true(np.all(uvf.freq_array == uv.freq_array[0]))
nt.assert_true(np.all(uvf.polarization_array == uv.jones_array))
nt.assert_true('Flag object with type "waterfall"' in uvf.history)
nt.assert_true(pyuvdata_version_str in uvf.history)
def test_init_waterfall_flag():
uv = UVCal()
uv.read_calfits(test_c_file)
uvf = UVFlag(uv, waterfall=True, mode='flag')
nt.assert_true(uvf.flag_array.shape == (uv.Ntimes, uv.Nfreqs, uv.Njones))
nt.assert_true(not np.any(uvf.flag_array))
nt.assert_true(uvf.weights_array.shape == (uv.Ntimes, uv.Nfreqs, uv.Njones))
nt.assert_true(np.all(uvf.weights_array == 1))
nt.assert_true(uvf.type == 'waterfall')
nt.assert_true(uvf.mode == 'flag')
nt.assert_true(np.all(uvf.time_array == np.unique(uv.time_array)))
nt.assert_true(np.all(uvf.freq_array == uv.freq_array[0]))
nt.assert_true(np.all(uvf.polarization_array == uv.jones_array))
nt.assert_true('Flag object with type "waterfall"' in uvf.history)
nt.assert_true(pyuvdata_version_str in uvf.history)
def test_init_waterfall_copy_flags():
uv = UVCal()
uv.read_calfits(test_c_file)
uvf = uvtest.checkWarnings(UVFlag, [uv], {'copy_flags': True, 'mode': 'flag', 'waterfall': True},
nwarnings=1, message='Copying flags into waterfall')
nt.assert_false(hasattr(uvf, 'flag_array')) # Should be metric due to copy flags
nt.assert_true(uvf.metric_array.shape == (uv.Ntimes, uv.Nfreqs, uv.Njones))
nt.assert_true(uvf.weights_array.shape == (uv.Ntimes, uv.Nfreqs, uv.Njones))
nt.assert_true(uvf.type == 'waterfall')
nt.assert_true(uvf.mode == 'metric')
nt.assert_true(np.all(uvf.time_array == np.unique(uv.time_array)))
nt.assert_true(np.all(uvf.freq_array == uv.freq_array[0]))
nt.assert_true(np.all(uvf.polarization_array == uv.jones_array))
nt.assert_true('Flag object with type "waterfall"' in uvf.history)
nt.assert_true(pyuvdata_version_str in uvf.history)
@uvtest.skipIf_no_h5py
def test_read_write_loop():
uv = UVData()
uv.read_miriad(test_d_file)
uvf = UVFlag(uv, label='test')
uvf.write(test_outfile, clobber=True)
uvf2 = UVFlag(test_outfile)
# Update history to match expected additions that were made
uvf.history += 'Written by ' + pyuvdata_version_str
uvf.history += ' Read by ' + pyuvdata_version_str
nt.assert_true(uvf.__eq__(uvf2, check_history=True))
@uvtest.skipIf_no_h5py
def test_read_write_ant():
uv = UVCal()
uv.read_calfits(test_c_file)
uvf = UVFlag(uv, mode='flag', label='test')
uvf.write(test_outfile, clobber=True)
uvf2 = UVFlag(test_outfile)
# Update history to match expected additions that were made
uvf.history += 'Written by ' + pyuvdata_version_str
uvf.history += ' Read by ' + pyuvdata_version_str
nt.assert_true(uvf.__eq__(uvf2, check_history=True))
@uvtest.skipIf_no_h5py
def test_read_write_nocompress():
uv = UVData()
uv.read_miriad(test_d_file)
uvf = UVFlag(uv, label='test')
uvf.write(test_outfile, clobber=True, data_compression=None)
uvf2 = UVFlag(test_outfile)
# Update history to match expected additions that were made
uvf.history += 'Written by ' + pyuvdata_version_str
uvf.history += ' Read by ' + pyuvdata_version_str
nt.assert_true(uvf.__eq__(uvf2, check_history=True))
@uvtest.skipIf_no_h5py
def test_read_write_nocompress_flag():
uv = UVData()
uv.read_miriad(test_d_file)
uvf = UVFlag(uv, mode='flag', label='test')
uvf.write(test_outfile, clobber=True, data_compression=None)
uvf2 = UVFlag(test_outfile)
# Update history to match expected additions that were made
uvf.history += 'Written by ' + pyuvdata_version_str
uvf.history += ' Read by ' + pyuvdata_version_str
nt.assert_true(uvf.__eq__(uvf2, check_history=True))
@uvtest.skipIf_no_h5py
def test_init_list():
uv = UVData()
uv.read_miriad(test_d_file)
uv.time_array -= 1
uvf = UVFlag([uv, test_f_file])
uvf1 = UVFlag(uv)
uvf2 = UVFlag(test_f_file)
nt.assert_true(np.array_equal(np.concatenate((uvf1.metric_array, uvf2.metric_array), axis=0),
uvf.metric_array))
nt.assert_true(np.array_equal(np.concatenate((uvf1.weights_array, uvf2.weights_array), axis=0),
uvf.weights_array))
nt.assert_true(np.array_equal(np.concatenate((uvf1.time_array, uvf2.time_array)),
uvf.time_array))
nt.assert_true(np.array_equal(np.concatenate((uvf1.baseline_array, uvf2.baseline_array)),
uvf.baseline_array))
nt.assert_true(np.array_equal(np.concatenate((uvf1.ant_1_array, uvf2.ant_1_array)),
uvf.ant_1_array))
nt.assert_true(np.array_equal(np.concatenate((uvf1.ant_2_array, uvf2.ant_2_array)),
uvf.ant_2_array))
nt.assert_true(uvf.mode == 'metric')
nt.assert_true(np.all(uvf.freq_array == uv.freq_array[0]))
nt.assert_true(np.all(uvf.polarization_array == uv.polarization_array))
@uvtest.skipIf_no_h5py
def test_read_list():
uv = UVData()
uv.read_miriad(test_d_file)
uv.time_array -= 1
uvf = UVFlag(uv)
uvf.write(test_outfile, clobber=True)
uvf.read([test_outfile, test_f_file])
uvf1 = UVFlag(uv)
uvf2 = UVFlag(test_f_file)
nt.assert_true(np.array_equal(np.concatenate((uvf1.metric_array, uvf2.metric_array), axis=0),
uvf.metric_array))
nt.assert_true(np.array_equal(np.concatenate((uvf1.weights_array, uvf2.weights_array), axis=0),
uvf.weights_array))
nt.assert_true(np.array_equal(np.concatenate((uvf1.time_array, uvf2.time_array)),
uvf.time_array))
nt.assert_true(np.array_equal(np.concatenate((uvf1.baseline_array, uvf2.baseline_array)),
uvf.baseline_array))
nt.assert_true(np.array_equal(np.concatenate((uvf1.ant_1_array, uvf2.ant_1_array)),
uvf.ant_1_array))
nt.assert_true(np.array_equal(np.concatenate((uvf1.ant_2_array, uvf2.ant_2_array)),
uvf.ant_2_array))
nt.assert_true(uvf.mode == 'metric')
nt.assert_true(np.all(uvf.freq_array == uv.freq_array[0]))
nt.assert_true(np.all(uvf.polarization_array == uv.polarization_array))
@uvtest.skipIf_no_h5py
def test_read_error():
nt.assert_raises(IOError, UVFlag, 'foo')
@uvtest.skipIf_no_h5py
def test_read_change_type():
uv = UVData()
uv.read_miriad(test_d_file)
uvc = UVCal()
uvc.read_calfits(test_c_file)
uvf = UVFlag(uvc)
uvf.write(test_outfile, clobber=True)
nt.assert_true(hasattr(uvf, 'ant_array'))
uvf.read(test_f_file)
nt.assert_false(hasattr(uvf, 'ant_array'))
nt.assert_true(hasattr(uvf, 'baseline_array'))
nt.assert_true(hasattr(uvf, 'ant_1_array'))
nt.assert_true(hasattr(uvf, 'ant_2_array'))
uvf.read(test_outfile)
nt.assert_true(hasattr(uvf, 'ant_array'))
nt.assert_false(hasattr(uvf, 'baseline_array'))
nt.assert_false(hasattr(uvf, 'ant_1_array'))
nt.assert_false(hasattr(uvf, 'ant_2_array'))
@uvtest.skipIf_no_h5py
def test_read_change_mode():
uv = UVData()
uv.read_miriad(test_d_file)
uvf = UVFlag(uv, mode='flag')
nt.assert_true(hasattr(uvf, 'flag_array'))
nt.assert_false(hasattr(uvf, 'metric_array'))
uvf.write(test_outfile, clobber=True)
uvf.read(test_f_file)
nt.assert_true(hasattr(uvf, 'metric_array'))
nt.assert_false(hasattr(uvf, 'flag_array'))
uvf.read(test_outfile)
nt.assert_true(hasattr(uvf, 'flag_array'))
nt.assert_false(hasattr(uvf, 'metric_array'))
@uvtest.skipIf_no_h5py
def test_write_no_clobber():
uvf = UVFlag(test_f_file)
nt.assert_raises(ValueError, uvf.write, test_f_file)
def test_lst_from_uv():
uv = UVData()
uv.read_miriad(test_d_file)
lst_array = lst_from_uv(uv)
nt.assert_true(np.allclose(uv.lst_array, lst_array))
def test_lst_from_uv_error():
nt.assert_raises(ValueError, lst_from_uv, 4)
@uvtest.skipIf_no_h5py
def test_add():
uv1 = UVFlag(test_f_file)
uv2 = copy.deepcopy(uv1)
uv2.time_array += 1 # Add a day
uv3 = uv1 + uv2
nt.assert_true(np.array_equal(np.concatenate((uv1.time_array, uv2.time_array)),
uv3.time_array))
nt.assert_true(np.array_equal(np.concatenate((uv1.baseline_array, uv2.baseline_array)),
uv3.baseline_array))
nt.assert_true(np.array_equal(np.concatenate((uv1.ant_1_array, uv2.ant_1_array)),
uv3.ant_1_array))
nt.assert_true(np.array_equal(np.concatenate((uv1.ant_2_array, uv2.ant_2_array)),
uv3.ant_2_array))
nt.assert_true(np.array_equal(np.concatenate((uv1.lst_array, uv2.lst_array)),
uv3.lst_array))
nt.assert_true(np.array_equal(np.concatenate((uv1.metric_array, uv2.metric_array), axis=0),
uv3.metric_array))
nt.assert_true(np.array_equal(np.concatenate((uv1.weights_array, uv2.weights_array), axis=0),
uv3.weights_array))
nt.assert_true(np.array_equal(uv1.freq_array, uv3.freq_array))
nt.assert_true(uv3.type == 'baseline')
nt.assert_true(uv3.mode == 'metric')
nt.assert_true(np.array_equal(uv1.polarization_array, uv3.polarization_array))
nt.assert_true('Data combined along time axis with ' + pyuvdata_version_str in uv3.history)
@uvtest.skipIf_no_h5py
def test_add_baseline():
uv1 = UVFlag(test_f_file)
uv2 = copy.deepcopy(uv1)
uv2.baseline_array += 100 # Arbitrary
uv3 = uv1.__add__(uv2, axis='baseline')
nt.assert_true(np.array_equal(np.concatenate((uv1.time_array, uv2.time_array)),
uv3.time_array))
nt.assert_true(np.array_equal(np.concatenate((uv1.baseline_array, uv2.baseline_array)),
uv3.baseline_array))
nt.assert_true(np.array_equal(np.concatenate((uv1.ant_1_array, uv2.ant_1_array)),
uv3.ant_1_array))
nt.assert_true(np.array_equal(np.concatenate((uv1.ant_2_array, uv2.ant_2_array)),
uv3.ant_2_array))
nt.assert_true(np.array_equal(np.concatenate((uv1.lst_array, uv2.lst_array)),
uv3.lst_array))
nt.assert_true(np.array_equal(np.concatenate((uv1.metric_array, uv2.metric_array), axis=0),
uv3.metric_array))
nt.assert_true(np.array_equal(np.concatenate((uv1.weights_array, uv2.weights_array), axis=0),
uv3.weights_array))
nt.assert_true(np.array_equal(uv1.freq_array, uv3.freq_array))
nt.assert_true(uv3.type == 'baseline')
nt.assert_true(uv3.mode == 'metric')
nt.assert_true(np.array_equal(uv1.polarization_array, uv3.polarization_array))
nt.assert_true('Data combined along baseline axis with ' + pyuvdata_version_str in uv3.history)
def test_add_antenna():
uvc = UVCal()
uvc.read_calfits(test_c_file)
uv1 = UVFlag(uvc)
uv2 = copy.deepcopy(uv1)
uv2.ant_array += 100 # Arbitrary
uv3 = uv1.__add__(uv2, axis='antenna')
nt.assert_true(np.array_equal(np.concatenate((uv1.ant_array, uv2.ant_array)),
uv3.ant_array))
nt.assert_true(np.array_equal(np.concatenate((uv1.metric_array, uv2.metric_array), axis=0),
uv3.metric_array))
nt.assert_true(np.array_equal(np.concatenate((uv1.weights_array, uv2.weights_array), axis=0),
uv3.weights_array))
nt.assert_true(np.array_equal(uv1.freq_array, uv3.freq_array))
nt.assert_true(np.array_equal(uv1.time_array, uv3.time_array))
nt.assert_true(np.array_equal(uv1.lst_array, uv3.lst_array))
nt.assert_true(uv3.type == 'antenna')
nt.assert_true(uv3.mode == 'metric')
nt.assert_true(np.array_equal(uv1.polarization_array, uv3.polarization_array))
nt.assert_true('Data combined along antenna axis with ' + pyuvdata_version_str in uv3.history)
@uvtest.skipIf_no_h5py
def test_add_frequency():
uv1 = UVFlag(test_f_file)
uv2 = copy.deepcopy(uv1)
uv2.freq_array += 1e4 # Arbitrary
uv3 = uv1.__add__(uv2, axis='frequency')
nt.assert_true(np.array_equal(np.concatenate((uv1.freq_array, uv2.freq_array)),
uv3.freq_array))
nt.assert_true(np.array_equal(uv1.time_array, uv3.time_array))
nt.assert_true(np.array_equal(uv1.baseline_array, uv3.baseline_array))
nt.assert_true(np.array_equal(uv1.ant_1_array, uv3.ant_1_array))
nt.assert_true(np.array_equal(uv1.ant_2_array, uv3.ant_2_array))
nt.assert_true(np.array_equal(uv1.lst_array, uv3.lst_array))
nt.assert_true(np.array_equal(np.concatenate((uv1.metric_array, uv2.metric_array), axis=2),
uv3.metric_array))
nt.assert_true(np.array_equal(np.concatenate((uv1.weights_array, uv2.weights_array), axis=2),
uv3.weights_array))
"""
"""
from numbers import Real
from typing import Union, Optional, Any, List, Tuple, Sequence
import numpy as np
from easydict import EasyDict as ED
from utils import dict_to_str
from cfg import BaseCfg
__all__ = [
"CPSC2020_loss",
"CPSC2020_score",
"eval_score",
]
def CPSC2020_loss(y_true:np.ndarray, y_pred:np.ndarray, y_indices:np.ndarray, dtype:type=str, verbose:int=0) -> int:
""" finished, updated with the latest (updated on 2020.8.31) official function
Parameters:
-----------
y_true: ndarray,
array of ground truth of beat types
y_pred: ndarray,
array of predictions of beat types
y_indices: ndarray,
indices of beat (rpeak) in the original ecg signal
dtype: type, default str,
dtype of `y_true` and `y_pred`
Returns:
--------
total_loss: int,
the total loss of all ectopic beat types (SPB, PVC)
"""
classes = ['S', 'V']
truth_arr = {}
pred_arr = {}
if dtype == str:
for c in classes:
truth_arr[c] = y_indices[np.where(y_true==c)[0]]
pred_arr[c] = y_indices[np.where(y_pred==c)[0]]
elif dtype == int:
for c in classes:
truth_arr[c] = y_indices[np.where(y_true==BaseCfg.class_map[c])[0]]
pred_arr[c] = y_indices[np.where(y_pred==BaseCfg.class_map[c])[0]]
true_positive = {c: 0 for c in classes}
for c in classes:
for tc in truth_arr[c]:
pc = np.where(abs(pred_arr[c]-tc) <= BaseCfg.bias_thr)[0]
if pc.size > 0:
true_positive[c] += 1
false_positive = {
c: len(pred_arr[c]) - true_positive[c] for c in classes
}
false_negative = {
c: len(truth_arr[c]) - true_positive[c] for c in classes
}
false_positive_loss = {c: 1 for c in classes}
false_negative_loss = {c: 5 for c in classes}
if verbose >= 1:
print(f"true_positive = {dict_to_str(true_positive)}")
print(f"false_positive = {dict_to_str(false_positive)}")
print(f"false_negative = {dict_to_str(false_negative)}")
total_loss = sum([
false_positive[c] * false_positive_loss[c] + false_negative[c] * false_negative_loss[c] \
for c in classes
])
return total_loss
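# --- Illustrative aside (not part of the original module): a minimal, self-contained
# rerun of the matching logic above on toy data. A hypothetical tolerance of 3 samples
# stands in for BaseCfg.bias_thr; false positives cost 1 and false negatives cost 5, as
# in the loss weights above.
_toy_truth = np.array([100, 500])      # true S-beat sample indices
_toy_pred = np.array([102, 900])       # predicted S-beat sample indices
_toy_thr = 3
_tp = sum(int(np.any(np.abs(_toy_pred - t) <= _toy_thr)) for t in _toy_truth)   # 1
_fp = len(_toy_pred) - _tp             # 1 unmatched prediction
_fn = len(_toy_truth) - _tp            # 1 missed true beat
_toy_loss = _fp * 1 + _fn * 5          # = 6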
def CPSC2020_score(spb_true:List[np.ndarray], pvc_true:List[np.ndarray], spb_pred:List[np.ndarray], pvc_pred:List[np.ndarray], verbose:int=0) -> Union[Tuple[int],dict]:
""" finished, checked,
Score Function for all (test) records
Parameters:
-----------
spb_true, pvc_true, spb_pred, pvc_pred: list of ndarray,
verbose: int
Returns:
--------
retval: tuple or dict,
tuple of (negative) scores for each ectopic beat type (SPB, PVC), or
dict of more scoring details, including
- total_loss: sum of loss of each ectopic beat type (PVC and SPB)
- true_positive: number of true positives of each ectopic beat type
- false_positive: number of false positives of each ectopic beat type
- false_negative: number of false negatives of each ectopic beat type
"""
s_score = np.zeros([len(spb_true), ], dtype=int)
v_score = np.zeros([len(spb_true), ], dtype=int)
true_positive = ED({'S':0, 'V':0})
false_positive = ED({'S':0, 'V':0})
false_negative = ED({'S':0, 'V':0})
## Scoring ##
for i, (s_ref, v_ref, s_pos, v_pos) in enumerate(zip(spb_true, pvc_true, spb_pred, pvc_pred)):
s_tp = 0
s_fp = 0
s_fn = 0
v_tp = 0
v_fp = 0
v_fn = 0
# SPB
if s_ref.size == 0:
s_fp = len(s_pos)
else:
for m, ans in enumerate(s_ref):
s_pos_cand = np.where(abs(s_pos-ans) <= BaseCfg.bias_thr)[0]
if s_pos_cand.size == 0:
s_fn += 1
else:
s_tp += 1
s_fp += len(s_pos_cand) - 1
# PVC
if v_ref.size == 0:
v_fp = len(v_pos)
else:
for m, ans in enumerate(v_ref):
v_pos_cand = np.where(abs(v_pos-ans) <= BaseCfg.bias_thr)[0]
if v_pos_cand.size == 0:
v_fn += 1
else:
v_tp += 1
v_fp += len(v_pos_cand) - 1
# calculate the score
s_score[i] = s_fp * (-1) + s_fn * (-5)
v_score[i] = v_fp * (-1) + v_fn * (-5)
if verbose >= 3:
print(f"for the {i}-th record")
print(f"s_tp = {s_tp}, s_fp = {s_fp}, s_fn = {s_fn}")
print(f"v_tp = {v_tp}, v_fp = {v_fp}, v_fn = {v_fn}")
print(f"s_score[{i}] = {s_score[i]}, v_score[{i}] = {v_score[i]}")
true_positive.S += s_tp
true_positive.V += v_tp
false_positive.S += s_fp
false_positive.V += v_fp
false_negative.S += s_fn
false_negative.V += v_fn
Score1 = np.sum(s_score)
Score2 = np.sum(v_score)
if verbose >= 1:
retval = ED(
total_loss=-(Score1+Score2),
class_loss={'S':-Score1, 'V':-Score2},
true_positive=true_positive,
false_positive=false_positive,
false_negative=false_negative,
)
else:
retval = Score1, Score2
return retval
# -------------------------------------------------------
# the following are borrowed from CINC2020
# for classification of segments of ECGs using ECG_CRNN
def eval_score(classes:List[str], truth:Sequence, binary_pred:Sequence, scalar_pred:Sequence) -> Tuple[float]:
""" finished, checked,
for classification of segments of ECGs
Parameters:
-----------
classes: list of str,
list of all the classes, in the format of abbrevations
truth: sequence,
ground truth array, of shape (n_records, n_classes), with values 0 or 1
binary_pred: sequence,
binary predictions, of shape (n_records, n_classes), with values 0 or 1
scalar_pred: sequence,
probability predictions, of shape (n_records, n_classes), with values within [0,1]
Returns:
--------
auroc: float,
auprc: float,
accuracy: float,
f_measure: float,
f_beta_measure: float,
g_beta_measure: float,
"""
_truth = np.array(truth)
_binary_pred = np.array(binary_pred)
_scalar_pred = np.array(scalar_pred)
print('- AUROC and AUPRC...')
auroc, auprc = compute_auc(_truth, _scalar_pred)
print('- Accuracy...')
accuracy = compute_accuracy(_truth, _binary_pred)
print('- F-measure...')
f_measure = compute_f_measure(_truth, _binary_pred)
print('- F-beta and G-beta measures...')
f_beta_measure, g_beta_measure = compute_beta_measures(_truth, _binary_pred, beta=2)
print('Done.')
# Return the results.
return auroc, auprc, accuracy, f_measure, f_beta_measure, g_beta_measure
# Compute recording-wise accuracy.
def compute_accuracy(labels:np.ndarray, outputs:np.ndarray) -> float:
""" checked,
"""
num_recordings, num_classes = np.shape(labels)
num_correct_recordings = 0
for i in range(num_recordings):
if np.all(labels[i, :]==outputs[i, :]):
num_correct_recordings += 1
return float(num_correct_recordings) / float(num_recordings)
# Compute confusion matrices.
def compute_confusion_matrices(labels:np.ndarray, outputs:np.ndarray, normalize:bool=False) -> np.ndarray:
""" checked,
"""
# Compute a binary confusion matrix for each class k:
#
# [TN_k FN_k]
# [FP_k TP_k]
#
# If the normalize variable is set to true, then normalize the contributions
# to the confusion matrix by the number of labels per recording.
num_recordings, num_classes = np.shape(labels)
if not normalize:
A = np.zeros((num_classes, 2, 2))
for i in range(num_recordings):
for j in range(num_classes):
if labels[i, j]==1 and outputs[i, j]==1: # TP
A[j, 1, 1] += 1
elif labels[i, j]==0 and outputs[i, j]==1: # FP
A[j, 1, 0] += 1
elif labels[i, j]==1 and outputs[i, j]==0: # FN
A[j, 0, 1] += 1
elif labels[i, j]==0 and outputs[i, j]==0: # TN
A[j, 0, 0] += 1
else: # This condition should not happen.
raise ValueError('Error in computing the confusion matrix.')
else:
A = np.zeros((num_classes, 2, 2))
for i in range(num_recordings):
normalization = float(max(np.sum(labels[i, :]), 1))
for j in range(num_classes):
if labels[i, j]==1 and outputs[i, j]==1: # TP
A[j, 1, 1] += 1.0/normalization
elif labels[i, j]==0 and outputs[i, j]==1: # FP
A[j, 1, 0] += 1.0/normalization
elif labels[i, j]==1 and outputs[i, j]==0: # FN
A[j, 0, 1] += 1.0/normalization
elif labels[i, j]==0 and outputs[i, j]==0: # TN
A[j, 0, 0] += 1.0/normalization
else: # This condition should not happen.
raise ValueError('Error in computing the confusion matrix.')
return A
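# --- Illustrative aside (not part of the original module): a tiny check of the
# per-class 2x2 layout documented above ([TN FN; FP TP]). Two recordings, two classes:
# class 0 gets one TP and one FN, class 1 gets one TN and one FP.
_demo_labels = np.array([[1, 0],
                         [1, 0]])
_demo_outputs = np.array([[1, 0],
                          [0, 1]])
_demo_A = compute_confusion_matrices(_demo_labels, _demo_outputs)
# _demo_A[0] == [[0., 1.], [0., 1.]]   (class 0: TN=0, FN=1, FP=0, TP=1)
# _demo_A[1] == [[1., 0.], [1., 0.]]   (class 1: TN=1, FN=0, FP=1, TP=0)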
# Compute macro F-measure.
def compute_f_measure(labels:np.ndarray, outputs:np.ndarray) -> float:
""" checked,
"""
num_recordings, num_classes = np.shape(labels)
A = compute_confusion_matrices(labels, outputs)
f_measure = np.zeros(num_classes)
for k in range(num_classes):
tp, fp, fn, tn = A[k, 1, 1], A[k, 1, 0], A[k, 0, 1], A[k, 0, 0]
if 2 * tp + fp + fn:
f_measure[k] = float(2 * tp) / float(2 * tp + fp + fn)
else:
f_measure[k] = float('nan')
macro_f_measure = np.nanmean(f_measure)
return macro_f_measure
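# --- Illustrative aside (not part of the original module): a worked macro-F example.
# A class with tp=1, fp=0, fn=1 gives F1 = 2*1 / (2*1 + 0 + 1) = 2/3; a class with tp=0
# and fp + fn > 0 gives F1 = 0; the macro F-measure is the nanmean over classes, here 1/3.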
# Compute F-beta and G-beta measures from the unofficial phase of the Challenge.
def compute_beta_measures(labels:np.ndarray, outputs:np.ndarray, beta:Real) -> Tuple[float, float]:
""" checked,
"""
num_recordings, num_classes = np.shape(labels)
A = compute_confusion_matrices(labels, outputs, normalize=True)
f_beta_measure = np.zeros(num_classes)
g_beta_measure = np.zeros(num_classes)
for k in range(num_classes):
tp, fp, fn, tn = A[k, 1, 1], A[k, 1, 0], A[k, 0, 1], A[k, 0, 0]
if (1+beta**2)*tp + fp + beta**2*fn:
f_beta_measure[k] = float((1+beta**2)*tp) / float((1+beta**2)*tp + fp + beta**2*fn)
else:
f_beta_measure[k] = float('nan')
if tp + fp + beta*fn:
g_beta_measure[k] = float(tp) / float(tp + fp + beta*fn)
else:
g_beta_measure[k] = float('nan')
macro_f_beta_measure = np.nanmean(f_beta_measure)
macro_g_beta_measure = np.nanmean(g_beta_measure)
return macro_f_beta_measure, macro_g_beta_measure
import numpy as np
from numpy.testing import assert_allclose
import pytest
from linearmodels.asset_pricing.model import LinearFactorModelGMM
from linearmodels.tests.asset_pricing._utility import generate_data, get_all
@pytest.fixture(params=["numpy", "pandas"])
def data(request):
return generate_data(nportfolio=10, output=request.param)
def test_linear_model_gmm_moments_jacobian(data):
mod = LinearFactorModelGMM(data.portfolios, data.factors)
res = mod.fit(cov_type="robust", disp=0, debiased=False)
params = np.r_[
res.betas.values.ravel(),
res.risk_premia.values.ravel(),
mod.factors.ndarray.mean(0),
]
mod_mom = mod._moments(params[:, None], True)
mom = []
p = mod.portfolios.ndarray
f = mod.factors.ndarray
n = f.shape[0]
fc = np.c_[np.ones((n, 1)), f]
mu = f.mean(0)[None, :]
lam = res.risk_premia.values[None, :]
x = f - mu + lam
b = res.betas.values
for i in range(p.shape[1]):
eps = p[:, i : (i + 1)] - x @ b[[i]].T
for j in range(fc.shape[1]):
mom.append(eps * fc[:, [j]])
mom.append(f - mu)
mom_arr = np.hstack(tuple(mom))
mod_jac = mod._jacobian(params, True)
jac = np.zeros((mom_arr.shape[1], params.shape[0]))
nport, nf = p.shape[1], f.shape[1]
# 1,1
jac[: (nport * (nf + 1)), : nport * nf] = np.kron(np.eye(nport), fc.T @ x / n)
# 1, 2
col = []
for i in range(nport):
col.append(fc.T @ np.ones((n, 1)) @ b[[i]] / n)
col = np.vstack(tuple(col))
jac[: (nport * (nf + 1)), nport * nf : nport * nf + nf] = col
# 1, 3
col = []
for i in range(nport):
col.append(-fc.T @ np.ones((n, 1)) @ b[[i]] / n)
col = np.vstack(tuple(col))
jac[: (nport * (nf + 1)), -nf:] = col
# 2,2
jac[-nf:, -nf:] = np.eye(nf)
assert_allclose(mom_arr, mod_mom)
assert_allclose(jac, mod_jac)
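# --- Illustrative aside (not from the original test module): the Jacobian block filled
# in above via np.kron(np.eye(nport), fc.T @ x / n) repeats the same (nf+1) x nf
# derivative block once per portfolio along the diagonal. A minimal example of that
# block-diagonal structure:
_blk = np.array([[1.0, 2.0],
                 [3.0, 4.0]])
_block_diag = np.kron(np.eye(2), _blk)
# _block_diag ==
# [[1. 2. 0. 0.]
#  [3. 4. 0. 0.]
#  [0. 0. 1. 2.]
#  [0. 0. 3. 4.]]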
"""
Encoders for encoding categorical variables and scaling continuous data.
"""
from typing import Dict, Iterable, List, Tuple, Union
import warnings
import numpy as np
import pandas as pd
from sklearn.base import BaseEstimator, TransformerMixin
import torch
import torch.nn.functional as F
class NaNLabelEncoder(BaseEstimator, TransformerMixin):
"""
Labelencoder that can optionally always encode nan and unknown classes (in transform) as class ``0``
"""
def __init__(self, add_nan: bool = False, warn: bool = True):
"""
init NaNLabelEncoder
Args:
add_nan: if to force encoding of nan at 0
warn: if to warn if additional nans are added because items are unknown
"""
self.add_nan = add_nan
self.warn = warn
super().__init__()
def fit_transform(self, y: pd.Series) -> np.ndarray:
"""
Fit and transform data.
Args:
y (pd.Series): input data
Returns:
np.ndarray: encoded data
"""
if self.add_nan:
self.fit(y)
return self.transform(y)
return super().fit_transform(y)
@staticmethod
def is_numeric(y: pd.Series) -> bool:
"""
Determine if series is numeric or not. Will also return True if series is a categorical type with
underlying integers.
Args:
y (pd.Series): series for which to carry out assessment
Returns:
bool: True if series is numeric
"""
return y.dtype.kind in "bcif" or (isinstance(y.dtype, pd.CategoricalDtype) and y.cat.categories.dtype.kind in "bcif")
def fit(self, y: pd.Series):
"""
Fit transformer
Args:
y (pd.Series): input data to fit on
Returns:
NaNLabelEncoder: self
"""
if self.add_nan:
if self.is_numeric(y):
nan = np.nan
else:
nan = "nan"
self.classes_ = {nan: 0}
for idx, val in enumerate(np.unique(y)):
self.classes_[val] = idx + 1
else:
self.classes_ = {val: idx for idx, val in enumerate(np.unique(y))}
self.classes_vector_ = np.array(list(self.classes_.keys()))
return self
def transform(self, y: Iterable) -> Union[torch.Tensor, np.ndarray]:
"""
Encode iterable with integers.
Args:
y (Iterable): iterable to encode
Returns:
Union[torch.Tensor, np.ndarray]: returns encoded data as torch tensor or numpy array depending on input type
"""
if self.add_nan:
if self.warn:
cond = ~np.isin(y, self.classes_vector_)
if cond.any():
warnings.warn(
f"Found {np.unique(np.asarray(y)[cond]).size} unknown classes which were set to NaN",
UserWarning,
)
encoded = [self.classes_.get(v, 0) for v in y]
else:
encoded = [self.classes_[v] for v in y]
if isinstance(y, torch.Tensor):
encoded = torch.tensor(encoded, dtype=torch.long, device=y.device)
else:
encoded = np.array(encoded)
return encoded
def inverse_transform(self, y: Union[torch.Tensor, np.ndarray]) -> np.ndarray:
"""
Decode data, i.e. transform from integers to labels.
Args:
y (Union[torch.Tensor, np.ndarray]): encoded data
Raises:
KeyError: if unknown elements should be decoded
Returns:
np.ndarray: decoded data
"""
if y.max() >= len(self.classes_vector_):
raise KeyError("New unknown values detected")
# decode
decoded = self.classes_vector_[y]
return decoded
def __call__(self, data: (Dict[str, torch.Tensor])) -> torch.Tensor:
"""
Extract prediction from network output. Does not map back to input
categories as this would require a numpy tensor without grad-abilities.
Args:
data (Dict[str, torch.Tensor]): Dictionary with entries
* prediction: data to de-scale
Returns:
torch.Tensor: prediction
"""
return data["prediction"]
class TorchNormalizer(BaseEstimator, TransformerMixin):
"""
Basic target transformer that can be fit also on torch tensors.
"""
def __init__(
self,
method: str = "standard",
center: bool = True,
log_scale: Union[bool, float] = False,
log_zero_value: float = 0.0,
coerce_positive: Union[float, bool] = None,
eps: float = 1e-8,
):
"""
Initialize
Args:
method (str, optional): method to rescale series. Either "standard" (standard scaling) or "robust"
(scale using quantiles 0.25-0.75). Defaults to "standard".
center (bool, optional): If to center the output to zero. Defaults to True.
log_scale (bool, optional): If to take log of values. Defaults to False.
log_zero_value (float, optional): Value to map 0 to for ``log_scale=True`` or in softplus. Defaults to 0.0
coerce_positive (Union[bool, float, str], optional): If to coerce output to positive. Valid values:
* None, i.e. is automatically determined and might change to True if all values are >= 0 (Default).
* True, i.e. output is clamped at 0.
* False, i.e. values are not coerced
* float, i.e. softplus is applied with beta = coerce_positive.
eps (float, optional): Number for numerical stability of calculations. Defaults to 1e-8.
"""
self.method = method
assert method in ["standard", "robust"], f"method has invalid value {method}"
self.center = center
self.eps = eps
# set log scale
self.log_zero_value = np.exp(log_zero_value)
self.log_scale = log_scale
# check if coerce positive should be determined automatically
if coerce_positive is None:
if log_scale:
coerce_positive = False
else:
assert not (self.log_scale and coerce_positive), (
"log scale means that output is transformed to a positive number by default while coercing positive"
" will apply softmax function - decide for either one or the other"
)
self.coerce_positive = coerce_positive
def get_parameters(self, *args, **kwargs) -> torch.Tensor:
"""
Returns parameters that were used for encoding.
Returns:
torch.Tensor: First element is center of data and second is scale
"""
return torch.tensor([self.center_, self.scale_])
def _preprocess_y(self, y: Union[pd.Series, np.ndarray, torch.Tensor]) -> Union[np.ndarray, torch.Tensor]:
"""
Preprocess input data (e.g. take log).
Can set coerce positive to a value if it was set to None and log_scale to False.
Returns:
Union[np.ndarray, torch.Tensor]: return rescaled series with type depending on input type
"""
if self.coerce_positive is None and not self.log_scale:
self.coerce_positive = (y >= 0).all()
if self.log_scale:
if isinstance(y, torch.Tensor):
y = torch.log(y + self.log_zero_value)
else:
y = np.log(y + self.log_zero_value)
return y
def fit(self, y: Union[pd.Series, np.ndarray, torch.Tensor]):
"""
Fit transformer, i.e. determine center and scale of data
Args:
y (Union[pd.Series, np.ndarray, torch.Tensor]): input data
Returns:
TorchNormalizer: self
"""
y = self._preprocess_y(y)
if self.method == "standard":
if isinstance(y, torch.Tensor):
self.center_ = torch.mean(y)
self.scale_ = torch.std(y) / (self.center_ + self.eps)
else:
self.center_ = np.mean(y)
self.scale_ = np.std(y) / (self.center_ + self.eps)
elif self.method == "robust":
if isinstance(y, torch.Tensor):
self.center_ = torch.median(y)
q_75 = y.kthvalue(int(len(y) * 0.75)).values
q_25 = y.kthvalue(int(len(y) * 0.25)).values
else:
self.center_ = np.median(y)
q_75 = np.percentile(y, 75)
q_25 = np.percentile(y, 25)
self.scale_ = (q_75 - q_25) / (self.center_ + self.eps) / 2.0
return self
def transform(
self, y: Union[pd.Series, np.ndarray, torch.Tensor], return_norm: bool = False
) -> Union[Tuple[Union[np.ndarray, torch.Tensor], np.ndarray], Union[np.ndarray, torch.Tensor]]:
"""
Rescale data.
Args:
y (Union[pd.Series, np.ndarray, torch.Tensor]): input data
return_norm (bool, optional): If to also return the center and scale used for scaling. Defaults to False.
Returns:
Union[Tuple[Union[np.ndarray, torch.Tensor], np.ndarray], Union[np.ndarray, torch.Tensor]]: rescaled
data with type depending on input type. returns second element if ``return_norm=True``
"""
if self.log_scale:
if isinstance(y, torch.Tensor):
y = (y + self.log_zero_value + self.eps).log()
else:
y = np.log(y + self.log_zero_value + self.eps)
if self.center:
y = (y / (self.center_ + self.eps) - 1) / (self.scale_ + self.eps)
else:
y = y / (self.center_ + self.eps)
if return_norm:
return y, self.get_parameters().numpy()[None, :]
else:
return y
def inverse_transform(self, y: torch.Tensor) -> torch.Tensor:
"""
Inverse scale.
Args:
y (torch.Tensor): scaled data
Returns:
torch.Tensor: de-scaled data
"""
return self(dict(prediction=y, target_scale=self.get_parameters().unsqueeze(0)))
def __call__(self, data: Dict[str, torch.Tensor]) -> torch.Tensor:
"""
Inverse transformation but with network output as input.
Args:
data (Dict[str, torch.Tensor]): Dictionary with entries
* prediction: data to de-scale
* target_scale: center and scale of data
Returns:
torch.Tensor: de-scaled data
"""
# inverse transformation with tensors
norm = data["target_scale"]
# use correct shape for norm
if data["prediction"].ndim > norm.ndim:
norm = norm.unsqueeze(-1)
# transform
if self.center:
y_normed = (data["prediction"] * norm[:, 1, None] + 1) * norm[:, 0, None]
else:
y_normed = data["prediction"] * norm[:, 0, None]
if self.log_scale:
y_normed = (y_normed.exp() - self.log_zero_value).clamp_min(0.0)
elif isinstance(self.coerce_positive, bool) and self.coerce_positive:
y_normed = y_normed.clamp_min(0.0)
elif isinstance(self.coerce_positive, float):
y_normed = F.softplus(y_normed, beta=float(self.coerce_positive))
# return correct shape
if data["prediction"].ndim == 1 and y_normed.ndim > 1:
y_normed = y_normed.squeeze(0)
return y_normed
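# --- Illustrative usage sketch (not part of the original module): standard scaling with
# TorchNormalizer, then mapping values back through inverse_transform. The numbers in
# the trailing comments are approximate.
_demo_y = pd.Series([10.0, 12.0, 14.0])
_demo_norm = TorchNormalizer(method="standard", center=True)
_demo_scaled, _demo_scale = _demo_norm.fit(_demo_y).transform(_demo_y, return_norm=True)
# _demo_scaled ~ [-1.22, 0.0, 1.22]; _demo_scale holds the fitted (center, scale) pair
_demo_restored = _demo_norm.inverse_transform(torch.as_tensor(_demo_scaled.to_numpy()))
# _demo_restored ~ tensor([10., 12., 14.])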
class EncoderNormalizer(TorchNormalizer):
"""
Special Normalizer that is fit on each encoding sequence.
If passed as target normalizer, this transformer will be fitted on each encoder sequence separately.
"""
pass
class GroupNormalizer(TorchNormalizer):
"""
Normalizer that scales by groups.
For each group a scaler is fitted and applied. This scaler can be used as target normalizer or
also to normalize any other variable.
"""
# todo: allow window (exp weighted), different methods such as quantile for robust scaling
def __init__(
self,
method: str = "standard",
groups: List[str] = [],
center: bool = True,
scale_by_group: bool = False,
log_scale: Union[bool, float] = False,
log_zero_value: float = 0.0,
coerce_positive: Union[float, bool] = None,
eps: float = 1e-8,
):
"""
Group normalizer to normalize a given entry by groups. Can be used as target normalizer.
Args:
method (str, optional): method to rescale series. Either "standard" (standard scaling) or "robust"
(scale using quantiles 0.25-0.75). Defaults to "standard".
groups (List[str], optional): Group names to normalize by. Defaults to [].
center (bool, optional): If to center the output to zero. Defaults to True.
scale_by_group (bool, optional): If to scale the output by group, i.e. norm is calculated as
``(group1_norm * group2_norm * ...) ^ (1 / n_groups)``. Defaults to False.
log_scale (bool, optional): If to take log of values. Defaults to False.
log_zero_value (float, optional): Value to map 0 to for ``log_scale=True`` or in softplus. Defaults to 0.0
coerce_positive (Union[bool, float, str], optional): If to coerce output to positive. Valid values:
* None, i.e. is automatically determined and might change to True if all values are >= 0 (Default).
* True, i.e. output is clamped at 0.
* False, i.e. values are not coerced
* float, i.e. softplus is applied with beta = coerce_positive.
eps (float, optional): Number for numerical stability of calculations. Defaults to 1e-8.
"""
self.groups = groups
self.scale_by_group = scale_by_group
super().__init__(
method=method,
center=center,
log_scale=log_scale,
log_zero_value=log_zero_value,
coerce_positive=coerce_positive,
eps=eps,
)
def fit(self, y: pd.Series, X: pd.DataFrame):
"""
Determine scales for each group
Args:
y (pd.Series): input data
X (pd.DataFrame): dataframe with columns for each group defined in ``groups`` parameter.
Returns:
self
"""
y = self._preprocess_y(y)
if len(self.groups) == 0:
assert not self.scale_by_group, "No groups are defined, i.e. `groups=[]`, so `scale_by_group` must be False"
if self.method == "standard":
mean = np.mean(y)
self.norm_ = mean, np.std(y) / (mean + self.eps)
else:
quantiles = np.quantile(y, [0.25, 0.5, 0.75])
self.norm_ = quantiles[1], (quantiles[2] - quantiles[0]) / (quantiles[1] + self.eps)
elif self.scale_by_group:
if self.method == "standard":
self.norm_ = {
g: X[[g]]
.assign(y=y)
.groupby(g, observed=True)
.agg(mean=("y", "mean"), scale=("y", "std"))
.assign(scale=lambda x: x.scale / (x["mean"] + self.eps))
for g in self.groups
}
else:
self.norm_ = {
g: X[[g]]
.assign(y=y)
.groupby(g, observed=True)
.y.quantile([0.25, 0.5, 0.75])
.unstack(-1)
.assign(
median=lambda x: x[0.5] + self.eps,
scale=lambda x: (x[0.75] - x[0.25] + self.eps) / (x[0.5] + self.eps),
)[["median", "scale"]]
for g in self.groups
}
# calculate missings
self.missing_ = {group: scales.median().to_dict() for group, scales in self.norm_.items()}
else:
if self.method == "standard":
self.norm_ = (
X[self.groups]
.assign(y=y)
.groupby(self.groups, observed=True)
.agg(mean=("y", "mean"), scale=("y", "std"))
.assign(scale=lambda x: x.scale / (x["mean"] + self.eps))
)
else:
self.norm_ = (
X[self.groups]
.assign(y=y)
.groupby(self.groups, observed=True)
.y.quantile([0.25, 0.5, 0.75])
.unstack(-1)
.assign(
median=lambda x: x[0.5] + self.eps,
scale=lambda x: (x[0.75] - x[0.25] + self.eps) / (x[0.5] + self.eps) / 2.0,
)[["median", "scale"]]
)
self.missing_ = self.norm_.median().to_dict()
return self
@property
def names(self) -> List[str]:
"""
Names of determined scales.
Returns:
List[str]: list of names
"""
if self.method == "standard":
return ["mean", "scale"]
else:
return ["median", "scale"]
def fit_transform(
self, y: pd.Series, X: pd.DataFrame, return_norm: bool = False
) -> Union[np.ndarray, Tuple[np.ndarray, np.ndarray]]:
"""
Fit normalizer and scale input data.
Args:
y (pd.Series): data to scale
X (pd.DataFrame): dataframe with ``groups`` columns
return_norm (bool, optional): If to also return the scaling parameters. Defaults to False.
Returns:
Union[np.ndarray, Tuple[np.ndarray, np.ndarray]]: Scaled data, if ``return_norm=True``, returns also scales
as second element
"""
return self.fit(y, X).transform(y, X, return_norm=return_norm)
def inverse_transform(self, y: pd.Series, X: pd.DataFrame):
"""
Rescaling data to original scale - not implemented.
"""
raise NotImplementedError()
def transform(
self, y: pd.Series, X: pd.DataFrame, return_norm: bool = False
) -> Union[np.ndarray, Tuple[np.ndarray, np.ndarray]]:
"""
Scale input data.
Args:
y (pd.Series): data to scale
X (pd.DataFrame): dataframe with ``groups`` columns
return_norm (bool, optional): If to also return the scaling parameters. Defaults to False.
Returns:
Union[np.ndarray, Tuple[np.ndarray, np.ndarray]]: Scaled data, if ``return_norm=True``, returns also scales
as second element
"""
norm = self.get_norm(X)
y = self._preprocess_y(y)
if self.center:
y_normed = (y / (norm[:, 0] + self.eps) - 1) / (norm[:, 1] + self.eps)
else:
y_normed = y / (norm[:, 0] + self.eps)
if return_norm:
return y_normed, norm
else:
return y_normed
def get_parameters(self, groups: Union[torch.Tensor, list, tuple], group_names: List[str] = None) -> np.ndarray:
"""
Get fitted scaling parameters for a given group.
Args:
groups (Union[torch.Tensor, list, tuple]): group ids for which to get parameters
group_names (List[str], optional): Names of groups corresponding to positions
in ``groups``. Defaults to None, i.e. the instance attribute ``groups``.
Returns:
np.ndarray: parameters used for scaling
"""
if isinstance(groups, torch.Tensor):
groups = groups.tolist()
if isinstance(groups, list):
groups = tuple(groups)
if group_names is None:
group_names = self.groups
else:
# filter group names
group_names = [name for name in group_names if name in self.groups]
assert len(group_names) == len(self.groups), "Passed groups and fitted do not match"
if len(self.groups) == 0:
params = np.asarray(self.norm_).squeeze()
elif self.scale_by_group:
norm = np.array([1.0, 1.0])
for group, group_name in zip(groups, group_names):
try:
norm = norm * self.norm_[group_name].loc[group].to_numpy()
except KeyError:
norm = norm * np.asarray([self.missing_[group_name][name] for name in self.names])
norm = np.power(norm, 1.0 / len(self.groups))
params = norm
else:
try:
params = self.norm_.loc[groups].to_numpy()
except (KeyError, TypeError):
params = np.asarray([self.missing_[name] for name in self.names])
return params
def get_norm(self, X: pd.DataFrame) -> pd.DataFrame:
"""
Get scaling parameters for multiple groups.
Args:
X (pd.DataFrame): dataframe with ``groups`` columns
Returns:
pd.DataFrame: dataframe with scaling parameters where each row corresponds to the input dataframe
"""
if len(self.groups) == 0:
norm = np.asarray(self.norm_)
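# --- Illustrative usage sketch (not from the original module, whose get_norm method is
# truncated above): fitting a GroupNormalizer so that each group gets its own center and
# scale. The column and group names below are hypothetical.
_demo_X = pd.DataFrame({"group_id": ["a", "a", "b", "b"]})
_demo_target = pd.Series([1.0, 3.0, 10.0, 30.0])
_demo_gnorm = GroupNormalizer(method="standard", groups=["group_id"])
_demo_gnorm.fit(_demo_target, _demo_X)
# _demo_gnorm.norm_ is a dataframe indexed by group_id with "mean" and "scale" columns;
# transform(y, X) then looks up the matching row per observation via get_norm(X).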
import numpy as np
from numba import njit
@njit(fastmath=True) #cache=True
def fill(source,image,mask,rng):
x = source[0][0]
y = source[1][0]
r = []
g = []
b = []
for i in range(len(x)):
rr = []
gg = []
bb = []
xside = int(x[i])
yside = int(y[i])
x1 = int(xside - rng)
x2 = int(xside + rng + 1)
y1 = int(yside - rng)
y2 = int(yside + rng + 1)
window = image[x1:x2,y1:y2]
win_mask = mask[x1:x2,y1:y2]
for j in range(len(win_mask)):
for k in range(len(win_mask)):
if win_mask[j,k]==0:
rr.append(window[j,k,0])
gg.append(window[j,k,1])
bb.append(window[j,k,2])
r.append(np.median(np.array(rr)))
g.append(np.median(np.array(gg)))
b.append(np.median(np.array(bb)))
import argparse
import numpy as NP
from astropy.io import fits
from astropy.io import ascii
import scipy.constants as FCNST
import matplotlib.pyplot as PLT
import matplotlib.colors as PLTC
import progressbar as PGB
import healpy as HP
import geometry as GEOM
import interferometry as RI
import catalog as SM
import constants as CNST
import my_operations as OPS
import primary_beams as PB
import ipdb as PDB
def Jy2K(fluxJy, freq, pixres):
return fluxJy * CNST.Jy / pixres / (2.0* FCNST.k * (freq)**2 / FCNST.c**2)
def K2Jy(tempK, freq, pixres):
return tempK * (2.0* FCNST.k * (freq)**2 / FCNST.c**2) * pixres / CNST.Jy
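# --- Illustrative aside (not from the original script): Jy2K/K2Jy above implement the
# Rayleigh-Jeans conversion S[Jy] = T * (2 k_B nu^2 / c^2) * Omega_pix / Jy, assuming
# CNST.Jy is the Jansky in SI units (1e-26 W m^-2 Hz^-1). A quick standalone check at
# 185 MHz for a 1 sr pixel and a 100 K brightness temperature:
_nu = 185e6       # Hz
_pixres = 1.0     # sr
_T = 100.0        # K
_S_Jy = _T * (2.0 * FCNST.k * _nu**2 / FCNST.c**2) * _pixres / 1e-26   # ~1e5 Jy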
## Parse input arguments
parser = argparse.ArgumentParser(description='Program to simulate interferometer array data')
project_group = parser.add_mutually_exclusive_group(required=True)
project_group.add_argument('--project-MWA', dest='project_MWA', action='store_true')
project_group.add_argument('--project-HERA', dest='project_HERA', action='store_true')
project_group.add_argument('--project-beams', dest='project_beams', action='store_true')
project_group.add_argument('--project-drift-scan', dest='project_drift_scan', action='store_true')
project_group.add_argument('--project-global-EoR', dest='project_global_EoR', action='store_true')
telescope_group = parser.add_argument_group('Telescope parameters', 'Telescope/interferometer specifications')
telescope_group.add_argument('--label-prefix', help='Prefix for baseline labels [str, Default = ""]', default='', type=str, dest='label_prefix')
telescope_group.add_argument('--telescope', help='Telescope name [str, default="custom"]', default='custom', type=str, dest='telescope_id', choices=['mwa', 'vla', 'gmrt', 'hera', 'mwa_dipole', 'paper_dipole', 'custom', 'mwa_tools'])
telescope_group.add_argument('--latitude', help='Latitude of interferometer array in degrees [float, Default=-26.701]', default=-26.701, type=float, dest='latitude')
telescope_group.add_argument('--A-eff', help='Effective area in m^2', type=float, dest='A_eff', nargs='?')
antenna_element_group = parser.add_argument_group('Antenna element parameters', 'Antenna element specifications')
antenna_element_group.add_argument('--shape', help='Shape of antenna element [no default]', type=str, dest='antenna_element_shape', default=None, choices=['dish', 'dipole', 'delta'])
antenna_element_group.add_argument('--size', help='Size of dish or length of dipole (in meters) [float, no default]', default=None, type=float, dest='antenna_element_size')
antenna_element_group.add_argument('--orientation', help='Orientation of dipole or pointing direction of dish [float, (altitude azimuth) or (l m [n])]', default=None, type=float, nargs='*', dest='antenna_element_orientation')
antenna_element_group.add_argument('--ocoords', help='Coordinates of dipole orientation or dish pointing direction [str]', default=None, type=str, dest='antenna_element_orientation_coords', choices=['dircos', 'altaz'])
antenna_element_group.add_argument('--phased-array', dest='phased_array', action='store_true')
antenna_element_group.add_argument('--phased-array-file', help='Locations of antenna elements to be phased', default='/data3/t_nithyanandan/project_MWA/MWA_tile_dipole_locations.txt', type=file, dest='phased_elements_file')
antenna_element_group.add_argument('--groundplane', help='Height of antenna element above ground plane (in meters) [float]', default=None, type=float, dest='ground_plane')
obsparm_group = parser.add_argument_group('Observation setup', 'Parameters specifying the observation')
obsparm_group.add_argument('-f', '--freq', help='Foreground center frequency in Hz [float, Default=185e6]', default=185e6, type=float, dest='freq')
obsparm_group.add_argument('--dfreq', help='Frequency resolution in Hz [float, Default=40e3]', default=40e3, type=float, dest='freq_resolution')
obsparm_group.add_argument('--obs-mode', help='Observing mode [str, track/drift/drift-shift/custom]', default=None, type=str, dest='obs_mode', choices=['track', 'drift', 'dns', 'custom'])
# obsparm_group.add_argument('--t-snap', help='Integration time (seconds) [float, Default=300.0]', default=5.0*60.0, type=float, dest='t_snap')
obsparm_group.add_argument('--nchan', help='Number of frequency channels [int, Default=256]', default=256, type=int, dest='n_channels')
duration_group = parser.add_argument_group('Observing duration parameters', 'Parameters specifying observing duration')
duration_group.add_argument('--t-obs', help='Duration of observation [seconds]', dest='t_obs', default=None, type=float, metavar='t_obs')
duration_group.add_argument('--n-snap', help='Number of snapshots or records that make up the observation', dest='n_snaps', default=None, type=int, metavar='n_snapshots')
duration_group.add_argument('--t-snap', help='integration time of each snapshot [seconds]', dest='t_snap', default=None, type=int, metavar='t_snap')
pointing_group = parser.add_mutually_exclusive_group(required=True)
pointing_group.add_argument('--pointing-file', dest='pointing_file', type=str, nargs=1, default=None)
pointing_group.add_argument('--pointing-info', dest='pointing_info', type=float, nargs=3, metavar=('lst_init', 'ra_init', 'dec_init'))
snapshot_selection_group = parser.add_mutually_exclusive_group(required=False)
snapshot_selection_group.add_argument('--beam-switch', dest='beam_switch', action='store_true')
snapshot_selection_group.add_argument('--snap-pick', dest='pick_snapshots', default=None, type=int, nargs='*')
snapshot_selection_group.add_argument('--snap-range', dest='snapshots_range', default=None, nargs=2, type=int)
snapshot_selection_group.add_argument('--all-snaps', dest='all_snapshots', action='store_true')
fgmodel_group = parser.add_mutually_exclusive_group(required=True)
fgmodel_group.add_argument('--ASM', action='store_true') # Diffuse (GSM) + Compact (NVSS+SUMSS) All-sky model
fgmodel_group.add_argument('--DSM', action='store_true') # Diffuse all-sky model
fgmodel_group.add_argument('--CSM', action='store_true') # Point source model (NVSS+SUMSS)
fgmodel_group.add_argument('--SUMSS', action='store_true') # SUMSS catalog
fgmodel_group.add_argument('--NVSS', action='store_true') # NVSS catalog
fgmodel_group.add_argument('--MSS', action='store_true') # Molonglo Sky Survey
fgmodel_group.add_argument('--GLEAM', action='store_true') # GLEAM catalog
fgmodel_group.add_argument('--PS', action='store_true') # Point sources
fgmodel_group.add_argument('--USM', action='store_true') # Uniform all-sky model
fgparm_group = parser.add_argument_group('Foreground Setup', 'Parameters describing foreground sky')
fgparm_group.add_argument('--flux-unit', help='Units of flux density [str, Default="Jy"]', type=str, dest='flux_unit', default='Jy', choices=['Jy','K'])
fgparm_group.add_argument('--spindex', help='Spectral index, ~ f^spindex [float, Default=0.0]', type=float, dest='spindex', default=0.0)
fgparm_group.add_argument('--spindex-rms', help='Spectral index rms [float, Default=0.0]', type=float, dest='spindex_rms', default=0.0)
fgparm_group.add_argument('--spindex-seed', help='Spectral index seed [float, Default=None]', type=int, dest='spindex_seed', default=None)
fgparm_group.add_argument('--nside', help='nside parameter for healpix map [int, Default=64]', type=int, dest='nside', default=64, choices=[64, 128])
fgcat_group = parser.add_argument_group('Catalog files', 'Catalog file locations')
fgcat_group.add_argument('--dsm-file-prefix', help='Diffuse sky model filename prefix [str]', type=str, dest='DSM_file_prefix', default='/data3/t_nithyanandan/project_MWA/foregrounds/gsmdata')
fgcat_group.add_argument('--sumss-file', help='SUMSS catalog file [str]', type=str, dest='SUMSS_file', default='/data3/t_nithyanandan/project_MWA/foregrounds/sumsscat.Mar-11-2008.txt')
fgcat_group.add_argument('--nvss-file', help='NVSS catalog file [str]', type=file, dest='NVSS_file', default='/data3/t_nithyanandan/project_MWA/foregrounds/NVSS_catalog.fits')
fgcat_group.add_argument('--GLEAM-file', help='GLEAM catalog file [str]', type=str, dest='GLEAM_file', default='/data3/t_nithyanandan/project_MWA/foregrounds/mwacs_b1_131016.csv')
fgcat_group.add_argument('--PS-file', help='Point source catalog file [str]', type=str, dest='PS_file', default='/data3/t_nithyanandan/project_MWA/foregrounds/PS_catalog.txt')
# parser.add_argument('--', help='', type=, dest='', required=True)
parser.add_argument('--plots', help='Create plots', action='store_true', dest='plots')
args = vars(parser.parse_args())
rootdir = '/data3/t_nithyanandan/'
project_MWA = args['project_MWA']
project_HERA = args['project_HERA']
project_beams = args['project_beams']
project_drift_scan = args['project_drift_scan']
project_global_EoR = args['project_global_EoR']
if project_MWA: project_dir = 'project_MWA'
if project_HERA: project_dir = 'project_HERA'
if project_beams: project_dir = 'project_beams'
if project_drift_scan: project_dir = 'project_drift_scan'
if project_global_EoR: project_dir = 'project_global_EoR'
telescope_id = args['telescope_id']
element_shape = args['antenna_element_shape']
element_size = args['antenna_element_size']
element_orientation = args['antenna_element_orientation']
element_ocoords = args['antenna_element_orientation_coords']
phased_array = args['phased_array']
phased_elements_file = args['phased_elements_file']
if (telescope_id == 'mwa') or (telescope_id == 'mwa_dipole'):
element_size = 0.74
element_shape = 'dipole'
if telescope_id == 'mwa': phased_array = True
elif telescope_id == 'vla':
element_size = 25.0
element_shape = 'dish'
elif telescope_id == 'gmrt':
element_size = 45.0
element_shape = 'dish'
elif telescope_id == 'hera':
element_size = 14.0
element_shape = 'dish'
elif telescope_id == 'custom':
if element_shape != 'delta':
if (element_shape is None) or (element_size is None):
raise ValueError('Both antenna element shape and size must be specified for the custom telescope type.')
elif element_size <= 0.0:
raise ValueError('Antenna element size must be positive.')
elif telescope_id == 'mwa_tools':
pass
else:
raise ValueError('telescope ID must be specified.')
if telescope_id == 'custom':
if element_shape == 'delta':
telescope_id = 'delta'
else:
telescope_id = '{0:.1f}m_{1:}'.format(element_size, element_shape)
if phased_array:
telescope_id = telescope_id + '_array'
telescope_str = telescope_id+'_'
if element_orientation is None:
if element_ocoords is not None:
if element_ocoords == 'altaz':
if (telescope_id == 'mwa') or (telescope_id == 'mwa_dipole') or (element_shape == 'dipole'):
element_orientation = NP.asarray([0.0, 90.0]).reshape(1,-1)
else:
element_orientation = NP.asarray([90.0, 270.0]).reshape(1,-1)
elif element_ocoords == 'dircos':
if (telescope_id == 'mwa') or (telescope_id == 'mwa_dipole') or (element_shape == 'dipole'):
element_orientation = NP.asarray([1.0, 0.0, 0.0]).reshape(1,-1)
else:
element_orientation = NP.asarray([0.0, 0.0, 1.0]).reshape(1,-1)
else:
raise ValueError('Invalid value specified antenna element orientation coordinate system.')
else:
if (telescope_id == 'mwa') or (telescope_id == 'mwa_dipole') or (element_shape == 'dipole'):
element_orientation = NP.asarray([0.0, 90.0]).reshape(1,-1)
else:
element_orientation = NP.asarray([90.0, 270.0]).reshape(1,-1)
element_ocoords = 'altaz'
else:
if element_ocoords is None:
raise ValueError('Antenna element orientation coordinate system must be specified to describe the specified antenna orientation.')
element_orientation = NP.asarray(element_orientation).reshape(1,-1)
if (element_orientation.size < 2) or (element_orientation.size > 3):
raise ValueError('Antenna element orientation must be a two- or three-element vector.')
elif (element_ocoords == 'altaz') and (element_orientation.size != 2):
raise ValueError('Antenna element orientation must be a two-element vector if using Alt-Az coordinates.')
ground_plane = args['ground_plane']
if ground_plane is None:
ground_plane_str = 'no_ground_'
else:
if ground_plane > 0.0:
ground_plane_str = '{0:.1f}m_ground_'.format(ground_plane)
else:
raise ValueError('Height of antenna element above ground plane must be positive.')
latitude = args['latitude']
latitude_str = 'lat_{0:.3f}_'.format(latitude)
telescope = {}
if telescope_id in ['mwa', 'vla', 'gmrt', 'hera', 'mwa_dipole', 'mwa_tools']:
telescope['id'] = telescope_id
telescope['shape'] = element_shape
telescope['size'] = element_size
telescope['orientation'] = element_orientation
telescope['ocoords'] = element_ocoords
telescope['groundplane'] = ground_plane
telescope['latitude'] = latitude
freq = args['freq']
freq_resolution = args['freq_resolution']
n_channels = args['n_channels']
nchan = n_channels
chans = (freq + (NP.arange(nchan) - 0.5 * nchan) * freq_resolution)/ 1e9 # in GHz
bw = n_channels * freq_resolution
bandpass_str = '{0:0d}x{1:.1f}_kHz'.format(nchan, freq_resolution/1e3)
if args['A_eff'] is None:
if (telescope['shape'] == 'dipole') or (telescope['shape'] == 'delta'):
A_eff = (0.5*FCNST.c/freq)**2
if (telescope_id == 'mwa') or phased_array:
A_eff *= 16
if telescope['shape'] == 'dish':
A_eff = NP.pi * (0.5*element_size)**2
else:
A_eff = args['A_eff']
obs_mode = args['obs_mode']
t_snap = args['t_snap']
t_obs = args['t_obs']
n_snaps = args['n_snaps']
snapshot_type_str = obs_mode
pointing_file = args['pointing_file']
if pointing_file is not None:
pointing_file = pointing_file[0]
pointing_info = args['pointing_info']
element_locs = None
if phased_array:
try:
element_locs = NP.loadtxt(phased_elements_file, skiprows=1, comments='#', usecols=(0,1,2))
except IOError:
raise IOError('Could not open the specified file for phased array of antenna elements.')
if telescope_id == 'mwa':
xlocs, ylocs = NP.meshgrid(1.1*NP.linspace(-1.5,1.5,4), 1.1*NP.linspace(1.5,-1.5,4))
element_locs = NP.hstack((xlocs.reshape(-1,1), ylocs.reshape(-1,1), NP.zeros(xlocs.size).reshape(-1,1)))
if pointing_file is not None:
pointing_init = None
pointing_info_from_file = NP.loadtxt(pointing_file, skiprows=2, comments='#', usecols=(1,2,3), delimiter=',')
obs_id = NP.loadtxt(pointing_file, skiprows=2, comments='#', usecols=(0,), delimiter=',', dtype=str)
if (telescope_id == 'mwa') or (telescope_id == 'mwa_tools') or (phased_array):
delays_str = NP.loadtxt(pointing_file, skiprows=2, comments='#', usecols=(4,), delimiter=',', dtype=str)
delays_list = [NP.fromstring(delaystr, dtype=float, sep=';', count=-1) for delaystr in delays_str]
delay_settings = NP.asarray(delays_list)
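        # MWA beamformer delays are specified in integer units of 435 ps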
delay_settings *= 435e-12
delays = NP.copy(delay_settings)
if n_snaps is None:
n_snaps = pointing_info_from_file.shape[0]
pointing_info_from_file = pointing_info_from_file[:min(n_snaps, pointing_info_from_file.shape[0]),:]
obs_id = obs_id[:min(n_snaps, pointing_info_from_file.shape[0])]
if (telescope_id == 'mwa') or (telescope_id == 'mwa_tools') or (phased_array):
delays = delay_settings[:min(n_snaps, pointing_info_from_file.shape[0]),:]
n_snaps = min(n_snaps, pointing_info_from_file.shape[0])
pointings_altaz = OPS.reverse(pointing_info_from_file[:,:2].reshape(-1,2), axis=1)
pointings_altaz_orig = OPS.reverse(pointing_info_from_file[:,:2].reshape(-1,2), axis=1)
lst = 15.0 * pointing_info_from_file[:,2]
lst_wrapped = lst + 0.0
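    # Wrap LST into the range (-180, 180] degrees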
lst_wrapped[lst_wrapped > 180.0] = lst_wrapped[lst_wrapped > 180.0] - 360.0
lst_edges = NP.concatenate((lst_wrapped, [lst_wrapped[-1]+lst_wrapped[-1]-lst_wrapped[-2]]))
if obs_mode is None:
obs_mode = 'custom'
if (obs_mode == 'dns') and beam_switch:
angle_diff = GEOM.sphdist(pointings_altaz[1:,1], pointings_altaz[1:,0], pointings_altaz[:-1,1], pointings_altaz[:-1,0])
angle_diff = NP.concatenate(([0.0], angle_diff))
shift_threshold = 1.0 # in degrees
# lst_edges = NP.concatenate(([lst_edges[0]], lst_edges[angle_diff > shift_threshold], [lst_edges[-1]]))
lst_wrapped = NP.concatenate(([lst_wrapped[0]], lst_wrapped[angle_diff > shift_threshold], [lst_wrapped[-1]]))
n_snaps = lst_wrapped.size - 1
pointings_altaz = NP.vstack((pointings_altaz[0,:].reshape(-1,2), pointings_altaz[angle_diff>shift_threshold,:].reshape(-1,2)))
obs_id = NP.concatenate(([obs_id[0]], obs_id[angle_diff>shift_threshold]))
if (telescope_id == 'mwa') or (telescope_id == 'mwa_tools') or (phased_array):
delays = NP.vstack((delay_settings[0,:], delay_settings[angle_diff>shift_threshold,:]))
obs_mode = 'custom'
lst_edges_left = lst_wrapped[:-1] + 0.0
lst_edges_right = NP.concatenate(([lst_edges[1]], lst_edges[NP.asarray(NP.where(angle_diff > shift_threshold)).ravel()+1]))
elif snapshots_range is not None:
snapshots_range[1] = snapshots_range[1] % n_snaps
if snapshots_range[0] > snapshots_range[1]:
            raise IndexError('min snapshot # must be <= max snapshot #')
lst_wrapped = lst_wrapped[snapshots_range[0]:snapshots_range[1]+2]
lst_edges = NP.copy(lst_wrapped)
pointings_altaz = pointings_altaz[snapshots_range[0]:snapshots_range[1]+1,:]
obs_id = obs_id[snapshots_range[0]:snapshots_range[1]+1]
if (telescope_id == 'mwa') or (telescope_id == 'mwa_tools') or (phased_array):
delays = delay_settings[snapshots_range[0]:snapshots_range[1]+1,:]
n_snaps = snapshots_range[1]-snapshots_range[0]+1
elif pick_snapshots is not None:
pick_snapshots = NP.asarray(pick_snapshots)
lst_begin = NP.asarray(lst_wrapped[pick_snapshots])
lst_end = NP.asarray(lst_wrapped[pick_snapshots+1])
t_snap = (lst_end - lst_begin) / 15.0 * 3.6e3
n_snaps = t_snap.size
lst = 0.5 * (lst_begin + lst_end)
pointings_altaz = pointings_altaz[pick_snapshots,:]
obs_id = obs_id[pick_snapshots]
if (telescope_id == 'mwa') or (phased_array) or (telescope_id == 'mwa_tools'):
delays = delay_settings[pick_snapshots,:]
obs_mode = 'custom'
if pick_snapshots is None:
if not beam_switch:
lst = 0.5*(lst_edges[1:]+lst_edges[:-1])
t_snap = (lst_edges[1:]-lst_edges[:-1]) / 15.0 * 3.6e3
else:
lst = 0.5*(lst_edges_left + lst_edges_right)
t_snap = (lst_edges_right - lst_edges_left) / 15.0 * 3.6e3
pointings_dircos = GEOM.altaz2dircos(pointings_altaz, units='degrees')
pointings_hadec = GEOM.altaz2hadec(pointings_altaz, latitude, units='degrees')
pointings_radec = NP.hstack(((lst-pointings_hadec[:,0]).reshape(-1,1), pointings_hadec[:,1].reshape(-1,1)))
pointings_radec[:,0] = pointings_radec[:,0] % 360.0
t_obs = NP.sum(t_snap)
elif pointing_info is not None:
pointing_init = NP.asarray(pointing_info[1:])
lst_init = pointing_info[0]
pointing_file = None
if t_snap is None:
raise NameError('t_snap must be provided for an automated observing run')
if (n_snaps is None) and (t_obs is None):
raise NameError('n_snaps or t_obs must be provided for an automated observing run')
elif (n_snaps is not None) and (t_obs is not None):
raise ValueError('Only one of n_snaps or t_obs must be provided for an automated observing run')
elif n_snaps is None:
n_snaps = int(t_obs/t_snap)
else:
t_obs = n_snaps * t_snap
t_snap = t_snap + NP.zeros(n_snaps)
lst = (lst_init + (t_snap/3.6e3) * NP.arange(n_snaps)) * 15.0 # in degrees
if obs_mode is None:
obs_mode = 'track'
if obs_mode == 'track':
pointings_radec = NP.repeat(NP.asarray(pointing_init).reshape(-1,2), n_snaps, axis=0)
else:
ha_init = lst_init * 15.0 - pointing_init[0]
pointings_radec = NP.hstack((NP.asarray(lst-pointing_init[0]).reshape(-1,1), pointing_init[1]+NP.zeros(n_snaps).reshape(-1,1)))
pointings_hadec = NP.hstack(((lst-pointings_radec[:,0]).reshape(-1,1), pointings_radec[:,1].reshape(-1,1)))
pointings_altaz = GEOM.hadec2altaz(pointings_hadec, latitude, units='degrees')
pointings_dircos = GEOM.altaz2dircos(pointings_altaz, units='degrees')
pointings_radec_orig = NP.copy(pointings_radec)
pointings_hadec_orig = NP.copy(pointings_hadec)
pointings_altaz_orig = NP.copy(pointings_altaz)
pointings_dircos_orig = NP.copy(pointings_dircos)
lst_wrapped = lst + 0.0
lst_wrapped[lst_wrapped > 180.0] = lst_wrapped[lst_wrapped > 180.0] - 360.0
lst_edges = NP.concatenate((lst_wrapped, [lst_wrapped[-1]+lst_wrapped[-1]-lst_wrapped[-2]]))
duration_str = ''
if obs_mode in ['track', 'drift']:
if (t_snap is not None) and (n_snaps is not None):
duration_str = '_{0:0d}x{1:.1f}s'.format(n_snaps, NP.asarray(t_snap)[0])
pointing_info = {}
pointing_info['pointing_center'] = pointings_altaz
pointing_info['pointing_coords'] = 'altaz'
pointing_info['lst'] = lst
if element_locs is not None:
telescope['element_locs'] = element_locs
plots = args['plots']
use_GSM = args['ASM']
use_DSM = args['DSM']
use_CSM = args['CSM']
use_NVSS = args['NVSS']
use_SUMSS = args['SUMSS']
use_MSS = args['MSS']
use_GLEAM = args['GLEAM']
use_PS = args['PS']
use_USM = args['USM']
fg_str = ''
nside = args['nside']
pixres = HP.nside2pixarea(nside)
flux_unit = args['flux_unit']
spindex_seed = args['spindex_seed']
spindex_rms = args['spindex_rms']
spindex_rms_str = ''
spindex_seed_str = ''
if spindex_rms > 0.0:
spindex_rms_str = '{0:.1f}'.format(spindex_rms)
else:
spindex_rms = 0.0
if spindex_seed is not None:
spindex_seed_str = '{0:0d}_'.format(spindex_seed)
if use_GSM:
fg_str = 'asm'
dsm_file = args['DSM_file_prefix']+'_{0:.1f}_MHz_nside_{1:0d}.fits'.format(freq*1e-6, nside)
hdulist = fits.open(dsm_file)
pixres = hdulist[0].header['PIXAREA']
dsm_table = hdulist[1].data
ra_deg_DSM = dsm_table['RA']
dec_deg_DSM = dsm_table['DEC']
temperatures = dsm_table['T_{0:.0f}'.format(freq/1e6)]
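    # Convert HEALPix brightness temperatures to flux densities per pixel via
    # the Rayleigh-Jeans law: S = 2 k_B T nu^2 / c^2 * Omega_pix (in Jy)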
fluxes_DSM = temperatures * (2.0 * FCNST.k * freq**2 / FCNST.c**2) * pixres / CNST.Jy
spindex = dsm_table['spindex'] + 2.0
freq_DSM = 0.185 # in GHz
freq_catalog = freq_DSM * 1e9 + NP.zeros(fluxes_DSM.size)
catlabel = NP.repeat('DSM', fluxes_DSM.size)
ra_deg = ra_deg_DSM + 0.0
dec_deg = dec_deg_DSM + 0.0
majax = NP.degrees(HP.nside2resol(nside)) * NP.ones(fluxes_DSM.size)
minax = NP.degrees(HP.nside2resol(nside)) * NP.ones(fluxes_DSM.size)
# majax = NP.degrees(NP.sqrt(HP.nside2pixarea(64)*4/NP.pi) * NP.ones(fluxes_DSM.size))
# minax = NP.degrees(NP.sqrt(HP.nside2pixarea(64)*4/NP.pi) * NP.ones(fluxes_DSM.size))
fluxes = fluxes_DSM + 0.0
freq_SUMSS = 0.843 # in GHz
SUMSS_file = args['SUMSS_file']
catalog = NP.loadtxt(SUMSS_file, usecols=(0,1,2,3,4,5,10,12,13,14,15,16))
ra_deg_SUMSS = 15.0 * (catalog[:,0] + catalog[:,1]/60.0 + catalog[:,2]/3.6e3)
dec_dd = NP.loadtxt(SUMSS_file, usecols=(3,), dtype="|S3")
sgn_dec_str = NP.asarray([dec_dd[i][0] for i in range(dec_dd.size)])
sgn_dec = 1.0*NP.ones(dec_dd.size)
sgn_dec[sgn_dec_str == '-'] = -1.0
dec_deg_SUMSS = sgn_dec * (NP.abs(catalog[:,3]) + catalog[:,4]/60.0 + catalog[:,5]/3.6e3)
fmajax = catalog[:,7]
fminax = catalog[:,8]
fpa = catalog[:,9]
dmajax = catalog[:,10]
dminax = catalog[:,11]
PS_ind = NP.logical_and(dmajax == 0.0, dminax == 0.0)
ra_deg_SUMSS = ra_deg_SUMSS[PS_ind]
dec_deg_SUMSS = dec_deg_SUMSS[PS_ind]
fint = catalog[PS_ind,6] * 1e-3
if spindex_seed is None:
spindex_SUMSS = -0.83 + spindex_rms * NP.random.randn(fint.size)
else:
NP.random.seed(spindex_seed)
spindex_SUMSS = -0.83 + spindex_rms * NP.random.randn(fint.size)
fmajax = fmajax[PS_ind]
fminax = fminax[PS_ind]
fpa = fpa[PS_ind]
dmajax = dmajax[PS_ind]
dminax = dminax[PS_ind]
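    # Keep only sources brighter than 10 Jy when extrapolated to the observing
    # frequency with the drawn spectral index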
bright_source_ind = fint >= 10.0 * (freq_SUMSS*1e9/freq)**spindex_SUMSS
ra_deg_SUMSS = ra_deg_SUMSS[bright_source_ind]
dec_deg_SUMSS = dec_deg_SUMSS[bright_source_ind]
fint = fint[bright_source_ind]
fmajax = fmajax[bright_source_ind]
fminax = fminax[bright_source_ind]
fpa = fpa[bright_source_ind]
dmajax = dmajax[bright_source_ind]
dminax = dminax[bright_source_ind]
spindex_SUMSS = spindex_SUMSS[bright_source_ind]
valid_ind = NP.logical_and(fmajax > 0.0, fminax > 0.0)
ra_deg_SUMSS = ra_deg_SUMSS[valid_ind]
dec_deg_SUMSS = dec_deg_SUMSS[valid_ind]
fint = fint[valid_ind]
fmajax = fmajax[valid_ind]
fminax = fminax[valid_ind]
fpa = fpa[valid_ind]
spindex_SUMSS = spindex_SUMSS[valid_ind]
freq_catalog = NP.concatenate((freq_catalog, freq_SUMSS*1e9 + NP.zeros(fint.size)))
catlabel = NP.concatenate((catlabel, NP.repeat('SUMSS', fint.size)))
ra_deg = NP.concatenate((ra_deg, ra_deg_SUMSS))
dec_deg = NP.concatenate((dec_deg, dec_deg_SUMSS))
spindex = NP.concatenate((spindex, spindex_SUMSS))
majax = NP.concatenate((majax, fmajax/3.6e3))
minax = NP.concatenate((minax, fminax/3.6e3))
fluxes = NP.concatenate((fluxes, fint))
nvss_file = args['NVSS_file']
freq_NVSS = 1.4 # in GHz
hdulist = fits.open(nvss_file)
ra_deg_NVSS = hdulist[1].data['RA(2000)']
dec_deg_NVSS = hdulist[1].data['DEC(2000)']
nvss_fpeak = hdulist[1].data['PEAK INT']
nvss_majax = hdulist[1].data['MAJOR AX']
nvss_minax = hdulist[1].data['MINOR AX']
hdulist.close()
if spindex_seed is None:
spindex_NVSS = -0.83 + spindex_rms * NP.random.randn(nvss_fpeak.size)
else:
NP.random.seed(2*spindex_seed)
spindex_NVSS = -0.83 + spindex_rms * NP.random.randn(nvss_fpeak.size)
not_in_SUMSS_ind = NP.logical_and(dec_deg_NVSS > -30.0, dec_deg_NVSS <= min(90.0, latitude+90.0))
bright_source_ind = nvss_fpeak >= 10.0 * (freq_NVSS*1e9/freq)**(spindex_NVSS)
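    # Treat as point sources those whose deconvolved major axis (after removing
    # the 45 arcsec NVSS beam in quadrature) is smaller than 14 arcsec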
PS_ind = NP.sqrt(nvss_majax**2-(0.75/60.0)**2) < 14.0/3.6e3
count_valid = NP.sum(NP.logical_and(NP.logical_and(not_in_SUMSS_ind, bright_source_ind), PS_ind))
nvss_fpeak = nvss_fpeak[NP.logical_and(NP.logical_and(not_in_SUMSS_ind, bright_source_ind), PS_ind)]
freq_catalog = NP.concatenate((freq_catalog, freq_NVSS*1e9 + NP.zeros(count_valid)))
catlabel = NP.concatenate((catlabel, NP.repeat('NVSS',count_valid)))
ra_deg = NP.concatenate((ra_deg, ra_deg_NVSS[NP.logical_and(NP.logical_and(not_in_SUMSS_ind, bright_source_ind), PS_ind)]))
dec_deg = NP.concatenate((dec_deg, dec_deg_NVSS[NP.logical_and(NP.logical_and(not_in_SUMSS_ind, bright_source_ind), PS_ind)]))
spindex = NP.concatenate((spindex, spindex_NVSS[NP.logical_and(NP.logical_and(not_in_SUMSS_ind, bright_source_ind), PS_ind)]))
majax = NP.concatenate((majax, nvss_majax[NP.logical_and(NP.logical_and(not_in_SUMSS_ind, bright_source_ind), PS_ind)]))
minax = NP.concatenate((minax, nvss_minax[NP.logical_and(NP.logical_and(not_in_SUMSS_ind, bright_source_ind), PS_ind)]))
fluxes = NP.concatenate((fluxes, nvss_fpeak))
spec_parms = {}
# spec_parms['name'] = NP.repeat('tanh', ra_deg.size)
spec_parms['name'] = NP.repeat('power-law', ra_deg.size)
spec_parms['power-law-index'] = spindex
# spec_parms['freq-ref'] = freq/1e9 + NP.zeros(ra_deg.size)
spec_parms['freq-ref'] = freq_catalog + NP.zeros(ra_deg.size)
spec_parms['flux-scale'] = fluxes
spec_parms['flux-offset'] = NP.zeros(ra_deg.size)
spec_parms['freq-width'] = NP.zeros(ra_deg.size)
skymod = SM.SkyModel(catlabel, chans*1e9, NP.hstack((ra_deg.reshape(-1,1), dec_deg.reshape(-1,1))), 'func', spec_parms=spec_parms, src_shape=NP.hstack((majax.reshape(-1,1),minax.reshape(-1,1),NP.zeros(fluxes.size).reshape(-1,1))), src_shape_units=['degree','degree','degree'])
elif use_DSM:
fg_str = 'dsm'
dsm_file = args['DSM_file_prefix']+'_{0:.1f}_MHz_nside_{1:0d}.fits'.format(freq*1e-6, nside)
hdulist = fits.open(dsm_file)
pixres = hdulist[0].header['PIXAREA']
dsm_table = hdulist[1].data
ra_deg_DSM = dsm_table['RA']
dec_deg_DSM = dsm_table['DEC']
temperatures = dsm_table['T_{0:.0f}'.format(freq/1e6)]
fluxes_DSM = temperatures * (2.0 * FCNST.k * freq**2 / FCNST.c**2) * pixres / CNST.Jy
spindex = dsm_table['spindex'] + 2.0
freq_DSM = 0.185 # in GHz
freq_catalog = freq_DSM * 1e9 + NP.zeros(fluxes_DSM.size)
catlabel = NP.repeat('DSM', fluxes_DSM.size)
ra_deg = ra_deg_DSM
dec_deg = dec_deg_DSM
majax = NP.degrees(HP.nside2resol(nside)) * NP.ones(fluxes_DSM.size)
minax = NP.degrees(HP.nside2resol(nside)) * NP.ones(fluxes_DSM.size)
# majax = NP.degrees(NP.sqrt(HP.nside2pixarea(64)*4/NP.pi) * NP.ones(fluxes_DSM.size))
# minax = NP.degrees(NP.sqrt(HP.nside2pixarea(64)*4/NP.pi) * NP.ones(fluxes_DSM.size))
fluxes = fluxes_DSM
hdulist.close()
spec_parms = {}
# spec_parms['name'] = NP.repeat('tanh', ra_deg.size)
spec_parms['name'] = NP.repeat('power-law', ra_deg.size)
spec_parms['power-law-index'] = spindex
# spec_parms['freq-ref'] = freq/1e9 + NP.zeros(ra_deg.size)
spec_parms['freq-ref'] = freq_catalog + NP.zeros(ra_deg.size)
spec_parms['flux-scale'] = fluxes
spec_parms['flux-offset'] = NP.zeros(ra_deg.size)
spec_parms['freq-width'] = NP.zeros(ra_deg.size)
skymod = SM.SkyModel(catlabel, chans*1e9, NP.hstack((ra_deg.reshape(-1,1), dec_deg.reshape(-1,1))), 'func', spec_parms=spec_parms, src_shape=NP.hstack((majax.reshape(-1,1),minax.reshape(-1,1),NP.zeros(fluxes.size).reshape(-1,1))), src_shape_units=['degree','degree','degree'])
elif use_USM:
fg_str = 'usm'
dsm_file = args['DSM_file_prefix']+'_{0:.1f}_MHz_nside_{1:0d}.fits'.format(freq*1e-6, nside)
hdulist = fits.open(dsm_file)
pixres = hdulist[0].header['PIXAREA']
dsm_table = hdulist[1].data
ra_deg = dsm_table['RA']
dec_deg = dsm_table['DEC']
temperatures = dsm_table['T_{0:.0f}'.format(freq/1e6)]
avg_temperature = NP.mean(temperatures)
    fluxes_USM = avg_temperature * (2.0 * FCNST.k * freq**2 / FCNST.c**2) * pixres / CNST.Jy * NP.ones(temperatures.size)
    fluxes = fluxes_USM + 0.0
spindex = NP.zeros(fluxes_USM.size)
freq_USM = 0.185 # in GHz
freq_catalog = freq_USM * 1e9 + NP.zeros(fluxes_USM.size)
catlabel = NP.repeat('USM', fluxes_USM.size)
majax = NP.degrees(HP.nside2resol(nside)) * NP.ones(fluxes_USM.size)
minax = NP.degrees(HP.nside2resol(nside)) * NP.ones(fluxes_USM.size)
hdulist.close()
spec_parms = {}
# spec_parms['name'] = NP.repeat('tanh', ra_deg.size)
spec_parms['name'] = NP.repeat('power-law', ra_deg.size)
spec_parms['power-law-index'] = spindex
# spec_parms['freq-ref'] = freq/1e9 + NP.zeros(ra_deg.size)
spec_parms['freq-ref'] = freq_catalog + NP.zeros(ra_deg.size)
spec_parms['flux-scale'] = fluxes
spec_parms['flux-offset'] = NP.zeros(ra_deg.size)
spec_parms['freq-width'] = NP.zeros(ra_deg.size)
skymod = SM.SkyModel(catlabel, chans*1e9, NP.hstack((ra_deg.reshape(-1,1), dec_deg.reshape(-1,1))), 'func', spec_parms=spec_parms, src_shape=NP.hstack((majax.reshape(-1,1),minax.reshape(-1,1),NP.zeros(fluxes.size).reshape(-1,1))), src_shape_units=['degree','degree','degree'])
elif use_CSM:
fg_str = 'csm'
freq_SUMSS = 0.843 # in GHz
SUMSS_file = args['SUMSS_file']
catalog = NP.loadtxt(SUMSS_file, usecols=(0,1,2,3,4,5,10,12,13,14,15,16))
ra_deg_SUMSS = 15.0 * (catalog[:,0] + catalog[:,1]/60.0 + catalog[:,2]/3.6e3)
dec_dd = NP.loadtxt(SUMSS_file, usecols=(3,), dtype="|S3")
sgn_dec_str = NP.asarray([dec_dd[i][0] for i in range(dec_dd.size)])
sgn_dec = 1.0*NP.ones(dec_dd.size)
sgn_dec[sgn_dec_str == '-'] = -1.0
dec_deg_SUMSS = sgn_dec * (NP.abs(catalog[:,3]) + catalog[:,4]/60.0 + catalog[:,5]/3.6e3)
fmajax = catalog[:,7]
fminax = catalog[:,8]
fpa = catalog[:,9]
dmajax = catalog[:,10]
dminax = catalog[:,11]
PS_ind = NP.logical_and(dmajax == 0.0, dminax == 0.0)
ra_deg_SUMSS = ra_deg_SUMSS[PS_ind]
dec_deg_SUMSS = dec_deg_SUMSS[PS_ind]
fint = catalog[PS_ind,6] * 1e-3
if spindex_seed is None:
spindex_SUMSS = -0.83 + spindex_rms * NP.random.randn(fint.size)
else:
NP.random.seed(spindex_seed)
spindex_SUMSS = -0.83 + spindex_rms * NP.random.randn(fint.size)
fmajax = fmajax[PS_ind]
fminax = fminax[PS_ind]
fpa = fpa[PS_ind]
dmajax = dmajax[PS_ind]
dminax = dminax[PS_ind]
bright_source_ind = fint >= 10.0 * (freq_SUMSS*1e9/freq)**spindex_SUMSS
ra_deg_SUMSS = ra_deg_SUMSS[bright_source_ind]
dec_deg_SUMSS = dec_deg_SUMSS[bright_source_ind]
fint = fint[bright_source_ind]
fmajax = fmajax[bright_source_ind]
fminax = fminax[bright_source_ind]
fpa = fpa[bright_source_ind]
dmajax = dmajax[bright_source_ind]
dminax = dminax[bright_source_ind]
spindex_SUMSS = spindex_SUMSS[bright_source_ind]
valid_ind = NP.logical_and(fmajax > 0.0, fminax > 0.0)
ra_deg_SUMSS = ra_deg_SUMSS[valid_ind]
dec_deg_SUMSS = dec_deg_SUMSS[valid_ind]
fint = fint[valid_ind]
fmajax = fmajax[valid_ind]
fminax = fminax[valid_ind]
fpa = fpa[valid_ind]
spindex_SUMSS = spindex_SUMSS[valid_ind]
freq_catalog = freq_SUMSS*1e9 + NP.zeros(fint.size)
catlabel = NP.repeat('SUMSS', fint.size)
ra_deg = ra_deg_SUMSS + 0.0
dec_deg = dec_deg_SUMSS
spindex = spindex_SUMSS
majax = fmajax/3.6e3
minax = fminax/3.6e3
fluxes = fint + 0.0
nvss_file = args['NVSS_file']
freq_NVSS = 1.4 # in GHz
hdulist = fits.open(nvss_file)
ra_deg_NVSS = hdulist[1].data['RA(2000)']
dec_deg_NVSS = hdulist[1].data['DEC(2000)']
nvss_fpeak = hdulist[1].data['PEAK INT']
nvss_majax = hdulist[1].data['MAJOR AX']
nvss_minax = hdulist[1].data['MINOR AX']
hdulist.close()
if spindex_seed is None:
spindex_NVSS = -0.83 + spindex_rms * NP.random.randn(nvss_fpeak.size)
else:
NP.random.seed(2*spindex_seed)
spindex_NVSS = -0.83 + spindex_rms * NP.random.randn(nvss_fpeak.size)
not_in_SUMSS_ind = NP.logical_and(dec_deg_NVSS > -30.0, dec_deg_NVSS <= min(90.0, latitude+90.0))
bright_source_ind = nvss_fpeak >= 10.0 * (freq_NVSS*1e9/freq)**(spindex_NVSS)
PS_ind = NP.sqrt(nvss_majax**2-(0.75/60.0)**2) < 14.0/3.6e3
count_valid = NP.sum(NP.logical_and(NP.logical_and(not_in_SUMSS_ind, bright_source_ind), PS_ind))
nvss_fpeak = nvss_fpeak[NP.logical_and(NP.logical_and(not_in_SUMSS_ind, bright_source_ind), PS_ind)]
freq_catalog = NP.concatenate((freq_catalog, freq_NVSS*1e9 + NP.zeros(count_valid)))
catlabel = NP.concatenate((catlabel, NP.repeat('NVSS',count_valid)))
ra_deg = NP.concatenate((ra_deg, ra_deg_NVSS[NP.logical_and(NP.logical_and(not_in_SUMSS_ind, bright_source_ind), PS_ind)]))
dec_deg = NP.concatenate((dec_deg, dec_deg_NVSS[NP.logical_and(NP.logical_and(not_in_SUMSS_ind, bright_source_ind), PS_ind)]))
spindex = NP.concatenate((spindex, spindex_NVSS[NP.logical_and(NP.logical_and(not_in_SUMSS_ind, bright_source_ind), PS_ind)]))
majax = NP.concatenate((majax, nvss_majax[NP.logical_and(NP.logical_and(not_in_SUMSS_ind, bright_source_ind), PS_ind)]))
minax = NP.concatenate((minax, nvss_minax[NP.logical_and(NP.logical_and(not_in_SUMSS_ind, bright_source_ind), PS_ind)]))
fluxes = NP.concatenate((fluxes, nvss_fpeak))
spec_parms = {}
# spec_parms['name'] = NP.repeat('tanh', ra_deg.size)
spec_parms['name'] = NP.repeat('power-law', ra_deg.size)
spec_parms['power-law-index'] = spindex
# spec_parms['freq-ref'] = freq/1e9 + NP.zeros(ra_deg.size)
spec_parms['freq-ref'] = freq_catalog + NP.zeros(ra_deg.size)
spec_parms['flux-scale'] = fluxes
spec_parms['flux-offset'] = NP.zeros(ra_deg.size)
spec_parms['freq-width'] = NP.zeros(ra_deg.size)
skymod = SM.SkyModel(catlabel, chans*1e9, NP.hstack((ra_deg.reshape(-1,1), dec_deg.reshape(-1,1))), 'func', spec_parms=spec_parms, src_shape=NP.hstack((majax.reshape(-1,1),minax.reshape(-1,1),NP.zeros(fluxes.size).reshape(-1,1))), src_shape_units=['degree','degree','degree'])
elif use_SUMSS:
SUMSS_file = args['SUMSS_file']
catalog = NP.loadtxt(SUMSS_file, usecols=(0,1,2,3,4,5,10,12,13,14,15,16))
ra_deg = 15.0 * (catalog[:,0] + catalog[:,1]/60.0 + catalog[:,2]/3.6e3)
dec_dd = NP.loadtxt(SUMSS_file, usecols=(3,), dtype="|S3")
sgn_dec_str = NP.asarray([dec_dd[i][0] for i in range(dec_dd.size)])
sgn_dec = 1.0*NP.ones(dec_dd.size)
sgn_dec[sgn_dec_str == '-'] = -1.0
dec_deg = sgn_dec * (NP.abs(catalog[:,3]) + catalog[:,4]/60.0 + catalog[:,5]/3.6e3)
fmajax = catalog[:,7]
fminax = catalog[:,8]
fpa = catalog[:,9]
dmajax = catalog[:,10]
dminax = catalog[:,11]
PS_ind = NP.logical_and(dmajax == 0.0, dminax == 0.0)
ra_deg = ra_deg[PS_ind]
dec_deg = dec_deg[PS_ind]
fint = catalog[PS_ind,6] * 1e-3
fmajax = fmajax[PS_ind]
fminax = fminax[PS_ind]
fpa = fpa[PS_ind]
dmajax = dmajax[PS_ind]
dminax = dminax[PS_ind]
bright_source_ind = fint >= 1.0
ra_deg = ra_deg[bright_source_ind]
dec_deg = dec_deg[bright_source_ind]
fint = fint[bright_source_ind]
fmajax = fmajax[bright_source_ind]
fminax = fminax[bright_source_ind]
fpa = fpa[bright_source_ind]
dmajax = dmajax[bright_source_ind]
dminax = dminax[bright_source_ind]
valid_ind = NP.logical_and(fmajax > 0.0, fminax > 0.0)
ra_deg = ra_deg[valid_ind]
dec_deg = dec_deg[valid_ind]
fint = fint[valid_ind]
fmajax = fmajax[valid_ind]
fminax = fminax[valid_ind]
fpa = fpa[valid_ind]
freq_catalog = 0.843 # in GHz
if spindex_seed is None:
spindex = -0.83 + spindex_rms * NP.random.randn(fint.size)
else:
NP.random.seed(spindex_seed)
spindex = -0.83 + spindex_rms * NP.random.randn(fint.size)
fg_str = 'sumss'
spec_parms = {}
# spec_parms['name'] = NP.repeat('tanh', ra_deg.size)
spec_parms['name'] = NP.repeat('power-law', ra_deg.size)
spec_parms['power-law-index'] = spindex
# spec_parms['freq-ref'] = freq/1e9 + NP.zeros(ra_deg.size)
spec_parms['freq-ref'] = freq_catalog + NP.zeros(ra_deg.size)
    spec_parms['flux-scale'] = fint
    spec_parms['flux-offset'] = NP.zeros(ra_deg.size)
import numpy as np
class ChoiceSetData:
"""
Data structure to store choice sets. See Table 1 in the Supplementary Material of the article.
"""
def __init__(self, num_features, max_choice_set_size, max_number_of_choice_sets=np.inf):
self.num_features = num_features
self.data = np.zeros((0, self.num_features + 2))
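        # Each row of `data` is [choice-set id, one-hot chosen flag, feature vector]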
self.choice_set_counter = 0.
self.current_number_of_choice_sets = 0.
self.max_choice_set_size = max_choice_set_size
self.max_number_of_choice_sets = max_number_of_choice_sets
def push(self, features, choice_index, delete_oldest=False):
choice_set_len = len(features)
one_hot_choice = np.zeros((choice_set_len, 1))
one_hot_choice[choice_index] = 1.
choice_set_index = np.full(shape=(choice_set_len, 1), fill_value=self.choice_set_counter)
self.data = np.vstack((self.data, np.hstack((choice_set_index, one_hot_choice, features))))
self.choice_set_counter += 1.
self.current_number_of_choice_sets += 1.
if delete_oldest or self.current_number_of_choice_sets > self.max_number_of_choice_sets:
first_choice_set_index = self.data[0, 0]
            for ix in range(self.max_choice_set_size + 1):
                if self.data[ix, 0] != first_choice_set_index:
                    break
            else:
                raise ValueError("Choice set size should not exceed " + str(self.max_choice_set_size))
self.data = self.data[ix:]
if self.current_number_of_choice_sets > 0:
self.current_number_of_choice_sets -= 1.
def sample(self):
# Currently just returns a copy of the entire data set.
return self.data.copy()
def delete_data(self):
        self.data = np.zeros((0, self.num_features + 2))
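# ---------------------------------------------------------------------------
# Minimal usage sketch (illustrative only, not part of the original module):
# store two choice sets of three alternatives with two features each, then
# retrieve the flat array. Each row is [choice-set id, chosen flag, features].
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    dataset = ChoiceSetData(num_features=2, max_choice_set_size=3)
    for _ in range(2):
        features = np.random.rand(3, 2)  # one row per alternative
        dataset.push(features, choice_index=1)
    print(dataset.sample().shape)  # -> (6, 4)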
"""DAQmx acquisition module (:mod:`pymanip.daq.DAQmx`)
======================================================
The :mod:`fluidlab.daq.daqmx` module is a simple functional front-end to
the third-party :mod:`PyDAQmx` module. It mainly provides two simple one-liner functions:
- :func:`~fluidlab.daq.daqmx.read_analog`
- :func:`~fluidlab.daq.daqmx.write_analog`
The :mod:`pymanip.daq.DAQmx` module is essentially based on its fluidlab counterpart, with
other choices for the default values of arguments, and an additionnal `autoset` feature for
the :func:`read_analog` function.
It also adds a convenience function for printing the list of DAQmx devices, used by the
pymanip CLI interface.
A discovery function is also added, :func:`~pymanip.daq.daqmx.print_connected_devices`, based
on the dedicated :class:`~pymanip.daq.daqmx.DAQDevice` class, which is used by the
`list_daq` sub-command on pymanip command line.
.. autoclass:: DAQDevice
:members:
:private-members:
.. autofunction:: print_connected_devices
.. autofunction:: read_analog
"""
import ctypes
import numpy as np
from fluiddyn.util.terminal_colors import cprint
import fluidlab.daq.daqmx as daqmx
from fluidlab.daq.daqmx import write_analog
class DAQDevice:
"""This class is represents a DAQmx device.
:param device_name: name of the DAQmx device, e.g. "Dev1"
:type device_name: str
It mostly implement a number of property getters, which are wrappers to the
:mod:`PyDAQmx` low-level functions.
In addition, it has a static method, :meth:`~pymanip.daq.DAQmx.DAQDevice.list_connected_devices`
to discover currently connected devices.
"""
@staticmethod
def list_connected_devices():
"""This static method discovers the connected devices.
:return: connected devices
:rtype: list of :class:`pymanip.daq.DAQmx.DAQDevice` objects
"""
try:
from PyDAQmx import DAQmxGetSystemInfoAttribute, DAQmx_Sys_DevNames
bufsize = 1024
buf = ctypes.create_string_buffer(bufsize)
DAQmxGetSystemInfoAttribute(DAQmx_Sys_DevNames, ctypes.byref(buf), bufsize)
return [DAQDevice(s.strip().decode("ascii")) for s in buf.value.split(b",")]
except ImportError:
print("Cannot list connected devices.")
return None
pass
def __init__(self, device_name):
"""Constructor method
"""
self.device_name = device_name
@property
def product_category(self):
"""Device product category (str)"""
try:
from PyDAQmx import (
DAQmxGetDevProductCategory,
DAQmx_Val_MSeriesDAQ,
DAQmx_Val_XSeriesDAQ,
DAQmx_Val_ESeriesDAQ,
DAQmx_Val_SSeriesDAQ,
DAQmx_Val_BSeriesDAQ,
DAQmx_Val_SCSeriesDAQ,
DAQmx_Val_USBDAQ,
DAQmx_Val_AOSeries,
DAQmx_Val_DigitalIO,
DAQmx_Val_TIOSeries,
DAQmx_Val_DynamicSignalAcquisition,
DAQmx_Val_Switches,
DAQmx_Val_CompactDAQChassis,
DAQmx_Val_CSeriesModule,
DAQmx_Val_SCXIModule,
DAQmx_Val_SCCConnectorBlock,
DAQmx_Val_SCCModule,
DAQmx_Val_NIELVIS,
DAQmx_Val_NetworkDAQ,
DAQmx_Val_SCExpress,
DAQmx_Val_Unknown,
)
category = ctypes.c_int32(DAQmx_Val_Unknown)
DAQmxGetDevProductCategory(self.device_name, ctypes.byref(category))
return {
DAQmx_Val_MSeriesDAQ: "M Series DAQ",
DAQmx_Val_XSeriesDAQ: "X Series DAQ",
DAQmx_Val_ESeriesDAQ: "E Series DAQ",
DAQmx_Val_SSeriesDAQ: "S Series DAQ",
DAQmx_Val_BSeriesDAQ: "B Series DAQ",
DAQmx_Val_SCSeriesDAQ: "SC Series DAQ",
DAQmx_Val_USBDAQ: "USB DAQ",
DAQmx_Val_AOSeries: "AO Series",
DAQmx_Val_DigitalIO: "Digital I/O",
DAQmx_Val_TIOSeries: "TIO Series",
DAQmx_Val_DynamicSignalAcquisition: "Dynamic Signal Acquisition",
DAQmx_Val_Switches: "Switches",
DAQmx_Val_CompactDAQChassis: "CompactDAQ chassis",
DAQmx_Val_CSeriesModule: "C Series I/O module",
DAQmx_Val_SCXIModule: "SCXI module",
DAQmx_Val_SCCConnectorBlock: "SCC Connector Block",
DAQmx_Val_SCCModule: "SCC Module",
DAQmx_Val_NIELVIS: "NI ELVIS",
DAQmx_Val_NetworkDAQ: "Network DAQ",
DAQmx_Val_SCExpress: "SC Express",
DAQmx_Val_Unknown: "Unknown by DAQmx",
}.get(category.value, "Unknown")
except ImportError:
return None
@property
def product_type(self):
"""Device product type"""
from PyDAQmx import DAQmxGetDevProductType
bufsize = 1024
buf = ctypes.create_string_buffer(bufsize)
DAQmxGetDevProductType(self.device_name, buf, bufsize)
return buf.value.decode("ascii")
@property
def product_num(self):
"""Device product num"""
from PyDAQmx import DAQmxGetDevProductNum
num = ctypes.c_uint32(0)
DAQmxGetDevProductNum(self.device_name, ctypes.byref(num))
return num.value
@property
def ai_chans(self):
"""List of the analog input channels on the device"""
from PyDAQmx import DAQmxGetDevAIPhysicalChans
bufsize = 2048
buf = ctypes.create_string_buffer(bufsize)
DAQmxGetDevAIPhysicalChans(self.device_name, buf, bufsize)
chans = [s.strip().decode("ascii") for s in buf.value.split(b",")]
if chans == [""]:
chans = []
return chans
@property
def ao_chans(self):
"""List of the analog output channels on the device"""
from PyDAQmx import DAQmxGetDevAOPhysicalChans
bufsize = 2048
buf = ctypes.create_string_buffer(bufsize)
DAQmxGetDevAOPhysicalChans(self.device_name, buf, bufsize)
chans = [s.strip().decode("ascii") for s in buf.value.split(b",")]
if chans == [""]:
chans = []
return chans
@property
def di_lines(self):
"""List of digital input lines on the device"""
from PyDAQmx import DAQmxGetDevDILines
bufsize = 2048
buf = ctypes.create_string_buffer(bufsize)
DAQmxGetDevDILines(self.device_name, buf, bufsize)
chans = [s.strip().decode("ascii") for s in buf.value.split(b",")]
if chans == [""]:
chans = []
return chans
@property
def di_ports(self):
"""List of digital input ports on the device"""
from PyDAQmx import DAQmxGetDevDIPorts
bufsize = 2048
buf = ctypes.create_string_buffer(bufsize)
DAQmxGetDevDIPorts(self.device_name, buf, bufsize)
chans = [s.strip().decode("ascii") for s in buf.value.split(b",")]
if chans == [""]:
chans = []
return chans
@property
def do_lines(self):
"""List of digital output lines on the device"""
from PyDAQmx import DAQmxGetDevDOLines
bufsize = 2048
buf = ctypes.create_string_buffer(bufsize)
DAQmxGetDevDOLines(self.device_name, buf, bufsize)
chans = [s.strip().decode("ascii") for s in buf.value.split(b",")]
if chans == [""]:
chans = []
return chans
@property
def do_ports(self):
"""List of digital output ports on the device"""
from PyDAQmx import DAQmxGetDevDOPorts
bufsize = 2048
buf = ctypes.create_string_buffer(bufsize)
DAQmxGetDevDOPorts(self.device_name, buf, bufsize)
chans = [s.strip().decode("ascii") for s in buf.value.split(b",")]
if chans == [""]:
chans = []
return chans
@property
def bus_type(self):
"""Bus type connection to the device"""
from PyDAQmx import (
DAQmxGetDevBusType,
DAQmx_Val_PCI,
DAQmx_Val_PXI,
DAQmx_Val_SCXI,
DAQmx_Val_PCCard,
DAQmx_Val_USB,
DAQmx_Val_Unknown,
)
t = ctypes.c_int32(0)
DAQmxGetDevBusType(self.device_name, ctypes.byref(t))
return {
DAQmx_Val_PCI: "PCI",
DAQmx_Val_PXI: "PXI",
DAQmx_Val_SCXI: "SCXI",
DAQmx_Val_PCCard: "PCCard",
DAQmx_Val_USB: "USB",
DAQmx_Val_Unknown: "DAQmx unknown",
}.get(t.value, "Unknown")
@property
def pci_busnum(self):
"""PCI Bus number"""
from PyDAQmx import DAQmxGetDevPCIBusNum
num = ctypes.c_uint32(0)
DAQmxGetDevPCIBusNum(self.device_name, ctypes.byref(num))
return num.value
@property
def pci_devnum(self):
"""PCI Device number"""
from PyDAQmx import DAQmxGetDevPCIDevNum
num = ctypes.c_uint32(0)
DAQmxGetDevPCIDevNum(self.device_name, ctypes.byref(num))
return num.value
@property
def pxi_chassisnum(self):
"""PXI Chassis number"""
from PyDAQmx import DAQmxGetDevPXIChassisNum
num = ctypes.c_uint32(0)
DAQmxGetDevPXIChassisNum(self.device_name, ctypes.byref(num))
return num.value
@property
def pxi_slotnum(self):
"""PXI Slot number"""
from PyDAQmx import DAQmxGetDevPXISlotNum
num = ctypes.c_uint32(0)
DAQmxGetDevPXISlotNum(self.device_name, ctypes.byref(num))
return num.value
@property
def location(self):
"""Description of the location (PCI bus and number, or PXI chassis and slot)"""
from PyDAQmx import DAQError
bus = self.bus_type
if bus == "PCI":
desc = "PCI {:d}, {:d}".format(self.pci_busnum, self.pci_devnum)
elif bus == "PXI":
try:
desc = "PXI chassis {:d} slot {:d}".format(
self.pxi_chassisnum, self.pxi_slotnum
)
except DAQError:
                # If the chassis is not identified, DAQmx cannot return this
                # information and raises an exception instead.
desc = "PXI (unidentified)"
pass
else:
desc = bus
return desc
def print_connected_devices():
"""This function prints the list of connected DAQmx devices.
"""
for device in DAQDevice.list_connected_devices():
print(
"**",
device.device_name,
"(" + device.product_type + ") on",
device.location,
"**",
)
print("Analog input :", device.ai_chans)
print("Analog output :", device.ao_chans)
# Here read_analog defaults to verbose=True (unlike in fluidlab), and an
# "autoset" feature is added when volt_min and volt_max are None.
def read_analog(
resource_names,
terminal_config,
volt_min=None,
volt_max=None,
samples_per_chan=1,
sample_rate=1,
coupling_types="DC",
output_filename=None,
verbose=True,
):
"""This function reads signal from analog input.
:param resources_names: names from MAX (Dev1/ai0)
:type resource_names: str, or list
:param terminal_config: "Diff", "RSE", "NRSE"
:type terminal_config: str, or list
:param volt_min: minimum voltage
:type volt_min: float, or list, optional
:param volt_max: maximum voltage
:type volt_max: float, or list, optional
:param samples_per_chan: Number of samples to be read per channel
:type samples_per_chan: int
:param sample_rate: Clock frequency
:type sample_rate: float
:param coupling_type: Coupling of the channels ("DC", "AC", "GND")
:type coupling_type: str, or list
:param output_filename: If not None, file to write the acquired data
:type output_filename: str, optional
:param verbose: Verbosity level. Defaults to True (unlike in Fluidlab)
:type verbose: bool, optional
If the channel range is not specified, a 5.0 seconds samples will first be acquired
to determine appropriate channel range (autoset feature).
"""
    # The type checks below are not very pythonic, but they are necessary
    # because PyDAQmx is a bridge to the C API and makes implicit type
    # assumptions.
# Ensure that samples_per_chan is integer
if not isinstance(samples_per_chan, int):
samples_per_chan = int(samples_per_chan)
# Ensure resource_names is str or list of str
if isinstance(resource_names, str):
num_channels = 1
resource_names = str(resource_names)
else:
num_channels = len(resource_names)
resource_names = [str(r) for r in resource_names]
# If no range is provided, take a 5s sample
if volt_min is None or volt_max is None:
print("Sampling 5s data to determine channel range")
if num_channels == 1:
volt_min = -10.0
volt_max = 10.0
else:
volt_min = [-10.0] * num_channels
volt_max = [10.0] * num_channels
data = daqmx.read_analog(
resource_names,
terminal_config,
volt_min,
volt_max,
samples_per_chan=50000,
sample_rate=10e3,
coupling_types=coupling_types,
verbose=False,
)
if num_channels == 1:
volt_range = np.max(np.abs(data)) * 1.25
volt_min = -volt_range
volt_max = volt_range
else:
for chan in range(num_channels):
volt_range = np.max(np.abs(data[chan])) * 1.25
volt_min[chan] = -volt_range
volt_max[chan] = volt_range
print(
"Channel", chan, "min max:", np.min(data[chan]), np.max(data[chan])
)
# Run fluidlab daqmx.read_analog with verbose=True by default
data = daqmx.read_analog(
resource_names,
terminal_config,
volt_min,
volt_max,
samples_per_chan,
sample_rate,
coupling_types,
output_filename,
verbose,
)
# If verbose, check that voltage range has not been reached and issue a warning otherwise
if verbose:
if num_channels == 1:
channel_range = np.max([np.abs(volt_min), np.abs(volt_max)])
if np.max(np.abs(data)) >= channel_range:
cprint.red("WARNING: channel range too small!")
else:
for chan in range(num_channels):
try:
channel_range = np.max(
                        [np.abs(volt_min[chan]), np.abs(volt_max[chan])]
                    )
from typing import Tuple
import pandas as pd
import numpy as np
from sklearn.datasets import fetch_kddcup99
def load_train_test_data(small: bool, train_normal_only: bool) -> Tuple[Tuple[pd.DataFrame, np.ndarray], Tuple[pd.DataFrame, np.ndarray]]:
X, y = fetch_kddcup99(subset='SA', percent10=small, return_X_y=True)
columns = ["duration", "protocol_type", "service", "flag", "src_bytes", "dst_bytes", "land", "wrong_fragment",
"urgent", "hot", "num_failed_logins", "logged_in", "num_compromised", "root_shell", "su_attempted",
"num_root", "num_file_creations", "num_shells", "num_access_files", "num_outbound_cmds", "is_host_login",
"is_guest_login", "count", "srv_count", "serror_rate", "srv_serror_rate", "rerror_rate", "srv_rerror_rate",
"same_srv_rate", "diff_srv_rate", "srv_diff_host_rate", "dst_host_count", "dst_host_srv_count", "dst_host_same_srv_rate",
"dst_host_diff_srv_rate", "dst_host_same_src_port_rate", "dst_host_srv_diff_host_rate", "dst_host_serror_rate",
"dst_host_srv_serror_rate", "dst_host_rerror_rate", "dst_host_srv_rerror_rate"]
categorical_columns = ["protocol_type", "flag", "service"]
features = pd.DataFrame(X, columns=columns)
target = (y == b'normal.') * 1
for categorical_column in categorical_columns:
features[categorical_column] = features[categorical_column].astype('category')
    number_anomalies = np.sum(1 - target)
import pytest
import numpy as np
from hera_sim.visibilities import VisCPU
from hera_sim import io
from hera_sim.beams import PerturbedPolyBeam, PolyBeam, efield_to_pstokes
from hera_sim.defaults import defaults
from astropy_healpix import healpy as hp
from vis_cpu import HAVE_GPU
np.seterr(invalid="ignore")
def antennas():
locs = [[308, 253, 0.49], [8, 299, 0.22]]
ants = {}
for i in range(len(locs)):
ants[i] = (locs[i][0], locs[i][1], locs[i][2])
return ants
def sources():
sources = np.array([[128, -29, 4, 0]])
ra_dec = sources[:, :2]
flux = sources[:, 2]
spectral_index = sources[:, 3]
ra_dec = np.deg2rad(ra_dec)
return ra_dec, flux, spectral_index
def perturbed_beams(rotation, nants, polarized=False, power_beam=False):
"""
Elliptical PerturbedPolyBeam.
This will also test PolyBeam, from which PerturbedPolybeam is derived.
"""
cfg_beam = dict(
ref_freq=1.0e8,
spectral_index=-0.6975,
mainlobe_width=0.3,
beam_coeffs=[
0.29778665,
-0.44821433,
0.27338272,
-0.10030698,
-0.01195859,
0.06063853,
-0.04593295,
0.0107879,
0.01390283,
-0.01881641,
-0.00177106,
0.01265177,
-0.00568299,
-0.00333975,
0.00452368,
0.00151808,
-0.00593812,
0.00351559,
],
)
beams = [
PerturbedPolyBeam(
perturb_coeffs=np.array(
[
-0.20437532,
-0.4864951,
-0.18577532,
-0.38053642,
0.08897764,
0.06367166,
0.29634711,
1.40277112,
]
),
mainlobe_scale=1.0,
xstretch=1.1,
ystretch=0.8,
rotation=rotation,
polarized=polarized,
**cfg_beam
)
for i in range(nants)
]
# Specify power beam if requested
if power_beam:
for i in range(len(beams)):
beams[i].beam_type = "power"
return beams
class DummyMPIComm:
"""
Exists so the MPI interface can be tested, but not run.
"""
def Get_size(self):
return 2 # Pretend there are 2 processes
def create_polarized_polybeam():
"""
Create a polarized PolyBeam.
The parameters of the beam were copied from the HERA Memo nยฐ81:
https://reionization.org/wp-content/uploads/2013/03/HERA081_HERA_Primary_Beam_Chebyshev_Apr2020.pdf.
"""
# parameters
spectral_index = -0.6975
beam_coeffs = [
2.35088101e-01,
-4.20162599e-01,
2.99189140e-01,
-1.54189057e-01,
3.38651457e-02,
3.46936067e-02,
-4.98838130e-02,
3.23054464e-02,
-7.56006552e-03,
-7.24620596e-03,
7.99563166e-03,
-2.78125602e-03,
-8.19945835e-04,
1.13791191e-03,
-1.24301372e-04,
-3.74808752e-04,
1.93997376e-04,
-1.72012040e-05,
]
ref_freq = 1e8
# instantiate the PolyBeam object
cfg_pol_beam = dict(
ref_freq=ref_freq,
spectral_index=spectral_index,
beam_coeffs=beam_coeffs,
polarized=True,
)
pol_PolyBeam = PolyBeam(**cfg_pol_beam)
return pol_PolyBeam
def evaluate_polybeam(polybeam):
"""
Evaluate a PolyBeam at hard-coded az and za angles, and frequencies.
"""
n_pix_lm = 500
L = np.linspace(-1, 1, n_pix_lm, dtype=np.float64)
L, m = np.meshgrid(L, L)
L = L.flatten()
m = m.flatten()
lsqr = L ** 2 + m ** 2
n = np.where(lsqr < 1, np.sqrt(1 - lsqr), 0)
# Generate azimuth and zenith angle.
az = -np.arctan2(m, L)
za = np.pi / 2 - np.arcsin(n)
freqs = np.array(
[
1.00e08,
1.04e08,
1.08e08,
1.12e08,
1.16e08,
1.20e08,
1.24e08,
1.28e08,
1.32e08,
1.36e08,
1.40e08,
1.44e08,
1.48e08,
1.52e08,
1.56e08,
1.60e08,
1.64e08,
1.68e08,
1.72e08,
1.76e08,
1.80e08,
1.84e08,
1.88e08,
1.92e08,
1.96e08,
2.00e08,
]
)
eval_beam = polybeam.interp(az, za, freqs)
# Check that calling the interp() method with wrongly sized
# coordinates results in an error
with pytest.raises(ValueError):
_ = polybeam.interp(az, za[:-1], freqs)
return (eval_beam[0], az, za, freqs.size)
def convert_to_pStokes(eval_beam, az, za, Nfreq):
"""
Convert an E-field to its pseudo-Stokes power beam.
"""
nside_test = 64
pixel_indices_test = hp.ang2pix(nside_test, za, az)
npix_test = hp.nside2npix(nside_test)
pol_efield_beam_plot = np.zeros((2, 1, 2, Nfreq, npix_test), dtype=np.complex128)
pol_efield_beam_plot[:, :, :, :, pixel_indices_test] = eval_beam[:, :, :, :]
eval_beam_pStokes = efield_to_pstokes(pol_efield_beam_plot, npix_test, Nfreq)
return eval_beam_pStokes
def run_sim(
beam_rotation,
use_pixel_beams=True,
use_gpu=False,
use_pol=False,
use_mpi=False,
pol="xx",
power_beam=False,
):
"""
Run a simple sim using a rotated elliptic polybeam.
"""
defaults.set("h1c")
pol_array = ["xx"]
if use_pol:
pol_array = np.array(
["yx", "xy", "yy", "xx"]
) # yx, xy, yy, xx = ne, en, nn, ee
ants = antennas()
# Observing parameters in a UVData object.
uvdata = io.empty_uvdata(
Nfreqs=1,
start_freq=100000000.0,
channel_width=97000.0,
start_time=2458902.4,
integration_time=40,
Ntimes=1,
array_layout=ants,
polarization_array=pol_array,
)
freqs = np.unique(uvdata.freq_array)
ra_dec, flux, spectral_index = sources()
# calculate source fluxes for hera_sim
flux = (freqs[:, np.newaxis] / freqs[0]) ** spectral_index * flux
simulator = VisCPU(
uvdata=uvdata,
beams=perturbed_beams(
beam_rotation,
len(ants.keys()),
polarized=use_pol,
power_beam=power_beam,
),
beam_ids=list(ants.keys()),
sky_freqs=freqs,
point_source_pos=ra_dec,
point_source_flux=flux,
use_pixel_beams=use_pixel_beams,
use_gpu=use_gpu,
polarized=use_pol,
mpi_comm=DummyMPIComm() if use_mpi else None,
bm_pix=200,
precision=2,
)
simulator.simulate()
auto = np.abs(simulator.uvdata.get_data(0, 0, pol)[0][0])
return auto
def test_perturbed_polybeam():
# Rotate the beam from 0 to 180 degrees, and check that autocorrelation
# of antenna 0 has approximately the same value when pixel beams are
# used, and when pixel beams not used (direct beam calculation).
rvals = np.linspace(0.0, 180.0, 31, dtype=int)
rotations = np.zeros(rvals.size)
pix_results = np.zeros(rvals.size)
calc_results = np.zeros(rvals.size)
for i, r in enumerate(rvals):
pix_result = run_sim(r, use_pixel_beams=True)
# Direct beam calculation - no pixel beams
calc_result = run_sim(r, use_pixel_beams=False)
rotations[i] = r
pix_results[i] = pix_result
calc_results[i] = calc_result
# Check that the maximum difference between pixel beams/direct calculation
# cases is no more than 5%. This shows the direct calculation of the beam
# tracks the pixel beam interpolation. They won't be exactly the same.
np.testing.assert_allclose(pix_results, calc_results, rtol=0.05)
# Check that rotations 0 and 180 produce the same values.
assert pix_results[0] == pytest.approx(pix_results[-1], abs=1e-8)
assert calc_results[0] == pytest.approx(calc_results[-1], abs=1e-8)
# Check that the values are not all the same. Shouldn't be, due to
# elliptic beam.
    assert np.min(pix_results) != pytest.approx(np.max(pix_results))
import numpy as np
import scipy.special as ss
import scipy.signal as ss2
import scipy
from numpy import abs, sin, cos, real, exp, pi, sqrt
def psi_s(z, x, beta):
"""
2D longitudinal potential
Eq. (23) from Ref[1] with no constant factor (e*beta**2/2/rho**2).
Ref[1]: <NAME> and <NAME>, PRAB 23, 014402 (2020).
Note that 'x' here corresponds to 'chi = x / rho' in the paper.
"""
#try:
out = (cos(2 * alpha(z, x, beta)) - 1 / (1+x)) / (
kappa(z, x, beta) - beta * (1+x) * sin(2*alpha(z, x, beta)))
#except ZeroDivisionError:
# out = 0
# print(f"Oops! ZeroDivisionError at (z,x)= ({z:5.2f},{x:5.2f}). Returning 0.")
return np.nan_to_num(out)
def psi_x_where_x_equals_zero(z, dx, beta):
"""
Evaluate psi_x close to x = 0
This is a rough approximation of the singularity across x = 0
"""
return (psi_x(z, -dx/2, beta) + psi_x(z, dx/2, beta))/2
@np.vectorize
def ss_ellipf(phi, m):
y = ss.ellipkinc(phi, m)
# y = np.float(y)
return y
@np.vectorize
def ss_ellipe(phi, m):
y = ss.ellipeinc(phi, m)
# y = np.float(y)
return y
def psi_x(z, x, beta):
"""
Eq.(24) from Ref[1] with argument zeta=0 and no constant factor e*beta**2/2/rho**2.
Note that 'x' here corresponds to 'chi = x/rho',
and 'z' here corresponds to 'xi = z/2/rho' in the paper.
"""
# z = np.float(z)
# x = np.float(x)
kap = kappa(z, x, beta)
alp = alpha(z, x, beta)
arg2 = -4 * (1+x) / x**2
try:
T1 = (1/abs(x)/(1 + x) * ((2 + 2*x + x**2) * ss.ellipkinc(alp, arg2)- x**2 * ss.ellipeinc(alp, arg2)))
D = kap**2 - beta**2 * (1 + x)**2 * sin(2*alp)**2
T2 = ((kap**2 - 2*beta** 2 * (1+x)**2 + beta**2 * (1+x) * (2 + 2*x + x**2) * cos(2*alp))/ beta/ (1+x)/ D)
T3 = -kap * sin(2 * alp) / D
T4 = kap * beta ** 2 * (1 + x) * sin(2 * alp) * cos(2 * alp) / D
    T5 = 1 / abs(x)
#!/usr/bin/env python
# Copyright (c) 2018, DIANA-HEP
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import histbook.expr
import numpy
INDEXTYPE = numpy.int32
library = {}
library["numpy.add"] = numpy.add
library["numpy.subtract"] = numpy.subtract
library["numpy.multiply"] = numpy.multiply
library["numpy.true_divide"] = numpy.true_divide
library["numpy.equal"] = numpy.equal
library["numpy.not_equal"] = numpy.not_equal
library["numpy.less"] = numpy.less
library["numpy.less_equal"] = numpy.less_equal
try:
library["numpy.isin"] = numpy.isin
except AttributeError:
library["numpy.isin"] = numpy.in1d
library["numpy.logical_or"] = numpy.logical_or
library["numpy.logical_and"] = numpy.logical_and
library["numpy.logical_not"] = numpy.logical_not
library["abs"] = numpy.absolute; library["fabs"] = numpy.absolute
library["arccos"] = numpy.arccos
library["arccosh"] = numpy.arccosh
library["arcsin"] = numpy.arcsin
library["arcsinh"] = numpy.arcsinh
library["arctan2"] = numpy.arctan2
library["arctan"] = numpy.arctan
library["arctanh"] = numpy.arctanh
library["ceil"] = numpy.ceil
library["conj"] = numpy.conjugate
library["copysign"] = numpy.copysign
library["cos"] = numpy.cos
library["cosh"] = numpy.cosh
library["deg2rad"] = numpy.deg2rad
library["exp2"] = numpy.exp2
library["exp"] = numpy.exp
library["expm1"] = numpy.expm1
library["floor"] = numpy.floor
library["fmod"] = numpy.fmod
try:
library["heaviside"] = lambda x, middle=0.5: numpy.heaviside(x, middle)
except AttributeError:
def heaviside(x, middle=0.5):
out = numpy.where(x < 0, 0.0, 1.0)
out[x == 0] = middle
return out
library["heaviside"] = heaviside
library["hypot"] = numpy.hypot
library["isfinite"] = numpy.isfinite
library["isinf"] = numpy.isinf
library["isnan"] = numpy.isnan
library["log10"] = numpy.log10
library["log1p"] = numpy.log1p
library["log2"] = numpy.log2
library["logaddexp2"] = numpy.logaddexp2
library["logaddexp"] = numpy.logaddexp
library["log"] = numpy.log
library["max"] = numpy.maximum; library["fmax"] = numpy.maximum
library["min"] = numpy.minimum; library["fmin"] = numpy.minimum
library["pow"] = numpy.power
library["rad2deg"] = numpy.rad2deg
library["mod"] = numpy.remainder; library["fmod"] = numpy.remainder
library["rint"] = numpy.rint
library["sign"] = numpy.sign
library["sinh"] = numpy.sinh
library["sin"] = numpy.sin
library["sqrt"] = numpy.sqrt
library["tanh"] = numpy.tanh
library["tan"] = numpy.tan
library["trunc"] = numpy.trunc
def vectorized_erf(complement):
a1 = 0.254829592
a2 = -0.284496736
a3 = 1.421413741
a4 = -1.453152027
a5 = 1.061405429
p = 0.3275911
def erf(values):
sign = numpy.where(values < 0, -1.0, 1.0)
values = numpy.absolute(values)
t = 1.0 / (values * p + 1)
y = 1.0 - ((((a5*t + a4)*t + a3)*t + a2)*t + a1)*t * numpy.exp(numpy.negative(numpy.square(values)))
if complement:
return 1.0 - sign * y
else:
return sign * y
return erf
library["erf"] = vectorized_erf(False)
library["erfc"] = vectorized_erf(True)
def vectorized_gamma(logarithm):
cofs = (76.18009173, -86.50532033, 24.01409822, -1.231739516e0, 0.120858003e-2, -0.536382e-5)
stp = 2.50662827465
def lgamma(values):
x = values - 1.0
tmp = x + 5.5
tmp = (x + 0.5)*numpy.log(tmp) - tmp
ser = numpy.ones(len(values), dtype=numpy.dtype(numpy.float64))
for cof in cofs:
numpy.add(x, 1.0, x)
numpy.add(ser, cof/x, ser)
return tmp + numpy.log(stp*ser)
if logarithm:
return lgamma
else:
return lambda values: numpy.exp(lgamma(values))
library["gamma"] = vectorized_gamma(False)
lgamma = library["lgamma"] = vectorized_gamma(True)
library["factorial"] = lambda values: numpy.round(numpy.exp(lgamma(numpy.round(values) + 1)))
library["where"] = lambda condition, yes, no: numpy.where(condition, yes, no)
def histbook_groupby(values):
uniques, inverse = numpy.unique(values, return_inverse=True)
inverse = inverse.astype(INDEXTYPE)
return uniques, inverse
library["histbook.groupby"] = histbook_groupby
def histbook_groupbin(nanflow, closedlow):
def groupbin(values, binwidth, origin):
if origin == 0:
indexes = numpy.multiply(values, 1.0/float(binwidth))
else:
indexes = values - float(origin)
numpy.multiply(indexes, 1.0/float(binwidth), indexes)
if closedlow:
numpy.floor(indexes, indexes)
else:
numpy.ceil(indexes, indexes)
numpy.subtract(indexes, 1, indexes)
numpy.multiply(indexes, float(binwidth), indexes)
if origin != 0:
numpy.add(indexes, float(origin), indexes)
ok = numpy.isnan(indexes)
numpy.logical_not(ok, ok)
if ok.all():
uniques, inverse = numpy.unique(indexes, return_inverse=True)
inverse = inverse.astype(INDEXTYPE)
else:
uniques, okinverse = numpy.unique(indexes[ok], return_inverse=True)
inverse = numpy.ones(indexes.shape, dtype=INDEXTYPE)
if nanflow:
numpy.multiply(inverse, len(uniques), inverse)
inverse[ok] = okinverse
uniques = list(uniques) + ["NaN"]
else:
numpy.multiply(inverse, -1, inverse)
inverse[ok] = okinverse
return uniques, inverse
return groupbin
library["histbook.groupbinNL"] = histbook_groupbin(True, True)
library["histbook.groupbinNH"] = histbook_groupbin(True, False)
library["histbook.groupbin_L"] = histbook_groupbin(False, True)
library["histbook.groupbin_H"] = histbook_groupbin(False, False)
def histbook_bin(underflow, overflow, nanflow, closedlow):
if nanflow:
nanindex = (1 if underflow else 0) + (1 if overflow else 0)
else:
nanindex = numpy.ma.masked
if underflow:
shift = 1
else:
shift = 0
def bin(values, numbins, low, high):
indexes = values - float(low)
numpy.multiply(indexes, float(numbins) / float(high - low), indexes)
if closedlow:
numpy.floor(indexes, indexes)
if shift != 0:
numpy.add(indexes, shift, indexes)
else:
numpy.ceil(indexes, indexes)
numpy.add(indexes, shift - 1, indexes)
out = numpy.ma.array(indexes, dtype=INDEXTYPE)
        with numpy.errstate(invalid="ignore"):
# -*- coding: utf-8 -*-
# BCDI: tools for pre(post)-processing Bragg coherent X-ray diffraction imaging data
# (c) 07/2017-06/2019 : CNRS UMR 7344 IM2NP
# (c) 07/2019-05/2021 : DESY PHOTON SCIENCE
# (c) 06/2021-present : DESY CFEL
# authors:
# <NAME>, <EMAIL>
import numpy as np
import unittest
import bcdi.utils.image_registration as reg
def run_tests(test_class):
suite = unittest.TestLoader().loadTestsFromTestCase(test_class)
runner = unittest.TextTestRunner(verbosity=2)
return runner.run(suite)
class TestCalcNewPositions(unittest.TestCase):
"""
Tests on the function calc_new_positions.
def calc_new_positions(old_positions: list, shift: Sequence[float]) -> np.ndarray
"""
def setUp(self):
self.shapes = ((3,), (4,), (2, 2), (1, 2, 2))
self.shifts = ((1,), (-0.2,), (2.3, -0.1), (1.1, 0.3, 0))
def test_ndim_no_shift(self):
correct = (
np.array([[-2], [-1], [0]]),
np.array([[-2], [-1], [0], [1]]),
np.array(
[
[-1, -1],
[-1, 0],
[0, -1],
[0, 0],
]
),
np.array([[-1, -1, -1], [-1, -1, 0], [-1, 0, -1], [-1, 0, 0]]),
)
for index, shape in enumerate(self.shapes):
with self.subTest():
old_pos = [np.arange(-val // 2, val // 2) for val in shape]
new_pos = reg.calc_new_positions(old_pos, shift=(0,) * len(shape))
self.assertTrue(np.allclose(new_pos, correct[index]))
def test_ndim_with_shift(self):
correct = (
np.array([[-3], [-2], [-1]]),
np.array([[-1.8], [-0.8], [0.2], [1.2]]),
np.array(
[
[-3.3, -0.9],
[-3.3, 0.1],
[-2.3, -0.9],
[-2.3, 0.1],
]
),
np.array(
[[-2.1, -1.3, -1], [-2.1, -1.3, 0], [-2.1, -0.3, -1], [-2.1, -0.3, 0]]
),
)
for index, shape in enumerate(self.shapes):
with self.subTest():
old_pos = [np.arange(-val // 2, val // 2) for val in shape]
new_pos = reg.calc_new_positions(old_pos, shift=self.shifts[index])
self.assertTrue(np.allclose(new_pos, correct[index]))
def test_empty_positions(self):
with self.assertRaises(ValueError):
reg.calc_new_positions([], shift=[])
def test_wrong_shift_length(self):
with self.assertRaises(ValueError):
reg.calc_new_positions([np.arange(-2, 1)], shift=[1, 2])
def test_wrong_shift_none(self):
with self.assertRaises(ValueError):
reg.calc_new_positions([np.arange(-2, 1)], shift=None)
class TestGetShift2D(unittest.TestCase):
"""
Tests on the function image_registration.get_shift for 2D arrays.
def get_shift(
reference_array: np.ndarray,
shifted_array: np.ndarray,
shift_method: str = "modulus",
precision: int = 1000,
support_threshold: Union[None, float] = None,
verbose: bool = True,
) -> Sequence[float]:
"""
def setUp(self):
# executed before each test
reference_array = np.zeros((5, 5), dtype=complex)
reference_array[1:4, 1:4] = 1 + 1j
shifted_array = np.zeros((5, 5), dtype=complex)
shifted_array[2:, 2:] = 1 + 1j
self.reference_array = reference_array
self.shifted_array = shifted_array
def test_method_modulus(self):
shifts = reg.get_shift(
reference_array=self.reference_array,
shifted_array=self.shifted_array,
shift_method="modulus",
)
self.assertTrue(
np.allclose(
np.asarray(shifts),
np.array([-1.0, -1.0]),
rtol=1e-09,
atol=1e-09,
)
)
def test_method_raw(self):
shifts = reg.get_shift(
reference_array=self.reference_array,
shifted_array=self.shifted_array,
shift_method="raw",
)
self.assertTrue(
np.allclose(
np.asarray(shifts),
np.array([-1.0, -1.0]),
rtol=1e-09,
atol=1e-09,
)
)
def test_method_support(self):
shifts = reg.get_shift(
reference_array=self.reference_array,
shifted_array=self.shifted_array,
shift_method="support",
support_threshold=0.5,
)
self.assertTrue(
np.allclose(
np.asarray(shifts),
np.array([-1.0, -1.0]),
rtol=1e-09,
atol=1e-09,
)
)
def test_method_support_none(self):
with self.assertRaises(ValueError):
reg.get_shift(
reference_array=self.reference_array,
shifted_array=self.shifted_array,
shift_method="support",
)
def test_precision_float(self):
with self.assertRaises(TypeError):
reg.get_shift(
reference_array=self.reference_array,
shifted_array=self.shifted_array,
precision=2.3,
)
def test_precision_null(self):
with self.assertRaises(ValueError):
reg.get_shift(
reference_array=self.reference_array,
shifted_array=self.shifted_array,
precision=0,
)
def test_precision_None(self):
with self.assertRaises(ValueError):
reg.get_shift(
reference_array=self.reference_array,
shifted_array=self.shifted_array,
precision=None,
)
def test_precision_min_allowed(self):
shifts = reg.get_shift(
reference_array=self.reference_array,
shifted_array=self.shifted_array,
precision=1,
)
self.assertTrue(
np.allclose(
np.asarray(shifts),
np.array([-1.0, -1.0]),
rtol=1e-09,
atol=1e-09,
)
)
def test_wrong_method_name(self):
with self.assertRaises(ValueError):
reg.get_shift(
reference_array=self.reference_array,
shifted_array=self.shifted_array,
shift_method="wrong",
)
class TestGetShift3D(unittest.TestCase):
"""
Tests on the function image_registration.get_shift for 3D arrays.
def get_shift(
reference_array: np.ndarray,
shifted_array: np.ndarray,
shift_method: str = "modulus",
precision: int = 1000,
support_threshold: Union[None, float] = None,
verbose: bool = True,
) -> Sequence[float]:
"""
def setUp(self):
# executed before each test
reference_array = np.zeros((5, 5, 5), dtype=complex)
reference_array[1:4, 1:4, 1:4] = 1 + 1j
shifted_array = np.zeros((5, 5, 5), dtype=complex)
shifted_array[2:, 2:, 0:3] = 1 + 1j
self.reference_array = reference_array
self.shifted_array = shifted_array
def test_method_modulus(self):
shifts = reg.get_shift(
reference_array=self.reference_array,
shifted_array=self.shifted_array,
shift_method="modulus",
)
self.assertTrue(
np.allclose(
np.asarray(shifts),
np.array([-1.0, -1.0, 1.0]),
rtol=1e-09,
atol=1e-09,
)
)
def test_method_raw(self):
shifts = reg.get_shift(
reference_array=self.reference_array,
shifted_array=self.shifted_array,
shift_method="raw",
)
self.assertTrue(
np.allclose(
np.asarray(shifts),
np.array([-1.0, -1.0, 1.0]),
rtol=1e-09,
atol=1e-09,
)
)
def test_method_support(self):
shifts = reg.get_shift(
reference_array=self.reference_array,
shifted_array=self.shifted_array,
shift_method="support",
support_threshold=0.5,
)
self.assertTrue(
np.allclose(
                np.asarray(shifts),
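                # A minimal completion of this assertion, mirroring the other 3D tests in
                # this class (assumption):
                np.array([-1.0, -1.0, 1.0]),
                rtol=1e-09,
                atol=1e-09,
            )
        )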
#!/usr/bin/env python3
# manual
"""
This script allows you to manually control the simulator or Duckiebot
using the keyboard arrows.
"""
import sys
import argparse
import pyglet
from pyglet.window import key
import numpy as np
import gym
import gym_duckietown
from gym_duckietown.envs import DuckietownEnv
from gym_duckietown.wrappers import UndistortWrapper
####
from PIL import Image
import cv2
import math
from apriltag import Detector
import transformations as tf
####
# from experiments.utils import save_img
parser = argparse.ArgumentParser()
parser.add_argument('--env-name', default='Duckietown')
parser.add_argument('--map-name', default='udem1')
parser.add_argument('--distortion', default=False, action='store_true')
parser.add_argument('--draw-curve', action='store_true', help='draw the lane following curve')
parser.add_argument('--draw-bbox', action='store_true', help='draw collision detection bounding boxes')
parser.add_argument('--domain-rand', action='store_true', help='enable domain randomization')
parser.add_argument('--frame-skip', default=1, type=int, help='number of frames to skip')
parser.add_argument('--seed', default=1, type=int, help='seed')
args = parser.parse_args()
if args.env_name and args.env_name.find('Duckietown') != -1:
env = DuckietownEnv(
seed = args.seed,
map_name = args.map_name,
draw_curve = args.draw_curve,
draw_bbox = args.draw_bbox,
domain_rand = args.domain_rand,
frame_skip = args.frame_skip,
distortion = args.distortion,
)
else:
env = gym.make(args.env_name)
#env.reset()
env.render()
@env.unwrapped.window.event
def on_key_press(symbol, modifiers):
"""
This handler processes keyboard commands that
control the simulation
"""
if symbol == key.BACKSPACE or symbol == key.SLASH:
print('RESET')
env.reset()
env.render()
elif symbol == key.PAGEUP:
env.unwrapped.cam_angle[0] = 0
elif symbol == key.ESCAPE:
env.close()
sys.exit(0)
# Take a screenshot
# UNCOMMENT IF NEEDED - Skimage dependency
# elif symbol == key.RETURN:
# print('saving screenshot')
# img = env.render('rgb_array')
# save_img('screenshot.png', img)
# Register a keyboard handler
key_handler = key.KeyStateHandler()
env.unwrapped.window.push_handlers(key_handler)
def _draw_pose(overlay, camera_params, tag_size, pose, z_sign=1):
opoints = np.array([
-1, -1, 0,
1, -1, 0,
1, 1, 0,
-1, 1, 0,
-1, -1, -2*z_sign,
1, -1, -2*z_sign,
1, 1, -2*z_sign,
-1, 1, -2*z_sign,
]).reshape(-1, 1, 3) * 0.5*tag_size
edges = np.array([
0, 1,
1, 2,
2, 3,
3, 0,
0, 4,
1, 5,
2, 6,
3, 7,
4, 5,
5, 6,
6, 7,
7, 4
]).reshape(-1, 2)
fx, fy, cx, cy = camera_params
    # homogeneous intrinsic (camera) matrix
    K = np.array([fx, 0, cx, 0, fy, cy, 0, 0, 1]).reshape(3, 3)
    print("camera intrinsic matrix ", K)
rvec, _ = cv2.Rodrigues(pose[:3,:3])
tvec = pose[:3, 3]
dcoeffs = np.zeros(5)
ipoints, _ = cv2.projectPoints(opoints, rvec, tvec, K, dcoeffs)
ipoints = np.round(ipoints).astype(int)
ipoints = [tuple(pt) for pt in ipoints.reshape(-1, 2)]
for i, j in edges:
cv2.line(overlay, ipoints[i], ipoints[j], (0, 255, 0), 1, 16)
def global_pose(matrix,x_ob,y_ob,angle): # matrix is the apriltag pose; x_ob and y_ob are the apriltag's x and y coordinates
tag_size = 0.18
tile_size = 0.585
    T_a = tf.translation_matrix([
        -x_ob, -tag_size*3/4, y_ob])  # already multiplied by tile_size
R_a = tf.euler_matrix(0,angle,0)
T_m_a = tf.concatenate_matrices(T_a, R_a)
    # tag pose with respect to the robot
    T_r_a = np.dot(matrix, tf.euler_matrix(0, np.pi, 0))
    # tag pose with respect to the map
T_a_r = np.linalg.inv(T_r_a) # T_r_a-1
T_m_r = np.dot(T_m_a, T_a_r)
return T_m_r
def angle2(q,angle,euler):
return q-(angle-yaw(euler))
def l1(x,y):
    return math.sqrt(x**2 + y**2)
def yaw(euler_angles):
return euler_angles[2]
def dist(matrix):
return np.linalg.norm([matrix[0][3],matrix[1][3],matrix[2][3]])
def update(dt):
"""
This function is called at every frame to handle
movement/stepping and redrawing
"""
action = np.array([0.0, 0.0])
if key_handler[key.UP]:
action = np.array([0.44, 0.0])
if key_handler[key.DOWN]:
action = np.array([-0.44, 0])
if key_handler[key.LEFT]:
        action = np.array([0.35, +1])
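    # A plausible continuation sketch for the remaining key handlers, mirroring the
    # standard gym-duckietown manual-control script (assumption, not authoritative):
    #     if key_handler[key.RIGHT]:
    #         action = np.array([0.35, -1])
    #     if key_handler[key.SPACE]:
    #         action = np.array([0.0, 0.0])
    #     obs, reward, done, info = env.step(action)
    #     env.render()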
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import numpy as np
import unittest
import sys
from tests.op_test import OpTest
import paddle
import paddle.fluid as fluid
paddle.enable_static()
SEED = 2021
def ComputeGrad(x, y, out, axis):
grad = 1 / out.size
shape_x = x.shape
shape_y = y.shape
shape_out = out.shape
reduce_axes_x = []
reduce_axes_y = []
if shape_x != shape_out:
        if len(shape_x) < len(shape_out):
src_axis = axis
else:
src_axis = 0
for ax in range(len(shape_out)):
if (ax < src_axis or ax >= src_axis + len(shape_x)) or (
shape_out[ax] > 1 and shape_x[ax - src_axis] == 1):
reduce_axes_x.append(ax)
if shape_y != shape_out:
if len(shape_y) < len(shape_out):
src_axis = axis
else:
src_axis = 0
for ax in range(len(shape_out)):
if (ax < src_axis or ax >= src_axis + len(shape_y)) or (
shape_out[ax] > 1 and shape_y[ax - src_axis] == 1):
reduce_axes_y.append(ax)
if len(reduce_axes_x) > 0:
for i in reduce_axes_x:
x = np.expand_dims(x, axis=i)
if len(reduce_axes_y) > 0:
for i in reduce_axes_y:
y = np.expand_dims(y, axis=i)
mask = np.sign(np.subtract(x, y))
dx = np.maximum(mask, 0) * grad
dy = np.abs(np.minimum(mask, 0) * grad)
if len(reduce_axes_x) > 0:
for i, element in enumerate(reduce_axes_x):
dx = np.add.reduce(dx, element - i)
if len(reduce_axes_y) > 0:
for i, element in enumerate(reduce_axes_y):
dy = np.add.reduce(dy, element - i)
return dx, dy
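# Worked example of the gradient convention in ComputeGrad (illustrative, not from the
# original test): for x = [1, 3], y = [2, 2] the output is out = [2, 3] and
# grad = 1 / out.size = 0.5, so mask = sign(x - y) = [-1, 1], giving dx = [0.0, 0.5]
# and dy = [0.5, 0.0]; the gradient flows only to the input that attained the maximum.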
class TestElementwiseMaxOp(OpTest):
def setUp(self):
self.set_npu()
self.op_type = "elementwise_max"
self.place = paddle.CustomPlace('ascend', 0)
self.init_dtype()
self.init_input_output()
self.init_axis()
self.inputs = {
'X': OpTest.np_dtype_to_fluid_dtype(self.x),
'Y': OpTest.np_dtype_to_fluid_dtype(self.y)
}
self.attrs = {'axis': self.axis}
self.outputs = {'Out': self.out}
def set_npu(self):
self.__class__.use_custom_device = True
def init_dtype(self):
self.dtype = np.float32
def init_input_output(self):
self.x = np.random.uniform(0.1, 1, [13, 17]).astype(self.dtype)
sgn = np.random.choice([-1, 1], [13, 17]).astype(self.dtype)
self.y = self.x + sgn * np.random.uniform(0.1, 1,
[13, 17]).astype(self.dtype)
self.out = np.maximum(self.x, self.y)
def init_axis(self):
self.axis = -1
def test_check_output(self):
self.check_output_with_place(self.place)
def test_check_grad_normal(self):
self.check_grad_with_place(self.place, ['X', 'Y'], 'Out')
def test_check_grad_ingore_x(self):
self.check_grad_with_place(
self.place, ['Y'], 'Out', no_grad_set=set("X"))
def test_check_grad_ingore_y(self):
self.check_grad_with_place(
self.place, ['X'], 'Out', no_grad_set=set("Y"))
class TestElementwiseMaxOp_int32(TestElementwiseMaxOp):
def init_dtype(self):
self.dtype = np.int32
# CTest does not support check grad for int32.
def test_check_grad_normal(self):
pass
def test_check_grad_ingore_x(self):
pass
def test_check_grad_ingore_y(self):
pass
# TODO(windstamp)
@unittest.skipIf(True, "Right now failed maybe caused by other reasons")
class TestElementwiseMaxOp_scalar(TestElementwiseMaxOp):
def init_input_output(self):
self.x = np.random.random_integers(-5, 5, [2, 3, 20]).astype(self.dtype)
self.y = np.array([0.5]).astype(self.dtype)
self.out = np.maximum(self.x, self.y)
class TestElementwiseMaxOp_vector(TestElementwiseMaxOp):
def init_input_output(self):
self.x = np.random.random((100, )).astype(self.dtype)
sgn = np.random.choice([-1, 1], (100, )).astype(self.dtype)
self.y = self.x + sgn * np.random.uniform(0.1, 1,
(100, )).astype(self.dtype)
self.out = np.maximum(self.x, self.y)
class TestElementwiseMaxOp_broadcast_0(TestElementwiseMaxOp):
def init_input_output(self):
self.x = np.random.uniform(0.5, 1, (100, 5, 2)).astype(self.dtype)
sgn = np.random.choice([-1, 1], (100, )).astype(self.dtype)
self.y = self.x[:, 0, 0] + sgn * \
np.random.uniform(1, 2, (100, )).astype(self.dtype)
self.out = np.maximum(self.x, self.y.reshape(100, 1, 1))
def init_axis(self):
self.axis = 0
class TestElementwiseMaxOp_broadcast_1(TestElementwiseMaxOp):
def init_input_output(self):
self.x = np.random.uniform(0.5, 1, (2, 100, 3)).astype(self.dtype)
sgn = np.random.choice([-1, 1], (100, )).astype(self.dtype)
self.y = self.x[0, :, 0] + sgn * \
np.random.uniform(1, 2, (100, )).astype(self.dtype)
self.out = np.maximum(self.x, self.y.reshape(1, 100, 1))
def init_axis(self):
self.axis = 1
def test_check_grad_ingore_x(self):
_, dy = ComputeGrad(self.x, self.y, self.out, self.axis)
self.check_grad_with_place(
self.place, ['Y'],
'Out',
no_grad_set=set("X"),
user_defined_grads=[dy])
def test_check_grad_ingore_y(self):
dx, _ = ComputeGrad(self.x, self.y, self.out, self.axis)
self.check_grad_with_place(
self.place, ['X'],
'Out',
no_grad_set=set("Y"),
user_defined_grads=[dx])
class TestElementwiseMaxOp_broadcast_2(TestElementwiseMaxOp):
def init_input_output(self):
self.x = np.random.uniform(0.5, 1, (2, 3, 100)).astype(self.dtype)
sgn = np.random.choice([-1, 1], (100, )).astype(self.dtype)
self.y = self.x[0, 0, :] + sgn * \
np.random.uniform(1, 2, (100, )).astype(self.dtype)
self.out = np.maximum(self.x, self.y.reshape(1, 1, 100))
def test_check_grad_normal(self):
dx, dy = ComputeGrad(self.x, self.y, self.out, self.axis)
self.check_grad_with_place(
self.place, ['X', 'Y'], 'Out', user_defined_grads=[dx, dy])
def test_check_grad_ingore_x(self):
_, dy = ComputeGrad(self.x, self.y, self.out, self.axis)
self.check_grad_with_place(
self.place, ['Y'],
'Out',
no_grad_set=set("X"),
user_defined_grads=[dy])
def test_check_grad_ingore_y(self):
dx, _ = ComputeGrad(self.x, self.y, self.out, self.axis)
self.check_grad_with_place(
self.place, ['X'],
'Out',
no_grad_set=set("Y"),
user_defined_grads=[dx])
class TestElementwiseMaxOp_broadcast_3(TestElementwiseMaxOp):
def init_input_output(self):
        self.x = np.random.uniform(0.5, 1, (2, 50, 2, 1)).astype(self.dtype)
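        # A plausible completion of this test case, consistent with the other broadcast
        # tests above (assumption):
        #     sgn = np.random.choice([-1, 1], (50, 2)).astype(self.dtype)
        #     self.y = self.x[0, :, :, 0] + sgn * np.random.uniform(1, 2, (50, 2)).astype(self.dtype)
        #     self.out = np.maximum(self.x, self.y.reshape(1, 50, 2, 1))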
# Importing required libraries
import tkinter
from tkinter import *
from tkinter import Tk
from tkinter import Label
from tkinter import filedialog
from matplotlib import pyplot as plt
import numpy as np
import statistics as st
# Creating the class of the application
class Application(Tk):
# Defining the __init__ function
def __init__(self):
super().__init__()
# Setting the required variables by self
self.file_location=""
self.keyword_location=""
self.count_of_words=0
self.count_of_sentences=0
self.count_of_lines=0
self.most_frequent_word=""
self.least_frequent_word=""
self.count_in_keyword=0
self.sentences_with_keyword=""
# The symbols that need to be ignored in the input text file are in except_array
self.except_array=[".",",","?","!",":",";"]
self.geometry('550x520')
self.grid()
# File labels
self.file_label=Label(self, text = "File Not Chosen ." + self.file_location)
self.file_label.pack()
self.keyword_label = Label(self, text = "Keywords File Not Chosen . " + self.keyword_location)
self.keyword_label.pack()
#Creating the browse button for choosing the text file in the GUI
self.browse = tkinter.Button(self,text = "Text File",command = self.set_file_location)
self.browse.pack()
#Creating the browse button for choosing the keyword file in the GUI
self.browse = tkinter.Button(self,text = "Keywords File",fg="brown", command = self.set_keyword_location)
self.browse.pack()
#Creating the refresh button in the GUI
self.refresh = tkinter.Button(self, text="Refresh",fg="green",command=self.refresh_function)
self.refresh.pack()
#Labels for all statistics functions used
self.number_of_words = Label(self, text="Number of Words: " + str(self.count_of_words))
self.number_of_words.pack()
self.number_of_sentences = Label(self, text="Number of Sentences: " + str(self.count_of_sentences))
self.number_of_sentences.pack()
self.number_of_lines = Label(self, text="Number of Lines: " + str(self.count_of_lines))
self.number_of_lines.pack()
self.most_frequent = Label(self, text="Most Frequent Word ...." + (self.most_frequent_word))
self.most_frequent.pack()
self.least_frequent = Label(self, text="Least Frequent Word ...." + (self.least_frequent_word))
self.least_frequent.pack()
self.keyword_file = Label(self, text="Number of Sentences with keywords..." + str(self.count_in_keyword))
self.keyword_file.pack()
#Creating button which shows extra infomation in the GUI
        self.extra = tkinter.Button(self, text = "Extra Information", command = self.extra_function)
self.extra.pack()
#Labels for extra information
self.most_frequents = Label(self, text="")
self.most_frequents.pack()
self.least_frequents= Label(self, text="")
self.least_frequents.pack()
        #Creating the Histogram button which displays the histogram in the GUI
self.histogram = tkinter.Button(self, text="Plot Histogram",fg="red",command=self.plot_function)
self.histogram.pack()
#Creating label of string with keyword
self.keyword_string = Label(self, text="Printing sentences :\n")
self.keyword_string.pack()
#Creating the quit button in the GUI
self.quit = tkinter.Button(self, text="QUIT", fg="red",command=self.destroy)
self.quit.pack(side="bottom")
# Defining the function that sets the main text file location
def set_file_location(self):
self.file_location = filedialog.askopenfilename()
self.file_label.config(text = "File: " + self.file_location)
self.file_function()
# Defining the function that sets the keyword file location
def set_keyword_location(self):
self.keyword_location = filedialog.askopenfilename()
self.keyword_label.config(text = "Keyword File: " + self.keyword_location)
self.keyword_function()
# Defining the function which refreshes
def refresh_function(self):
if(self.file_location!=""):
self.file_function()
if(self.keyword_location!=""):
self.keyword_function()
if(self.file_location!=""):
self.extra_function()
# Defining the function to perform all operations on the text file
def file_function(self):
# Open and process the text file
data = open(self.file_location,"r")
raw_data = data.read()
words=raw_data.split()
# Create an array word_new_list, containing all required words from the file
word_new_list=[]
for i in range(len(words)):
check=0
for j in range(len(self.except_array)):
if words[i]==self.except_array[j]:
check=1
if(check==0):
word_new_list.append(words[i])
# Count and display the number of words in the file
self.count_of_words=len(word_new_list)
self.number_of_words.config(text="Number of Words: " + str(self.count_of_words))
# Count and display the number of sentences in the file
self.count_of_sentences = raw_data.count('.') + raw_data.count('?') + raw_data.count('!')
self.number_of_sentences.config(text="Number of Sentences: " + str(self.count_of_sentences))
# Count and display the number of lines in the file
self.count_of_lines = raw_data.count('\n')
self.number_of_lines.config(text="Number of Lines: " + str(self.count_of_lines))
# Creating a map to store the words(for finding most and least frequent word)
map_words=dict()
word_list=word_new_list
for i in range(len(word_list)):
if(map_words.get(word_list[i])==None):
map_words[word_list[i]]=1
else:
map_words[word_list[i]]+=1
# Display the most frequent word in the file using the map
self.most_frequent_word=max(map_words,key=map_words.get)
self.most_frequent.config(text="Most Frequent Word : " + self.most_frequent_word)
# Display the least frequent word in the file using the map
self.least_frequent_word=min(map_words,key=map_words.get)
self.least_frequent.config(text="Least Frequent Word : " + self.least_frequent_word)
# Defining the function to perform operations on keyword file
def keyword_function(self):
# Open and process the file
data = open(self.file_location,"r")
raw_data=data.read()
keyword_data= open(self.keyword_location,"r")
keyword=keyword_data.read().split()
        # Replacing '?' and '!' with '.' and splitting on '.' (finding ends of sentences)
        raw_data = raw_data.replace("?", ".")
        raw_data = raw_data.replace("!", ".")
sentences=raw_data.split(".")
self.sentences_with_keyword=""
# Count the number of sentences with keywords
self.count_in_keyword=0
for i in range(len(sentences)):
check=0
for j in range(len(keyword)):
if(sentences[i].count(keyword[j])>0):
check=1
self.sentences_with_keyword+=("\n"+sentences[i])
self.count_in_keyword+=check
# Display the number of sentences with keywords and also print them..
self.keyword_string.config(text="Sentences with keywords: \n" + str(self.sentences_with_keyword))
self.keyword_file.config(text="Number of Sentences with keywords: " + str(self.count_in_keyword))
# Defining the function to plot the Bar graph
def plot_function(self):
# Open and process the file
data = open(self.file_location,"r")
raw_data=data.read()
words=raw_data.split()
# Create an array word_new_list, containing all required words from the file
word_new_list=[]
for i in range(len(words)):
check=0
for j in range(len(self.except_array)):
if words[i]==self.except_array[j]:
check=1
if(check==0):
word_new_list.append(words[i])
# Plot the bar graph from the processed data
        word_uniq, value = np.unique(word_new_list, return_counts=True)
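        # A minimal sketch of the remaining plotting step (assumption, not in the original):
        #     plt.bar(word_uniq, value)
        #     plt.xlabel("word")
        #     plt.ylabel("frequency")
        #     plt.show()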
"""
*
* Copyright (c) 2021 <NAME>
* 2021 Autonomous Systems Lab ETH Zurich
* All rights reserved.
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* 3. Neither the name Data Driven Dynamics nor the names of its contributors may be
* used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
* OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
* AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
"""
__author__ = "<NAME>"
__maintainer__ = "<NAME>"
__license__ = "BSD 3"
from src.tools.math_tools import cropped_sym_sigmoid
from src.tools.quat_utils import quaternion_to_rotation_matrix
from src.tools.dataframe_tools import resample_dataframe_list
from src.tools.ulog_tools import load_ulog, pandas_from_topic
from .model_plots import model_plots, aerodynamics_plots, linear_model_plots
from .rotor_models import RotorModel, BiDirectionalRotorModel, TiltingRotorModel, ChangingAxisRotorModel
import matplotlib.pyplot as plt
from scipy.linalg import block_diag
import src.optimizers as optimizers
import numpy as np
import yaml
import time
import warnings
import math
import pandas as pd
from progress.bar import Bar
""" The model class contains properties shared between all models and shgall simplyfy automated checks and the later
export to a sitl gazebo model by providing a unified interface for all models. """
class DynamicsModel():
def __init__(self, config_dict):
assert type(
config_dict) is dict, 'req_topics_dict input must be a dict'
assert bool(config_dict), 'req_topics_dict can not be empty'
self.model_name = "unknown_model"
self.config_dict = config_dict
self.resample_freq = config_dict["resample_freq"]
self.optimizer_config = config_dict["optimizer_config"]
self.req_topics_dict = config_dict["data"]["required_ulog_topics"]
self.req_dataframe_topic_list = config_dict["data"]["req_dataframe_topic_list"]
self.visual_dataframe_selector_config_dict = {
"x_axis_col": "timestamp",
"sub_plt1_data": ["q0", "q1", "q2", "q3"],
"sub_plt2_data": ["u0", "u1", "u2", "u3"]}
self.estimate_forces = config_dict["estimate_forces"]
self.estimate_moments = config_dict["estimate_moments"]
# used to generate a dict with the resulting coefficients later on.
self.coef_name_list = []
self.y_dict = {}
self.coef_dict = {}
self.result_dict = {}
def prepare_regression_matrices(self):
if "V_air_body_x" not in self.data_df:
self.normalize_actuators()
self.compute_airspeed_from_groundspeed(["vx", "vy", "vz"])
# Rotor features
angular_vel_mat = self.data_df[[
"ang_vel_x", "ang_vel_y", "ang_vel_z"]].to_numpy()
self.compute_rotor_features(self.rotor_config_dict, angular_vel_mat)
if (self.estimate_forces and self.estimate_moments):
self.prepare_force_regression_matrices()
self.prepare_moment_regression_matrices()
elif (self.estimate_forces):
self.prepare_force_regression_matrices()
elif (self.estimate_moments):
self.prepare_moment_regression_matrices()
else:
raise ValueError("Neither Forces nor Moments estimation activated")
return
def prepare_force_regression_matrices(self):
raise NotImplementedError()
def prepare_moment_regression_matrices(self):
raise NotImplementedError()
def assemble_regression_matrices(self,measurements):
sizes = [len(self.y_dict[i].keys()) for i in measurements]
y = np.empty(sum(sizes)*self.n_samples)
i = 0
for m in measurements:
for k in self.y_dict[m].keys():
y[i*self.n_samples:(i+1)*self.n_samples] = self.data_df[self.y_dict[m][k]]
i += 1
coef_list = []
for i in self.coef_dict.keys():
for m in measurements:
if m in self.coef_dict[i]:
coef_list.append(i)
X = np.zeros((len(measurements)*self.n_samples*3,len(coef_list)))
for coef_index, coef in enumerate(coef_list):
for i_index, i in enumerate(measurements):
for j_index, j in enumerate(["x","y","z"]):
try:
pos = self.n_samples*(i_index*3+j_index)
key = self.coef_dict[coef][i][j]
X[pos:pos+self.n_samples,coef_index] = self.data_df[key]
                    except KeyError:
                        pass
return X,y,coef_list
def get_topic_list_from_topic_type(self, topic_type):
topic_type_name_dict = self.req_topics_dict[topic_type]
if "dataframe_name" in topic_type_name_dict.keys():
topic_columns = topic_type_name_dict["dataframe_name"].copy()
else:
topic_columns = topic_type_name_dict["ulog_name"].copy()
topic_columns.remove("timestamp")
return topic_columns
def compute_airspeed_from_groundspeed(self, airspeed_topic_list):
groundspeed_ned_mat = (self.data_df[airspeed_topic_list]).to_numpy()
airspeed_body_mat = self.rot_to_body_frame(groundspeed_ned_mat)
aoa_vec = np.zeros((airspeed_body_mat.shape[0], 1))
sideslip_vec = np.zeros((airspeed_body_mat.shape[0], 1))
for i in range(airspeed_body_mat.shape[0]):
aoa_vec[i, :] = math.atan2(
airspeed_body_mat[i, 2], airspeed_body_mat[i, 0])
sideslip_vec[i, :] = math.atan2(
airspeed_body_mat[i, 1], airspeed_body_mat[i, 0])
airspeed_body_mat = np.hstack(
(airspeed_body_mat, aoa_vec, sideslip_vec))
airspeed_body_df = pd.DataFrame(airspeed_body_mat, columns=[
"V_air_body_x", "V_air_body_y", "V_air_body_z", "angle_of_attack", "angle_of_sideslip"])
self.data_df = pd.concat(
[self.data_df, airspeed_body_df], axis=1, join="inner")
def compute_body_rotation_features(self, angular_vel_topic_list):
"""Include the moment contribution due to rotation body frame:
w x Iw = X_body_rot * v
Where v = (I_y-I_z, I_z-I_x, I_x- I_y)^T
is comprised of the inertia moments we want to estimate
"""
angular_vel_mat = (self.data_df[angular_vel_topic_list]).to_numpy()
X_body_rot = np.zeros((3*angular_vel_mat.shape[0], 3))
        X_body_rot_coef_list = ["I_yy-I_zz", "I_zz-I_xx", "I_xx-I_yy"]
for i in range(angular_vel_mat.shape[0]):
            X_body_rot[3*i, 0] = angular_vel_mat[i, 1] * angular_vel_mat[i, 2]
            X_body_rot[3*i + 1, 1] = angular_vel_mat[i, 2] * angular_vel_mat[i, 0]
            X_body_rot[3*i + 2, 2] = angular_vel_mat[i, 0] * angular_vel_mat[i, 1]
return X_body_rot, X_body_rot_coef_list
def normalize_actuators(self, actuator_topic_types=["actuator_outputs"], control_outputs_used=False):
# u : normalize actuator output from pwm to be scaled between 0 and 1
# To be adjusted using parameters:
# This should probably be adapted in the future to allow different values for each actuator specified in the config.
if control_outputs_used:
self.min_output = -1
self.max_output = 1.01
self.trim_output = 0
else:
self.min_output = 0
self.max_output = 2000
self.trim_output = 1500
self.actuator_columns = []
self.actuator_type = []
for topic_type in actuator_topic_types:
self.actuator_columns += self.get_topic_list_from_topic_type(
topic_type)
self.actuator_type += self.req_topics_dict[topic_type]["actuator_type"]
self.actuator_type.remove("timestamp")
for i in range(len(self.actuator_columns)):
actuator_data = self.data_df[self.actuator_columns[i]].to_numpy()
if (self.actuator_type[i] == "motor"):
for j in range(actuator_data.shape[0]):
if (actuator_data[j] < self.min_output):
actuator_data[j] = 0
else:
actuator_data[j] = (
actuator_data[j] - self.min_output)/(self.max_output - self.min_output)
elif ((self.actuator_type[i] == "control_surface" or self.actuator_type[i] == "bi_directional_motor")):
for j in range(actuator_data.shape[0]):
if (actuator_data[j] < self.min_output):
actuator_data[j] = 0
else:
actuator_data[j] = 2*(
actuator_data[j] - self.trim_output)/(self.max_output - self.min_output)
else:
print("actuator type unknown:", self.actuator_type[i])
print("normalization failed")
exit(1)
self.data_df[self.actuator_columns[i]] = actuator_data
def initialize_rotor_model(self, rotor_config_dict, angular_vel_mat=None):
valid_rotor_types = ["RotorModel", "ChangingAxisRotorModel",
"BiDirectionalRotorModel", "TiltingRotorModel"]
rotor_input_name = rotor_config_dict["dataframe_name"]
u_vec = self.data_df[rotor_input_name].to_numpy()
if "rotor_type" not in rotor_config_dict.keys():
# Set default rotor model
rotor_type = "RotorModel"
print("no Rotor model specified for ", rotor_input_name)
print("Selecting default: RotorModel")
else:
rotor_type = rotor_config_dict["rotor_type"]
if rotor_type == "RotorModel":
rotor = RotorModel(
rotor_config_dict, u_vec, self.v_airspeed_mat, angular_vel_mat=angular_vel_mat)
elif rotor_type == "ChangingAxisRotorModel":
rotor = ChangingAxisRotorModel(
rotor_config_dict, u_vec, self.v_airspeed_mat, angular_vel_mat=angular_vel_mat)
elif rotor_type == "BiDirectionalRotorModel":
rotor = BiDirectionalRotorModel(
rotor_config_dict, u_vec, self.v_airspeed_mat, angular_vel_mat=angular_vel_mat)
elif rotor_type == "TiltingRotorModel":
tilt_actuator_df_name = rotor_config_dict["tilt_actuator_dataframe_name"]
tilt_actuator_vec = self.data_df[tilt_actuator_df_name]
rotor = TiltingRotorModel(
rotor_config_dict, u_vec, self.v_airspeed_mat, tilt_actuator_vec, angular_vel_mat=angular_vel_mat)
else:
print(rotor_type, " is not a valid rotor model.")
print("Valid rotor models are: ", valid_rotor_types)
print("Adapt your config file to a valid rotor model!")
exit(1)
return rotor
def compute_rotor_features(self, rotors_config_dict, angular_vel_mat=None):
self.v_airspeed_mat = self.data_df[[
"V_air_body_x", "V_air_body_y", "V_air_body_z"]].to_numpy()
self.rotor_dict = {}
for rotor_group in rotors_config_dict.keys():
rotor_group_list = rotors_config_dict[rotor_group]
self.rotor_dict[rotor_group] = {}
if (self.estimate_forces):
X_force_collector = np.zeros(
(self.n_samples, 3*3))
if (self.estimate_moments):
X_moment_collector = np.zeros(
(self.n_samples, 3*5))
for rotor_config_dict in rotor_group_list:
rotor = self.initialize_rotor_model(
rotor_config_dict, angular_vel_mat)
self.rotor_dict[rotor_group][rotor_config_dict["dataframe_name"]] = rotor
if (self.estimate_forces):
X_force_curr, coef_dict_force, col_names_force = rotor.compute_actuator_force_matrix()
X_force_collector = X_force_collector + X_force_curr
# Include rotor group name in coefficient names:
for i in range(len(col_names_force)):
col_names_force[i] = rotor_group + \
col_names_force[i]
for key in list(coef_dict_force.keys()):
coef_dict_force[rotor_group+key] = coef_dict_force.pop(key)
for i in ["x","y","z"]:
coef_dict_force[rotor_group+key]["lin"][i] = rotor_group + coef_dict_force[rotor_group+key]["lin"][i]
if (self.estimate_moments):
X_moment_curr, coef_dict_moment, col_names_moment = rotor.compute_actuator_moment_matrix()
X_moment_collector = X_moment_collector + X_moment_curr
# Include rotor group name in coefficient names:
for i in range(len(col_names_moment)):
col_names_moment[i] = rotor_group + \
col_names_moment[i]
for key in list(coef_dict_moment.keys()):
coef_dict_moment[rotor_group+key] = coef_dict_moment.pop(key)
for i in ["x","y","z"]:
coef_dict_moment[rotor_group+key]["rot"][i] = rotor_group + coef_dict_moment[rotor_group+key]["rot"][i]
if (self.estimate_forces):
self.data_df[col_names_force] = X_force_collector
self.coef_dict.update(coef_dict_force)
if (self.estimate_moments):
self.data_df[col_names_moment] = X_moment_collector
self.coef_dict.update(coef_dict_moment)
return
def rot_to_body_frame(self, vec_mat):
"""
Rotates horizontally stacked 3D vectors from NED world frame to FRD body frame
inputs:
vec_mat: numpy array of dimensions (n,3),
containing the horizontally stacked 3D vectors [x,y,z] in world frame.
"""
vec_mat_transformed = np.zeros(vec_mat.shape)
for i in range(vec_mat.shape[0]):
R_world_to_body = np.linalg.inv(
quaternion_to_rotation_matrix(self.q_mat[i, :]))
vec_mat_transformed[i, :] = np.transpose(
R_world_to_body @ np.transpose(vec_mat[i, :]))
return vec_mat_transformed
def rot_to_world_frame(self, vec_mat):
"""
Rotates horizontally stacked 3D vectors from FRD body frame to NED world frame
inputs:
vec_mat: numpy array of dimensions (n,3),
containing the horizontally stacked 3D vectors [x,y,z] in body frame.
"""
vec_mat_transformed = np.zeros(vec_mat.shape)
for i in range(vec_mat.shape[0]):
R_body_to_world = quaternion_to_rotation_matrix(self.q_mat[i, :])
vec_mat_transformed[i, :] = R_body_to_world @ vec_mat[i, :]
return vec_mat_transformed
def generate_model_dict(self, coefficient_list, metrics_dict, model_dict):
assert (len(self.coef_name_list) == len(coefficient_list)), \
("Length of coefficient list and coefficient name list does not match: Length of coefficient list:",
len(coefficient_list), "length of coefficient name list: ", len(self.coef_name_list))
coefficient_list = [float(coef) for coef in coefficient_list]
coef_dict = dict(zip(self.coef_name_list, coefficient_list))
self.result_dict = {"model": model_dict,
"coefficients": coef_dict,
"metrics": metrics_dict,
"number of samples": self.n_samples}
def save_result_dict_to_yaml(self, file_name="model_parameters", result_path="model_results/"):
timestr = time.strftime("%Y-%m-%d-%H-%M-%S")
file_path = result_path + file_name + "_" + timestr + ".yaml"
with open(file_path, 'w') as outfile:
yaml.dump(self.result_dict, outfile, default_flow_style=False)
yaml.dump(self.fisher_metric, outfile, default_flow_style=False)
print("-------------------------------------------------------------------------------")
print("Complete results saved to: ")
print(file_path)
print("-------------------------------------------------------------------------------")
def load_dataframes(self, data_frame):
self.data_df = data_frame
self.n_samples = self.data_df.shape[0]
self.quaternion_df = self.data_df[["q0", "q1", "q2", "q3"]]
self.q_mat = self.quaternion_df.to_numpy()
print("-------------------------------------------------------------------------------")
print("Initialized dataframe with the following columns: ")
print(list(self.data_df.columns))
print("Data contains ", self.n_samples, "timestamps.")
def predict_model(self, opt_coefs_dict):
print("===============================================================================")
print(" Preparing Model Features ")
print("===============================================================================")
X, y = self.prepare_regression_matrices()
c_opt_list = []
for coef in self.coef_name_list:
c_opt_list.append(opt_coefs_dict[coef])
self.initialize_optimizer()
self.optimizer.set_optimal_coefficients(c_opt_list, X, y)
self.generate_optimization_results()
def estimate_model(self):
print("===============================================================================")
print(" Preparing Model Features ")
print("===============================================================================")
self.X, self.y, self.coef_name_list = self.assemble_regression_matrices(["lin","rot"])
self.initialize_optimizer()
self.optimizer.estimate_parameters(self.X, self.y)
self.generate_optimization_results()
return
def initialize_optimizer(self):
print("===============================================================================")
print(" Initialize Optimizer ")
print(" " +
self.optimizer_config["optimizer_class"])
print("===============================================================================")
try:
# This will call the optimizer constructor directly from the optimizer_class
self.optimizer = getattr(optimizers, self.optimizer_config["optimizer_class"])(
self.optimizer_config, self.coef_name_list)
except AttributeError:
            error_str = "Optimizer Class '{0}' not found, is it added to optimizers "\
                "directory and optimizers/__init__.py?".format(
                    self.optimizer_config["optimizer_class"])
raise AttributeError(error_str)
def generate_prediction_results(self):
print("===============================================================================")
print(" Prediction Results ")
print("===============================================================================")
metrics_dict = self.optimizer.compute_optimization_metrics()
coef_list = self.optimizer.get_optimization_parameters()
model_dict = {}
model_dict.update(self.rotor_config_dict)
if hasattr(self, 'aero_config_dict'):
model_dict.update(self.aero_config_dict)
self.generate_model_dict(coef_list, metrics_dict, model_dict)
print(
" Optimal Coefficients ")
print("-------------------------------------------------------------------------------")
print(
yaml.dump(self.result_dict["coefficients"], default_flow_style=False))
print("-------------------------------------------------------------------------------")
print(" Prediction Metrics ")
print("-------------------------------------------------------------------------------")
print(
yaml.dump(self.result_dict["metrics"], default_flow_style=False))
self.save_result_dict_to_yaml(file_name=self.model_name)
def generate_optimization_results(self):
print("===============================================================================")
print(" Optimization Results ")
print("===============================================================================")
metrics_dict = self.optimizer.compute_optimization_metrics()
coef_list = self.optimizer.get_optimization_parameters()
model_dict = {}
model_dict.update(self.rotor_config_dict)
if hasattr(self, 'aero_config_dict'):
model_dict.update(self.aero_config_dict)
self.generate_model_dict(coef_list, metrics_dict, model_dict)
print(
" Optimal Coefficients ")
print("-------------------------------------------------------------------------------")
print(
yaml.dump(self.result_dict["coefficients"], default_flow_style=False))
print("-------------------------------------------------------------------------------")
print(" Optimization Metrics ")
print("-------------------------------------------------------------------------------")
print(
yaml.dump(self.result_dict["metrics"], default_flow_style=False))
self.save_result_dict_to_yaml(file_name=self.model_name)
def compute_residuals(self):
y_pred = self.optimizer.predict(self.X)
_,y_forces,_ = self.assemble_regression_matrices(["lin"])
_,y_moments,_ = self.assemble_regression_matrices(["rot"])
y_forces_measured = np.zeros(y_forces.shape)
y_forces_measured[0::3] = y_forces[0:int(y_forces.shape[0]/3)]
y_forces_measured[1::3] = y_forces[int(y_forces.shape[0]/3):int(2*y_forces.shape[0]/3)]
y_forces_measured[2::3] = y_forces[int(2*y_forces.shape[0]/3):y_forces.shape[0]]
y_forces_pred = np.zeros(y_forces.shape)
y_forces_pred[0::3] = y_pred[0:int(y_forces.shape[0]/3)]
y_forces_pred[1::3] = y_pred[int(y_forces.shape[0]/3):int(2*y_forces.shape[0]/3)]
y_forces_pred[2::3] = y_pred[int(2*y_forces.shape[0]/3):y_forces.shape[0]]
y_moments_measured = np.zeros(y_moments.shape)
y_moments_measured[0::3] = y_moments[0:int(y_moments.shape[0]/3)]
y_moments_measured[1::3] = y_moments[int(y_moments.shape[0]/3):int(2*y_moments.shape[0]/3)]
y_moments_measured[2::3] = y_moments[int(2*y_moments.shape[0]/3):y_moments.shape[0]]
y_moments_pred = np.zeros(y_moments.shape)
y_moments_pred[0::3] = y_pred[y_moments.shape[0]:int(4*y_moments.shape[0]/3)]
y_moments_pred[1::3] = y_pred[int(4*y_moments.shape[0]/3):int(5*y_moments.shape[0]/3)]
y_moments_pred[2::3] = y_pred[int(5*y_moments.shape[0]/3):]
error_y_forces = y_forces_pred - y_forces_measured
error_y_moments = y_moments_pred - y_moments_measured
stacked_error_y_forces = np.array(error_y_forces)
acc_mat = stacked_error_y_forces.reshape((-1, 3))
residual_force_df = pd.DataFrame(acc_mat, columns=[
"residual_force_x", "residual_force_y", "residual_force_z"])
self.data_df = pd.concat(
[self.data_df, residual_force_df], axis=1, join="inner").reindex(self.data_df.index)
stacked_error_y_moments = np.array(error_y_moments)
mom_mat = stacked_error_y_moments.reshape((-1, 3))
residual_moment_df = pd.DataFrame(mom_mat, columns=[
"residual_moment_x", "residual_moment_y", "residual_moment_z"])
self.data_df = pd.concat(
[self.data_df, residual_moment_df], axis=1, join="inner").reindex(self.data_df.index)
def plot_model_predicitons(self):
def plot_scatter(ax, title, dataframe_x, dataframe_y, dataframe_z, color='blue'):
ax.scatter(self.data_df[dataframe_x], self.data_df[dataframe_y],
self.data_df[dataframe_z], s=10, facecolor=color, lw=0, alpha=0.1)
ax.set_title(title)
ax.set_xlabel(dataframe_x)
ax.set_ylabel(dataframe_y)
ax.set_zlabel(dataframe_z)
y_pred = self.optimizer.predict(self.X)
if (self.estimate_forces and self.estimate_moments):
_,y_forces,_ = self.assemble_regression_matrices(["lin"])
_,y_moments,_ = self.assemble_regression_matrices(["rot"])
y_forces_measured = np.zeros(y_forces.shape)
y_forces_measured[0::3] = y_forces[0:int(y_forces.shape[0]/3)]
y_forces_measured[1::3] = y_forces[int(y_forces.shape[0]/3):int(2*y_forces.shape[0]/3)]
y_forces_measured[2::3] = y_forces[int(2*y_forces.shape[0]/3):y_forces.shape[0]]
y_forces_pred = np.zeros(y_forces.shape)
y_forces_pred[0::3] = y_pred[0:int(y_forces.shape[0]/3)]
y_forces_pred[1::3] = y_pred[int(y_forces.shape[0]/3):int(2*y_forces.shape[0]/3)]
y_forces_pred[2::3] = y_pred[int(2*y_forces.shape[0]/3):y_forces.shape[0]]
            y_moments_measured = np.zeros(y_moments.shape)
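            # A plausible continuation, mirroring the reshuffling done in compute_residuals
            # above (assumption):
            #     y_moments_measured[0::3] = y_moments[0:int(y_moments.shape[0]/3)]
            #     y_moments_measured[1::3] = y_moments[int(y_moments.shape[0]/3):int(2*y_moments.shape[0]/3)]
            #     y_moments_measured[2::3] = y_moments[int(2*y_moments.shape[0]/3):]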
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
# Copyright ยฉ 2020-2021 <NAME> and <NAME>
#
# Distributed under terms of the BSD 3-Clause license.
"""Functions related to plotting depth with matplotlib."""
import os
import copy
import pathlib
import argparse
import tempfile
import multiprocessing
from typing import Sequence, Tuple
import numpy
import rasterio
import rasterio.plot
import rasterio.merge
import matplotlib.pyplot
import matplotlib.axes
import matplotlib.colors
import matplotlib.cm
from gclandspill import pyclaw
from gclandspill import _misc
from gclandspill import _preprocessing
from gclandspill import _postprocessing
def plot_depth(args: argparse.Namespace):
"""Plot depth with Matplotlib.
This function is called by the main function.
    Arguments
---------
args : argparse.Namespace
CMD argument parsed by `argparse`.
Returns
-------
Execution code. 0 for success.
"""
# process nprocs
args.nprocs = len(os.sched_getaffinity(0)) if args.nprocs is None else args.nprocs
# process case path
args.case = pathlib.Path(args.case).expanduser().resolve()
_misc.check_folder(args.case)
    # process level, frame_ed, topofiles, and dry_tol
args = _misc.extract_info_from_setrun(args)
# process args.soln_dir
args.soln_dir = _misc.process_path(args.soln_dir, args.case, "_output")
_misc.check_folder(args.soln_dir)
# process args.dest_dir
if args.use_sat:
args.dest_dir = _misc.process_path(
args.dest_dir, args.case, "_plots/sat/level{:02d}".format(args.level))
else:
args.dest_dir = _misc.process_path(
args.dest_dir, args.case, "_plots/depth/level{:02d}".format(args.level))
os.makedirs(args.dest_dir, exist_ok=True) # make sure the folder exists
# process args.extent
if args.extent is None: # get the minimum extent convering the solutions at all frames
args.extent = _postprocessing.calc.get_soln_extent(
args.soln_dir, args.frame_bg, args.frame_ed, args.level)
# process the max of solution
if args.cmax is None:
args.cmax = _postprocessing.calc.get_soln_max(
args.soln_dir, args.frame_bg, args.frame_ed, args.level)
# prepare args for child processes (also initialize for the first proc)
    per_proc = (args.frame_ed - args.frame_bg) // args.nprocs  # number of frames per process
child_args = [copy.deepcopy(args)]
child_args[0].frame_bg = args.frame_bg
child_args[0].frame_ed = args.frame_bg + per_proc
# the first process has to do more jobs ...
child_args[0].frame_ed += (args.frame_ed - args.frame_bg) % args.nprocs
# remaining processes
for _ in range(args.nprocs-1):
child_args.append(copy.deepcopy(args))
child_args[-1].frame_bg = child_args[-2].frame_ed
child_args[-1].frame_ed = child_args[-1].frame_bg + per_proc
# if using satellite image as the background
if args.use_sat:
        # download satellite image if necessary
with tempfile.TemporaryDirectory() as tempdir:
sat_extent = _preprocessing.download_satellite_image(
args.extent, pathlib.Path(tempdir).joinpath("sat_img.png"))
sat_img = matplotlib.pyplot.imread(pathlib.Path(tempdir).joinpath("sat_img.png"))
# change the function arguments
for i in range(args.nprocs):
child_args[i] = [child_args[i], copy.deepcopy(sat_img), copy.deepcopy(sat_extent)]
# plot
print("Spawning plotting tasks to {} processes: ".format(args.nprocs))
with multiprocessing.Pool(args.nprocs, lambda: print("PID {}".format(os.getpid()))) as pool:
if args.use_sat:
pool.starmap(plot_soln_frames_on_sat, child_args)
else:
pool.map(plot_soln_frames, child_args)
return 0
def plot_soln_frames(args: argparse.Namespace):
"""Plot solution frames.
Currently, this function is supposed to be called by `plot_depth` with multiprocessing.
    Arguments
---------
args : argparse.Namespace
CMD argument parsed by `argparse`.
Returns
-------
Execution code. 0 for success.
"""
# plot
if args.no_topo:
fig, axes = matplotlib.pyplot.subplots(1, 2, gridspec_kw={"width_ratios": [10, 1]})
else:
fig, axes = matplotlib.pyplot.subplots(1, 3, gridspec_kw={"width_ratios": [10, 1, 1]})
axes[0], _, cmap_t, cmscale_t = plot_topo_on_ax(
axes[0], args.topofiles, args.colorize, extent=args.extent,
degs=[args.topo_azdeg, args.topo_altdeg], clims=[args.topo_cmin, args.topo_cmax]
)
for fno in range(args.frame_bg, args.frame_ed):
print("Processing frame {} by PID {}".format(fno, os.getpid()))
# read in solution data
soln = pyclaw.Solution()
soln.read(
fno, str(args.soln_dir), file_format="binary",
read_aux=args.soln_dir.joinpath("fort.a"+"{}".format(fno).zfill(4)).is_file()
)
axes[0], imgs, cmap_s, cmscale_s = plot_soln_frame_on_ax(
axes[0], soln, args.level, [args.cmin, args.cmax], args.dry_tol,
cmap=args.cmap, border=args.border)
axes[0].set_xlim(args.extent[0], args.extent[2])
axes[0].set_ylim(args.extent[1], args.extent[3])
# solution depth colorbar
fig.colorbar(matplotlib.cm.ScalarMappable(cmscale_s, cmap_s), cax=axes[1])
if not args.no_topo:
# topography colorbar
fig.colorbar(matplotlib.cm.ScalarMappable(cmscale_t, cmap_t), cax=axes[2])
fig.suptitle("T = {} sec".format(soln.state.t)) # title
fig.savefig(args.dest_dir.joinpath("frame{:05d}.png".format(fno))) # save
# clear artists
while True:
try:
img = imgs.pop()
img.remove()
del img
except IndexError:
break
print("PID {} done processing frames {} - {}".format(os.getpid(), args.frame_bg, args.frame_ed))
return 0
def plot_soln_frames_on_sat(
args: argparse.Namespace,
satellite_img: numpy.ndarray,
satellite_extent: Tuple[float, float, float, float]):
"""Plot solution frames on a satellite image.
Currently, this function is supposed to be called by `plot_depth` with multiprocessing.
    Arguments
---------
args : argparse.Namespace
CMD argument parsed by `argparse`.
    satellite_img : numpy.ndarray
        The RGB data for the satellite image.
    satellite_extent : Tuple[float, float, float, float]
The extent of the satellite image.
Returns
-------
Execution code. 0 for success.
"""
# plot
fig, axes = matplotlib.pyplot.subplots()
axes.imshow(
satellite_img,
extent=[satellite_extent[0], satellite_extent[2], satellite_extent[1], satellite_extent[3]]
)
for fno in range(args.frame_bg, args.frame_ed):
print("Processing frame {} by PID {}".format(fno, os.getpid()))
# read in solution data
soln = pyclaw.Solution()
soln.read(
fno, str(args.soln_dir), file_format="binary",
read_aux=args.soln_dir.joinpath("fort.a"+"{}".format(fno).zfill(4)).is_file()
)
axes, imgs, _, _ = plot_soln_frame_on_ax(
axes, soln, args.level, [args.cmin, args.cmax], args.dry_tol,
cmap=args.cmap, border=args.border)
axes.set_xlim(satellite_extent[0], satellite_extent[2])
axes.set_ylim(satellite_extent[1], satellite_extent[3])
fig.suptitle("T = {} sec".format(soln.state.t)) # title
fig.savefig(args.dest_dir.joinpath("frame{:05d}.png".format(fno))) # save
# clear artists
while True:
try:
img = imgs.pop()
img.remove()
del img
except IndexError:
break
print("PID {} done processing frames {} - {}".format(os.getpid(), args.frame_bg, args.frame_ed))
return 0
def plot_topo_on_ax(
axes: matplotlib.axes.Axes,
topo_files: Sequence[os.PathLike],
colorize: bool = False,
**kwargs: str
):
"""Add a topography elevation plot to an existing Axes object.
Arguments
---------
axes : matplotlib.axes.Axes
The target Axes object.
    topo_files : tuple/list of path-like
        A list of lists following the topography files specification in GeoClaw's settings.
colorize : bool
Whether to use colorized colormap for the elevation. (default: False).
**kwargs :
Other possible keyword arguments:
extent : [xmin, ymin, xmax, ymax]
            The extent of the topography. If not provided, use the union of all provided topography
files.
degs : [azdeg, altdeg]
The `azdeg` and `altdeg` for shading. See matplotlib's documentation regarding light
sources. If not provided, use the default value of [45, 25].
clims : [colormap min, colormap max]
Customize the limits of the colormap. If not provided, use the full range.
nodata : int
Indicates the `nodata` values in the topography files. Default value is -9999.
alpha : float
Opacity.
Returns
-------
axes : matplotlib.axes.Axes
The updated Axes object.
img : matplotlib.image.AxesImage
The image object of the topography plot returned by matplotlib's `imshow`.
cmap : matplotlib.colors.Colormap
The colormap object used by the topography plot.
cmscale : matplotlib.colors.Normalize
        The normalization object that maps elevation data to the colormap values.
"""
# process optional keyword arguments
extent = None if "extent" not in kwargs else kwargs["extent"]
degs = [45, 25] if "degs" not in kwargs else kwargs["degs"]
clims = None if "clims" not in kwargs else kwargs["clims"]
nodata = -9999 if "nodata" not in kwargs else kwargs["nodata"]
alpha = 0.7 if "alpha" not in kwargs else kwargs["alpha"]
# use mosaic raster to obtain interpolated terrain
rasters = [rasterio.open(topo, "r") for topo in topo_files]
    # merge and interpolate
dst, affine = rasterio.merge.merge(rasters, extent)
# close raster datasets
for topo in rasters:
topo.close()
# convert to masked array
dst = numpy.ma.array(dst[0], mask=(dst[0] == nodata))
# update the limits based on elevation
clims = [dst.min(), dst.max()] if clims is None else clims
if colorize: # use colorized colormap
        if numpy.all(dst >= 0.):
# -*- coding: utf-8 -*-
"""
Created on Tue Mar 3 10:33:41 2020
@author: <NAME>
"""
# =============================================================================
# Import Libraries
# =============================================================================
import numpy as np
from numba import jit
import matplotlib.pyplot as plt
# =============================================================================
# Define Functions
# =============================================================================
def ar_1(mu, a, sigma, T, x_0):
"""
This function computes a simulated ar1 process assuming x_t = mu + a*x_{t-1} + e_t
"""
x_path = np.zeros(T)
x_path[0] = x_0
shocks = np.random.normal(0,sigma,T) # The first term isn't used and will be ignored for sake of code readability
# iteratively construct the AR1 according to x_t = mu + a*x_{t-1} + e_t
for t in range(1,T):
x_path[t] = mu + a*x_path[t-1] + shocks[t]
return x_path # Return the path of the AR1
def censored_ar_1(mu, a, sigma, T, x_0):
"""
This function computes a simulated ar1 process assuming x_t = max(mu + a*x_{t-1} + e_t,0)
"""
    x_path = np.zeros(T)
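    # A minimal completion sketch, mirroring ar_1 above but clipping negative values at
    # zero (assumption, not part of the original snippet):
    #     x_path[0] = x_0
    #     shocks = np.random.normal(0, sigma, T)
    #     for t in range(1, T):
    #         x_path[t] = max(mu + a * x_path[t-1] + shocks[t], 0)
    #     return x_path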
"""
==========================
FastICA on 2D point clouds
==========================
This example illustrates visually in the feature space a comparison by
results using two different component analysis techniques.
:ref:`ICA` vs :ref:`PCA`.
Representing ICA in the feature space gives the view of 'geometric ICA':
ICA is an algorithm that finds directions in the feature space
corresponding to projections with high non-Gaussianity. These directions
need not be orthogonal in the original feature space, but they are
orthogonal in the whitened feature space, in which all directions
correspond to the same variance.
PCA, on the other hand, finds orthogonal directions in the raw feature
space that correspond to directions accounting for maximum variance.
Here we simulate independent sources using a highly non-Gaussian
process, 2 student T with a low number of degrees of freedom (top left
figure). We mix them to create observations (top right figure).
In this raw observation space, directions identified by PCA are
represented by orange vectors. We represent the signal in the PCA space,
after whitening by the variance corresponding to the PCA vectors (lower
left). Running ICA corresponds to finding a rotation in this space to
identify the directions of largest non-Gaussianity (lower right).
"""
print(__doc__)
# Authors: <NAME>, <NAME>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA, FastICA
###############################################################################
# Generate sample data
rng = np.random.RandomState(42)
S = rng.standard_t(1.5, size=(20000, 2))
S[:, 0] *= 2.
# Mix data
A = np.array([[1, 1], [0, 2]]) # Mixing matrix
X = np.dot(S, A.T) # Generate observations
pca = PCA()
S_pca_ = pca.fit(X).transform(X)
ica = FastICA(random_state=rng)
S_ica_ = ica.fit(X).transform(X) # Estimate the sources
S_ica_ /= S_ica_.std(axis=0)
###############################################################################
# Plot results
def plot_samples(S, axis_list=None):
plt.scatter(S[:, 0], S[:, 1], s=2, marker='o', zorder=10,
color='steelblue', alpha=0.5)
if axis_list is not None:
colors = ['orange', 'red']
for color, axis in zip(colors, axis_list):
axis /= axis.std()
x_axis, y_axis = axis
# Trick to get legend to work
plt.plot(0.1 * x_axis, 0.1 * y_axis, linewidth=2, color=color)
plt.quiver(0, 0, x_axis, y_axis, zorder=11, width=0.01, scale=6,
color=color)
plt.hlines(0, -3, 3)
plt.vlines(0, -3, 3)
plt.xlim(-3, 3)
plt.ylim(-3, 3)
plt.xlabel('x')
plt.ylabel('y')
plt.figure()
plt.subplot(2, 2, 1)
plot_samples(S / S.std())
plt.title('True Independent Sources')
axis_list = [pca.components_.T, ica.mixing_]
plt.subplot(2, 2, 2)
plot_samples(X / | np.std(X) | numpy.std |
""" Hyperparameters for MJC peg insertion policy optimization. """
from __future__ import division
from datetime import datetime
import os.path
import numpy as np
from gps import __file__ as gps_filepath
from gps.agent.mjc.agent_mjc import AgentMuJoCo
from gps.algorithm.algorithm_mdgps import AlgorithmMDGPS
from gps.algorithm.cost.cost_fk import CostFK
from gps.algorithm.cost.cost_action import CostAction
from gps.algorithm.cost.cost_sum import CostSum
from gps.algorithm.cost.cost_utils import RAMP_FINAL_ONLY
from gps.algorithm.dynamics.dynamics_lr_prior import DynamicsLRPrior
from gps.algorithm.dynamics.dynamics_prior_gmm import DynamicsPriorGMM
from gps.algorithm.traj_opt.traj_opt_lqr_python import TrajOptLQRPython
from gps.algorithm.policy_opt.policy_opt_caffe import PolicyOptCaffe
from gps.algorithm.policy.lin_gauss_init import init_lqr
from gps.algorithm.policy.policy_prior_gmm import PolicyPriorGMM
from gps.algorithm.policy.policy_prior import PolicyPrior
from gps.proto.gps_pb2 import JOINT_ANGLES, JOINT_VELOCITIES, \
END_EFFECTOR_POINTS, END_EFFECTOR_POINT_VELOCITIES, ACTION
from gps.gui.config import generate_experiment_info
SENSOR_DIMS = {
JOINT_ANGLES: 7,
JOINT_VELOCITIES: 7,
END_EFFECTOR_POINTS: 6,
END_EFFECTOR_POINT_VELOCITIES: 6,
ACTION: 7,
}
PR2_GAINS = np.array([3.09, 1.08, 0.393, 0.674, 0.111, 0.152, 0.098])
BASE_DIR = '/'.join(str.split(gps_filepath, '/')[:-2])
EXP_DIR = BASE_DIR + '/../experiments/mjc_mdgps_example/'
common = {
'experiment_name': 'my_experiment' + '_' + \
datetime.strftime(datetime.now(), '%m-%d-%y_%H-%M'),
'experiment_dir': EXP_DIR,
'data_files_dir': EXP_DIR + 'data_files/',
'target_filename': EXP_DIR + 'target.npz',
'log_filename': EXP_DIR + 'log.txt',
'conditions': 4,
}
if not os.path.exists(common['data_files_dir']):
os.makedirs(common['data_files_dir'])
agent = {
'type': AgentMuJoCo,
'filename': './mjc_models/pr2_arm3d.xml',
'x0': np.concatenate([np.array([0.1, 0.1, -1.54, -1.7, 1.54, -0.2, 0]),
np.zeros(7)]),
'dt': 0.05,
'substeps': 5,
'conditions': common['conditions'],
'pos_body_idx': np.array([1]),
'pos_body_offset': [[np.array([-0.08, -0.08, 0])], [np.array([-0.08, 0.08, 0])],
[np.array([0.08, 0.08, 0])], [ | np.array([0.08, -0.08, 0]) | numpy.array |
"""Experimental image transformations."""
from __future__ import division
import random
import numpy as np
import mxnet as mx
from mxnet import nd
def random_color_distort(src, brightness_delta=32, contrast_low=0.5, contrast_high=1.5,
saturation_low=0.5, saturation_high=1.5, hue_delta=18):
"""Randomly distort image color space.
Note that the input image should be in the original range [0, 255].
Parameters
----------
src : mxnet.nd.NDArray
Input image as HWC format.
brightness_delta : int
Maximum brightness delta. Defaults to 32.
contrast_low : float
Lowest contrast. Defaults to 0.5.
contrast_high : float
Highest contrast. Defaults to 1.5.
saturation_low : float
Lowest saturation. Defaults to 0.5.
saturation_high : float
Highest saturation. Defaults to 1.5.
hue_delta : int
Maximum hue delta. Defaults to 18.
Returns
-------
mxnet.nd.NDArray
Distorted image in HWC format.
"""
def brightness(src, delta, p=0.5):
"""Brightness distortion."""
if | np.random.uniform(0, 1) | numpy.random.uniform |
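# Hedged numpy sketch of the brightness step documented above: with probability p, add a delta
# drawn uniformly from [-brightness_delta, brightness_delta]. Clipping to [0, 255] is an
# assumption here, not necessarily what the original mxnet implementation does.
import numpy as np

def brightness_np(img, delta=32, p=0.5, rng=np.random):
    if rng.uniform(0, 1) > p:
        return img
    return np.clip(img + rng.uniform(-delta, delta), 0, 255)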
import bayesnewton
import objax
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import time
import tikzplotlib
print('loading rainforest data ...')
data = np.loadtxt('../data/TRI2TU-data.csv', delimiter=',')
nr = 50 # spatial grid points (y-axis)
nt = 100 # temporal grid points (x-axis)
binsize = 1000 / nt
t, r, Y_ = bayesnewton.utils.discretegrid(data, [0, 1000, 0, 500], [nt, nr])
t_flat, r_flat, Y_flat = t.flatten(), r.flatten(), Y_.flatten()
N = nr * nt # number of data points
np.random.seed(99)
test_ind = np.random.permutation(N)[:N//10]
t_test = t_flat[test_ind]
r_test = r_flat[test_ind]
Y_test = Y_flat[test_ind]
Y_flat[test_ind] = np.nan
Y = Y_flat.reshape(nt, nr)
# put test points on a grid to speed up prediction
X_test = | np.concatenate([t_test[:, None], r_test[:, None]], axis=1) | numpy.concatenate |
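# Minimal sketch of the hold-out masking pattern above: the sampled test indices are copied
# out for evaluation and then overwritten with NaN so the training grid never contains them.
import numpy as np

y = np.arange(10.0)
test_ind = np.random.permutation(10)[:2]
y_test = y[test_ind].copy()
y[test_ind] = np.nan
print(y_test, y)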
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import generators
from __future__ import nested_scopes
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import with_statement
import multiprocessing
import platform
import pandas as pd
import numpy as np
import matplotlib
import keras
if platform.system().find('Darwin') != -1:
matplotlib.use('tkagg')
else:
matplotlib.use('agg')
import matplotlib.pyplot as plt
# plt.rcParams['image.cmap'] = 'viridis'
import util.preprocess as pre
import util.audit as audit
from util import INPUT_PATH, OUTPUT_PATH, PROCESS_NUM, listdir_no_hidden, labels
from util.log import _INFO, _ERROR
from util.list import *
import feature.frequency as fre
import model.cnn as cnn
import model.fft as fft
def main():
_INFO("main started")
# for i in range(PROCESS_NUM):
# p = multiprocessing.Process(target=fre.multi_load_diffs_by_patient, args=(patients[i::PROCESS_NUM],))
# p.start()
_INFO("Positive num: %s" % len(pos_list()))
_INFO("Negative num: %s" % len(neg_list()))
_INFO("Test num: %s" % len(test_list()))
# cnn.train()
# fft.train()
model = keras.models.load_model(OUTPUT_PATH + '/cnn_balance_stage1.h5')
patients = test_list()
_INFO("Found %s patients" % len(patients))
for patient in patients:
segments = pre.load_segment_by_patient(patient)
segments = np.expand_dims(segments, axis=3)
ret = model.predict_on_batch(segments)
positive = []
out_file = open(OUTPUT_PATH + '/cnn_predict_' + patient + '.csv', 'w')
for i in range(len(ret)):
out_file.write(str(i) + ',' + str(ret[i][1]) + '\n')
positive.append(ret[i][1])
_INFO("Predict patient %s is %s" % (patient, | np.mean(positive) | numpy.mean |
from astropy.io import fits
from astropy.table import Table
from astropy.time import Time
import astropy.units as u
import os
import numpy as np
from srttools.io import mkdir_p, locations, read_data_fitszilla, \
get_chan_columns, classify_chan_columns
from srttools.utils import scantype, force_move_file, minmax, median_diff
from srttools.fit import detrend_spectroscopic_data
import warnings
from astropy import log
def default_scan_info_table():
return Table(names=['scan_id', 'start', 'stop',
'ra_min', 'ra_max', 'ra_d',
'dec_min', 'dec_max', 'dec_d',
'az_min', 'az_max', 'az_d',
'el_min', 'el_max', 'el_d',
'glon_min', 'glon_max', 'glon_d',
'glat_min', 'glat_max', 'glat_d',
'is_skydip', 'kind', 'direction'],
dtype=[int, float, float,
float, float, float, float, float, float,
float, float, float, float, float, float,
float, float, float, float, float, float,
bool, 'S10', 'S5'])
def get_subscan_info(subscan):
info = default_scan_info_table()
scan_id = subscan.meta['SubScanID']
start, stop = minmax(subscan['time'])
ramin, ramax = minmax(subscan['ra'])
decmin, decmax = minmax(subscan['dec'])
azmin, azmax = minmax(subscan['az'])
elmin, elmax = minmax(subscan['el'])
is_skydip = subscan.meta['is_skydip']
d_ra = median_diff(subscan['ra'])
d_dec = median_diff(subscan['dec'])
d_az = median_diff(subscan['az'])
d_el = median_diff(subscan['el'])
ravar = (ramax - ramin) * np.cos(np.mean((decmin, decmax)))
decvar = decmax - decmin
azvar = (azmax - azmin) * np.cos(np.mean((elmin, elmax)))
elvar = elmax - elmin
tot_eq = np.sqrt(ravar ** 2 + decvar ** 2)
tot_hor = np.sqrt(elvar ** 2 + azvar ** 2)
ravar /= tot_eq
decvar /= tot_hor
directions = np.array(['ra', 'dec', 'az', 'el'])
allvars = | np.array([ravar, decvar, azvar, elvar]) | numpy.array |
import numpy as np
def q31sat(x):
if x > 0x7FFFFFFF:
return(np.int32(0x7FFFFFFF))
elif x < -0x80000000:
return(np.int32(-0x80000000))  # saturate to INT32_MIN; avoids relying on integer wrap-around
else:
return(np.int32(x))
q31satV=np.vectorize(q31sat)
def toQ31(x):
return(q31satV(np.round(np.array(x) * (1<<31))))
def q15sat(x):
if x > 0x7FFF:
return(np.int16(0x7FFF))
elif x < -0x8000:
return(np.int16(-0x8000))  # saturate to INT16_MIN
else:
return(np.int16(x))
q15satV=np.vectorize(q15sat)
def toQ15(x):
return(q15satV(np.round(np.array(x) * (1<<15))))
def q7sat(x):
if x > 0x7F:
return(np.int8(0x7F))
elif x < -0x80:
return(np.int8(-0x80))  # saturate to INT8_MIN
else:
return( | np.int8(x) | numpy.int8 |
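# Worked example for the fixed-point helpers above: toQ15 scales by 2**15, rounds, and
# saturates to the int16 range, so floats outside [-1, 1) clamp to the extremes.
import numpy as np

vals = np.array([0.5, -0.25, 1.2])
# 0.5   -> round( 0.5  * 32768) =  16384
# -0.25 -> round(-0.25 * 32768) =  -8192
# 1.2   -> round( 1.2  * 32768) =  39322 -> saturates to 32767 (0x7FFF)
print(toQ15(vals))   # assumes toQ15/q15satV from the snippet above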
from fractions import Fraction
import numpy as np
import numpy.testing as npt
from ..renderer_common import FixedGains, InterpGains
def test_FixedGains():
g = FixedGains(
start_sample=Fraction(0.5),
end_sample=Fraction(10.5),
gains=np.array([0.5]),
)
sample_no = np.arange(11)
expected_gains = | np.ones((11, 1)) | numpy.ones |
import unittest
import numpy as np
import PySeismoSoil.helper_hh_calibration as hhc
import os
from os.path import join as _join
f_dir = _join(os.path.dirname(os.path.realpath(__file__)), 'files')
class Test_Helper_HH_Calibration(unittest.TestCase):
def test_calc_rho(self):
h = np.array([2, 3, 4, 5])
Vs = np.array([200, 300, 400, 500])
rho = hhc._calc_rho(h, Vs)
rho_ = np.array([1.6500, 2.0375, 1.9892, 1.9995]) * 1000
self.assertTrue(np.allclose(rho, rho_, rtol=0.001, atol=0.0))
def test_calc_Gmax(self):
rho = np.array([1600, 1700, 1800])
Vs = np.array([200, 300, 400])
self.assertTrue(np.allclose(
hhc._calc_Gmax(Vs, rho), [6.4e7, 1.53e8, 2.88e8], rtol=1e-2, atol=0.0,
))
def test_calc_vertical_stress(self):
h = np.array([80, 90, 100])
rho = np.array([1600, 1700, 1800])
sigma = hhc._calc_vertical_stress(h, rho)
sigma_ = [627840, 2006145, 3639510] # from MATLAB
self.assertTrue(np.allclose(sigma, sigma_, rtol=1e-3, atol=0.0))
def test_calc_OCR__case_1_no_upper_limit(self):
Vs = np.array([200, 300, 400])
rho = np.array([1600, 1700, 1800])
sigma_v0 = np.array([6e4, 8e4, 1e5])
OCR = hhc._calc_OCR(Vs, rho, sigma_v0)
OCR_bench = [4.26254237, 5.80208548, 7.08490535]
self.assertTrue(np.allclose(OCR, OCR_bench))
def test_calc_OCR__case_2_with_an_upper_limit_of_6(self):
Vs = np.array([200, 300, 400])
rho = np.array([1600, 1700, 1800])
sigma_v0 = | np.array([6e4, 8e4, 1e5]) | numpy.array |
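# Worked check of the shear-modulus relation exercised by test_calc_Gmax above:
# Gmax = rho * Vs**2 in SI units, e.g. 1600 kg/m^3 * (200 m/s)**2 = 6.4e7 Pa.
import numpy as np

rho = np.array([1600, 1700, 1800])
Vs = np.array([200, 300, 400])
print(rho * Vs ** 2)   # [6.4e+07, 1.53e+08, 2.88e+08], matching the expected test values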
import numpy as np
from gym import utils
from trajopt.envs import mujoco_env
import os
import dm_control.mujoco as mujoco
import fileinput
import trimesh
import scipy
def prep_XML(xml_path, replacement_path):
"""
Prepares MJCF XML code. Replaces mesh and textures directory for compiler
with that specified in our configuration file.
"""
compileToReplace = '<compiler coordinate="local" angle="radian" fusestatic="false" meshdir="{}" texturedir="{}"/>'
compileToReplace = compileToReplace.format(replacement_path, replacement_path)
for line in fileinput.input(xml_path, inplace=True):
if "compiler " in line:
print(compileToReplace)
else:
print(line.rstrip())
class Randomized_Pushing_Env(mujoco_env.MujocoEnv, utils.EzPickle):
#@profile
def __init__(self, path='/home/willie/workspace/SSC/herb_reconf/scene.xml', task='easy', obs=False, push_mesh_vertices=np.zeros((1,3)), target_mesh_vertices=np.zeros((1,3)), shapenet_path='/media/willie/fda8d90f-998c-425a-9823-a20479be9e98/data/ShapeNetCore.v2/'):
# trajopt specific attributes
self.obs=obs
self.task=task
self.env_name = 'herb_pushing_easy'
self.seeding = False
self.real_step = True
self.env_timestep = 0
# placeholder
self.hand_sid = -2
self.target_sid = -1
#prep_XML(path, '/home/willie/workspace/herbpushing/herb_reconf')
self.model = mujoco.Physics.from_xml_path(path)
self.model.forward()
a=self.model.named.data.xpos.axes.row.names
self.hand_sid = "herb/wam_1/bhand//unnamed_geom_0"
if self.obs:
self.block_sid_1 = "gen_body_0"
else:
self.block_sid_1 = "push_obj"
self.target_sid_1 = "//unnamed_geom_15"
self.block_sid_2 = "//unnamed_geom_9"
self.target_sid_2 = "//unnamed_geom_16"
self.block_sid_3 = "//unnamed_geom_10"
self.target_sid_3 = "//unnamed_geom_17"
self.push_mesh_vertices=push_mesh_vertices
self.target_mesh_vertices=target_mesh_vertices
self.last_block_pos=None
self.init_qpos= np.array([-1.48, -1.07, -1.48, 0.899, 0, 1.12,
0, 0, 0, 0, 0, 0,
0, 0, 0
])#, 0.13900801576105609, -0.42142641007555215, 0.3549998,0,0,0,0
#self.init_qpos[:]=0.0
mujoco_env.MujocoEnv.__init__(self, path, 1)
utils.EzPickle.__init__(self)
self.observation_dim = 66
self.action_dim = 15
self.robot_reset()
#@profile
def _step(self, a):
s_ob = self._get_obs()
# if a.shape[0]==15:
# zero_vel_a=np.zeros(2*a.shape[0])
# zero_vel_a[0:15]=a
# #zero_vel_a[14:22]=a[7:15]
# a=zero_vel_a
self.do_simulation(a, self.frame_skip)
hand_pos = self.model.named.data.geom_xpos[self.hand_sid]
#target_pos_1 = self.model.named.data.geom_xpos[self.target_sid_1]
#mesh=self.model.named.model.geom_dataid[self.block_sid_1]
block_pos_1 = self.model.named.data.xpos[self.block_sid_1]
block_orientation_1=np.reshape(self.model.named.data.xmat[self.block_sid_1], (3,3))
trans_push_mesh_vertices=np.matmul(block_orientation_1, self.push_mesh_vertices.T).T
trans_push_mesh_vertices=trans_push_mesh_vertices+block_pos_1
target_pos_1 = self.model.named.data.geom_xpos[self.target_sid_1]
target_orientation_1=np.reshape(self.model.named.data.geom_xmat[self.target_sid_1], (3,3))
trans_target_mesh_vertices=np.matmul(target_orientation_1, self.target_mesh_vertices.T).T
trans_target_mesh_vertices=trans_target_mesh_vertices+target_pos_1
#target_orientation_1=np.reshape(self.model.named.data.geom_xmat[self.target_sid_1], (3,3))
# block_pos_2 = self.model.named.data.geom_xpos[self.block_sid_2]
# block_orientation_2=np.reshape(self.model.named.data.geom_xmat[self.block_sid_2], (3,3))
# target_pos_2 = self.model.named.data.geom_xpos[self.target_sid_2]
# target_orientation_2=np.reshape(self.model.named.data.geom_xmat[self.target_sid_2], (3,3))
#
# block_pos_3 = self.model.named.data.geom_xpos[self.block_sid_3]
# block_orientation_3=np.reshape(self.model.named.data.geom_xmat[self.block_sid_3], (3,3))
# target_pos_3 = self.model.named.data.geom_xpos[self.target_sid_3]
# target_orientation_3=np.reshape(self.model.named.data.geom_xmat[self.target_sid_3], (3,3))
if self.task=='easy':
#cube_target_ADDS=self.get_cubes_ADDS(block_pos_1, target_pos_1, block_orientation_1, target_orientation_1, 0.055)
target_loss=np.max(scipy.spatial.distance.cdist(trans_push_mesh_vertices, trans_target_mesh_vertices, 'euclidean'))#scipy.spatial.distance.directed_hausdorff(trans_push_mesh_vertices, trans_target_mesh_vertices)[0]
robot_block_reward = -np.linalg.norm(hand_pos - block_pos_1)
#print('hand_pos', hand_pos, 'block_pos_1', block_pos_1, 'target_pos_1', target_pos_1)
vel_penalty=0.0
if not self.last_block_pos is None:
velocity=np.linalg.norm(self.last_block_pos-block_pos_1)
if velocity>0.01:
vel_penalty=-100*velocity
self.last_block_pos=np.copy(block_pos_1)
#a=np.linalg.norm(self.data.qvel)
reward = 1+0.1 * robot_block_reward+-target_loss#-0.001*np.linalg.norm(self.data.qvel)#+
elif self.task=='three_blocks':
cube_target_ADDS_1=self.get_cubes_ADDS(block_pos_1, target_pos_1, block_orientation_1, target_orientation_1, 0.055)
robot_block_reward_1 = -np.sum(np.abs(hand_pos - block_pos_1))
cube_target_ADDS_2=self.get_cubes_ADDS(block_pos_2, target_pos_2, block_orientation_2, target_orientation_2, 0.055)
robot_block_reward_2 = -np.sum(np.abs(hand_pos - block_pos_2))
cube_target_ADDS_3=self.get_cubes_ADDS(block_pos_3, target_pos_3, block_orientation_3, target_orientation_3, 0.055)
robot_block_reward_3 = -np.sum(np.abs(hand_pos - block_pos_3))
if cube_target_ADDS_1>0.05:
reward = 0.01*robot_block_reward_1+-cube_target_ADDS_1
elif cube_target_ADDS_2>0.05:
reward = 0.01*robot_block_reward_2+-cube_target_ADDS_2
elif cube_target_ADDS_3>0.05:
reward = 0.01*robot_block_reward_3+-cube_target_ADDS_3
ob = self.model.position()
# keep track of env timestep (needed for continual envs)
self.env_timestep += 1
return ob, reward, False, self.get_env_infos()
def get_dist(self):
hand_pos = self.model.named.data.geom_xpos[self.hand_sid]
#target_pos_1 = self.model.named.data.geom_xpos[self.target_sid_1]
#mesh=self.model.named.model.geom_dataid[self.block_sid_1]
block_pos_1 = self.model.named.data.xpos[self.block_sid_1]
block_orientation_1=np.reshape(self.model.named.data.xmat[self.block_sid_1], (3,3))
trans_push_mesh_vertices=np.matmul(block_orientation_1, self.push_mesh_vertices.T).T
trans_push_mesh_vertices=trans_push_mesh_vertices+block_pos_1
target_pos_1 = self.model.named.data.geom_xpos[self.target_sid_1]
target_orientation_1=np.reshape(self.model.named.data.geom_xmat[self.target_sid_1], (3,3))
trans_target_mesh_vertices=np.matmul(target_orientation_1, self.target_mesh_vertices.T).T
trans_target_mesh_vertices=trans_target_mesh_vertices+target_pos_1
#target_orientation_1=np.reshape(self.model.named.data.geom_xmat[self.target_sid_1], (3,3))
# block_pos_2 = self.model.named.data.geom_xpos[self.block_sid_2]
# block_orientation_2=np.reshape(self.model.named.data.geom_xmat[self.block_sid_2], (3,3))
# target_pos_2 = self.model.named.data.geom_xpos[self.target_sid_2]
# target_orientation_2=np.reshape(self.model.named.data.geom_xmat[self.target_sid_2], (3,3))
#
# block_pos_3 = self.model.named.data.geom_xpos[self.block_sid_3]
# block_orientation_3=np.reshape(self.model.named.data.geom_xmat[self.block_sid_3], (3,3))
# target_pos_3 = self.model.named.data.geom_xpos[self.target_sid_3]
# target_orientation_3=np.reshape(self.model.named.data.geom_xmat[self.target_sid_3], (3,3))
if self.task=='easy':
#cube_target_ADDS=self.get_cubes_ADDS(block_pos_1, target_pos_1, block_orientation_1, target_orientation_1, 0.055)
target_loss=np.max(scipy.spatial.distance.cdist(trans_push_mesh_vertices, trans_target_mesh_vertices, 'euclidean'))#scipy.spatial.distance.directed_hausdorff(trans_push_mesh_vertices, trans_target_mesh_vertices)[0]
robot_block_reward = -np.linalg.norm(hand_pos - block_pos_1)
#print('hand_pos', hand_pos, 'block_pos_1', block_pos_1, 'target_pos_1', target_pos_1)
vel_penalty=0.0
if not self.last_block_pos is None:
velocity=np.linalg.norm(self.last_block_pos-block_pos_1)
if velocity>0.01:
vel_penalty=-100*velocity
self.last_block_pos=np.copy(block_pos_1)
#a=np.linalg.norm(self.data.qvel)
reward = 1+0.1 * robot_block_reward+-target_loss#-0.001*np.linalg.norm(self.data.qvel)#+
return target_loss
#@profile
def get_cubes_ADDS(self, cube_1_position, cube_2_position, cube_1_orientation, cube_2_orientation, side_length):
cube_points=np.array([
[0,0,0],
[side_length,0,0],
[0,side_length,0],
[side_length,side_length,0],
[0,0,side_length],
[side_length,0,side_length],
[0,side_length,side_length],
[side_length,side_length,side_length]
])
cube_1_points=np.zeros(cube_points.shape)
cube_2_points=np.zeros(cube_points.shape)
for point_ind in range(8):
cube_1_points[point_ind]=np.matmul(cube_1_orientation, cube_points[point_ind])
cube_2_points[point_ind]=np.matmul(cube_2_orientation, cube_points[point_ind])
total_distance=0.0
for point_1_ind in range(8):
best_distance=float('inf')
distances=np.linalg.norm(cube_1_points[point_1_ind]-cube_2_points, axis=1)
best_distance=np.amin(distances)
total_distance+=best_distance
total_distance+=8*np.linalg.norm(cube_1_position-cube_2_position)
return total_distance/8.0
def step(self, a):
# overloading to preserve backwards compatibility
return self._step(a)
def _get_obs(self):
if self.task=='easy':
return np.concatenate([
self.model.position(),
self.model.velocity(),
self.model.named.data.geom_xpos[self.hand_sid],
self.model.named.data.xpos[self.block_sid_1],
self.model.named.data.geom_xpos[self.target_sid_1],
])
elif self.task=='three_blocks':
return np.concatenate([
self.model.position(),
self.model.velocity(),
self.model.named.data.geom_xpos[self.hand_sid],
self.model.named.data.geom_xpos[self.block_sid_1],
self.model.named.data.geom_xpos[self.target_sid_1],
self.model.named.data.geom_xpos[self.block_sid_2],
self.model.named.data.geom_xpos[self.target_sid_2],
self.model.named.data.geom_xpos[self.block_sid_3],
self.model.named.data.geom_xpos[self.target_sid_3],
])
# --------------------------------
# resets and randomization
# --------------------------------
def robot_reset(self):
self.set_state(self.init_qpos, self.init_qvel)
def target_reset(self):
target_pos = np.array([0.1, 0.1, 0.1])
if self.seeding is True:
target_pos[0] = self.np_random.uniform(low=-0.3, high=0.3)
target_pos[1] = self.np_random.uniform(low=-0.2, high=0.2)
target_pos[2] = self.np_random.uniform(low=-0.25, high=0.25)
#self.model.named.data.geom_xpos[self.target_sid] = target_pos
self.sim.physics.forward()
def reset_model(self, seed=None):
if seed is not None:
self.seeding = True
self.seed(seed)
self.robot_reset()
self.target_reset()
return self._get_obs()
# --------------------------------
# get and set states
# --------------------------------
def get_env_state(self):
target_pos = self.model.named.data.geom_xpos[self.target_sid_1].copy()
return dict(qp=self.data.qpos.copy(), qv=self.data.qvel.copy(),
target_pos=target_pos, timestep=self.env_timestep)
def set_env_state(self, state):
self.sim.reset()
qp = state['qp'].copy()
qv = state['qv'].copy()
target_pos = state['target_pos']
self.set_state(qp, qv)
self.model.named.data.geom_xpos[self.target_sid_1] = target_pos
self.env_timestep = state['timestep']
self.last_block_pos=None
# --------------------------------
# utility functions
# --------------------------------
def get_env_infos(self):
return dict(state=self.get_env_state())
def mj_viewer_setup(self):
u=0
import numpy as np
from gym import utils
from trajopt.envs import mujoco_env
import os
import dm_control.mujoco as mujoco
import fileinput
import trimesh
import scipy
def prep_XML(xml_path, replacement_path):
"""
Prepares MJCF XML code. Replaces mesh and textures directory for compiler
with that specified in our configuration file.
"""
compileToReplace = '<compiler coordinate="local" angle="radian" fusestatic="false" meshdir="{}" texturedir="{}"/>'
compileToReplace = compileToReplace.format(replacement_path, replacement_path)
for line in fileinput.input(xml_path, inplace=True):
if "compiler " in line:
print(compileToReplace)
else:
print(line.rstrip())
class HerbEnv(mujoco_env.MujocoEnv, utils.EzPickle):
#@profile
def __init__(self, path='/home/willie/workspace/SSC/herb_reconf/scene.xml', task='easy', obs=False, push_mesh_vertices=np.zeros((1,3)), target_mesh_vertices=np.zeros((1,3))):
# trajopt specific attributes
self.obs=obs
self.task=task
self.env_name = 'herb_pushing_easy'
self.seeding = False
self.real_step = True
self.env_timestep = 0
# placeholder
self.hand_sid = -2
self.target_sid = -1
#prep_XML(path, '/home/willie/workspace/herbpushing/herb_reconf')
self.model = mujoco.Physics.from_xml_path(path)
self.model.forward()
a=self.model.named.data.xpos.axes.row.names
self.hand_sid = "herb/wam_1/bhand//unnamed_geom_0"
if self.obs:
self.block_sid_1 = "gen_body_0"
else:
self.block_sid_1 = "push_obj"
self.target_sid_1 = "//unnamed_geom_15"
self.block_sid_2 = "//unnamed_geom_9"
self.target_sid_2 = "//unnamed_geom_16"
self.block_sid_3 = "//unnamed_geom_10"
self.target_sid_3 = "//unnamed_geom_17"
self.push_mesh_vertices=push_mesh_vertices
self.target_mesh_vertices=target_mesh_vertices
self.last_block_pos=None
self.init_qpos= np.array([-1.48, -1.07, -1.48, 0.899, 0, 1.12,
0, 0, 0, 0, 0, 0,
0, 0, 0
])#, 0.13900801576105609, -0.42142641007555215, 0.3549998,0,0,0,0
#self.init_qpos[:]=0.0
mujoco_env.MujocoEnv.__init__(self, path, 1)
utils.EzPickle.__init__(self)
self.observation_dim = 66
self.action_dim = 15
self.robot_reset()
#@profile
def _step(self, a):
s_ob = self._get_obs()
# if a.shape[0]==15:
# zero_vel_a=np.zeros(2*a.shape[0])
# zero_vel_a[0:15]=a
# #zero_vel_a[14:22]=a[7:15]
# a=zero_vel_a
self.do_simulation(a, self.frame_skip)
hand_pos = self.model.named.data.geom_xpos[self.hand_sid]
#target_pos_1 = self.model.named.data.geom_xpos[self.target_sid_1]
#mesh=self.model.named.model.geom_dataid[self.block_sid_1]
block_pos_1 = self.model.named.data.xpos[self.block_sid_1]
block_orientation_1=np.reshape(self.model.named.data.xmat[self.block_sid_1], (3,3))
trans_push_mesh_vertices=np.matmul(block_orientation_1, self.push_mesh_vertices.T).T
trans_push_mesh_vertices=trans_push_mesh_vertices+block_pos_1
target_pos_1 = self.model.named.data.geom_xpos[self.target_sid_1]
target_orientation_1=np.reshape(self.model.named.data.geom_xmat[self.target_sid_1], (3,3))
trans_target_mesh_vertices=np.matmul(target_orientation_1, self.target_mesh_vertices.T).T
trans_target_mesh_vertices=trans_target_mesh_vertices+target_pos_1
#target_orientation_1=np.reshape(self.model.named.data.geom_xmat[self.target_sid_1], (3,3))
# block_pos_2 = self.model.named.data.geom_xpos[self.block_sid_2]
# block_orientation_2=np.reshape(self.model.named.data.geom_xmat[self.block_sid_2], (3,3))
# target_pos_2 = self.model.named.data.geom_xpos[self.target_sid_2]
# target_orientation_2=np.reshape(self.model.named.data.geom_xmat[self.target_sid_2], (3,3))
#
# block_pos_3 = self.model.named.data.geom_xpos[self.block_sid_3]
# block_orientation_3=np.reshape(self.model.named.data.geom_xmat[self.block_sid_3], (3,3))
# target_pos_3 = self.model.named.data.geom_xpos[self.target_sid_3]
# target_orientation_3=np.reshape(self.model.named.data.geom_xmat[self.target_sid_3], (3,3))
if self.task=='easy':
#cube_target_ADDS=self.get_cubes_ADDS(block_pos_1, target_pos_1, block_orientation_1, target_orientation_1, 0.055)
target_loss=np.max(scipy.spatial.distance.cdist(trans_push_mesh_vertices, trans_target_mesh_vertices, 'euclidean'))#scipy.spatial.distance.directed_hausdorff(trans_push_mesh_vertices, trans_target_mesh_vertices)[0]
robot_block_reward = -np.linalg.norm(hand_pos - block_pos_1)
#print('hand_pos', hand_pos, 'block_pos_1', block_pos_1, 'target_pos_1', target_pos_1)
vel_penalty=0.0
if not self.last_block_pos is None:
velocity= | np.linalg.norm(self.last_block_pos-block_pos_1) | numpy.linalg.norm |
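# Worked example for the cube-distance metric defined in get_cubes_ADDS above: with identical
# orientations the per-corner minimum distances are zero, so the average reduces to the
# translation distance between the two cube positions (here 0.5).
import numpy as np

p1 = np.array([0.0, 0.0, 0.0])
p2 = np.array([0.3, 0.4, 0.0])   # ||p1 - p2|| = 0.5
R = np.eye(3)                    # same rotation for both cubes
# env.get_cubes_ADDS(p1, p2, R, R, 0.055) would return 0.5 under these assumptions
print(np.linalg.norm(p1 - p2))   # 0.5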
#!/usr/bin/env python
import os, sys, traceback
from mpl_toolkits.mplot3d import axes3d
import matplotlib.pylab as pylab
import numpy as np
import pylab as pl
import scipy as sci
import scipy.optimize.linesearch as ln
from sklearn.metrics.pairwise import rbf_kernel
from sklearn.neighbors import kneighbors_graph as kn_graph
from cvxopt import matrix, spmatrix, solvers, printing
solvers.options['show_progress'] = False
### ------------------------------- Optimal Transport ---------------------------------------
########### Compute transport with a LP Solver
def computeTransportLP(distribWeightS,distribWeightT, distances):
# init data
Nini = len(distribWeightS)
Nfin = len(distribWeightT)
# generate probability distribution of each class
p1p2 = np.concatenate((distribWeightS,distribWeightT))
p1p2 = p1p2[0:-1]
# generate cost matrix
costMatrix = distances.flatten()
# express the constraints matrix
I = []
J = []
for i in range(Nini):
for j in range(Nfin):
I.append(i)
J.append(i*Nfin+j)
for i in range(Nfin-1):
for j in range(Nini):
I.append(i+Nini)
J.append(j*Nfin+i)
A = spmatrix(1.0,I,J)
# positivity condition
G = spmatrix(-1.0,range(Nini*Nfin),range(Nini*Nfin))
sol = solvers.lp(matrix(costMatrix),G,matrix(np.zeros(Nini*Nfin)),A,matrix(p1p2))
S = np.array(sol['x'])
Gamma = np.reshape([l[0] for l in S],(Nini,Nfin))
return Gamma
########### Compute transport with the Sinkhorn algorithm
## ref "Sinkhorn distances: Lightspeed computation of Optimal Transport", NIPS 2013, <NAME>
def computeTransportSinkhorn(distribS,distribT, M, reg,Mmax=0,numItermax = 200,stopThr=1e-9):
# init data
Nini = len(distribS)
Nfin = len(distribT)
cpt = 0
# we assume that no distances are null except those of the diagonal of distances
u = np.ones(Nini)/Nini
v = np.ones(Nfin)/Nfin
uprev=np.zeros(Nini)
vprev=np.zeros(Nini)
if Mmax:
regmax=300./Mmax
else:
regmax=300./np.max(M)
reg=regmax*(1-np.exp(-reg/regmax))
#print reg
K = np.exp(-reg*M)
#print np.min(K)
Kp = np.dot(np.diag(1/distribS),K)
transp = K
cpt = 0
err=1
while (err>stopThr and cpt<numItermax):
if np.any(np.dot(K.T,u)==0) or np.any(np.isnan(u)) or np.any(np.isnan(v)):
# we have reached the machine precision
# come back to previous solution and quit loop
print('Warning: numerical errors')
if cpt!=0:
u = uprev
v = vprev
break
uprev = u
vprev = v
v = np.divide(distribT,np.dot(K.T,u))
u = 1./np.dot(Kp,v)
if cpt%10==0:
# we can speed up the process by checking the error only every 10th iteration
transp = np.dot(np.diag(u),np.dot(K,np.diag(v)))
err = np.linalg.norm((np.sum(transp,axis=0)-distribT))**2
cpt = cpt +1
#print 'err=',err,' cpt=',cpt
return np.dot(np.diag(u),np.dot(K,np.diag(v)))
########### Compute transport with the Sinkhorn algorithm + Class regularization
## ref "Domain adaptation with regularized optimal transport ", ECML 2014,
def indices(a, func):
return [i for (i, val) in enumerate(a) if func(val)]
def computeTransportSinkhornLabelsLpL1(distribS,LabelsS, distribT, M, reg, eta=0.1,nbitermax=10):
p=0.5
epsilon = 1e-3
# init data
Nini = len(distribS)
Nfin = len(distribT)
W=np.zeros(M.shape)
for cpt in range(nbitermax):
Mreg = M + eta*W
transp=computeTransportSinkhorn(distribS,distribT,Mreg,reg,numItermax = 200)
# the transport has been computed. Check if classes are really separated
W = np.ones((Nini,Nfin))
for t in range(Nfin):
for c in np.unique(LabelsS):
maj = p*((np.sum(transp[LabelsS==c,t])+epsilon)**(p-1))
W[LabelsS==c,t]=maj
return transp
########### Compute transport with the Generalized conditionnal gradient method + Group-Lasso Class regularization
## ref "Optimal transport for Domain Adaptation ", T PAMI 2016
def get_W_L1L2(transp,labels,lstlab):
W=np.zeros(transp.shape)
for i in range(transp.shape[1]):
for lab in lstlab:
temp=transp[labels==lab,i]
n=np.linalg.norm(temp)
if n:
W[labels==lab,i]=temp/n
return W
def loss_L1L2(transp,labels,lstlab):
res=0
for i in range(transp.shape[1]):
for lab in lstlab:
temp=transp[labels==lab,i]
#W[]
res+=np.linalg.norm(temp)
return res
def computeTransportL1L2_CGS(distribS,LabelsS, distribT, M, reg, eta=0.1,nbitermax=10,thr_stop=1e-8,**kwargs):
Nini = len(distribS)
Nfin = len(distribT)
W=np.zeros(M.shape)
maxdist = np.max(M)
distances=M
lstlab=np.unique(LabelsS)
regmax=300./maxdist
reg0=regmax*(1-np.exp(-reg/regmax))
transp= computeTransportSinkhorn(distribS,distribT,distances,reg,maxdist)
niter=1;
while True:
old_transp=transp.copy()
W = get_W_L1L2(old_transp,LabelsS,lstlab)
G=eta*W
transp0= computeTransportSinkhorn(distribS,distribT,distances + G,reg,maxdist)
deltatransp = transp0 - old_transp
# do a line search for best tau
def f(tau):
T = old_transp+tau*deltatransp
return np.sum(T*distances)+1./reg0*np.sum(T*np.log(T))+eta*loss_L1L2(T,LabelsS,lstlab)
# compute f'(0)
res=0
for i in range(transp.shape[1]):
for lab in lstlab:
temp1=old_transp[LabelsS==lab,i]
temp2=deltatransp[LabelsS==lab,i]
res+= | np.dot(temp1,temp2) | numpy.dot |
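# Hedged usage sketch for computeTransportSinkhorn above; the toy sizes and reg value are
# illustrative assumptions. The returned coupling should (approximately) match both marginals.
import numpy as np

rng = np.random.RandomState(0)
a = np.ones(5) / 5                 # source distribution
b = np.ones(5) / 5                 # target distribution
M = rng.rand(5, 5)                 # cost matrix
G = computeTransportSinkhorn(a, b, M, 1.0)   # assumes the function defined above
print(G.sum(axis=1))               # ~ a
print(G.sum(axis=0))               # ~ b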
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import json
import os
import datetime as dt
import main
from eval import data_analysis
# LaTeX settings
plt.rc('text', usetex=True)
plt.rc('font', **{'family': 'serif', 'sans-serif': ['lmodern'], 'size': 18})
plt.rc('axes', **{'titlesize': 18, 'labelsize': 18})
# Constants
JSON_PATH = './out/'
OUT_PATH = './out/'
MODEL_NAMES = {
'KF': ('KalmanFilter', ''),
'KF(+W)': ('KalmanFilter', '_W'),
'KF(+WF)': ('KalmanFilter', '_WF'),
'KD-IC': ('KD-IC', ''),
'KD-IC(+W)': ('KD-IC', '_W'),
'KD-IC(+WF)': ('KD-IC', '_WF'),
'LN-IC': ('LogNormal-IC', ''),
'LN-IC(+W)': ('LogNormal-IC', '_W'),
'LN-IC(+WF)': ('LogNormal-IC', '_WF'),
'DeepAR': ('DeepAR', ''),
'DeepAR(+W)': ('DeepAR', '_W'),
'DeepAR(+WF)': ('DeepAR', '_WF'),
'LW': ('LastWeek', '')
}
MAIN_SEED = '42'
DECIMALS = 2
COLORS = ('C0', 'C1', 'C3', 'C9', 'C7')
MARKERS = ('o', 'X', 'v', 'd', 'p')
LINESTYLES = ('solid', 'dashed', 'dashdot')
S_D = 48
S_W = 7 * S_D
def get_file_name(model, level, cluster, seed=''):
return f'{MODEL_NAMES[model][0]}{seed}_{level}_{cluster}{MODEL_NAMES[model][1]}'
def get_path(model, level, cluster, seed=''):
return JSON_PATH + f'{MODEL_NAMES[model][0]}{seed}/{get_file_name(model, level, cluster, seed)}.json'
def load_res(model, level, cluster, seed=''):
if 'DeepAR' in model and seed == '':
seed = MAIN_SEED
with open(get_path(model, level, cluster, seed), 'r') as fp:
res = json.load(fp)
return res
def collect_results(
levels=('L0', 'L1', 'L2', 'L3'),
metrics=('MAPE', 'rMAE', 'rRMSE', 'rCRPS'),
models=('KF', 'KF(+W)', 'KF(+WF)',
'KD-IC', 'KD-IC(+W)', 'KD-IC(+WF)',
'DeepAR', 'DeepAR(+W)', 'DeepAR(+WF)',
'LW'),
seeds=(0, 1, 2, 3, 4),
forecast_reps=28,
save_results_with_info=True
):
results_path = os.path.join(JSON_PATH, 'results_with_info.npy')
if os.path.isfile(results_path):
results_with_info = np.load(results_path, allow_pickle=True)
return results_with_info[0], results_with_info[1]
results = {}
level_info = data_analysis.get_level_info(levels)
for level in levels:
clusters = level_info[level]['clusters']
# Create results array
results[level] = np.empty((len(metrics), len(models), len(clusters), forecast_reps))
results[level][:] = np.nan
for m, model in enumerate(models):
if level == 'L3' and 'KF' in model:
# No level 3 results for the KF model
continue
for c, cluster in enumerate(clusters):
if 'DeepAR' in model and level != 'L3':
res_per_seed = []
for seed in seeds:
res_per_seed.append(load_res(model, level, cluster, seed))
for i, metric in enumerate(metrics):
results[level][i, m, c] = np.mean([res[metric] for res in res_per_seed], axis=0)
else:
res = load_res(model, level, cluster)
for i, metric in enumerate(metrics):
if 'CRPS' in metric and model == 'LW':
# No distributional forecasts for LW model
continue
results[level][i, m, c] = res[metric]
info = {
'levels': level_info,
'metrics': list(metrics),
'models': list(models),
'reps': forecast_reps
}
if save_results_with_info:
np.save(results_path, (results, info), allow_pickle=True)
return results, info
def collect_results_per_tstp(
levels=('L0', 'L1', 'L2'),
metrics=('rMAE', 'rRMSE', 'rCRPS'),
models=('KF', 'KF(+W)', 'KF(+WF)',
'KD-IC', 'KD-IC(+W)', 'KD-IC(+WF)',
'DeepAR', 'DeepAR(+W)', 'DeepAR(+WF)',
'LW'),
seeds=(0, 1, 2, 3, 4),
forecast_reps=28,
horizon=192,
save_results_per_tstp_with_info=True
):
results_path = os.path.join(JSON_PATH, 'results_per_tstp_with_info.npy')
if os.path.isfile(results_path):
results_with_info = np.load(results_path, allow_pickle=True)
return results_with_info[0], results_with_info[1]
results = {}
level_info = data_analysis.get_level_info(levels)
t_train, t_val = main.train_val_split(data_analysis.energy_df.index)
for level in levels:
clusters = level_info[level]['clusters']
# Create results array
results[level] = np.empty((len(seeds), len(metrics), len(models), len(clusters), forecast_reps, horizon))
results[level][:] = np.nan
level_info[level]['y_mean'] = []
for c, cluster in enumerate(clusters):
level_info[level]['y_mean'].append(
np.nanmean(data_analysis.get_observations_at(level, cluster, t_train))
)
y_true = data_analysis.get_observations_at(level, cluster, t_val).reshape(forecast_reps, horizon)
for m, model in enumerate(models):
if level == 'L3' and 'KF' in model:
# No level 3 results for the KF model
continue
if 'DeepAR' in model and level != 'L3':
for s, seed in enumerate(seeds):
res = load_res(model, level, cluster, seed)
for i, metric in enumerate(metrics):
if metric == 'rMAE':
results[level][s, i, m, c] = np.abs(y_true - res['p50'])
elif metric == 'rRMSE':
results[level][s, i, m, c] = (y_true - res['mean']) ** 2
elif metric == 'rCRPS':
results[level][s, i, m, c] = res['CRPS']
else:
res = load_res(model, level, cluster)
for i, metric in enumerate(metrics):
if 'CRPS' in metric and model == 'LW':
# No distributional forecasts for LW model
continue
if metric == 'rMAE':
results[level][0, i, m, c] = np.abs(y_true - res['p50'])
elif metric == 'rRMSE':
results[level][0, i, m, c] = (y_true - res['mean']) ** 2
elif metric == 'rCRPS':
results[level][0, i, m, c] = res['CRPS']
info = {
'levels': level_info,
'metrics': list(metrics),
'models': list(models),
'reps': forecast_reps,
'horizon': horizon
}
if save_results_per_tstp_with_info:
np.save(results_path, (results, info), allow_pickle=True)
return results, info
def create_metric_df(metric, with_std=True, to_LaTeX=True):
results, info = collect_results()
i = info['metrics'].index(metric)
row_names = info['models']
col_names = info['levels'].keys()
metric_df = pd.DataFrame(index=row_names, columns=col_names, dtype=float)
for level in col_names:
for m, model in enumerate(row_names):
mean = np.mean(results[level][i, m])
metric_df.loc[model, level] = (('%%.%sf' % DECIMALS) % mean) if not np.isnan(mean) else '-'
if with_std and not np.isnan(mean):
std = np.std(results[level][i, m])
metric_df.loc[model, level] += (' (%%.%sf)' % DECIMALS) % std
if to_LaTeX:
df_to_LaTeX(metric_df)
return metric_df
def create_level_df(level, with_std=True, to_LaTeX=True):
results, info = collect_results()
row_names = info['metrics']
col_names = info['models']
level_df = pd.DataFrame(index=row_names, columns=col_names, dtype=float)
for i, metric in enumerate(row_names):
for m, model in enumerate(col_names):
mean = np.mean(results[level][i, m])
level_df.loc[metric, model] = (('%%.%sf' % DECIMALS) % mean) if not np.isnan(mean) else '-'
if with_std and not np.isnan(mean):
std = np.std(results[level][i, m])
level_df.loc[metric, model] += (' (%%.%sf)' % DECIMALS) % std
if to_LaTeX:
df_to_LaTeX(level_df)
return level_df
def create_runtime_df(models=('KF', 'KD-IC', 'DeepAR', 'LW'), with_std=False, to_LaTeX=True):
_, info = collect_results()
train_name = 'Avg. training time [s]'
prediction_name = 'Avg. prediction time [s]'
runtime_df = pd.DataFrame(index=[train_name, prediction_name], columns=models, dtype=float)
for model in models:
training_times = []
prediction_times = []
for level in info['levels'].keys():
if level == 'L3' and 'KF' in model:
# No level 3 results for the KF model
continue
for cluster in info['levels'][level]['clusters']:
res = load_res(model, level, cluster)
training_times.append(res['fit_time'])
prediction_times.append(res['prediction_time'])
decimals = DECIMALS + 1
runtime_df.loc[train_name, model] = ('%%.%sf' % decimals) % np.mean(training_times)
runtime_df.loc[prediction_name, model] = ('%%.%sf' % decimals) % np.mean(prediction_times)
if with_std:
runtime_df.loc[train_name, model] += (' (%%.%sf)' % decimals) % np.std(training_times)
runtime_df.loc[prediction_name, model] += (' (%%.%sf)' % decimals) % np.std(prediction_times)
if to_LaTeX:
df_to_LaTeX(runtime_df)
return runtime_df
def df_to_LaTeX(df):
num_columns = len(df.columns)
print(df.to_latex(
float_format=f'%.{DECIMALS}f',
na_rep='-',
column_format='l' + ''.join('r' * num_columns)
))
def get_color(model):
if 'KF' in model:
return COLORS[0]
elif 'KD-IC' in model:
return COLORS[1]
elif 'DeepAR' in model:
return COLORS[2]
elif 'LW' in model:
return COLORS[3]
else:
return COLORS[4]
def get_linestyle(model):
if '(+W)' in model:
return LINESTYLES[1]
elif '(+WF)' in model:
return LINESTYLES[2]
else:
return LINESTYLES[0]
def _complete_plot(name, legend=True, grid=True):
if legend:
plt.legend()
if grid:
plt.grid()
plt.tight_layout()
plt.savefig(OUT_PATH + f'{name}.pdf', bbox_inches='tight')
plt.close()
def plot_epoch_loss(model, level, cluster, seed=MAIN_SEED):
assert 'DeepAR' in model, "Loss plot only available for deep models"
res = load_res(model, level, cluster, seed)
train_loss = res['train_loss']
val_loss = res['val_loss']
plt.figure(figsize=(6, 4))
plt.plot(np.arange(len(train_loss)) + 1, train_loss, color=COLORS[0], label='Train')
plt.plot(np.arange(len(val_loss)) + 1, val_loss, color=COLORS[1], label='Validation')
plt.ylabel('Loss')
plt.xlabel('Epoch')
_complete_plot(f'{get_file_name(model, level, cluster, seed)}_epoch_loss', grid=False)
def plot_horizon(model, metric, horizons=(1, 2, 3, 4), levels=('L0', 'L1', 'L2')):
results, info = collect_results_per_tstp()
model_W = model + '(+W)'
model_WF = model + '(+WF)'
i = info['metrics'].index(metric)
m = info['models'].index(model)
m_W = info['models'].index(model_W)
m_WF = info['models'].index(model_WF)
score = np.empty(len(horizons))
score_W = np.empty(len(horizons))
score_WF = np.empty(len(horizons))
for h, horizon in enumerate(horizons):
idx = np.arange(0, horizon * S_D)
res = []
res_W = []
res_WF = []
for level in levels:
for c, cluster in enumerate(info['levels'][level]['clusters']):
y_mean = info['levels'][level]['y_mean'][c]
if metric == 'rRMSE':
res.append(100 * np.sqrt(np.mean(results[level][:, i, m, c, :, idx], axis=2)) / y_mean)
res_W.append(100 * np.sqrt(np.mean(results[level][:, i, m_W, c, :, idx], axis=2)) / y_mean)
res_WF.append(100 * np.sqrt(np.mean(results[level][:, i, m_WF, c, :, idx], axis=2)) / y_mean)
else:
res.append(100 * np.mean(results[level][:, i, m, c, :, idx], axis=2) / y_mean)
res_W.append(100 * np.mean(results[level][:, i, m_W, c, :, idx], axis=2) / y_mean)
res_WF.append(100 * np.mean(results[level][:, i, m_WF, c, :, idx], axis=2) / y_mean)
score[h] = np.nanmean(res)
score_W[h] = np.nanmean(res_W)
score_WF[h] = np.nanmean(res_WF)
skill_W = 100 * (1 - score_W / score)
skill_WF = 100 * (1 - score_WF / score)
print(f'SS_{metric} (W): {skill_W}')
print(f'SS_{metric} (WF): {skill_WF}')
plt.figure(figsize=(3.5, 4))
plt.plot(
score,
linestyle=get_linestyle(model),
color=get_color(model),
marker=MARKERS[0]
)
plt.plot(
score_W,
linestyle=get_linestyle(model_W),
color=get_color(model_W),
marker=MARKERS[1]
)
plt.plot(
score_WF,
linestyle=get_linestyle(model_WF),
color=get_color(model_WF),
marker=MARKERS[2]
)
plt.ylim(6.95, 8.35)
plt.ylabel(metric)
plt.xlabel('Horizon')
plt.xticks(np.arange(len(horizons)), np.array(horizons))
plt.title(model)
_complete_plot(f"{model}_{metric}_horizon", grid=False, legend=False)
def plot_reps(metric, levels=('L0', 'L1', 'L2'), models=None, name=None):
results, info = collect_results()
models = info['models'] if models is None else models
i = info['metrics'].index(metric)
# Lines for second legend
_, ax = plt.subplots()
lines = ax.plot([0, 1], [0, 1], '-C7', [0, 1], [0, 2], '--C7')
plt.close()
plt.figure(figsize=(10, 4))
for j, model in enumerate(models):
m = info['models'].index(model)
reps_mean = []
for level in levels:
if level == 'L3' and 'KF' in model:
# No level 3 results for the KF model
continue
for c, cluster in enumerate(info['levels'][level]['clusters']):
reps_mean.append(results[level][i, m, c])
reps_mean = np.mean(reps_mean, axis=0)
plt.plot(
reps_mean,
label=model if '(' not in model else None,
linestyle=get_linestyle(model),
color=get_color(model)
)
plt.ylabel(metric)
plt.xlabel('Forecast origin')
plt.yticks(np.arange(5, 17, 2.5))
t0 = load_res('LW', 'L0', 'Agg')['t0']
ticks = [dt.datetime.strptime(tstp, '%Y-%m-%d, %H:%M').strftime('%b, %d') for tstp in t0[1::5]]
plt.xticks(np.arange(1, len(t0), 5), ticks, rotation=0)
plt.grid(axis='y')
second_legend = plt.legend(lines, ('no weather', 'actual weather'), loc='upper left')
plt.gca().add_artist(second_legend)
_complete_plot(f"{f'{name}_' if name is not None else ''}{metric}_reps", grid=False)
def plot_clusters(level, metric, models=None, name=None):
results, info = collect_results()
models = info['models'] if models is None else models
i = info['metrics'].index(metric)
plt.figure(figsize=(10, 4))
for model in models:
if level == 'L3' and 'KF' in model:
# No level 3 results for the KF model
continue
m = info['models'].index(model)
clusters_mean = np.mean(results[level][i, m], axis=1)
plt.plot(
clusters_mean,
label=model,
linestyle=get_linestyle(model),
color=get_color(model)
)
plt.ylabel(metric)
cluster_labels = [f"{cluster.replace('ACORN-', '')} ({count})" for cluster, count in zip(
info['levels'][level]['clusters'],
info['levels'][level]['cardinality']
)]
if level == 'L3':
plt.xticks(np.arange(0, len(cluster_labels), 100), np.array(cluster_labels)[::100], rotation=90)
elif level == 'L2':
plt.xticks(np.arange(len(cluster_labels)), cluster_labels, rotation=90)
else:
plt.xticks(np.arange(len(cluster_labels)), cluster_labels)
_complete_plot(f"{f'{name}_' if name is not None else ''}{level}_{metric}_clusters")
def plot_aggregate_size(metric, models=None, name=None):
results, info = collect_results()
models = info['models'] if models is None else models
i = info['metrics'].index(metric)
aggregate_sizes = []
errors = {}
bottom_level_errors = {}
for model in models:
errors[model] = []
bottom_level_errors[model] = []
for level, level_info in info['levels'].items():
for c, agg_size in enumerate(level_info['cardinality']):
if level != 'L3':
aggregate_sizes.append(agg_size)
for model in models:
m = info['models'].index(model)
errors[model].append(np.mean(results[level][i, m, c]))
else:
for model in models:
m = info['models'].index(model)
bottom_level_errors[model].append(np.mean(results[level][i, m, c]))
aggregate_sizes.append(1)
for model in models:
errors[model].append(np.mean(bottom_level_errors[model]))
sorted_idx = np.argsort(aggregate_sizes)
aggregate_sizes = np.array(aggregate_sizes)[sorted_idx]
plt.figure(figsize=(6, 4))
for model in models:
if 'CRPS' in metric and model == 'LW':
# No distributional forecasts for LW model
continue
plt.plot(
aggregate_sizes,
np.array(errors[model])[sorted_idx],
label=model,
linestyle=get_linestyle(model),
color=get_color(model)
)
plt.ylabel(metric)
plt.yticks(np.arange(0, 70, 20))
plt.xlabel('\\# aggregated meters')
plt.xscale('log')
plt.xticks([1, 10, 100, 1000], ['1', '10', '100', '1000'])
_complete_plot(f"{f'{name}_' if name is not None else ''}{metric}_aggregate_size", grid=False)
def get_skill_scores(model, metric, no_L3=False):
results, info = collect_results()
i = info['metrics'].index(metric)
m = info['models'].index(model)
m_W = info['models'].index(model + '(+W)')
m_WF = info['models'].index(model + '(+WF)')
aggregate_sizes = []
score = []
score_W = []
score_WF = []
bottom_level_score = []
bottom_level_score_W = []
bottom_level_score_WF = []
t_train = main.train_val_split(data_analysis.energy_df.index)[0]
u = data_analysis.daily(
data_analysis.get_weather_df(forecast=False).loc[t_train, 'temperature'].to_numpy(float),
reduce=True
)
u_F = data_analysis.daily(
data_analysis.get_weather_df(forecast=True).loc[t_train, 'temperature'].to_numpy(float),
reduce=True
)
corr_W = []
corr_WF = []
bottom_level_corr_W = []
bottom_level_corr_WF = []
for level, level_info in info['levels'].items():
if level == 'L3' and ('KF' in model or no_L3):
# No level 3 results for the KF model
continue
for c, (cluster, agg_size) in enumerate(zip(level_info['clusters'], level_info['cardinality'])):
y = data_analysis.daily(
np.array(data_analysis.get_observations_at(level, cluster, t_train)),
reduce=True
)
if level != 'L3':
aggregate_sizes.append(agg_size)
score.append( | np.mean(results[level][i, m, c]) | numpy.mean |
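# Hedged sketch of the skill-score convention used in plot_horizon/get_skill_scores above:
# positive values mean the weather-informed variant improves on the baseline score.
import numpy as np

score = np.array([8.0, 7.5])      # baseline error per horizon (illustrative numbers)
score_W = np.array([7.2, 7.4])    # same model with actual weather
print(100 * (1 - score_W / score))   # [10.0, 1.33...] -> percentage improvement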
"""
Created on Wed Jun 17 14:01:23 2020
combine graph properties for different seeds
@author: Jyotika.bahuguna
"""
import os
import glob
import numpy as np
import pylab as pl
import scipy.io as sio
from copy import copy, deepcopy
import pickle
import matplotlib.cm as cm
import pdb
import h5py
import pandas as pd
import bct
from collections import Counter
import matplotlib.cm as cm
import analyze as anal
import sys
#
data_target_dir = "./data/"
data_type = sys.argv[1]
print(data_type)
if data_type == "subtype":
electrophys = "ELECTROPHY"
# Raw data
data_dir = "../SpaethBahugunaData/ProcessedData/Adaptive_Dataset/"
subtypes = os.listdir(data_dir)
#data_2d = pickle.load(open(data_target_dir+"data_2d_maps.pickle","rb"))
#data = pd.read_csv(data_target_dir+"meta_data.csv")
files = glob.glob(data_target_dir+"graph_properties_norm_*.pickle")
elif data_type == "development":
development = "DEVELOPMENT"
# Raw data
data_dir = "../SpaethBahugunaData/ProcessedData/Development_Dataset/"
subtypes = os.listdir(data_dir) # Only the variable name is subtypes; it's actually days
#data_2d = pickle.load(open(data_target_dir+"data_2d_maps_days.pickle","rb"))
#data = pd.read_csv(data_target_dir+"meta_data_days.csv")
files = glob.glob(data_target_dir+"graph_properties_days_norm_*.pickle")
num_or_size = "num" # num of clusters or size of the largest cluster
gamma_re_arrange = 0.34
gammas = np.arange(0.0,1.5,0.17)
cmaps = [cm.get_cmap('Reds',len(gammas)+10), cm.get_cmap('Blues',len(gammas)+10), cm.get_cmap('Greens',len(gammas)+10), cm.get_cmap('Purples',len(gammas)+10),cm.get_cmap('Greys',len(gammas)+4),cm.get_cmap('pink_r',len(gammas)+10)]
graph_prop_simps = dict()
graph_prop_simps_null = dict()
percentile = 70
dat_type = data_type
print(files)
print(len(files))
for f in files:
seed = f.split('/')[-1].split('_')[-1].split('.')[0]
graph_properties = pickle.load(open(f,"rb"))
graph_prop_df = pd.DataFrame(columns=["modularity_index","gamma","participation_pos","participation_neg","local_assortativity_pos_whole","module_degree_zscore","total_amplitude","average_amplitude","percentage_active_sites","names"]+[dat_type])
graph_prop_df_null = pd.DataFrame(columns=["modularity_index","gamma","participation_pos","local_assortativity_pos_whole","module_degree_zscore","names"]+[dat_type])
temp_dict = dict()
for x in list(graph_prop_df.keys()):
temp_dict[x] = []
temp_dict_null = dict()
for x in list(graph_prop_df_null.keys()):
temp_dict_null[x] = []
for i,st in enumerate(subtypes):
st_list_cov=[]
st_mods_list_cov=[]
st_list_corr=[]
st_list_corr_null=[]
st_mods_list_corr=[]
st_mods_list_corr_null=[]
norms =[]
tot_amp=[]
avg_amp = []
per_act_sit = []
graph_prop_simps[st] = dict()
graph_prop_simps_null[st] = dict()
participation_pos = []
participation_pos_null = []
participation_neg = []
participation_neg_null = []
loc_ass_pos = []
loc_ass_pos_null = []
#loc_ass_neg = []
zscore = []
zscore_null = []
names=[]
nz_inds = []
count = 0
print("==================================================================")
print(st)
print("==================================================================")
for j,x in enumerate(list(graph_properties[st]["modularity"].keys())):
ind = graph_properties[st]["indices"]
for y1 in list(graph_properties[st]["modularity"][x].keys()):
if "norm" in y1:
norms.append(graph_properties[st]["modularity"][x]["norm"])
elif "total_amplitude" in y1:
tot_amp.append(graph_properties[st]["modularity"][x]["total_amplitude"])
elif "average_amplitude" in y1:
avg_amp.append(graph_properties[st]["modularity"][x]["average_amplitude"])
elif "percentage_active_sites" in y1:
per_act_sit.append(graph_properties[st]["modularity"][x]["percentage_active_sites"])
elif "participation" in y1 and "whole" in y1:
if "null" in y1:
participation_pos_null.append(graph_properties[st]["modularity"][x]["participation_whole_null"][0])
participation_neg_null.append(graph_properties[st]["modularity"][x]["participation_whole_null"][1])
else:
participation_pos.append(graph_properties[st]["modularity"][x]["participation_whole"][0])
participation_neg.append(graph_properties[st]["modularity"][x]["participation_whole"][1])
elif "zscore" in y1 and "whole" in y1:
if "null" in y1:
zscore_null.append(graph_properties[st]["modularity"][x]["module_degree_zscore_whole_null"])
else:
zscore.append(graph_properties[st]["modularity"][x]["module_degree_zscore_whole"])
elif "local" in y1:
if "null" in y1:
loc_ass_pos_null.append(graph_properties[st]["modularity"][x]["local_assortativity_whole_null"])
else:
loc_ass_pos.append(graph_properties[st]["modularity"][x]["local_assortativity_whole"])
elif y1 == "cov" or y1 == "corr":
mod_indices = graph_properties[st]["modularity"][x][y1][0]
num_mods = [len(y) for y in graph_properties[st]["modularity"][x][y1][1]]
# If num_mods is zero, just go to the next data point; if this is empty, it causes problems while slicing by gammas
if num_mods[0] == 0:
continue
num_mods_size = [np.max(y) for y in graph_properties[st]["modularity"][x][y1][1] if len(y) > 0]
num_mods_greater_size = [ len(np.where(np.array(y) >= np.percentile(y,percentile))[0]) for y in graph_properties[st]["modularity"][x][y1][1] if len(y) > 0]
nz_inds.append(x)
print(mod_indices)
print(num_mods)
if "cov" in y1:
st_list_cov.append((mod_indices,num_mods,num_mods_size,num_mods_greater_size))
st_mods_list_cov.append(graph_properties[st]["modularity"][x][y1][1])
elif "corr" in y1:
st_list_corr.append((mod_indices,num_mods,num_mods_size,num_mods_greater_size))
st_mods_list_corr.append(graph_properties[st]["modularity"][x][y1][1])
elif y1 == "corr_null":
mod_indices_null = graph_properties[st]["modularity"][x][y1][0]
#if num_or_size == "num":
num_mods_null = [len(y) for y in graph_properties[st]["modularity"][x][y1][1]]
# If num_mods is zero, just go to the next data point; if this is empty, it causes problems while slicing by gammas
if num_mods_null[0] == 0:
continue
#elif num_or_size == "size":
num_mods_size_null = [np.max(y) for y in graph_properties[st]["modularity"][x][y1][1] if len(y) > 0]
num_mods_greater_size_null = [ len(np.where(np.array(y) >= np.percentile(y,percentile))[0]) for y in graph_properties[st]["modularity"][x][y1][1] if len(y) > 0]
st_list_corr_null.append((mod_indices_null,num_mods_null,num_mods_size_null,num_mods_greater_size_null))
st_mods_list_corr_null.append(graph_properties[st]["modularity"][x][y1][1])
graph_prop_simps[st]["participation_pos"] = participation_pos
graph_prop_simps_null[st]["participation_pos_null"] = participation_pos_null
graph_prop_simps[st]["participation_neg"] = participation_neg
graph_prop_simps_null[st]["participation_neg_null"] = participation_neg_null
graph_prop_simps[st]["module_degree_zscore"] = zscore
graph_prop_simps_null[st]["module_degree_zscore_null"] = zscore_null
print(len(norms),len(st_list_corr))
nz_inds = np.unique(nz_inds)
if len(norms) > len(st_list_corr):
graph_prop_simps[st]["st_list_corr_norm"] = np.array(norms)[nz_inds]
graph_prop_simps[st]["total_amplitude"] = np.array(tot_amp)[nz_inds]
graph_prop_simps[st]["average_amplitude"] = np.array(avg_amp)[nz_inds]
graph_prop_simps[st]["percentage_active_sites"] = np.array(per_act_sit)[nz_inds]
else:
graph_prop_simps[st]["st_list_corr_norm"] = np.array(norms)
graph_prop_simps[st]["total_amplitude"] = np.array(tot_amp)
graph_prop_simps[st]["average_amplitude"] = np.array(avg_amp)
graph_prop_simps[st]["percentage_active_sites"] = np.array(per_act_sit)
if len(loc_ass_pos) > len(st_list_corr):
graph_prop_simps[st]["local_assortativity_pos_whole"] = np.array(loc_ass_pos)[nz_inds]
else:
graph_prop_simps[st]["local_assortativity_pos_whole"] = np.array(loc_ass_pos)
if len(loc_ass_pos_null) > len(st_list_corr_null):
graph_prop_simps_null[st]["local_assortativity_pos_whole_null"] = np.array(loc_ass_pos_null)[nz_inds]
else:
graph_prop_simps_null[st]["local_assortativity_pos_whole_null"] = np.array(loc_ass_pos_null)
if len(graph_properties[st]['names']) > len(st_list_corr):
graph_prop_simps[st]["names"] = np.array(graph_properties[st]['names'])[nz_inds]
graph_prop_simps_null[st]["names"] = np.array(graph_properties[st]['names'])[nz_inds]
else:
graph_prop_simps[st]["names"] = np.array(graph_properties[st]['names'])
graph_prop_simps_null[st]["names"] = np.array(graph_properties[st]['names'])
if num_or_size == "num":
ind_prop = 1
elif num_or_size == "size":
ind_prop = 2
for k in np.arange(0,len(gammas)):
temp_dict["modularity_index"].append(np.array(st_list_corr)[:,:,k][:,0])
temp_dict_null["modularity_index"].append(np.array(st_list_corr_null)[:,:,k][:,0])
nz_inds = np.unique(nz_inds)
temp_dict["gamma"].append([ np.round(gammas[k],2) for i2 in np.arange(0,len(np.array(st_list_corr)[:,:,k][:,0]))])
temp_dict_null["gamma"].append([ np.round(gammas[k],2) for i2 in np.arange(0,len(np.array(st_list_corr_null)[:,:,k][:,0]))])
if len(norms) > len(st_list_corr):
temp_dict["total_amplitude"].append(np.array(tot_amp)[nz_inds])
temp_dict["average_amplitude"].append(np.array(avg_amp)[nz_inds])
temp_dict["percentage_active_sites"].append(np.array(per_act_sit)[nz_inds])
temp_dict["participation_pos"].append(np.array(graph_prop_simps[st]["participation_pos"])[nz_inds,k])
temp_dict_null["participation_pos"].append(np.array(graph_prop_simps_null[st]["participation_pos_null"])[nz_inds,k])
temp_dict["participation_neg"].append(np.array(graph_prop_simps[st]["participation_neg"])[nz_inds,k])
temp_dict["module_degree_zscore"].append(np.array(graph_prop_simps[st]["module_degree_zscore"])[nz_inds,k])
temp_dict_null["module_degree_zscore"].append(np.array(graph_prop_simps_null[st]["module_degree_zscore_null"])[nz_inds,k])
else:
temp_dict["total_amplitude"].append(np.array(tot_amp))
temp_dict["average_amplitude"].append(np.array(avg_amp))
temp_dict["percentage_active_sites"].append(np.array(per_act_sit))
temp_dict["participation_pos"].append(np.array(graph_prop_simps[st]["participation_pos"])[:,k])
temp_dict_null["participation_pos"].append(np.array(graph_prop_simps_null[st]["participation_pos_null"])[:,k])
temp_dict["participation_neg"].append(np.array(graph_prop_simps[st]["participation_neg"])[:,k])
temp_dict["module_degree_zscore"].append(np.array(graph_prop_simps[st]["module_degree_zscore"])[:,k])
temp_dict_null["module_degree_zscore"].append(np.array(graph_prop_simps_null[st]["module_degree_zscore_null"])[:,k])
if len(names) > len(st_list_corr):
temp_dict["names"].append(np.array(graph_prop_simps[st]["names"])[nz_inds])
temp_dict_null["names"].append(np.array(graph_prop_simps_null[st]["names"])[nz_inds])
else:
temp_dict["names"].append( | np.array(graph_prop_simps[st]["names"]) | numpy.array |
# Copyright 2021 <NAME>. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
import numpy as np
from common.python.utils import log_utils
from kernel.transfer.framework.horz.blocks import secure_aggregator
from kernel.transfer.framework.horz.blocks.aggregator import AggregatorTransVar
from kernel.transfer.framework.horz.blocks.random_padding_cipher import RandomPaddingCipherTransVar
from kernel.transfer.framework.horz.blocks.secure_aggregator import SecureAggregatorTransVar
from kernel.transfer.framework.weights import Weights
from kernel.utils import consts
LOGGER = log_utils.get_logger()
class SecureSumAggregatorTransVar(SecureAggregatorTransVar):
def __init__(self, server=(consts.ARBITER,), clients=(consts.PROMOTER, consts.PROVIDER), prefix=None):
super().__init__(server=server, clients=clients, prefix=prefix)
self.aggregator_trans_var = AggregatorTransVar(server=server, clients=clients, prefix=self.prefix)
self.random_padding_cipher_trans_var = \
RandomPaddingCipherTransVar(server=server, clients=clients, prefix=self.prefix)
class Server(secure_aggregator.Server):
def __init__(self, trans_var: SecureSumAggregatorTransVar = SecureSumAggregatorTransVar(),
enable_secure_aggregate=True):
super().__init__(trans_var=trans_var, enable_secure_aggregate=enable_secure_aggregate)
def sum_model(self, suffix=tuple()):
def _func(models):
return functools.reduce(model_add, models)
return self.aggregate(_func, suffix=suffix)
def max_model(self, suffix=tuple()):
def _func(models):
return functools.reduce(model_max, models)
return self.aggregate(_func, suffix=suffix)
def min_model(self, suffix=tuple()):
def _func(models):
return functools.reduce(model_min, models)
return self.aggregate(_func, suffix=suffix)
class Client(secure_aggregator.Client):
def __init__(self, trans_var: SecureSumAggregatorTransVar = SecureSumAggregatorTransVar(),
enable_secure_aggregate=True):
super().__init__(trans_var=trans_var, enable_secure_aggregate=enable_secure_aggregate)
@functools.singledispatch
def model_encrypted(model, cipher):
return model.encrypted(cipher, inplace=True)
@functools.singledispatch
def model_add(model, other):
return model + other
@functools.singledispatch
def model_max(model, other):
if isinstance(model, Weights):
model = model.unboxed
if isinstance(other, Weights):
other = other.unboxed
return np.maximum(np.array(model), np.array(other))
@functools.singledispatch
def model_min(model, other):
if isinstance(model, Weights):
model = model.unboxed
if isinstance(other, Weights):
other = other.unboxed
return np.minimum(np.array(model), np.array(other))
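# Quick illustration (not part of the original module): how the element-wise reducers
# above combine two clients' parameter arrays. The weight values are made up; in a real
# job each array would come from one party's local model.
def _demo_elementwise_reduction():
    client_weights = [np.array([0.2, 1.5, -0.3]), np.array([0.4, 1.1, -0.7])]
    summed = functools.reduce(model_add, client_weights)  # array([ 0.6,  2.6, -1. ])
    maxed = functools.reduce(model_max, client_weights)   # array([ 0.4,  1.5, -0.3])
    mined = functools.reduce(model_min, client_weights)   # array([ 0.2,  1.1, -0.7])
    return summed, maxed, mined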
from numpy import array, vstack, zeros
import numpy
from configuration import configuration_time_line
from copy import deepcopy
class ProblemFormulation():
"""
Problem formulation class for unit commitment problem in hybrid AC/DC microgrid.
"""
def problem_formulation_local(*args):
"""
Problem formulation for unit commitment problem (normal state)
:param model:
:return:
"""
from modelling.data.idx_uc_format import IG, PG, RG, IUG, PUG, RUG, PBIC_AC2DC, PBIC_DC2AC, PESS_C, \
PESS_DC, RESS, EESS, PMG, NX
model = deepcopy(args[0])
T = configuration_time_line.default_look_ahead_time_step["Look_ahead_time_uc_time_step"]
nx = NX * T
lb = [0] * NX
ub = [0] * NX
vtypes = ["c"] * NX
vtypes[IG] = "b"
vtypes[IUG] = "b"
vtypes = vtypes * T
## Update lower boundary
lb[IG] = 0
lb[PG] = model["DG"]["PMIN"]
lb[RG] = model["DG"]["PMIN"]
lb[IUG] = 0
lb[PUG] = model["UG"]["PMIN"]
lb[RUG] = model["UG"]["PMIN"]
lb[PBIC_AC2DC] = 0
lb[PBIC_DC2AC] = 0
lb[PESS_C] = 0
lb[PESS_DC] = 0
lb[RESS] = 0
lb[EESS] = model["ESS"]["SOC_MIN"] * model["ESS"]["CAP"]
lb[PMG] = 0 # Tie-line flow limit; the connection to the main grid is assumed off-line by default
## Update upper boundary
for i in range(T):
ub[IG] = model["DG"]["STATUS"][i]
ub[PG] = model["DG"]["PMAX"]
ub[RG] = model["DG"]["PMAX"]
for i in range(T):
ub[IUG] = model["UG"]["STATUS"][i]
ub[PUG] = model["UG"]["PMAX"]
ub[RUG] = model["UG"]["PMAX"]
ub[PBIC_AC2DC] = model["BIC"]["SMAX"]
ub[PBIC_DC2AC] = model["BIC"]["SMAX"]
ub[PESS_C] = model["ESS"]["PMAX_CH"]
ub[PESS_DC] = model["ESS"]["PMAX_DIS"]
ub[RESS] = model["ESS"]["PMAX_DIS"] + model["ESS"]["PMAX_CH"]
ub[EESS] = model["ESS"]["SOC_MAX"] * model["ESS"]["CAP"]
ub[PMG] = 0 # Tie-line flow limit; the connection to the main grid is assumed off-line by default
# Finalize the boundary information
LB = lb * T
UB = ub * T
## Constraints set
# 1) Power balance equation
Aeq = zeros((T, nx))
beq = [ ]
for i in range(T):
Aeq[i][i * NX + PG] = 1
Aeq[i][i * NX + PUG] = 1
Aeq[i][i * NX + PBIC_AC2DC] = -1
Aeq[i][i * NX + PBIC_DC2AC] = model["BIC"]["EFF_DC2AC"]
beq.append(model["Load_ac"]["PD"][i] + model["Load_nac"]["PD"][i])
# 2) DC power balance equation
Aeq_temp = zeros((T, nx))
for i in range(T):
Aeq_temp[i][i * NX + PBIC_AC2DC] = model["BIC"]["EFF_AC2DC"]
Aeq_temp[i][i * NX + PBIC_DC2AC] = -1
Aeq_temp[i][i * NX + PESS_C] = -1
Aeq_temp[i][i * NX + PESS_DC] = 1
Aeq_temp[i][i * NX + PMG] = -1
beq.append(
model["Load_dc"]["PD"][i] + model["Load_ndc"]["PD"][i] - model["PV"]["PG"][i] - model["WP"]["PG"][i])
Aeq = vstack([Aeq, Aeq_temp])
# 3) Energy storage system
Aeq_temp = zeros((T, nx))
for i in range(T):
if i == 0:
Aeq_temp[i][i * NX + EESS] = 1
Aeq_temp[i][i * NX + PESS_C] = -model["ESS"]["EFF_CH"] * configuration_time_line.default_time[
"Time_step_uc"] / 3600
Aeq_temp[i][i * NX + PESS_DC] = 1 / model["ESS"]["EFF_DIS"] * configuration_time_line.default_time[
"Time_step_uc"] / 3600
beq.append(model["ESS"]["SOC"] * model["ESS"]["CAP"])
else:
Aeq_temp[i][(i - 1) * NX + EESS] = -1
Aeq_temp[i][i * NX + EESS] = 1
Aeq_temp[i][i * NX + PESS_C] = -model["ESS"]["EFF_CH"] * configuration_time_line.default_time[
"Time_step_uc"] / 3600
Aeq_temp[i][i * NX + PESS_DC] = 1 / model["ESS"]["EFF_DIS"] * configuration_time_line.default_time[
"Time_step_uc"] / 3600
beq.append(0)
Aeq = vstack([Aeq, Aeq_temp])
# 4) Terminal energy level of the energy storage system (end-of-horizon energy equals the initial SOC level)
Aeq_temp = zeros((1, nx))
Aeq_temp[0][(T-1)*NX+EESS] = 1
Aeq = vstack([Aeq, Aeq_temp])
beq.append(model["ESS"]["SOC"] * model["ESS"]["CAP"])
# Inequality constraints
# 1) PG + RG <= IG*PGMAX
Aineq = zeros((T, nx))
bineq = []
for i in range(T):
Aineq[i][i * NX + PG] = 1
Aineq[i][i * NX + RG] = 1
Aineq[i][i * NX + IG] = -model["DG"]["PMAX"]
bineq.append(0)
# 2) PG - RG >= IG*PGMIN
Aineq_temp = zeros((T, nx))
for i in range(T):
Aineq_temp[i][i * NX + PG] = -1
Aineq_temp[i][i * NX + RG] = 1
Aineq_temp[i][i * NX + IG] = model["DG"]["PMIN"]
bineq.append(0)
Aineq = vstack([Aineq, Aineq_temp])
# 3) PUG + RUG <= PUGMAX
Aineq_temp = zeros((T, nx))
for i in range(T):
Aineq_temp[i][i * NX + PUG] = 1
Aineq_temp[i][i * NX + RUG] = 1
Aineq_temp[i][i * NX + IUG] = -model["UG"]["PMAX"]
bineq.append(0)
Aineq = vstack([Aineq, Aineq_temp])
# 4) PUG - RUG >= PUGMIN
Aineq_temp = zeros((T, nx))
for i in range(T):
Aineq_temp[i][i * NX + PUG] = -1
Aineq_temp[i][i * NX + RUG] = 1
Aineq_temp[i][i * NX + IUG] = model["UG"]["PMIN"]
bineq.append(0)
Aineq = vstack([Aineq, Aineq_temp])
# 5) PESS_DC - PESS_C + RESS <= PESS_DC_MAX
Aineq_temp = zeros((T, nx))
for i in range(T):
Aineq_temp[i][i * NX + PESS_DC] = 1
Aineq_temp[i][i * NX + PESS_C] = -1
Aineq_temp[i][i * NX + RESS] = 1
bineq.append(model["ESS"]["PMAX_DIS"])
Aineq = vstack([Aineq, Aineq_temp])
# 6) PESS_DC - PESS_C - RESS >= -PESS_C_MAX
Aineq_temp = zeros((T, nx))
for i in range(T):
Aineq_temp[i][i * NX + PESS_DC] = -1
Aineq_temp[i][i * NX + PESS_C] = 1
Aineq_temp[i][i * NX + RESS] = 1
bineq.append(model["ESS"]["PMAX_CH"])
Aineq = vstack([Aineq, Aineq_temp])
# 7) EESS - RESS*delta >= EESSMIN
Aineq_temp = zeros((T, nx))
for i in range(T):
Aineq_temp[i][i * NX + EESS] = -1
Aineq_temp[i][i * NX + RESS] = configuration_time_line.default_time["Time_step_uc"] / 3600
bineq.append(-model["ESS"]["SOC_MIN"] * model["ESS"]["CAP"])
Aineq = vstack([Aineq, Aineq_temp])
# 8) EESS + RESS*delta <= EESSMAX
Aineq_temp = zeros((T, nx))
for i in range(T):
Aineq_temp[i][i * NX + EESS] = 1
Aineq_temp[i][i * NX + RESS] = configuration_time_line.default_time["Time_step_uc"] / 3600
bineq.append(model["ESS"]["SOC_MAX"] * model["ESS"]["CAP"])
Aineq = vstack([Aineq, Aineq_temp])
# 9) RG + RUG + RESS >= sum(Load)*beta + sum(PV)*beta_pv + sum(WP)*beta_wp
# No reserve requirement
# 10) IG + IUG <= 1
Aineq_temp = zeros((T, nx))
for i in range(T):
Aineq_temp[i][i * NX + IG] = 1
Aineq_temp[i][i * NX + IUG] = 1
bineq.append(1)
Aineq = vstack([Aineq, Aineq_temp])
c = [0] * NX
if model["DG"]["COST_MODEL"] == 2:
c[PG] = model["DG"]["COST"][1]
else:
c[PG] = model["DG"]["COST"][0]
c[IG] = model["DG"]["COST_START_UP"]
c[PUG] = model["UG"]["COST"][0]
c[IUG] = model["UG"]["COST_START_UP"]
c[PESS_C] = model["ESS"]["COST_CH"][0]
c[PESS_DC] = model["ESS"]["COST_DIS"][0]
C = c * T
# Generate the quadratic parameters
Q = zeros((nx, nx))
for i in range(T):
if model["DG"]["COST_MODEL"] == 2:
Q[i * NX + PG][i * NX + PG] = model["DG"]["COST"][1]
mathematical_model = {"Q": Q,
"c": C,
"Aeq": Aeq,
"beq": beq,
"A": Aineq,
"b": bineq,
"lb": LB,
"ub": UB,
"vtypes": vtypes}
return mathematical_model
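# How the flat decision vector is laid out (illustration): the variables of time step t
# occupy x[t * NX : (t + 1) * NX], so a solution vector x from a MILP solver
# (hypothetical here) can be unpacked per device, for example:
#   pg_schedule = [x[t * NX + PG] for t in range(T)]    # DG output at every step
#   soc_profile = [x[t * NX + EESS] for t in range(T)]  # stored energy at every step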
def problem_formulation_local_recovery(*args):
"""
Problem formulation for unit commitment problem (emergency state)
:param model: information model
:return:
"""
from modelling.data.idx_uc_recovery_format import IG, PG, RG, IUG, PUG, RUG, IBIC, PBIC_AC2DC, PBIC_DC2AC, \
IESS, PESS_C, PESS_DC, RESS, EESS, PMG, IPV, IWP, IL_AC, IL_NAC, IL_DC, IL_NDC, NX
model = deepcopy(args[0])
T = configuration_time_line.default_look_ahead_time_step["Look_ahead_time_uc_time_step"]
nx = T * NX
lb = [0] * nx
ub = [0] * nx
vtypes = ["c"] * nx
for i in range(T):
## Update lower boundary
lb[i * NX + IG] = 0
vtypes[i * NX + IG] = "b"
lb[i * NX + PG] = model["DG"]["PMIN"]
lb[i * NX + RG] = model["DG"]["PMIN"]
lb[i * NX + IUG] = 0
vtypes[i * NX + IUG] = "b"
lb[i * NX + IBIC] = 0
lb[i * NX + PUG] = model["UG"]["PMIN"]
lb[i * NX + RUG] = model["UG"]["PMIN"]
vtypes[i * NX + IBIC] = "b"
lb[i * NX + PBIC_AC2DC] = 0
lb[i * NX + PBIC_DC2AC] = 0
vtypes[i * NX + IESS] = "b"
lb[i * NX + IESS] = 0
lb[i * NX + PESS_C] = 0
lb[i * NX + PESS_DC] = 0
lb[i * NX + RESS] = 0
lb[i * NX + EESS] = model["ESS"]["SOC_MIN"] * model["ESS"]["CAP"]
lb[i * NX + PMG] = 0 # Tie-line flow limit; the connection to the main grid is assumed off-line by default
lb[i * NX + IPV] = 0
lb[i * NX + IWP] = 0
lb[i * NX + IL_AC] = 0
lb[i * NX + IL_NAC] = 0
lb[i * NX + IL_DC] = 0
lb[i * NX + IL_NDC] = 0
## Update upper boundary
ub[i * NX + IG] = model["DG"]["STATUS"][i]
ub[i * NX + PG] = model["DG"]["PMAX"]
ub[i * NX + RG] = model["DG"]["PMAX"]
ub[i * NX + IUG] = model["UG"]["STATUS"][i]
ub[i * NX + PUG] = model["UG"]["PMAX"]
ub[i * NX + RUG] = model["UG"]["PMAX"]
ub[i * NX + IBIC] = 1
ub[i * NX + PBIC_AC2DC] = model["BIC"]["SMAX"]
ub[i * NX + PBIC_DC2AC] = model["BIC"]["SMAX"]
ub[i * NX + IESS] = 1
ub[i * NX + PESS_C] = model["ESS"]["PMAX_CH"]
ub[i * NX + PESS_DC] = model["ESS"]["PMAX_DIS"]
ub[i * NX + RESS] = model["ESS"]["PMAX_DIS"] + model["ESS"]["PMAX_CH"]
ub[i * NX + EESS] = model["ESS"]["SOC_MAX"] * model["ESS"]["CAP"]
ub[i * NX + PMG] = 0 # Tie-line flow limit; the connection to the main grid is assumed off-line by default
ub[i * NX + IPV] = model["PV"]["PG"][i]
ub[i * NX + IWP] = model["WP"]["PG"][i]
ub[i * NX + IL_AC] = model["Load_ac"]["PD"][i]
ub[i * NX + IL_NAC] = model["Load_nac"]["PD"][i]
ub[i * NX + IL_DC] = model["Load_dc"]["PD"][i]
ub[i * NX + IL_NDC] = model["Load_ndc"]["PD"][i]
## Constraints set
# 1) Power balance equation
Aeq = zeros((T, nx))
beq = []
for i in range(T):
Aeq[i][i * NX + PG] = 1
Aeq[i][i * NX + PUG] = 1
Aeq[i][i * NX + PBIC_AC2DC] = -1
Aeq[i][i * NX + PBIC_DC2AC] = model["BIC"]["EFF_DC2AC"]
Aeq[i][i * NX + IL_AC] = -1
Aeq[i][i * NX + IL_NAC] = -1
beq.append(0)
# 2) DC power balance equation
Aeq_temp = zeros((T, nx))
for i in range(T):
Aeq_temp[i][i * NX + PBIC_AC2DC] = model["BIC"]["EFF_AC2DC"]
Aeq_temp[i][i * NX + PBIC_DC2AC] = -1
Aeq_temp[i][i * NX + PESS_C] = -1
Aeq_temp[i][i * NX + PESS_DC] = 1
Aeq_temp[i][i * NX + PMG] = -1
Aeq_temp[i][i * NX + IL_DC] = -1
Aeq_temp[i][i * NX + IL_NDC] = -1
Aeq_temp[i][i * NX + IPV] = 1
Aeq_temp[i][i * NX + IWP] = 1
beq.append(0)
Aeq = vstack([Aeq, Aeq_temp])
# 3) Energy storage system
Aeq_temp = zeros((T, nx))
for i in range(T):
if i == 0:
Aeq_temp[i][i * NX + EESS] = 1
Aeq_temp[i][i * NX + PESS_C] = -model["ESS"]["EFF_CH"] * configuration_time_line.default_time[
"Time_step_uc"] / 3600
Aeq_temp[i][i * NX + PESS_DC] = 1 / model["ESS"]["EFF_DIS"] * configuration_time_line.default_time[
"Time_step_uc"] / 3600
beq.append(model["ESS"]["SOC"] * model["ESS"]["CAP"])
else:
Aeq_temp[i][(i - 1) * NX + EESS] = -1
Aeq_temp[i][i * NX + EESS] = 1
Aeq_temp[i][i * NX + PESS_C] = -model["ESS"]["EFF_CH"] * configuration_time_line.default_time[
"Time_step_uc"] / 3600
Aeq_temp[i][i * NX + PESS_DC] = 1 / model["ESS"]["EFF_DIS"] * configuration_time_line.default_time[
"Time_step_uc"] / 3600
beq.append(0)
Aeq = vstack([Aeq, Aeq_temp])
# Inequality constraints
# 1) PG + RG <= IG*PGMAX
Aineq = zeros((T, nx))
bineq = []
for i in range(T):
Aineq[i][i * NX + PG] = 1
Aineq[i][i * NX + RG] = 1
Aineq[i][i * NX + IG] = -model["DG"]["PMAX"]
bineq.append(0)
# 2) PG - RG >= IG*PGMIN
Aineq_temp = zeros((T, nx))
for i in range(T):
Aineq_temp[i][i * NX + PG] = -1
Aineq_temp[i][i * NX + RG] = 1
Aineq_temp[i][i * NX + IG] = model["DG"]["PMIN"]
bineq.append(0)
Aineq = vstack([Aineq, Aineq_temp])
# 3) PUG + RUG <= PUGMAX
Aineq_temp = zeros((T, nx))
for i in range(T):
Aineq_temp[i][i * NX + PUG] = 1
Aineq_temp[i][i * NX + RUG] = 1
Aineq_temp[i][i * NX + IUG] = -model["UG"]["PMAX"]
bineq.append(0)
Aineq = vstack([Aineq, Aineq_temp])
# 4) PUG - RUG >= PUGMIN
Aineq_temp = zeros((T, nx))
for i in range(T):
Aineq_temp[i][i * NX + PUG] = -1
Aineq_temp[i][i * NX + RUG] = 1
Aineq_temp[i][i * NX + IUG] = model["UG"]["PMIN"]
bineq.append(0)
Aineq = vstack([Aineq, Aineq_temp])
# 5) PESS_DC - PESS_C + RESS <= PESS_DC_MAX
Aineq_temp = zeros((T, nx))
for i in range(T):
Aineq_temp[i][i * NX + PESS_DC] = 1
Aineq_temp[i][i * NX + PESS_C] = -1
Aineq_temp[i][i * NX + RESS] = 1
bineq.append(model["ESS"]["PMAX_DIS"])
Aineq = vstack([Aineq, Aineq_temp])
# 6) PESS_DC - PESS_C - RESS >= -PESS_C_MAX
Aineq_temp = zeros((T, nx))
import numpy as np
class PDController():
def __init__(self, env):
self.env = env
self.reset()
def set_default_values(self):
self.error = 0.0005
self.k_p = 15
self.k_d = 0.05
self.dt = 1./240
self.state = 0
def reset(self):
self.set_default_values()
self.init_target()
def init_target(self):
target_position = self.env.get_door_handle_position()
gripper_width = self.env.get_gripper_width()
self.target = [target_position[0] - 0.125, target_position[1], target_position[2] - 0.125, gripper_width]
def get_relative_observation(self):
observation = self.env.get_current_state()
dx = self.target[0] - observation["position"][0]
dy = self.target[1] - observation["position"][1]
dz = self.target[2] - observation["position"][2]
dgw = self.target[3] - observation["gripper_width"]
return (dx, dy, dz, dgw)
def change_target(self, dx, dy, dz, dgw):
if abs(dx) < self.error and abs(dy) < self.error and abs(dz) < self.error and abs(dgw) < self.error:
self.state += 1
target_position = self.env.get_door_handle_position()
gripper_width = self.env.get_gripper_width()
if self.state == 1:
self.target = [target_position[0] - 0.125, target_position[1], target_position[2] - 0.125, 0.045]
elif self.state == 2:
self.target = [target_position[0] - 0.125, target_position[1], target_position[2] - 0.175, 0.045]
def clamp_action(self, action):
# Ensure every action component is scaled to the range [-1, 1]
max_action = np.max(np.abs(action))
if max_action > 1:
action /= max_action
return action
def get_action(self):
dx, dy, dz, dgw = self.get_relative_observation()
pd_x = self.k_p*dx + self.k_d*dx/self.dt
pd_y = self.k_p*dy + self.k_d*dy/self.dt
pd_z = self.k_p*dz + self.k_d*dz/self.dt
pd_gw = self.k_p*dgw + self.k_d*dgw/self.dt
self.change_target(dx, dy, dz, dgw)
action = np.array([pd_x, pd_y, pd_z, pd_gw], dtype=float)
action = self.clamp_action(action)
return action
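# Minimal usage sketch. The stub below only illustrates the three environment calls
# PDController relies on (get_door_handle_position, get_gripper_width, get_current_state);
# its return values are invented, and the real environment is the simulator this class targets.
class _StubEnv:
    def get_door_handle_position(self):
        return [0.6, 0.0, 0.5]
    def get_gripper_width(self):
        return 0.08
    def get_current_state(self):
        return {"position": [0.4, 0.0, 0.3], "gripper_width": 0.08}
def _demo_pd_controller():
    controller = PDController(_StubEnv())
    return controller.get_action()  # clamped [dx, dy, dz, d_gripper] command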
import solvers as sol
from AS1_class import Asym_slab
import numpy as np
import matplotlib.pyplot as plt
import pickle
from scipy.interpolate import interp1d
def save():
with open('pickles/threshold_density_sym_c0={}_K={}_M_A={}.p'.format(
slab.c0, slab.K, slab.M_A), 'wb') as f:
pickle.dump(threshold, f)
def load(K):
return pickle.load(open('pickles/threshold_density_sym_c0={}_K={}_M_A={}.p'
.format(c0, K, slab.M_A), 'rb'))
#R_range = np.append([0.05], np.linspace(0.25, 10, 79))
R_range = np.linspace(7.125, 10, 24)
# Imports
import numpy as np
import pandas as pd
import os
import pickle
from os.path import join
from time import time
import pwlf
from sklearn.model_selection import KFold
from sklearn.preprocessing import PolynomialFeatures
from sklearn.pipeline import make_pipeline
from sklearn.linear_model import LinearRegression
from sklearn.metrics import log_loss
def node_scores_pwlf_with_crossvalidation(p_hat, y, n_splits = 5, add_error = False, seed = 0,
max_nodes = 15, degree = 1):
node_scores = [0]*(max_nodes+1)
node_ECEs_abs = [0]*(max_nodes+1)
node_ECEs_square = [0]*(max_nodes+1)
node_loss = [0]*(max_nodes+1)
node_scores[0] = np.inf
node_ECEs_abs[0] = np.inf
node_ECEs_square[0] = np.inf
node_loss[0] = np.inf
all_weights = []
all_cv_scores = []
for n_nodes in range(1, max_nodes+1, 1):
weights = []
cv_scores = []
cv_ECE_square = []
cv_ECE_abs = []
cv_loss = []
print("Nodes:", n_nodes)
start_cv = time()
kf = KFold(n_splits=n_splits, shuffle=True, random_state=seed)
ij = 0
for train_index, test_index in kf.split(p_hat):
try:
p_hat_train, p_hat_test = p_hat[train_index], p_hat[test_index]
y_train, y_test = y[train_index], y[test_index]
if n_nodes == 1:
model= make_pipeline(PolynomialFeatures(degree),LinearRegression())
model.fit(p_hat_train.reshape(-1,1), y_train)
c_hat_test = model.predict(p_hat_test.reshape(-1,1))
c_hat_train = model.predict(p_hat_train.reshape(-1,1))
else:
model = pwlf.PiecewiseLinFit(p_hat_train, y_train, degree = degree)
h = model.fit(n_nodes)
c_hat_test = model.predict(p_hat_test)
c_hat_train = model.predict(p_hat_train)
cv_scores.append(np.mean((c_hat_test - y_test)**2)) # Squared error
cv_ECE_square.append(np.mean((c_hat_test - p_hat_test)**2))
cv_ECE_abs.append(np.mean(np.abs(c_hat_test - p_hat_test)))
cv_loss.append(np.mean(np.square(c_hat_train - y_train))) # Train loss
print("Split:", ij)
ij += 1
except:
print("error for " + str(n_nodes) + " with method pwlf")
node_scores[n_nodes] += 9999
node_scores[n_nodes] += np.mean(cv_scores)
node_ECEs_square[n_nodes] = np.mean(cv_ECE_square)
node_ECEs_abs[n_nodes] = np.mean(cv_ECE_abs)
node_loss[n_nodes] = cv_loss
all_cv_scores.append(cv_scores)
all_weights.append(weights)
print("Time for %i-fold CV for %i nodes was %f" % (n_splits, n_nodes, (time()-start_cv)))
if add_error:
error = 1 / len(p_hat)**0.5
errors = [0 for i in range(max_nodes+1)]
for i in range(max_nodes+1):
errors[i] = error * i**0.125 * node_scores[i]
node_scores = node_scores + np.asarray(errors)
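# Small sketch (synthetic data) of the pwlf calls used above: fit a piecewise-linear
# model with a fixed number of segments and predict on the same inputs. Only numpy and
# pwlf, both already imported at the top of this file, are needed.
def _demo_pwlf_fit():
    x = np.linspace(0.0, 1.0, 200)
    y = np.where(x < 0.5, 0.2 * x, 0.1 + 1.5 * (x - 0.5))
    model = pwlf.PiecewiseLinFit(x, y, degree=1)
    breakpoints = model.fit(2)   # two segments -> three breakpoints (incl. the end points)
    y_hat = model.predict(x)
    return breakpoints, np.mean((y_hat - y) ** 2)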
import networkx as nx
import numpy as np
import os
from os.path import join as pjoin
import copy
import torch
from .graph_subsampling import graph_dataset_subsampling
from .graph_node_random_walk import get_node_random_walk
"""
References: https://github.com/bknyaz/graph_nn
"""
class DataReader():
'''
Class to read the txt files containing all data of the dataset
'''
def __init__(self,
data_dir, # Folder with txt files
random_walk,
node2vec_hidden,
walk_length,
num_walk,
p,
q,
workers=3,
rnd_state=None,
use_cont_node_attr=False, # Whether to use additional float-valued node attributes available in some datasets
folds=10):
self.data_dir = data_dir
self.rnd_state = np.random.RandomState() if rnd_state is None else rnd_state
self.use_cont_node_attr = use_cont_node_attr
files = os.listdir(self.data_dir)
print('data path:', self.data_dir)
data = {}
# Read adj list
nodes, graphs = self.read_graph_nodes_relations(list(filter(lambda f: f.find('graph_indicator') >= 0, files))[0])
data['adj_list'] = self.read_graph_adj(list(filter(lambda f: f.find('_A') >= 0, files))[0], nodes, graphs)
print('complete to build adjacency matrix list')
# Make node count list
data['node_count_list'] = self.get_node_count_list(data['adj_list'])
print('complete to build node count list')
# Make edge matrix list
data['edge_matrix_list'], data['max_edge_matrix'] = self.get_edge_matrix_list(data['adj_list'])
print('complete to build edge matrix list')
# Make node count list
data['edge_matrix_count_list'] = self.get_edge_matrix_count_list(data['edge_matrix_list'])
print('complete to build edge matrix count list')
# Make degree_features and max neighbor list
degree_features = self.get_node_features_degree(data['adj_list'])
data['max_neighbor_list'] = self.get_max_neighbor(degree_features)
print('complete to build max neighbor list')
# Read features or make features
if len(list(filter(lambda f: f.find('node_labels') >= 0, files))) != 0:
print('node label: node label in dataset')
data['features'] = self.read_node_features(list(filter(lambda f: f.find('node_labels') >= 0, files))[0],
nodes, graphs, fn=lambda s: int(s.strip()))
else:
print('node label: degree of nodes')
data['features'] = degree_features
print('complete to build node features list')
data['targets'] = np.array(self.parse_txt_file(list(filter(lambda f: f.find('graph_labels') >= 0, files))[0],
line_parse_fn=lambda s: int(float(s.strip()))))
print('complete to build targets list')
if self.use_cont_node_attr:
data['attr'] = self.read_node_features(list(filter(lambda f: f.find('node_attributes') >= 0, files))[0],
nodes, graphs, fn=lambda s: np.array(list(map(float, s.strip().split(',')))))
features, n_edges, degrees = [], [], []
for sample_id, adj in enumerate(data['adj_list']):
N = len(adj) # Number of nodes
if data['features'] is not None:
assert N == len(data['features'][sample_id]), (N, len(data['features'][sample_id]))
n = np.sum(adj) # Total sum of edges
n_edges.append( int(n / 2) ) # Undirected edges, so need to divide by 2
if not np.allclose(adj, adj.T):
print(sample_id, 'not symmetric')
degrees.extend(list(np.sum(adj, 1)))
features.append(np.array(data['features'][sample_id]))
# Create features over graphs as one-hot vectors for each node
features_all = np.concatenate(features)
features_min = features_all.min()
features_dim = int(features_all.max() - features_min + 1) # Number of possible values
features_onehot = []
for i, x in enumerate(features):
feature_onehot = np.zeros((len(x), features_dim))
for node, value in enumerate(x):
feature_onehot[node, value - features_min] = 1
if self.use_cont_node_attr:
feature_onehot = np.concatenate((feature_onehot, np.array(data['attr'][i])), axis=1)
features_onehot.append(feature_onehot)
if self.use_cont_node_attr:
features_dim = features_onehot[0].shape[1]
shapes = [len(adj) for adj in data['adj_list']]
labels = data['targets'] # Graph class labels
labels -= np.min(labels) # To start from 0
N_nodes_max = np.max(shapes)
classes = np.unique(labels)
n_classes = len(classes)
if not np.all(np.diff(classes) == 1):
print('making labels sequential, otherwise pytorch might crash')
labels_new = np.zeros(labels.shape, dtype=labels.dtype) - 1
for lbl in range(n_classes):
labels_new[labels == classes[lbl]] = lbl
labels = labels_new
classes = np.unique(labels)
assert len(np.unique(labels)) == n_classes, np.unique(labels)
print('-'*50)
print('The number of graphs:', len(data['adj_list']))
print('N nodes avg/std/min/max: \t%.2f/%.2f/%d/%d' % (np.mean(shapes), np.std(shapes), np.min(shapes), np.max(shapes)))
print('N edges avg/std/min/max: \t%.2f/%.2f/%d/%d' % (np.mean(n_edges), np.std(n_edges), np.min(n_edges), np.max(n_edges)))
print('Node degree avg/std/min/max: \t%.2f/%.2f/%d/%d' % (np.mean(degrees), np.std(degrees), np.min(degrees), np.max(degrees)))
print('Node features dim: \t\t%d' % features_dim)
print('N classes: \t\t\t%d' % n_classes)
print('Classes: \t\t\t%s' % str(classes))
for lbl in classes:
print('Class %d: \t\t\t%d samples' % (lbl, np.sum(labels == lbl)))
for u in np.unique(features_all):
print('feature {}, count {}/{}'.format(u, np.count_nonzero(features_all == u), len(features_all)))
N_graphs = len(labels) # Number of samples (graphs) in data
assert N_graphs == len(data['adj_list']) == len(features_onehot), 'invalid data'
# Create test sets first
train_ids, test_ids = self.split_ids(np.arange(N_graphs), rnd_state=self.rnd_state, folds=folds)
# Create train sets
splits = []
for fold in range(folds):
splits.append({'train': train_ids[fold],
'test': test_ids[fold]})
data['features_onehot'] = features_onehot
data['targets'] = labels
data['splits'] = splits
data['N_nodes_max'] = np.max(shapes) # Max number of nodes
data['features_dim'] = features_dim
data['n_classes'] = n_classes
# Make neighbor dictionary
#data['neighbor_dic_list'] = self.get_neighbor_dic_list(data['adj_list'], data['N_nodes_max'])
#print('complete to build neighbor dictionary list')
# Make node randomwalk
if random_walk:
print('building node randomwalk list ...')
data['random_walks'] = get_node_random_walk(data['features_onehot'], data['adj_list'], node2vec_hidden, walk_length, num_walk, p, q, workers)
print('complete to build node randomwalk list')
self.data = data
def split_ids(self, ids_all, rnd_state=None, folds=10):
n = len(ids_all)
ids = ids_all[rnd_state.permutation(n)]
stride = int(np.ceil(n / float(folds)))
test_ids = [ids[i: i + stride] for i in range(0, n, stride)]
assert np.all(np.unique(np.concatenate(test_ids)) == sorted(ids_all)), 'some graphs are missing in the test sets'
assert len(test_ids) == folds, 'invalid test sets'
train_ids = []
for fold in range(folds):
train_ids.append(np.array([e for e in ids if e not in test_ids[fold]]))
assert len(train_ids[fold]) + len(test_ids[fold]) == len(np.unique(list(train_ids[fold]) + list(test_ids[fold]))) == n, 'invalid splits'
return train_ids, test_ids
def parse_txt_file(self, fpath, line_parse_fn=None):
with open(pjoin(self.data_dir, fpath), 'r') as f:
lines = f.readlines()
data = [line_parse_fn(s) if line_parse_fn is not None else s for s in lines]
return data
def read_graph_adj(self, fpath, nodes, graphs):
edges = self.parse_txt_file(fpath, line_parse_fn=lambda s: s.split(','))
adj_dict = {}
for edge in edges:
node1 = int(edge[0].strip()) - 1 # -1 because of zero-indexing in our code
node2 = int(edge[1].strip()) - 1
graph_id = nodes[node1]
assert graph_id == nodes[node2], ('invalid data', graph_id, nodes[node2])
if graph_id not in adj_dict:
n = len(graphs[graph_id])
adj_dict[graph_id] = np.zeros((n, n))
ind1 = np.where(graphs[graph_id] == node1)[0]
ind2 = np.where(graphs[graph_id] == node2)[0]
assert len(ind1) == len(ind2) == 1, (ind1, ind2)
adj_dict[graph_id][ind1, ind2] = 1
adj_list = [adj_dict[graph_id] for graph_id in sorted(list(graphs.keys()))]
return adj_list
def read_graph_nodes_relations(self, fpath):
graph_ids = self.parse_txt_file(fpath, line_parse_fn=lambda s: int(s.rstrip()))
nodes, graphs = {}, {}
for node_id, graph_id in enumerate(graph_ids):
if graph_id not in graphs:
graphs[graph_id] = []
graphs[graph_id].append(node_id)
nodes[node_id] = graph_id
graph_ids = np.unique(list(graphs.keys()))
for graph_id in graphs:
graphs[graph_id] = np.array(graphs[graph_id])
return nodes, graphs
def read_node_features(self, fpath, nodes, graphs, fn):
node_features_all = self.parse_txt_file(fpath, line_parse_fn=fn)
node_features = {}
for node_id, x in enumerate(node_features_all):
graph_id = nodes[node_id]
if graph_id not in node_features:
node_features[graph_id] = [ None ] * len(graphs[graph_id])
ind = np.where(graphs[graph_id] == node_id)[0]
assert len(ind) == 1, ind
assert node_features[graph_id][ind[0]] is None, node_features[graph_id][ind[0]]
node_features[graph_id][ind[0]] = x
node_features_lst = [node_features[graph_id] for graph_id in sorted(list(graphs.keys()))]
return node_features_lst
def get_node_features_degree(self, adj_list):
node_features_list = []
for adj in adj_list:
sub_list = []
for feature in nx.from_numpy_matrix(np.array(adj)).degree():
sub_list.append(feature[1])
node_features_list.append(np.array(sub_list))
return node_features_list
def get_max_neighbor(self, degree_list):
max_neighbor_list = []
for degrees in degree_list:
max_neighbor_list.append(int(max(degrees)))
return max_neighbor_list
def get_node_count_list(self, adj_list):
node_count_list = []
for adj in adj_list:
node_count_list.append(len(adj))
return node_count_list
def get_edge_matrix_list(self, adj_list):
edge_matrix_list = []
max_edge_matrix = 0
for adj in adj_list:
edge_matrix = []
for i in range(len(adj)):
for j in range(len(adj[0])):
if adj[i][j] == 1:
edge_matrix.append((i,j))
if len(edge_matrix) > max_edge_matrix:
max_edge_matrix = len(edge_matrix)
edge_matrix_list.append(np.array(edge_matrix))
return edge_matrix_list, max_edge_matrix
def get_edge_matrix_count_list(self, edge_matrix_list):
edge_matrix_count_list = []
for edge_matrix in edge_matrix_list:
edge_matrix_count_list.append(len(edge_matrix))
return edge_matrix_count_list
def get_neighbor_dic_list(self, adj_list, N_nodes_max):
neighbor_dic_list = []
for adj in adj_list:
neighbors = []
for i, row in enumerate(adj):
idx = np.where(row == 1.0)
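# Sketch of the one-hot feature construction done in the constructor above, applied to a
# toy degree list (values invented for illustration).
def _demo_onehot_features():
    degrees = np.array([1, 3, 2, 3])  # node degrees of one toy graph
    features_min = degrees.min()
    features_dim = int(degrees.max() - features_min + 1)
    onehot = np.zeros((len(degrees), features_dim))
    for node, value in enumerate(degrees):
        onehot[node, value - features_min] = 1
    return onehot  # shape (4, 3); row i one-hot encodes the degree of node i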
# coding=utf-8
import pandas
import numpy as np
import scipy
import statsmodels.api as sm
import traceback
import logging
import math
import random
from time import time
from msgpack import unpackb, packb
from redis import StrictRedis
from scipy import stats
from sklearn.ensemble import IsolationForest
from sklearn.cluster import KMeans
from settings import (
ALGORITHMS,
CONSENSUS,
FULL_DURATION,
MAX_TOLERABLE_BOREDOM,
MIN_TOLERABLE_LENGTH,
STALE_PERIOD,
REDIS_SOCKET_PATH,
ENABLE_SECOND_ORDER,
BOREDOM_SET_SIZE,
K_MEANS_CLUSTER,
VERTEX_WEIGHT_ETA,
VERTEX_THRESHOLD,
ANOMALY_COLUMN,
ANOMALY_PATH,
CSHL_NUM,
CSHL_PATH,
)
from algorithm_exceptions import *
logger = logging.getLogger("AnalyzerLog")
redis_conn = StrictRedis(unix_socket_path=REDIS_SOCKET_PATH)
vertex_centers = np.zeros((1, 1))
vertex_avg_score = -1
cshl_weight = [-1.35455734e-01, -5.44036064e-04, -1.35455734e-01, -5.44036064e-04,
-1.35455734e-01, -1.35455734e-01, -5.44036064e-04, -1.35455734e-01,
-5.44036064e-04, -1.35455734e-01, -5.44036064e-04, -5.44036064e-04,
-1.67484694e+00, 1.04843752e+00, 6.61651030e-01, 4.13469487e-08,
1.78945321e-01, -3.60150391e-01, 1.21850659e-01, 4.61800469e-01,
-1.00200490e-01, -1.33467708e-06, 9.32745829e-19, 4.21863030e-09,
-3.36662454e-10, -8.90717918e-06, -4.42558069e-05, -2.87667856e-09]
"""
This is no man's land. Do anything you want in here,
as long as you return a boolean that determines whether the input
timeseries is anomalous or not.
To add an algorithm, define it here, and add its name to settings.ALGORITHMS.
"""
def vertex_score(timeseries):
"""
A time series is anomalous if its vertex score in the hypergraph is greater than the average score of the observed anomalous vertices.
:return: True or False
"""
if vertex_centers.shape[0] <= 1:
update_vertex_param()
timeseries = np.array(timeseries)
test_data = timeseries[:, 1:]
test_data = (test_data - np.min(test_data, axis=0)) / (np.max(test_data, axis=0) - np.min(test_data, axis=0))
test_data = np.nan_to_num(test_data)
score = calculate_vertex_score(test_data, vertex_centers)
if np.sum(score[score > vertex_avg_score]) > VERTEX_THRESHOLD:
return True
return False
def cshl_detect(timeseries):
timeseries = np.delete(np.array(timeseries), [0,1,2,15], axis=1)
abnormal_num = 0
for i in range(timeseries.shape[0]):
zeta = np.dot(timeseries[i], cshl_weight)
if zeta < 0:
abnormal_num = abnormal_num + 1
if abnormal_num >= CSHL_NUM:
return True
return False
def update_vertex_param():
"""
Read observed abnormal data and update cluster centers
"""
global vertex_centers
global vertex_avg_score
origin_data = pandas.read_csv(ANOMALY_PATH).values
abnormal = origin_data[:, 3:]
abnormal = (abnormal - np.min(abnormal, axis=0)) / (np.max(abnormal, axis=0) - np.min(abnormal, axis=0)) # min-max normalize, mirroring vertex_score above
# Licensed under a 3-clause BSD style license - see LICENSE.rst
# This module tests the HRSModel class
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import numpy as np
from numpy.testing import assert_array_equal
from astropy.tests.helper import pytest
from astropy import units as u
from ..hrsorder import HRSOrder
def create_hrsorder():
h = HRSOrder(order=16)
y = np.arange(25)
y = y.reshape(5, 5)
h.set_order_from_array(y)
h.set_flux_from_array(y, flux_unit=u.electron)
def f(x, y):
return 2 * x + y
h.set_wavelength_from_model(f, h.region, wavelength_unit=u.nm)
return h
def test_hrsorder_empty():
with pytest.raises(TypeError):
h = HRSOrder()
def test_hrsorder():
h = HRSOrder(order=67)
assert h.order == 67
# test setting it with an order
def test_hrsorder_bad():
with pytest.raises(TypeError):
h = HRSOrder(order=37.5)
# test order type
def test_hrsorder_order_type_object():
h = HRSOrder(order=37, order_type='object')
assert h.order_type == 'object'
def test_hrsorder_order_type_sky():
h = HRSOrder(order=37, order_type='sky')
assert h.order_type == 'sky'
def test_hrsorder_order_type_None():
h = HRSOrder(order=37, order_type='sky')
h.order_type = None
assert h.order_type is None
def test_hrsorder_order_type_bad():
with pytest.raises(TypeError):
h = HRSOrder(order=37, order_type='badtype')
# test defining a region
def test_hrsorder_region():
r = [(3, 3, 3, 4, 4, 4, 5, 5, 5), (1, 2, 3, 1, 2, 3, 1, 2, 3)]
h = HRSOrder(order=37, region=r)
assert h.region == r
def test_hrsorder_region_length_bad():
r = [(3, 3, 3, 4, 4, 4, 5, 5, 5)]
with pytest.raises(TypeError):
h = HRSOrder(order=37, region=r)
def test_hrsorder_region_pixels_bad():
r = [(3, 3, 3, 4, 4, 4, 5, 5, 5), (1, 2, 3, 1, 2, 3, 1, 2)]
with pytest.raises(TypeError):
h = HRSOrder(order=37, region=r)
# test setting the flux
def test_hrsorder_flux():
r = [(3, 3, 3, 4, 4, 4, 5, 5, 5), (1, 2, 3, 1, 2, 3, 1, 2, 3)]
f = np.arange(len(r[0]))
h = HRSOrder(order=37, region=r, flux=f)
assert_array_equal(h.flux, f)
def test_hrsorder_flux_length():
with pytest.raises(TypeError):
r = [(3, 3, 3, 4, 4, 4, 5, 5, 5), (1, 2, 3, 1, 2, 3, 1, 2, 3)]
f = np.arange(len(r[0]) - 1)
h = HRSOrder(order=37, region=r, flux=f)
def test_hrsorder_flux_noregion():
with pytest.raises(ValueError):
r = [(3, 3, 3, 4, 4, 4, 5, 5, 5), (1, 2, 3, 1, 2, 3, 1, 2, 3)]
f = np.arange(len(r[0]))
h = HRSOrder(order=37, flux=f)
# test setting the flux unit
def test_hrsorder_flux_unit():
h = HRSOrder(order=37, flux_unit=u.electron)
assert h.flux_unit == u.electron
# test setting the wavelength unit
def test_hrsorder_wavelength_unit():
h = HRSOrder(order=37, wavelength_unit=u.nm)
assert h.wavelength_unit == u.nm
# test setting the wavelength
def test_hrsorder_wavelength():
r = [(3, 3, 3, 4, 4, 4, 5, 5, 5), (1, 2, 3, 1, 2, 3, 1, 2, 3)]
w = np.arange(len(r[0]))
h = HRSOrder(order=37, region=r, wavelength=w)
assert_array_equal(h.wavelength, w)
def test_hrsorder_wavelength_length():
with pytest.raises(TypeError):
r = [(3, 3, 3, 4, 4, 4, 5, 5, 5), (1, 2, 3, 1, 2, 3, 1, 2, 3)]
w = np.arange(len(r[0]) - 1)
h = HRSOrder(order=37, region=r, wavelength=w)
def test_hrsorder_wavelength_noregion():
with pytest.raises(ValueError):
r = [(3, 3, 3, 4, 4, 4, 5, 5, 5), (1, 2, 3, 1, 2, 3, 1, 2, 3)]
w = np.arange(len(r[0]))
h = HRSOrder(order=37, wavelength=w)
# test setting the array from the data
def test_hrsorder_set_order_from_array():
h = HRSOrder(order=16)
y = np.arange(25)
y = y.reshape(5, 5)
h.set_order_from_array(y)
assert h.region == (np.array([3]), np.array([1]))
assert y[h.region] == [16]
def test_hrsorder_set_order_from_array_baddata():
h = HRSOrder(order=16)
y = np.arange(25)
with pytest.raises(TypeError):
    h.set_order_from_array(y)
"""
Module to run tests on core.procimg functions.
"""
from IPython import embed
import numpy as np
from pypeit.core import procimg
def test_replace_columns():
y = np.zeros((10,3), dtype=float)
y[:,2] = 2
bad_col = np.array([False, True, False])
assert np.array_equal(procimg.replace_columns(y, bad_col, copy=True, replace_with='mean'),
procimg.replace_columns(y, bad_col, copy=True, replace_with='linear')), \
'Interpolation and mean should provide the same result.'
bad_col = np.array([False, True, True])
assert np.array_equal(procimg.replace_columns(y, bad_col, copy=True, replace_with='mean'),
np.zeros_like(y)), 'Should set everything to 0.'
bad_col = np.array([True, True, False])
assert np.array_equal(procimg.replace_columns(y, bad_col, copy=True, replace_with='mean'),
np.full_like(y, 2)), 'Should set everything to 2.'
y = np.zeros((10,4), dtype=float)
y[:,3] = 3
bad_col = np.array([False, True, True, False])
assert np.array_equal(procimg.replace_columns(y, bad_col, copy=True, replace_with='linear'),
np.repeat(np.arange(4),10).reshape(4,10).T), \
'Interpolation failed.'
def test_rn2_frame():
# Bogus image
datasec = np.ones((10,10), dtype=int)
datasec[5:] = 2
rn = np.array([2.5, 3.5])
gain = np.array([1.2, 1.5])
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Mon Apr. 1 2019
NCA-18650 cell
@author: shpark
Parameters from NCA-18650
Reference:
Park et al., "Optimal Experimental Design for Parameterization of an Electrochemical Lithium-ion Battery Model"
Journal of The Electrochemical Society, 165(7), 2018
"""
import numpy as np
p={}
#==============================================================================
# Geometric params
#==============================================================================
# Thickness of each layer
p['L_n'] = 79.0e-6 # Thickness of negative electrode [m]
p['L_s'] = 80.0e-6 # Thickness of separator [m]
p['L_p'] = 61.5e-6 # Thickness of positive electrode [m]
L_ccn = 25e-6 # Thickness of negative current collector [m]
L_ccp = 25e-6 # Thickness of positive current collector [m]
# Particle Radii
p['R_s_n'] = 2.0249e-05 # Radius of solid particles in negative electrode [m]
p['R_s_p'] = 1.6973e-05 # Radius of solid particles in positive electrode [m]
# Volume fractions
p['epsilon_s_n'] = 0.543889597565723 # Volume fraction in solid for neg. electrode
p['epsilon_s_p'] = 0.666364981170368 # Volume fraction in solid for pos. electrode
p['epsilon_e_n'] = 0.347495486967184 # Volume fraction in electrolyte for neg. electrode
p['epsilon_e_s'] = 0.5 # Volume fraction in electrolyte for separator
p['epsilon_e_p'] = 0.330000000000000 # Volume fraction in electrolyte for pos. electrode
p['epsilon_f_n'] = 1 - p['epsilon_s_n'] - p['epsilon_e_n'] # Volume fraction of filler in neg. electrode
p['epsilon_f_p'] = 1 - p['epsilon_s_p'] - p['epsilon_e_p'] # Volume fraction of filler in pos. electrode
# Specific interfacial surface area
p['a_s_n'] = 3*p['epsilon_s_n'] / p['R_s_n'] # Negative electrode [m^2/m^3]
p['a_s_p'] = 3*p['epsilon_s_p'] / p['R_s_p'] # Positive electrode [m^2/m^3]
#==============================================================================
# Transport params
#==============================================================================
p['D_s_n0'] = 2.63029669224544e-14 # Diffusion coeff for solid in neg. electrode, [m^2/s]
p['D_s_p0'] = 6.81035680483463e-14 # Diffusion coeff for solid in pos. electrode, [m^2/s]
# Conductivity of solid
p['sig_n'] = 100 # Conductivity of solid in neg. electrode, [1/Ohms*m]
p['sig_p'] = 100 # Conductivity of solid in pos. electrode, [1/Ohms*m]
#==============================================================================
# Kinetic params
#==============================================================================
p['R_f_n'] = 0 # Resistivity of SEI layer, [Ohms*m^2]
p['R_f_p'] = 0 # Resistivity of SEI layer, [Ohms*m^2]
#p.R_c = 2.5e-03;%5.1874e-05/p.Area; % Contact Resistance/Current Collector Resistance, [Ohms-m^2]
# Nominal Reaction rates
p['k_n0'] = 7.50e-03 # Reaction rate in neg. electrode, [(A/m^2)*(mol^3/mol)^(1+alpha)]
p['k_p0'] = 2.30e-03 # Reaction rate in pos. electrode, [(A/m^2)*(mol^3/mol)^(1+alpha)]
#==============================================================================
# Thermodynamic params
#==============================================================================
# Thermal dynamics
p['C_p'] = 2000 # Heat capacity, [J/kg-K]
p['R_th'] = 2 # Thermal resistance, [K/W]
p['mth'] = 0.834 # Mass of cell [Kg]
# Activation Energies
# Taken from Zhang et al (2014) [Harbin]
# http://dx.doi.org/10.1016/j.jpowsour.2014.07.110
# All units are [J/mol]
p['E_kn'] = 37.48e+3
p['E_kp'] = 39.57e+3
p['E_Dsn'] = 42.77e+3
p['E_Dsp'] = 18.55e+3
p['E_De'] = 37.04e+3
p['E_kappa_e'] = 34.70e+3
# Ambient Temperature
p['T_amb'] = 298.15 # [K]
p['T_ref'] = 298.15 # [K] for ElectrolyteACT
#==============================================================================
# Miscellaneous
#==============================================================================
p['R'] = 8.314472; # Gas constant, [J/mol-K]
p['Faraday'] = 96485.3329 # Faraday constant [Coulombs/mol]
p['Area'] = 1.425 # Electrode current collector area [m^2]
p['alph'] = 0.5 # Charge transfer coefficients
p['t_plus'] = 0.45 # Transference number
p['brug'] = 1.8 # Bruggeman porosity
#==============================================================================
# Concentrations
#==============================================================================
p['c_s_n_max'] = 3.71e+04 # Max concentration in anode, [mol/m^3]
p['c_s_p_max'] = 5.10e+04 # Max concentration in cathode, [mol/m^3]
p['n_Li_s'] = 0.1406 # Total moles of lithium in solid phase [mol]
p['c_e0'] = 1.0e3 # Electrolyte concentration [mol/m^3]
#==============================================================================
# Discretization params
#==============================================================================
p['PadeOrder'] = 3
p['Nr'] = 20
p['delta_r_n'] = 1/float(p['Nr'])
p['delta_r_p'] = 1/float(p['Nr'])
p['Nxn'] = 10;
p['Nxs'] = 5;
p['Nxp'] = 10;
p['Nx'] = p['Nxn']+p['Nxs']+p['Nxp']
p['delta_x_n'] = 1 / float(p['Nxn'])
p['delta_x_s'] = 1 / float(p['Nxs'])
p['delta_x_p'] = 1 / float(p['Nxp'])
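# Rough sanity check (illustrative, not part of the original parameter set): the total
# lithium inventory n_Li_s corresponds to a charge of n_Li_s * F coulombs, i.e. roughly
# n_Li_s * F / 3600 amp-hours, which should be on the order of one 18650 cell's capacity.
def _demo_capacity_from_li_inventory():
    return p['n_Li_s'] * p['Faraday'] / 3600.0  # ~3.8 Ah for the values above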
def refPotentialAnode_casadi(theta):
c_n=np.array([-0.084294960339275,
0.920754744005144,
-0.500066623566425,
0.062731837918546,
0.782151587417570,
-0.373761901864611,
0.019988184317997,
0.543282314780430,
-0.295609630222051,
0.040970248093866,
0.231152288743602,
-0.217847875913234,
0.068744203951316,
0.353848415118256,
-0.114753994434564,
-0.028613032233089,
0.260671608316041,
-0.212058177468640,
-0.025506157489854,
0.211741908826122,
-0.241880220004548,
0.188872027034948,
0.867520021192469,
-0.225038983698359,
-0.111904175370177,
0.537399173641857,
-0.020780743382893,
0.108353745941168,
0.537735904911254,
-0.020226723056513,
0.171375773597772,
0.729717193288193,
-0.323902793428930,
0.064143152162965,
1.289849595601526,
0.704961322280748,
0.023028206444624,
0.481699223765299,
-0.076233450161839,
-0.182559256738691,
0.830851470359638,
-0.226362977193547,
-0.040952011143767,
1.626936110900125,
0.295695270567609,
-1.000228763094078,
0.007914258576845,
-0.016476666187381,
-0.341740372496750,
0.001274961492701,
-0.004879090290810,
-0.930906698538900,
0.001549868904555,
-0.010583717929547,
2.554274538083029,
-0.012402969675540,
-0.029257893810540,
-0.512533408582419,
0.066122834568301,
-0.077930639597751,
-0.499673574757569,
0.044470609922510,
-0.134483437256594,
1.904111886758372,
-0.035336812622768,
-0.306171040837701,
-1.122974595772499,
0.028740372472439,
-0.079271479637875,
-0.093855421675871,
0.930843806570863,
-0.516652668839875,
-0.846383609865041,
0.012151749801329,
-0.029511731110250,
-0.561782895480513,
0.098392530745244,
-0.109853910868333,
-0.818206413176353,
0.026850808833446,
-0.051805538572186,
-0.525543070925015,
0.188590232596615,
-0.192054642003214,
-0.046580230674248,
0.002863828671823,
-0.000914487593373,
2.650656293235332,
-0.008182255230700,
-0.117937922743741,
-0.295664205008775,
0.137690106957231,
-0.310460986123659,
-0.835065551163236,
0.711574616090746,
-0.997353098073145,
0.415746756470558,
0.423984781966332,
3.189835673119072,
0.413779708001205,
0.426343693564050,
3.190867502582611])
Uref=c_n[0]*np.exp(-((theta - c_n[1])**2/c_n[2]**2))+ \
c_n[3]*np.exp(-((theta - c_n[4])**2/c_n[5]**2))+ \
c_n[6]*np.exp(-((theta - c_n[7])**2/c_n[8]**2))+ \
c_n[9]*np.exp(-((theta - c_n[10])**2/c_n[11]**2))
return Uref
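# Quick sketch of evaluating the anode OCP fit on a grid of stoichiometries
# (theta values chosen arbitrarily for illustration).
def _demo_anode_ocp():
    theta = np.linspace(0.05, 0.95, 5)
    return refPotentialAnode_casadi(theta)  # open-circuit potential [V] at each theta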
# -*- coding: utf-8 -*-
import numpy as np
import scipy.stats as st
from abc import ABCMeta, abstractmethod
from .mvar.comp import ldl
from .mvarmodel import Mvar
from .aec.utils import filter_band, calc_ampenv, FQ_BANDS
import six
from six.moves import map
from six.moves import range
from six.moves import zip
########################################################################
# Spectrum functions:
########################################################################
def spectrum(acoef, vcoef, fs=1, resolution=100):
"""
Compute the z-transformed coefficient matrix A(f), transfer matrix H(f) and spectral matrix S(f) from MVAR coefficients.
Args:
*acoef* : numpy.array
array of shape (p, k, k) where *k* is the number of channels and
*p* is the model order.
*vcoef* : numpy.array
prediction error matrix (k, k)
*fs* = 1 : int
sampling rate
*resolution* = 100 : int
number of spectrum data points
Returns:
*A_z* : numpy.array
z-transformed A(f) complex matrix in shape (*resolution*, k, k)
*H_z* : numpy.array
inversion of *A_z*
*S_z* : numpy.array
spectrum matrix (*resolution*, k, k)
References:
.. [1] <NAME>, <NAME>, <NAME> (2004) "Granger causality
and information flow in multivariate processes"
Physical Review E 70, 050902.
"""
p, k, k = acoef.shape
freqs = np.linspace(0, fs*0.5, resolution)
A_z = np.zeros((len(freqs), k, k), complex)
H_z = np.zeros((len(freqs), k, k), complex)
S_z = np.zeros((len(freqs), k, k), complex)
I = np.eye(k, dtype=complex)
for e, f in enumerate(freqs):
epot = np.zeros((p, 1), complex)
ce = np.exp(-2.j*np.pi*f*(1./fs))
epot[0] = ce
for k in range(1, p):
epot[k] = epot[k-1]*ce
A_z[e] = I - np.sum([epot[x]*acoef[x] for x in range(p)], axis=0)
H_z[e] = np.linalg.inv(A_z[e])
S_z[e] = np.dot(np.dot(H_z[e], vcoef), H_z[e].T.conj())
return A_z, H_z, S_z
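# Minimal sketch (synthetic coefficients) of calling spectrum(): a bivariate AR(2) model
# driven by unit-variance white noise, evaluated on 64 frequency bins up to fs/2.
def _demo_spectrum():
    acoef = np.array([[[0.5, 0.0],
                       [0.3, 0.4]],
                      [[-0.2, 0.0],
                       [0.0, -0.1]]])  # shape (p, k, k) = (2, 2, 2)
    vcoef = np.eye(2)
    A_z, H_z, S_z = spectrum(acoef, vcoef, fs=128, resolution=64)
    return S_z.shape  # (64, 2, 2)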
def spectrum_inst(acoef, vcoef, fs=1, resolution=100):
"""
Compute the z-transformed coefficient matrix B(f) from MVAR coefficients,
taking zero-lag (instantaneous) effects into account.
Args:
*acoef* : numpy.array
array of shape (p+1, k, k) where *k* is the number of channels and
*p* is the model order; acoef[0] is the (k, k) matrix for zero lag,
acoef[1] for a one-sample lag and so on.
*vcoef* : numpy.array
prediction error matrix (k, k)
*fs* = 1 : int
sampling rate
*resolution* = 100 : int
number of spectrum data points
Returns:
*A_z* : numpy.array
z-transformed A(f) complex matrix in shape (*resolution*, k, k)
*H_z* : numpy.array
inversion of *A_z*
*S_z* : numpy.array
spectrum matrix (*resolution*, k, k)
References:
.. [1] <NAME>, Multivariate Autoregressive Model with
Instantaneous Effects to Improve Brain Connectivity Estimation,
Int. J. Bioelectromagn. 11, 74-79 (2009).
"""
p, k, k = acoef.shape
freqs = np.linspace(0, fs/2, resolution)
B_z = np.zeros((len(freqs), k, k), complex)
L, U, Lt = ldl(vcoef)
Linv = np.linalg.inv(L)
I = np.eye(k, dtype=complex)
bcoef = np.array([np.dot(Linv, acoef[x]) for x in range(p)])
b0 = np.eye(k) - Linv
for e, f in enumerate(freqs):
epot = np.zeros((p, 1), complex)
ce = np.exp(-2.j*np.pi*f*(1./fs))
epot[0] = ce
for k in range(1, p):
epot[k] = epot[k-1]*ce
B_z[e] = I - b0 - np.sum([epot[x]*bcoef[x] for x in range(p)], axis=0)
return B_z
########################################################################
# Connectivity classes:
########################################################################
class Connect(six.with_metaclass(ABCMeta, object)):
"""
Abstract class governing calculation of various connectivity estimators
with concrete methods: *short_time*, *significance* and
abstract *calculate*.
"""
def __init__(self):
self.values_range = [None, None] # normalization bands
self.two_sided = False # only positive, or also negative values
@abstractmethod
def calculate(self):
"""Abstract method to calculate values of estimators from specific
parameters"""
pass
def short_time(self, data, nfft=None, no=None, **params):
"""
Short-time version of the estimator, where data is windowed into parts
of length *nfft* with overlap *no*. *params* catches additional
parameters specific to the chosen estimator.
Args:
*data* : numpy.array
data matrix (kXN) or (kXNxR) where k - channels,
N - data points, R - nr of trials
*nfft* = None : int
window length (if None it's N/5)
*no* = None : int
overlap length (if None it's N/10)
*params* :
additional parameters specific for chosen estimator
Returns:
*stvalues* : numpy.array
short time values (time points, frequency, k, k), where k
is number of channels
"""
assert nfft > no, "overlap must be smaller than window"
if data.ndim > 2:
k, N, trls = data.shape
else:
k, N = data.shape
trls = 0
if not nfft:
nfft = int(N/5)
if not no:
no = int(N/10)
slices = range(0, N, int(nfft-no))
for e, i in enumerate(slices):
if i+nfft >= N:
if trls:
datcut = np.concatenate((data[:, i:i+nfft], np.zeros((k, i+nfft-N, trls))), axis=1)
else:
datcut = np.concatenate((data[:, i:i+nfft], np.zeros((k, i+nfft-N))), axis=1)
else:
datcut = data[:, i:i+nfft]
if e == 0:
rescalc = self.calculate(datcut, **params)
stvalues = np.zeros((len(slices), rescalc.shape[0], k, k))
stvalues[e] = rescalc
continue
stvalues[e] = self.calculate(datcut, **params)
return stvalues
def short_time_significance(self, data, Nrep=10, alpha=0.05,
nfft=None, no=None, verbose=True, **params):
"""
Significance of short-time versions of estimators. It is based on
bootstrap :func:`Connect.bootstrap` for the multitrial case and
surrogate data :func:`Connect.surrogate` for a single trial.
Args:
*data* : numpy.array
data matrix (kXN) or (kXNxR) where k - channels,
N - data points, R - nr of trials
*Nrep* = 10 : int
number of resamples
*alpha* = 0.05 : float
type I error rate (significance level)
*nfft* = None : int
window length (if None it's N/5)
*no* = None : int
overlap length (if None it's N/10)
*verbose* = True : bool
if True it prints dot on every realization, if False it's
quiet.
*params* :
additional parameters specific for chosen estimator
Returns:
*signi_st* : numpy.array
short time significance values in shape of
- (tp, k, k) for one sided estimator
- (tp, 2, k, k) for two sided
where k is number of channels and tp number of time points
"""
assert nfft > no, "overlap must be smaller than window"
if data.ndim > 2:
k, N, trls = data.shape
else:
k, N = data.shape
trls = 0
if not nfft:
nfft = int(N/5)
if not no:
no = int(N/10)
slices = range(0, N, int(nfft-no))
if self.two_sided:
signi_st = np.zeros((len(slices), 2, k, k))
else:
signi_st = np.zeros((len(slices), k, k))
for e, i in enumerate(slices):
if i+nfft >= N:
if trls:
datcut = np.concatenate((data[:, i:i+nfft], np.zeros((k, i+nfft-N, trls))), axis=1)
else:
datcut = np.concatenate((data[:, i:i+nfft], np.zeros((k, i+nfft-N))), axis=1)
else:
datcut = data[:, i:i+nfft]
signi_st[e] = self.significance(datcut, Nrep=Nrep,
alpha=alpha, verbose=verbose, **params)
return signi_st
def significance(self, data, Nrep=10, alpha=0.05, verbose=True, **params):
"""
Significance of connectivity estimators. It is based on
bootstrap :func:`Connect.bootstrap` for the multitrial case and
surrogate data :func:`Connect.surrogate` for a single trial.
Args:
*data* : numpy.array
data matrix (kXN) or (kXNxR) where k - channels,
N - data points, R - nr of trials
*Nrep* = 10 : int
number of resamples
*alpha* = 0.05 : float
type I error rate (significance level)
*verbose* = True : bool
if True it prints dot on every realization, if False it's
quiet.
*params* :
additional parameters specific for chosen estimator
Returns:
*signific* : numpy.array
significance values, check :func:`Connect.levels`
"""
if data.ndim > 2:
signific = self.bootstrap(data, Nrep=Nrep, alpha=alpha, verbose=verbose, **params)
else:
signific = self.surrogate(data, Nrep=Nrep, alpha=alpha, verbose=verbose, **params)
return signific
def levels(self, signi, alpha, k):
"""
Levels of significance
Args:
*signi* : numpy.array
bootstrapped values of each channel
*alpha* : float
type I error rate (significance level) - from 0 to 1
- (1-*alpha*) for onesided estimators (e.g. class:`DTF`)
- *alpha* and (1-*alpha*) for twosided (e.g. class:`PSI`)
*k* : int
number of channels
Returns:
*ficance* : numpy.array
maximum over frequency of the score at the percentile
corresponding to level 1-*alpha*
- (k, k) for one sided estimator
- (2, k, k) for two sided
"""
if self.two_sided:
ficance = np.zeros((2, k, k))
else:
ficance = np.zeros((k, k))
for i in range(k):
for j in range(k):
if self.two_sided:
ficance[0][i][j] = np.min(st.scoreatpercentile(signi[:, :, i, j], alpha*100, axis=1))
ficance[1][i][j] = np.max(st.scoreatpercentile(signi[:, :, i, j], (1-alpha)*100, axis=1))
else:
ficance[i][j] = np.min(st.scoreatpercentile(signi[:, :, i, j], (1-alpha)*100, axis=1))
return ficance
def __calc_multitrial(self, data, **params):
"Calc multitrial averaged estimator for :func:`Connect.bootstrap`"
trials = data.shape[2]
chosen = np.random.randint(trials, size=trials)
bc = np.bincount(chosen)
idxbc = np.nonzero(bc)[0]
flag = True
for num, occurence in zip(idxbc, bc[idxbc]):
if occurence > 0:
trdata = data[:, :, num]
if flag:
rescalc = self.calculate(trdata, **params)*occurence
flag = False
continue
rescalc += self.calculate(trdata, **params)*occurence
return rescalc/trials
def bootstrap(self, data, Nrep=100, alpha=0.05, verbose=True, **params):
"""
Bootstrap - random sampling with replacement of trials.
Args:
*data* : numpy.array
multichannel data matrix
*Nrep* = 100 : int
number of resamples
*alpha* = 0.05 : float
type I error rate (significance level)
*verbose* = True : bool
if True it prints dot on every realization, if False it's
quiet.
*params* :
additional parameters specific for chosen estimator
Returns:
*levelsigni* : numpy.array
significance values, check :func:`Connect.levels`
"""
for i in range(Nrep):
if verbose:
print('.', end=' ')
if i == 0:
tmpsig = self.__calc_multitrial(data, **params)
fres, k, k = tmpsig.shape
signi = np.zeros((Nrep, fres, k, k))
signi[i] = tmpsig
else:
signi[i] = self.__calc_multitrial(data, **params)
if verbose:
print('|')
return self.levels(signi, alpha, k)
def surrogate(self, data, Nrep=100, alpha=0.05, verbose=True, **params):
"""
Surrogate data testing. Mixing data points in each channel.
Significance level in calculated over all *Nrep* surrogate sets.
Args:
*data* : numpy.array
multichannel data matrix
*Nrep* = 100 : int
number of resamples
*alpha* = 0.05 : float
type I error rate (significance level)
*verbose* = True : bool
if True it prints dot on every realization, if False it's
quiet.
*params* :
additional parameters specific for chosen estimator
Returns:
*levelsigni* : numpy.array
significance values, check :func:`Connect.levels`
"""
k, N = data.shape
shdata = data.copy()
for i in range(Nrep):
if verbose:
print('.', end=' ')
for ch in range(k):
np.random.shuffle(shdata[ch,:])
if i == 0:
rtmp = self.calculate(shdata, **params)
reskeeper = np.zeros((Nrep, rtmp.shape[0], k, k))
reskeeper[i] = rtmp
continue
reskeeper[i] = self.calculate(shdata, **params)
if verbose:
print('|')
return self.levels(reskeeper, alpha, k)
class ConnectAR(six.with_metaclass(ABCMeta, Connect)):
"""
Inherits from *Connect* class and governs calculation of various
connectivity estimators basing on MVAR model methods. It overloads
*short_time*, *significance* methods but *calculate* remains abstract.
"""
def __init__(self):
super(ConnectAR, self).__init__()
self.values_range = [0, 1]
def short_time(self, data, nfft=None, no=None, mvarmethod='yw',
order=None, resol=None, fs=1):
"""
It overloads the :class:`Connect` method :func:`Connect.short_time`.
Short-time version of the estimator, where data is windowed into parts
of length *nfft* and overlap *no*. *params* catch additional
parameters specific for estimator.
Args:
*data* : numpy.array
data matrix (kXN) or (kXNxR) where k - channels,
N - data points, R - nr of trials
*nfft* = None : int
window length (if None it's N/5)
*no* = None : int
overlap length (if None it's N/10)
*mvarmethod* = 'yw' :
MVAR parameters estimation method
all available methods can be found in *fitting_algorithms*
*order* = None:
MVAR model order; if None, it is set automatically based
on the default criterion.
*resol* = None:
frequency resolution; if None, it is 100.
*fs* = 1 :
sampling frequency
Returns:
*stvalues* : numpy.array
short time values (time points, frequency, k, k), where k
is number of channels
"""
if data.ndim > 2:
k, N, trls = data.shape
else:
k, N = data.shape
trls = 0
if not nfft:
nfft = int(N/5)
if not no:
no = int(N/10)
assert nfft > no, "overlap must be smaller than window"
slices = range(0, N, int(nfft-no))
for e, i in enumerate(slices):
if i+nfft >= N:
if trls:
datcut = np.concatenate((data[:, i:i+nfft], np.zeros((k, i+nfft-N, trls))), axis=1)
else:
datcut = np.concatenate((data[:, i:i+nfft], np.zeros((k, i+nfft-N))), axis=1)
else:
datcut = data[:, i:i+nfft]
ar, vr = Mvar().fit(datcut, order, mvarmethod)
if e == 0:
rescalc = self.calculate(ar, vr, fs, resol)
stvalues = np.zeros((len(slices), rescalc.shape[0], k, k))
stvalues[e] = rescalc
continue
stvalues[e] = self.calculate(ar, vr, fs, resol)
return stvalues
def short_time_significance(self, data, Nrep=100, alpha=0.05, method='yw',
order=None, fs=1, resolution=None,
nfft=None, no=None, verbose=True, **params):
"""
Significance of short-time versions of estimators. It is based on
bootstrap :func:`ConnectAR.bootstrap` for the multitrial case and
surrogate data :func:`ConnectAR.surrogate` for a single trial.
Args:
*data* : numpy.array
data matrix (kXN) or (kXNxR) where k - channels,
N - data points, R - nr of trials
*Nrep* = 100 : int
number of resamples
*alpha* = 0.05 : float
type I error rate (significance level)
*method* = 'yw': str
method of MVAR parameters estimation
all available methods can be found in *fitting_algorithms*
*order* = None : int
MVAR model order, if None, it's chosen using default criterion
*fs* = 1 : int
sampling frequency
*resolution* = None : int
resolution (if None, it's 100 points)
*nfft* = None : int
window length (if None it's N/5)
*no* = None : int
overlap length (if None it's N/10)
*verbose* = True : bool
if True it prints a dot for every realization; if False it is
quiet.
*params* :
additional parameters specific for chosen estimator
Returns:
*signi_st* : numpy.array
short time significance values in shape of
- (tp, k, k) for one sided estimator
- (tp, 2, k, k) for two sided
where k is number of channels and tp number of time points
"""
if data.ndim > 2:
k, N, trls = data.shape
else:
k, N = data.shape
trls = 0
if not nfft:
nfft = int(N/5)
if not no:
no = int(N/10)
assert nfft > no, "overlap must be smaller than window"
slices = range(0, N, int(nfft-no))
signi_st = np.zeros((len(slices), k, k))
for e, i in enumerate(slices):
if i+nfft >= N:
if trls:
datcut = np.concatenate((data[:, i:i+nfft], np.zeros((k, i+nfft-N, trls))), axis=1)
else:
datcut = np.concatenate((data[:, i:i+nfft], np.zeros((k, i+nfft-N))), axis=1)
else:
datcut = data[:, i:i+nfft]
signi_st[e] = self.significance(datcut, method, order=order, resolution=resolution,
Nrep=Nrep, alpha=alpha, verbose=verbose, **params)
return signi_st
def __calc_multitrial(self, data, method='yw', order=None, fs=1, resolution=None, **params):
"Calc multitrial averaged estimator for :func:`ConnectAR.bootstrap`"
trials = data.shape[2]
chosen = np.random.randint(trials, size=trials)
ar, vr = Mvar().fit(data[:, :, chosen], order, method)
rescalc = self.calculate(ar, vr, fs, resolution)
return rescalc
def significance(self, data, method, order=None, resolution=None, Nrep=10, alpha=0.05, verbose=True, **params):
"""
Significance of connectivity estimators. It is based on
bootstrap :func:`ConnectAR.bootstrap` for the multitrial case and
surrogate data :func:`ConnectAR.surrogate` for a single trial.
Args:
*data* : numpy.array
data matrix
*method* = 'yw': str
method of MVAR parameters estimation
all available methods can be found in *fitting_algorithms*
*order* = None : int
MVAR model order, if None, it's chosen using the default criterion
*Nrep* = 10 : int
number of resamples
*alpha* = 0.05 : float
type I error rate (significance level)
*resolution* = None : int
resolution (if None, it's 100 points)
*verbose* = True : bool
if True it prints a dot for every realization; if False it is
quiet.
*params* :
additional parameters specific for chosen estimator
Returns:
*signific* : numpy.array
significance values, check :func:`Connect.levels`
"""
if data.ndim > 2:
signific = self.bootstrap(data, method, order=order, resolution=resolution,
Nrep=Nrep, alpha=alpha, verbose=verbose, **params)
else:
signific = self.surrogate(data, method, order=order, resolution=resolution,
Nrep=Nrep, alpha=alpha, verbose=verbose, **params)
return signific
def bootstrap(self, data, method, order=None, Nrep=10, alpha=0.05, fs=1, verbose=True, **params):
"""
Bootstrap - random sampling with replacement of trials for *ConnectAR*.
Args:
*data* : numpy.array
multichannel data matrix
*method* : str
method of MVAR parameters estimation
all available methods can be found in *fitting_algorithms*
*Nrep* = 10 : int
number of resamples
*alpha* = 0.05 : float
type I error rate (significance level)
*order* = None : int
MVAR model order, if None, it's chosen using default criterion
*verbose* = True : bool
if True it prints a dot for every realization; if False it is
quiet.
*params* :
additional parameters specific for chosen estimator
Returns:
*levelsigni* : numpy.array
significance values, check :func:`Connect.levels`
"""
resolution = 100
if 'resolution' in params and params['resolution']:
resolution = params['resolution']
for i in range(Nrep):
if verbose:
print('.', end=' ')
if i == 0:
tmpsig = self.__calc_multitrial(data, method, order, fs, resolution)
fres, k, k = tmpsig.shape
signi = np.zeros((Nrep, fres, k, k))
signi[i] = tmpsig
else:
signi[i] = self.__calc_multitrial(data, method, order, fs, resolution)
if verbose:
print('|')
return self.levels(signi, alpha, k)
def surrogate(self, data, method, Nrep=10, alpha=0.05, order=None, fs=1, verbose=True, **params):
"""
Surrogate data testing for *ConnectAR*. Data points are shuffled within each channel.
The significance level is calculated over all *Nrep* surrogate sets.
Args:
*data* : numpy.array
multichannel data matrix
*method* : str
method of MVAR parameters estimation
all available methods can be found in *fitting_algorithms*
*Nrep* = 10 : int
number of resamples
*alpha* = 0.05 : float
type I error rate (significance level)
*order* = None : int
MVAR model order, if None, it's chosen using default criterion
*verbose* = True : bool
if True it prints a dot for every realization; if False it is
quiet.
*params* :
additional parameters specific for chosen estimator
Returns:
*levelsigni* : numpy.array
significance values, check :func:`Connect.levels`
"""
shdata = data.copy()
k, N = data.shape
resolution = 100
if 'resolution' in params and params['resolution']:
resolution = params['resolution']
for i in range(Nrep):
if verbose:
print('.', end=' ')
list(map(np.random.shuffle, shdata))
ar, vr = Mvar().fit(shdata, order, method)
if i == 0:
rtmp = self.calculate(ar, vr, fs, resolution)
reskeeper = np.zeros((Nrep, rtmp.shape[0], k, k))
reskeeper[i] = rtmp
continue
reskeeper[i] = self.calculate(ar, vr, fs, resolution)
if verbose:
print('|')
return self.levels(reskeeper, alpha, k)
############################
# MVAR based methods:
def dtf_fun(Acoef, Vcoef, fs, resolution, generalized=False):
"""
Directed Transfer Function estimation from MVAR parameters.
Args:
*Acoef* : numpy.array
array of shape (k, k, p) where *k* is number of channels and
*p* is a model order.
*Vcoef* : numpy.array
prediction error matrix (k, k)
*fs* = 1 : int
sampling rate
*resolution* = 100 : int
number of spectrum data points
*generalized* = False : bool
generalized version or not
Returns:
*DTF* : numpy.array
matrix with estimation results (*resolution*, k, k)
References:
.. [1] <NAME>, <NAME>. A new method of the description
of the information flow. Biol.Cybern. 65:203-210, (1991).
"""
A_z, H_z, S_z = spectrum(Acoef, Vcoef, fs, resolution=resolution)
res, k, k = A_z.shape
DTF = np.zeros((res, k, k))
if generalized:
sigma = np.diag(Vcoef)
else:
sigma = np.ones(k)
for i in range(res):
mH = sigma*np.dot(H_z[i], H_z[i].T.conj()).real
DTF[i] = (np.sqrt(sigma)*np.abs(H_z[i]))/np.sqrt(np.diag(mH)).reshape((k, 1))
return DTF
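# A minimal usage sketch for dtf_fun (hedged: the multichannel `data` array and the model
# order are hypothetical; `Mvar` is the fitting helper already used by ConnectAR above):
#
#     ar, vr = Mvar().fit(data, 4, 'yw')             # ar: (k, k, p) coefficients, vr: (k, k) noise covariance
#     dtf = dtf_fun(ar, vr, fs=256, resolution=100)  # dtf[f, i, j]: normalized inflow from channel j to channel i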
def pdc_fun(Acoef, Vcoef, fs, resolution, generalized=False):
"""
Partial Directed Coherence estimation from MVAR parameters.
Args:
*Acoef* : numpy.array
array of shape (k, k, p) where *k* is number of channels and
*p* is a model order.
*Vcoef* : numpy.array
prediction error matrix (k, k)
*fs* = 1 : int
sampling rate
*resolution* = 100 : int
number of spectrum data points
*generalized* = False : bool
generalized version or not
Returns:
*PDC* : numpy.array
matrix with estimation results (*resolution*, k, k)
References:
.. [1] <NAME>., <NAME>., Partial directed
coherence: a new concept in neural structure determination,
2001, Biol. Cybern. 84, 463–474.
"""
A_z, H_z, S_z = spectrum(Acoef, Vcoef, fs, resolution=resolution)
res, k, k = A_z.shape
PDC = np.zeros((res, k, k))
sigma = np.diag(Vcoef)
for i in range(res):
mA = (1./sigma[:, None])*np.dot(A_z[i].T.conj(), A_z[i]).real
PDC[i] = np.abs(A_z[i]/np.sqrt(sigma))/np.sqrt(np.diag(mA))
return PDC
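# Usage mirrors dtf_fun above; the difference is the normalization: as implemented here, PDC
# normalizes the columns of A(f) (outflows of the source channel), while DTF normalizes the
# rows of H(f) (inflows of the sink channel). With the same hypothetical fit as above:
#
#     pdc = pdc_fun(ar, vr, fs=256, resolution=100)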
class PartialCoh(ConnectAR):
"""
PartialCoh - class inherits from :class:`ConnectAR` and overloads
:func:`Connect.calculate` method.
"""
def calculate(self, Acoef=None, Vcoef=None, fs=None, resolution=None):
"""
Partial Coherence estimation from MVAR parameters.
Args:
*Acoef* : numpy.array
array of shape (k, k, p) where *k* is number of channels and
*p* is a model order.
*Vcoef* : numpy.array
prediction error matrix (k, k)
*fs* = 1 : int
sampling rate
*resolution* = 100 : int
number of spectrum data points
*generalized* = False : bool
generalized version or not
Returns:
*PC* : numpy.array
matrix with estimation results (*resolution*, k, k)
References:
.. [1] <NAME>, <NAME>. Spectral Analysis and its
Applications. Holden-Day, USA, 1969
"""
A_z, H_z, S_z = spectrum(Acoef, Vcoef, fs, resolution=resolution)
res, k, k = A_z.shape
PC = np.zeros((res, k, k))
before = np.ones((k, k))
before[0::2, :] *= -1
before[:, 0::2] *= -1
for i in range(res):
D_z = np.linalg.inv(S_z[i])
dd = np.tile(np.diag(D_z), (k, 1))
mD = (dd*dd.T).real
PC[i] = -1*before*(np.abs(D_z)/np.sqrt(mD))
return np.abs(PC)
class PDC(ConnectAR):
"""
PDC - class inherits from :class:`ConnectAR` and overloads
:func:`Connect.calculate` method.
"""
def calculate(self, Acoef=None, Vcoef=None, fs=None, resolution=100):
"More in :func:`pdc_fun`."
return pdc_fun(Acoef, Vcoef, fs, resolution)
class gPDC(ConnectAR):
"""
gPDC - class inherits from :class:`ConnectAR` and overloads
:func:`Connect.calculate` method.
"""
def calculate(self, Acoef=None, Vcoef=None, fs=None, resolution=100):
"More in :func:`pdc_fun`"
return pdc_fun(Acoef, Vcoef, fs, resolution, generalized=True)
class DTF(ConnectAR):
"""
DTF - class inherits from :class:`ConnectAR` and overloads
:func:`Connect.calculate` method.
"""
def calculate(self, Acoef=None, Vcoef=None, fs=None, resolution=100):
"More in :func:`dtf_fun`."
return dtf_fun(Acoef, Vcoef, fs, resolution)
class gDTF(ConnectAR):
"""
gDTF - class inherits from :class:`ConnectAR` and overloads
:func:`Connect.calculate` method.
"""
def calculate(self, Acoef=None, Vcoef=None, fs=None, resolution=100):
"More in :func:`dtf_fun`."
return dtf_fun(Acoef, Vcoef, fs, resolution, generalized=True)
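# Hedged sketch of the estimator-class workflow shared by the ConnectAR subclasses above
# (the data array, window sizes, model order and sampling rate are hypothetical):
#
#     dtf = DTF()
#     values = dtf.short_time(data, nfft=256, no=128, order=4, fs=256)   # (windows, freq, k, k)
#     signif = dtf.short_time_significance(data, Nrep=50, alpha=0.05, order=4,
#                                          fs=256, nfft=256, no=128)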
class ffDTF(ConnectAR):
"""
ffDTF - class inherits from :class:`ConnectAR` and overloads
:func:`Connect.calculate` method.
"""
def calculate(self, Acoef=None, Vcoef=None, fs=None, resolution=100):
"""
full-frequency Directed Transfer Function estimation from MVAR
parameters.
Args:
*Acoef* : numpy.array
array of shape (k, k, p) where *k* is number of channels and
*p* is a model order.
*Vcoef* : numpy.array
prediction error matrix (k, k)
*fs* = 1 : int
sampling rate
*resolution* = 100 : int
number of spectrum data points
*generalized* = False : bool
generalized version or not
Returns:
*ffDTF* : numpy.array
matrix with estimation results (*resolution*, k, k)
References:
.. [1] Korzeniewska, A. et al. Determination of information flow direction
among brain structures by a modified directed transfer function (dDTF)
method. J. Neurosci. Methods 125, 195–207 (2003).
"""
A_z, H_z, S_z = spectrum(Acoef, Vcoef, fs, resolution=resolution)
res, k, k = A_z.shape
mH = np.zeros((res, k, k))
for i in range(res):
mH[i] = np.abs(np.dot(H_z[i], H_z[i].T.conj()))
mHsum = np.sum(mH, axis=0)
ffDTF = np.zeros((res, k, k))
for i in range(res):
ffDTF[i] = (np.abs(H_z[i]).T/np.sqrt(np.diag(mHsum))).T
return ffDTF
class dDTF(ConnectAR):
"""
dDTF - class inherits from :class:`ConnectAR` and overloads
:func:`Connect.calculate` method.
"""
def calculate(self, Acoef=None, Vcoef=None, fs=None, resolution=100):
"""
direct Directed Transfer Function estimation from MVAR
parameters. dDTF is the DTF multiplied at each frequency by the
Partial Coherence.
Args:
*Acoef* : numpy.array
array of shape (k, k, p) where *k* is number of channels and
*p* is a model order.
*Vcoef* : numpy.array
prediction error matrix (k, k)
*fs* = 1 : int
sampling rate
*resolution* = 100 : int
number of spectrum data points
*generalized* = False : bool
generalized version or not
Returns:
*dDTF* : numpy.array
matrix with estimation results (*resolution*, k, k)
References:
.. [1] <NAME>. et al. Determination of information flow direction
among brain structures by a modified directed transfer function (dDTF)
method. J. Neurosci. Methods 125, 195–207 (2003).
"""
A_z, H_z, S_z = spectrum(Acoef, Vcoef, fs, resolution=resolution)
res, k, k = A_z.shape
mH = np.zeros((res, k, k))
for i in range(res):
mH[i] = np.abs(np.dot(H_z[i], H_z[i].T.conj()))
mHsum = np.sum(mH, axis=0)
dDTF = np.zeros((res, k, k))
before = np.ones((k, k))
before[0::2, :] *= -1
before[:, 0::2] *= -1
for i in range(res):
D_z = np.linalg.inv(S_z[i])
dd = np.tile(np.diag(D_z), (k, 1))
mD = (dd*dd.T).real
PC = np.abs(-1*before*(np.abs(D_z)/np.sqrt(mD)))
dDTF[i] = PC*(np.abs(H_z[i]).T/np.sqrt(np.diag(mHsum))).T
return dDTF
class iPDC(ConnectAR):
"""
iPDC - class inherits from :class:`ConnectAR` and overloads
:func:`Connect.calculate` method.
"""
def calculate(self, Acoef=None, Vcoef=None, fs=None, resolution=100):
"""
instantaneous Partial Directed Coherence from MVAR
parameters.
Args:
*Acoef* : numpy.array
array of shape (k, k, p+1) where *k* is number of channels and
*p* is a model order. It's zero lag case.
*Vcoef* : numpy.array
prediction error matrix (k, k)
*fs* = 1 : int
sampling rate
*resolution* = 100 : int
number of spectrum data points
*generalized* = False : bool
generalized version or not
Returns:
*iPDC* : numpy.array
matrix with estimation results (*resolution*, k, k)
References:
.. [1] <NAME>. et al. Multivariate Autoregressive Model with Instantaneous
Effects to Improve Brain Connectivity Estimation.
Int. J. Bioelectromagn. 11, 74–79 (2009).
"""
B_z = spectrum_inst(Acoef, Vcoef, fs, resolution=resolution)
res, k, k = B_z.shape
PDC = np.zeros((res, k, k))
for i in range(res):
mB = np.dot(B_z[i].T.conj(), B_z[i]).real
PDC[i] = np.abs(B_z[i])/np.sqrt(np.diag(mB))
return PDC
class iDTF(ConnectAR):
"""
iDTF - class inherits from :class:`ConnectAR` and overloads
:func:`Connect.calculate` method.
"""
def calculate(self, Acoef=None, Vcoef=None, fs=None, resolution=100):
"""
instantaneous Directed Transfer Function from MVAR
parameters.
Args:
*Acoef* : numpy.array
array of shape (k, k, p+1) where *k* is number of channels and
*p* is a model order. It's zero lag case.
*Vcoef* : numpy.array
prediction error matrix (k, k)
*fs* = 1 : int
sampling rate
*resolution* = 100 : int
number of spectrum data points
*generalized* = False : bool
generalized version or not
Returns:
*iDTF* : numpy.array
matrix with estimation results (*resolution*, k, k)
References:
.. [1] <NAME>, Multivariate Autoregressive Model with Instantaneous
Effects to Improve Brain Connectivity Estimation.
Int. J. Bioelectromagn. 11, 74–79 (2009).
"""
B_z = spectrum_inst(Acoef, Vcoef, fs, resolution=resolution)
res, k, k = B_z.shape
DTF = np.zeros((res, k, k))
from unittest import TestCase
from math import *
import numpy as np
from numpy.random import random, randint, uniform
from AnyQt.QtGui import QColor
from Orange.widgets.utils.classdensity import compute_density, grid_sample
class TestScatterplotDensity(TestCase):
def setUp(self):
np.random.seed(1)
def random_data(self, n_grid, n_colors, n_data):
mx, Mx = 200, 2000
my, My = 300, 3000
mr, Mr = 10, 500
x_grid = sorted(uniform(mx, Mx, n_grid))
y_grid = sorted(uniform(my, My, n_grid))
colors = [
QColor(randint(256), randint(256), randint(256), randint(256))
for i in range(n_colors)
]
cx = uniform(mx, Mx, n_colors)
cy = uniform(my, My, n_colors)
cr = uniform(mr, Mr, n_colors)
x_data, y_data, rgb_data = [], [], []
for i in range(n_data):
c = randint(n_colors)
r = uniform(1, cr[c])
a = random()
import numpy as np
from sfsimodels.models.abstract_models import PhysicalObject
from sfsimodels.models.systems import TwoDSystem
from sfsimodels.functions import interp_left, interp2d, interp3d
from .fns import remove_close_items, build_ele2_node_array
import hashlib
def sort_slopes(sds):
"""Sort slopes from bottom to top then right to left"""
sds = np.array(sds)
scores = sds[:, 0, 1] + sds[:, 1, 1] * 1e6
inds = np.argsort(scores)
return sds[inds]
def adjust_slope_points_for_removals(sds, x, removed_y, retained_y):
for sd in sds:
for i in range(2):
if sd[0][i] == x and sd[1][i] == removed_y:
sd[1][i] = retained_y
def adj_slope_by_layers(xm, ym, sgn=1):
"""
Given mesh coordinates, adjust the mesh to match the slope by adjusting each layer;
the bottom-left and top-right coords of the mesh lie on the slope
Parameters
----------
xm
ym
x_slope - NOT needed
y_slope
Returns
-------
"""
# TODO use centroid formula - and use o3plot to get ele-coords
ym = sgn * np.array(ym)
xm = sgn * np.array(xm)
if sgn == -1:
xm = xm[::-1]
ym = ym[::-1]
nh = len(ym[0]) - 1
# dy = min([(ym[0][-1] - ym[0][0]) / nh, (ym[-1][-1] - ym[-1][0]) / nh, 0.2])
dy1 = min([(ym[-1][-1] - ym[-1][0]) / nh])
dy0 = 0.2
y0s = ym[0][0] + np.arange(nh + 1) * dy0
y1s = ym[-1][-1] - np.arange(nh + 1) * dy1
y1s = y1s[::-1]
for i in range(nh + 1):
ym[:, i] = np.interp(xm[:, i], [xm[0][0], xm[-1][-1]], [y0s[i], y1s[i]])
xm[:, i] = xm[:, 0]
y_centres_at_xns = (ym[1:] + ym[:-1]) / 2
y_centres = (y_centres_at_xns[:, 1:] + y_centres_at_xns[:, :-1]) / 2
# get x-coordinates of centres of relevant elements
included_ele = []
dy_inds = len(ym[0, :]) - 1
for i in range(0, dy_inds):
# account for shift before assessing position of centroid
xcens = (xm[1:, i] + xm[:-1, i]) / 2 + 0.375 * (xm[1:, -1] - xm[:-1, -1])
y_surf_at_x_cens = np.interp(xcens, [xm[0][0], xm[-1][-1]], [ym[0][0], ym[-1][-1]])
inds = np.where(y_centres[:, i] < y_surf_at_x_cens)
if len(inds[0]):
included_ele.append(inds[0][0])
else:
included_ele.append(len(y_surf_at_x_cens))
included_ele.append(len(y_surf_at_x_cens))
new_xm = xm
new_ym = ym
for j in range(1, nh + 1):
new_ym[included_ele[0], j] += dy1
for i in range(1, dy_inds + 1):
x_ind_adj = included_ele[i - 1]
x_ind_adj_next = included_ele[i]
if x_ind_adj == x_ind_adj_next:
continue
# shift by half of the ele
dx = (xm[x_ind_adj + 1, i] - xm[x_ind_adj, i]) * 0.5
dxs = np.interp(xm[x_ind_adj:x_ind_adj_next, i], [xm[x_ind_adj, i], xm[x_ind_adj_next, i]], [dx, 0])
new_xm[x_ind_adj:x_ind_adj_next, i] = xm[x_ind_adj:x_ind_adj_next, i] + dxs
for j in range(i + 1, nh + 1):
new_ym[x_ind_adj_next, j] += dy1
if sgn == -1:
new_xm = new_xm[::-1]
new_ym = new_ym[::-1]
return new_xm * sgn, new_ym * sgn
def calc_centroid(xs, ys):
import numpy as np
x0 = np.array(xs)
y0 = np.array(ys)
x1 = np.roll(xs, 1, axis=-1)
y1 = np.roll(ys, 1, axis=-1)
a = x0 * y1 - x1 * y0
xc = np.sum((x0 + x1) * a, axis=-1)
yc = np.sum((y0 + y1) * a, axis=-1)
area = 0.5 * np.sum(a, axis=-1)
xc /= (6.0 * area)
yc /= (6.0 * area)
return xc, yc
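# Worked example of the shoelace centroid: for the unit square traversed (0,0)->(1,0)->(1,1)->(0,1),
# calc_centroid([0, 1, 1, 0], [0, 0, 1, 1]) returns (0.5, 0.5); since the reductions use axis=-1,
# stacked polygons passed as 2-D arrays are handled as well.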
def calc_mesh_centroids(fem):
x_inds = []
y_inds = []
if hasattr(fem.y_nodes[0], '__len__'): # can either have varying y-coordinates or single set
n_y = len(fem.y_nodes[0])
else:
n_y = 0
import numpy as np
for xx in range(len(fem.soil_grid)):
x_ele = [xx, xx + 1, xx + 1, xx]
x_inds += [x_ele for i in range(n_y - 1)]
for yy in range(len(fem.soil_grid[xx])):
y_ele = [yy, yy, yy + 1, yy + 1]
y_inds.append(y_ele)
n_eles = len(np.array(x_inds))
x_inds = np.array(x_inds)
import numpy as np
import os
from sklearn import neural_network, tree
try:
import cPickle as pickle
except:
import pickle
class ExperienceReplay(object):
def __init__(self, max_memory=5000, discount=0.9):
self.memory = []
self.max_memory = max_memory
self.discount = discount
def remember(self, experience):
self.memory.append(experience)
def get_batch(self, batch_size=10):
if len(self.memory) > self.max_memory:
del self.memory[:len(self.memory) - self.max_memory]
if batch_size < len(self.memory):
timerank = range(1, len(self.memory) + 1)
p = timerank / np.sum(timerank, dtype=float)
batch_idx = np.random.choice(range(len(self.memory)), replace=False, size=batch_size, p=p)
batch = [self.memory[idx] for idx in batch_idx]
else:
batch = self.memory
return batch
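# A short sketch of the intended use (the state vector and reward are hypothetical; the agents
# below store (state, reward) tuples as experiences):
#
#     replay = ExperienceReplay(max_memory=1000)
#     replay.remember(([0.1, 0.4, 0.7], 1.0))
#     batch = replay.get_batch(batch_size=32)   # recency-weighted sampling without replacement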
class BaseAgent(object):
def __init__(self, histlen):
self.single_testcases = True
self.train_mode = True
self.histlen = histlen
def get_action(self, s):
return 0
def get_all_actions(self, states):
""" Returns list of actions for all states """
return [self.get_action(s) for s in states]
def reward(self, reward):
pass
def save(self, filename):
""" Stores agent as pickled file """
pickle.dump(self, open(filename + '.p', 'wb'), 2)
@classmethod
def load(cls, filename):
return pickle.load(open(filename + '.p', 'rb'))
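# Quick save/load round trip (the path is hypothetical); note that '.p' is appended automatically:
#
#     agent = BaseAgent(histlen=4)
#     agent.save('base_agent')                  # writes base_agent.p
#     restored = BaseAgent.load('base_agent')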
class NetworkAgent(BaseAgent):
def __init__(self, state_size, action_size, hidden_size, histlen):
super(NetworkAgent, self).__init__(histlen=histlen)
self.name = 'mlpclassifier'
self.experience_length = 10000
self.experience_batch_size = 1000
self.experience = ExperienceReplay(max_memory=self.experience_length)
self.episode_history = []
self.iteration_counter = 0
self.action_size = action_size
if isinstance(hidden_size, tuple):
self.hidden_size = hidden_size
else:
self.hidden_size = (hidden_size,)
self.model = None
self.model_fit = False
self.init_model(True)
# TODO This could improve performance (if necessary)
# def get_all_actions(self, states):
# try:
def init_model(self, warm_start=True):
if self.action_size == 1:
self.model = neural_network.MLPClassifier(hidden_layer_sizes=self.hidden_size, activation='relu',
warm_start=warm_start, solver='adam', max_iter=750)
else:
self.model = neural_network.MLPRegressor(hidden_layer_sizes=self.hidden_size, activation='relu',
warm_start=warm_start, solver='adam', max_iter=750)
self.model_fit = False
def get_action(self, s):
if self.model_fit:
if self.action_size == 1:
a = self.model.predict_proba(np.array(s).reshape(1, -1))[0][1]
else:
a = self.model.predict(np.array(s).reshape(1, -1))[0]
else:
a = np.random.random()
if self.train_mode:
self.episode_history.append((s, a))
return a
def reward(self, rewards):
if not self.train_mode:
return
try:
x = float(rewards)
rewards = [x] * len(self.episode_history)
except:
if len(rewards) < len(self.episode_history):
raise Exception('Too few rewards')
self.iteration_counter += 1
for ((state, action), reward) in zip(self.episode_history, rewards):
self.experience.remember((state, reward))
self.episode_history = []
if self.iteration_counter == 1 or self.iteration_counter % 5 == 0:
self.learn_from_experience()
def learn_from_experience(self):
experiences = self.experience.get_batch(self.experience_batch_size)
x, y = zip(*experiences)
if self.model_fit:
try:
self.model.partial_fit(x, y)
except ValueError:
self.init_model(warm_start=False)
self.model.fit(x, y)
self.model_fit = True
else:
self.model.fit(x, y) # Call fit once to learn classes
self.model_fit = True
# Decision Tree based agent
class DTAgent(BaseAgent):
def __init__(self, action_size, histlen, criterion, max_depth, min_samples_split):
super(DTAgent, self).__init__(histlen=histlen)
self.name = 'dtclassifier'
self.experience_length = 10000
self.experience_batch_size = 1000
self.experience = ExperienceReplay(max_memory=self.experience_length)
self.episode_history = []
self.iteration_counter = 0
self.action_size = action_size
self.criterion = criterion
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.model = None
self.model_fit = False
self.init_model()
# TODO This could improve performance (if necessary)
# def get_all_actions(self, states):
# try:
def init_model(self):
if self.action_size == 1:
self.model = tree.DecisionTreeClassifier(criterion=self.criterion, max_depth=self.max_depth,
min_samples_split=self.min_samples_split)
else:
self.model = tree.DecisionTreeClassifier(criterion=self.criterion, max_depth=self.max_depth,
min_samples_split=self.min_samples_split)
self.model_fit = False
def get_action(self, s):
if self.model_fit:
if self.action_size == 1:
a = self.model.predict_proba(np.array(s).reshape(1, -1))[0][0]
else:
a = self.model.predict(np.array(s).reshape(1, -1))[0]
# encoding=utf-8
import sys
from PyQt5.QtWidgets import QApplication, QSizePolicy, QMessageBox, QWidget, \
QPushButton, QLineEdit, QDesktopWidget, QGridLayout, QFileDialog, QListWidget, QLabel,QFrame,QGroupBox
from PyQt5.QtCore import Qt, QThread
import PyQt5.QtWidgets as QtWidgets
import PyQt5.QtCore as QtCore
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.figure import Figure
import matplotlib.pyplot as plt
from matplotlib.collections import LineCollection
from matplotlib import cm
from matplotlib.widgets import Slider
import numpy as np
from scipy.signal import spectrogram, butter, filtfilt, lfilter
from scipy.ndimage import gaussian_filter
from scipy.signal import iirnotch
from scipy.signal import convolve2d
# import h5py
from sklearn.decomposition import PCA
from sklearn.cluster import KMeans
import mne
from gui_forms.ictal_form import Ictal_gui
class figure_thread(QThread):
def __init__(self, parent=None):
super(figure_thread, self).__init__(parent=parent)
self.ei = parent.ei_ei
def run(self):
pass
class fullband_computation_thread(QThread):
fullband_done_sig = QtCore.pyqtSignal(object)
def __init__(self, parent=None, raw_signal=None, ei=None, fs=2000):
super(fullband_computation_thread, self).__init__(parent=parent)
self.raw_signal = raw_signal
self.fs = fs
self.ei = ei
def run(self):
spec_pca, fullband_labels, fullband_ind = compute_full_band(self.raw_signal, self.fs, self.ei)
fullband_res = [spec_pca, fullband_labels, fullband_ind]
self.fullband_done_sig.emit(fullband_res)
def get_name_fromEdf(file_absPath):
with open(file_absPath,'rb') as fh:
fh.read(8)
pinfo=fh.read(80).decode('latin-1').rstrip()
pinfo=pinfo.split(' ')
patient_name=pinfo[3]
return patient_name
def compute_hfer(target_data, base_data, fs):
target_sq = target_data ** 2
base_sq = base_data ** 2
window = int(fs / 2.0)
target_energy=convolve2d(target_sq,np.ones((1,window)),'same')
base_energy=convolve2d(base_sq,np.ones((1,window)),'same')
base_energy_ref = np.sum(base_energy, axis=1) / base_energy.shape[1]
target_de_matrix = base_energy_ref[:, np.newaxis] * np.ones((1, target_energy.shape[1]))
base_de_matrix = base_energy_ref[:, np.newaxis] * np.ones((1, base_energy.shape[1]))
norm_target_energy = target_energy / target_de_matrix.astype(np.float32)
norm_base_energy = base_energy / base_de_matrix.astype(np.float32)
return norm_target_energy, norm_base_energy
def determine_threshold_onset(target, base):
base_data = base.copy()
target_data = target.copy()
sigma = np.std(base_data, axis=1, ddof=1)
channel_max_base = np.max(base_data, axis=1)
thresh_value = channel_max_base + 20 * sigma
onset_location = np.zeros(shape=(target_data.shape[0],))
for channel_idx in range(target_data.shape[0]):
logic_vec = target_data[channel_idx, :] > thresh_value[channel_idx]
if np.sum(logic_vec) == 0:
onset_location[channel_idx] = len(logic_vec)
else:
onset_location[channel_idx] = np.where(logic_vec != 0)[0][0]
return onset_location
def compute_ei_index(target, base, fs):
ei = np.zeros([1, target.shape[0]])
hfer = np.zeros([1, target.shape[0]])
onset_rank = np.zeros([1, target.shape[0]])
channel_onset = determine_threshold_onset(target, base)
seizure_location = np.min(channel_onset)
onset_channel = np.argmin(channel_onset)
hfer = np.sum(target[:, int(seizure_location):int(seizure_location + 0.25 * fs)], axis=1) / (fs * 0.25)
onset_asend = np.sort(channel_onset)
time_rank_tmp = np.argsort(channel_onset)
onset_rank = np.argsort(time_rank_tmp) + 1
onset_rank = np.ones((onset_rank.shape[0],)) / np.float32(onset_rank)
ei = np.sqrt(hfer * onset_rank)
for i in range(len(ei)):
if np.isnan(ei[i]) or np.isinf(ei[i]):
ei[i] = 0
if np.max(ei) > 0:
ei = ei / np.max(ei)
return ei, hfer, onset_rank#,channel_onset
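# Minimal sketch of the EI pipeline wired up in the GUI below (the channel-by-time arrays
# `target_data` and `baseline_data` are hypothetical):
#
#     norm_target, norm_base = compute_hfer(target_data, baseline_data, fs)
#     ei, hfer, onset_rank = compute_ei_index(norm_target, norm_base, fs)
#     # ei is scaled to [0, 1]; the GUI flags channels with ei above mean(ei) + std(ei)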
def choose_kmeans_k(data, k_range):
k_sse = []
for k in k_range:
tmp_kmeans = KMeans(n_clusters=k)
tmp_kmeans.fit(data)
k_sse.append(tmp_kmeans.inertia_)
k_sse = np.array(k_sse)
k_sseDiff = -np.diff(k_sse)
k_sseDiffMean = np.mean(k_sseDiff)
best_index = np.where(k_sseDiff < k_sseDiffMean)[0][0]
return k_range[best_index]
def find_ei_cluster_ratio(pei, labels, ei_elec_num=10):
top_elec_ind = list(np.argsort(-pei)[:ei_elec_num])
top_elec_labels = list(labels[top_elec_ind])
top_elec_count = {}
top_elec_set = set(top_elec_labels)
for i in top_elec_set:
top_elec_count[i] = top_elec_labels.count(i)
cluster_ind1 = [k for k, v in top_elec_count.items() if v > ei_elec_num / 2]
if len(cluster_ind1):
return np.array(cluster_ind1)
else:
cluster_ind2 = [k for k, v in top_elec_count.items() if v > ei_elec_num / 3]
if len(cluster_ind2):
return np.array(cluster_ind2)
else:
return None
def pad_zero(data, length):
data_len = len(data)
if data_len < length:
# tmp_data = np.zeros(length) ### test!!!
tmp_data = np.zeros(int(length))
tmp_data[:data_len] = data
return tmp_data
return data
def cal_zscore(data):
dmean = np.mean(data, axis=1)
dstd = np.std(data, axis=1)
norm_data = (data - dmean[:, None]) / dstd[:, None]
return norm_data
def cal_specs_matrix(raw, sfreq, method='STFT'):
win_len = 0.5
overlap = 0.8
freq_range = 300
half_width = win_len * sfreq
ch_num = raw.shape[0]
if method == 'STFT':
for i in range(ch_num):
if i % 10 == 0:
print(str(i) + '/' + str(ch_num))
time_signal = raw[i, :].ravel()
time_signal = pad_zero(time_signal, 2 * half_width)
f, t, hfo_spec = spectrogram(time_signal, fs=int(sfreq), nperseg=int(half_width),
noverlap=int(overlap * half_width),
nfft=1024, mode='magnitude')
hfo_new = 20 * np.log10(hfo_spec + 1e-10)
hfo_new = gaussian_filter(hfo_new, sigma=2)
freq_nums = int(len(f) * freq_range / f.max())
hfo_new = hfo_new[:freq_nums, :]
tmp_specs = np.reshape(hfo_new, (-1,))
if i == 0:
chan_specs = tmp_specs
else:
chan_specs = np.row_stack((chan_specs, tmp_specs))
f_cut = f[:freq_range]
return chan_specs, hfo_new.shape, t, f_cut
def norm_specs(specs):
specs_mean = specs - specs.mean(axis=0)
specs_norm = specs_mean / specs_mean.std(axis=0)
return specs_norm
def compute_full_band(raw_data, sfreq, ei):
ei_elec_num = 10
print('computing spectrogram')
raw_specs, spec_shape, t, f = cal_specs_matrix(raw_data, sfreq, 'STFT')
raw_specs_norm = norm_specs(raw_specs)
print('dimensionality reducing')
proj_pca = PCA(n_components=10)
# raw_specs_norm[np.where(raw_specs_norm == np.nan)] = 0
# raw_specs_norm[np.where(raw_specs_norm == np.inf)] = 0
spec_pca = proj_pca.fit_transform(raw_specs_norm)
top_elec_ind = np.argsort(-ei)[:ei_elec_num]
top_elec_pca = np.zeros([ei_elec_num, spec_pca.shape[1]])
for i in range(ei_elec_num):
top_elec_pca[i] = spec_pca[top_elec_ind[i]]
print('clustering')
k_num = choose_kmeans_k(spec_pca, range(2, 8))
tmp_kmeans = KMeans(n_clusters=k_num)
tmp_kmeans.fit(spec_pca)
pre_labels = tmp_kmeans.labels_
cluster_ind_ratio = find_ei_cluster_ratio(ei, pre_labels)
chosen_cluster_ind = np.where(pre_labels == cluster_ind_ratio)[0]
return spec_pca, pre_labels, chosen_cluster_ind
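# Hedged usage sketch, matching the call in fullband_computation_thread above (`raw_seeg` is
# a hypothetical channels-by-samples array):
#
#     spec_pca, labels, chosen_ind = compute_full_band(raw_seeg, sfreq, ei)
#     # spec_pca: 10-component PCA of per-channel spectrograms; labels: KMeans cluster per
#     # channel; chosen_ind: channels in the cluster dominated by the top-EI electrodes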
# main class
class IctalModule(QWidget, Ictal_gui):
def __init__(self,parent):
super(IctalModule, self).__init__()
self.setupUi(self)
self.parent=parent
# self.initUI()
# set functions
def center(self):
qr = self.frameGeometry()
cp = QDesktopWidget().availableGeometry().center()
qr.moveCenter(cp)
self.move(qr.topLeft())
# input edf data
def dialog_inputedfdata(self):
self.mat_filename, b = QFileDialog.getOpenFileName(self, 'open edf file', './', '(*.edf)')
if self.mat_filename:
# load data
self.patient_name = self.lineedit_patient_name.text()
self.edf_data = mne.io.read_raw_edf(self.mat_filename, preload=True, stim_channel=None)
self.preprocess_xw()
self.band_low = 1.0
self.band_high = 500
self.edf_time_max = self.modified_edf_data.shape[1] / self.fs
self.disp_flag = 0
self.data_fomat = 1 #edf
QMessageBox.information(self, '', 'data loaded')
# init display params
self.init_display_params()
self.disp_refresh()
# enable buttons
self.reset_data_display.setEnabled(True)
self.target_button.setEnabled(True)
self.baseline_button.setEnabled(True)
self.chans_del_button.setEnabled(True)
self.filter_button.setEnabled(True)
self.dis_up.setEnabled(True)
self.dis_down.setEnabled(True)
self.dis_add_mag.setEnabled(True)
self.dis_drop_mag.setEnabled(True)
self.dis_more_chans.setEnabled(True)
self.dis_less_chans.setEnabled(True)
self.dis_shrink_time.setEnabled(True)
self.dis_expand_time.setEnabled(True)
self.dis_left.setEnabled(True)
self.dis_right.setEnabled(True)
# init display
def init_display_params(self):
self.disp_chans_num = 20
self.disp_chans_start = 0
self.disp_wave_mul = 10
self.disp_time_win = 5
self.disp_time_start = 0
# self.baseline_pos = np.array([0.0, int(self.edf_time_max / 5)])
self.baseline_pos = np.array([0.0, 1.0])
self.target_pos = np.array([0.0, self.edf_time_max])
self.baseline_mouse = 0
self.target_mouse = 0
self.ei_target_start = self.target_pos[0]
self.ei_target_end = self.target_pos[1]
self.modified_edf_data = self.origin_data.copy()
self.disp_ch_names = self.origin_chans.copy()
self.chans_list.clear()
self.chans_list.addItems(self.disp_ch_names)
self.edf_time = self.modified_edf_data.shape[1]/self.fs
self.edf_nchans = len(self.chans_list)
self.edf_line_colors = np.array([cm.jet(x) for x in np.random.rand(self.edf_nchans)])
self.edf_dmin = self.modified_edf_data[:, :].min()
self.edf_dmax = self.modified_edf_data[:, :].max()
self.disp_press = 0.7
self.dr = (self.edf_dmax - self.edf_dmin) * self.disp_press
self.y0 = self.edf_dmin
self.y1 = (self.disp_chans_num - 1) * self.dr + self.edf_dmax
self.disp_flag = 0
# refresh display
def disp_refresh(self):
self.canvas.axes.cla()
self.canvas.axes.set_ylim(self.y0, self.y1)
segs = []
ticklocs = []
self.disp_start = int(self.disp_time_start*self.fs)
self.disp_end = int((self.disp_time_start + self.disp_time_win)*self.fs)
self.disp_end=min(self.disp_end,self.modified_edf_data.shape[1])
if self.disp_chans_num>=self.modified_edf_data.shape[0]:
self.disp_chans_start=0
self.disp_chans_num=self.modified_edf_data.shape[0]
elif self.disp_chans_start+self.disp_chans_num>=self.modified_edf_data.shape[0]:
self.disp_chans_start=self.modified_edf_data.shape[0]-self.disp_chans_num
for i in range(self.disp_chans_start, self.disp_chans_start + self.disp_chans_num):
tmp_data = self.modified_edf_data[i, self.disp_start:self.disp_end]
tmp_time = np.linspace(self.disp_start/self.fs, self.disp_end/self.fs, self.disp_end-self.disp_start)
tmp_data = tmp_data * self.disp_wave_mul
segs.append(np.hstack((tmp_time[:, np.newaxis], tmp_data[:, np.newaxis])))
ticklocs.append((i - self.disp_chans_start) * self.dr)
offsets = np.zeros((self.disp_chans_num, 2), dtype=float)
offsets[:, 1] = ticklocs
colors = self.edf_line_colors[self.disp_chans_start:self.disp_chans_start + self.disp_chans_num]
# linewidths=
lines = LineCollection(segs, offsets=offsets, linewidths=0.7,transOffset=None,colors='k') # ,colors=colors,transOffset=None)
disp_chan_names = self.disp_ch_names[
self.disp_chans_start:(self.disp_chans_start + self.disp_chans_num)]
self.canvas.axes.set_xlim(segs[0][0, 0], segs[0][-1, 0])
self.canvas.axes.add_collection(lines)
self.canvas.axes.set_yticks(ticklocs)
self.canvas.axes.set_yticklabels(disp_chan_names)
self.canvas.axes.set_ylim(self.edf_dmin, (self.disp_chans_num - 1) * self.dr + self.edf_dmax)
self.canvas.axes.set_xlabel('time(s)')
#add first line
if hasattr(self,'baseline_count') and self.baseline_count==1 and (self.baseline_pos[0]>segs[0][0,0] and self.baseline_pos[0]<segs[0][-1,0]):
self.canvas.axes.axvline(self.baseline_pos[0])
if hasattr(self,'target_count') and self.target_count==1 and (self.target_pos[0]>segs[0][0,0] and self.target_pos[0]<segs[0][-1,0]):
self.canvas.axes.axvline(self.target_pos[0])
self.canvas.draw()
# preprecess xw
def preprocess_xw(self):
self.fs = self.edf_data.info['sfreq']
self.disp_ch_names = self.edf_data.ch_names
self.chans_list.addItems(self.disp_ch_names)
# self.modified_edf_data, self.times = self.edf_data[:]
self.origin_data, self.times = self.edf_data[:]
# self.origin_data = self.modified_edf_data.copy()
self.modified_edf_data=self.origin_data.copy()
self.origin_chans = self.disp_ch_names.copy()
# disp button slot functions
def reset_data_display_func(self):
self.target_pos = np.array([0.0, self.edf_time_max])
self.baseline_pos = np.array([0.0, 1.0])
self.init_display_params()
self.disp_refresh()
self.ei_button.setEnabled(False)
self.hfer_button.setEnabled(False)
self.fullband_button.setEnabled(False)
def origin_data_display_func(self):
self.disp_flag = 0
self.disp_refresh()
def disp_win_down_func(self):
self.disp_chans_start -= self.disp_chans_num
if self.disp_chans_start <= 0:
self.disp_chans_start = 0
self.disp_refresh()
def disp_win_up_func(self):
self.disp_chans_start += self.disp_chans_num
# if self.disp_chans_start + self.disp_chans_num >= self.edf_nchans:
if self.disp_chans_start + self.disp_chans_num >= self.modified_edf_data.shape[0]:
# self.disp_chans_start = self.edf_nchans - self.disp_chans_num-1
self.disp_chans_start = self.modified_edf_data.shape[0] - self.disp_chans_num
self.disp_refresh()
def disp_more_chans_func(self):
self.disp_chans_num *= 2
# if self.disp_chans_num >= self.edf_nchans:
if self.disp_chans_num >= self.modified_edf_data.shape[0]:
self.disp_chans_start=0
self.disp_chans_num = self.modified_edf_data.shape[0]
elif self.disp_chans_start+self.disp_chans_num>=self.modified_edf_data.shape[0]:
self.disp_chans_start=self.modified_edf_data.shape[0]-self.disp_chans_num
self.disp_refresh()
def disp_less_chans_func(self):
self.disp_chans_num = int(self.disp_chans_num / 2.0)
if self.disp_chans_num <= 1:
self.disp_chans_num = 1
self.disp_refresh()
def disp_add_mag_func(self):
self.disp_wave_mul *= 1.5
print(self.disp_wave_mul)
self.disp_refresh()
def disp_drop_mag_func(self):
self.disp_wave_mul *= 0.75
print(self.disp_wave_mul)
self.disp_refresh()
def disp_win_left_func(self):
self.disp_time_start -= 0.2 * self.disp_time_win
if self.disp_time_start <= 0:
self.disp_time_start = 0
self.disp_refresh()
def disp_win_right_func(self):
self.disp_time_start += 0.2 * self.disp_time_win
if self.disp_time_start + self.disp_time_win >= self.edf_time:
self.disp_time_start = self.edf_time - self.disp_time_win
self.disp_refresh()
def disp_shrink_time_func(self):
self.disp_time_win += 2
if self.disp_time_win >= self.edf_time:
self.disp_time_win = self.edf_time
self.disp_refresh()
def disp_expand_time_func(self):
self.disp_time_win -= 2
if self.disp_time_win <= 2:
self.disp_time_win = 2
self.disp_refresh()
def disp_scroll_mouse(self, e):
if e.button == 'up':
self.disp_win_left_func()
elif e.button == 'down':
self.disp_win_right_func()
# ei functions
# filter & del chans
def filter_data(self):
self.modified_edf_data=self.modified_edf_data-np.mean(self.modified_edf_data,axis=0)
#notch filter
notch_freqs=np.arange(50,151,50)
for nf in notch_freqs:
tb,ta=iirnotch(nf/(self.fs/2),30)
self.modified_edf_data=filtfilt(tb,ta,self.modified_edf_data,axis=-1)
#band filter
self.band_low = float(self.disp_filter_low.text())
self.band_high = float(self.disp_filter_high.text())
nyq = self.fs/2
b, a = butter(5, np.array([self.band_low/nyq, self.band_high/nyq]), btype = 'bandpass')
self.modified_edf_data = filtfilt(b,a,self.modified_edf_data)
self.disp_flag = 1
self.disp_refresh()
self.ei_button.setEnabled(True)
self.hfer_button.setEnabled(True)
def delete_chans(self):
deleted_chans = self.chans_list.selectedItems()
deleted_list = [i.text() for i in deleted_chans]
deleted_ind_list = []
for deleted_name in deleted_list:
deleted_ind_list.append(self.disp_ch_names.index(deleted_name))
new_modified_data = np.delete(self.modified_edf_data, deleted_ind_list, axis=0)
self.modified_edf_data = new_modified_data
for d_chan in deleted_list:
self.disp_ch_names.remove(d_chan)
self.chans_list.clear()
self.chans_list.addItems(self.disp_ch_names)
self.disp_refresh()
# select base time & target time
def choose_baseline(self):
self.baseline_mouse = 1
self.baseline_count = 0
def choose_target(self):
self.target_mouse = 1
self.target_count = 0
def canvas_press_button(self, e):
if hasattr(self,'baseline_mouse') and self.baseline_mouse == 1:
self.baseline_pos[self.baseline_count] = e.xdata
print(e.xdata)
self.canvas.axes.axvline(e.xdata)
self.canvas.draw()
self.baseline_count += 1
if self.baseline_count == 2:
self.baseline_mouse = 0
print('baseline time', self.baseline_pos)
reply = QMessageBox.question(self, 'confirm', 'confirm baseline?', QMessageBox.Yes | QMessageBox.No,
QMessageBox.Yes)
if reply == QMessageBox.Yes:
pass
else:
self.baseline_pos = np.array([0.0, 1.0])
self.disp_refresh()
elif hasattr(self,'target_mouse') and self.target_mouse == 1:
self.target_pos[self.target_count] = e.xdata
self.canvas.axes.axvline(e.xdata)
self.canvas.draw()
self.target_count += 1
if self.target_count == 2:
self.target_mouse = 0
print('target time', self.target_pos)
reply = QMessageBox.question(self, 'confirm', 'confirm target time?', QMessageBox.Yes | QMessageBox.No,
QMessageBox.Yes)
if reply == QMessageBox.Yes:
self.disp_time_start = self.target_pos[0]
self.disp_time_win = self.target_pos[1] - self.target_pos[0]
self.disp_refresh()
else:
self.target_pos = np.array([0.0, self.edf_time_max])
self.disp_refresh()
self.canvas.axes.axvline(self.baseline_pos[0])
self.canvas.axes.axvline(self.baseline_pos[1])
self.canvas.draw()
else:
pass
# ei computation
def ei_computation_func(self):
# local
QMessageBox.information(self,'','EI computation starting, please wait')
self.ei_base_start = int(self.baseline_pos[0]*self.fs)
self.ei_base_end = int(self.baseline_pos[1]*self.fs)
self.ei_target_start = int(self.target_pos[0]*self.fs)
self.ei_target_end = int(self.target_pos[1]*self.fs)
self.ei_baseline_data = self.modified_edf_data.copy()[:, self.ei_base_start:self.ei_base_end]
self.ei_target_data = self.modified_edf_data.copy()[:, self.ei_target_start:self.ei_target_end]
self.ei_norm_target, self.ei_norm_base = compute_hfer(self.ei_target_data, self.ei_baseline_data, self.fs)
self.ei_ei, self.ei_hfer, self.ei_onset_rank = compute_ei_index(self.ei_norm_target, self.ei_norm_base,
self.fs)
#for click-display signals
self.tmp_origin_edf_data = self.origin_data.copy()
remain_chInd = np.array([x in self.disp_ch_names for x in self.origin_chans])
self.tmp_origin_remainData = self.tmp_origin_edf_data[remain_chInd]
self.tmp_origin_remainData = self.tmp_origin_remainData - np.mean(self.tmp_origin_remainData, axis=0)
# notch filt
notch_freqs = np.arange(50, 151, 50)
for nf in notch_freqs:
tb, ta = iirnotch(nf / (self.fs / 2), 30)
self.tmp_origin_remainData = filtfilt(tb, ta, self.tmp_origin_remainData, axis=-1)
print('finish ei computation')
self.fullband_button.setEnabled(True)
self.ei_plot_xw_func()
# hfer computation
def hfer_computation_func(self):
QMessageBox.information(self,'','HFER computation starting, please wait')
self.hfer_base_start = int(self.baseline_pos[0]*self.fs)
self.hfer_base_end = int(self.baseline_pos[1]*self.fs)
self.hfer_target_start = int(self.target_pos[0]*self.fs)
self.hfer_target_end = int(self.target_pos[1]*self.fs)
self.hfer_baseline = self.modified_edf_data[:, self.hfer_base_start:self.hfer_base_end]
self.hfer_target = self.modified_edf_data[:, self.hfer_target_start:self.hfer_target_end]
self.norm_target, self.norm_base = compute_hfer(self.hfer_target, self.hfer_baseline, self.fs)
hfer_fig = plt.figure('hfer')
# hfer
hfer_ax = hfer_fig.add_axes([0.1, 0.1, 0.7, 0.8])
tmp_x, tmp_y = np.meshgrid(np.linspace(self.hfer_target_start, self.hfer_target_end, self.norm_target.shape[1]),
np.arange(self.norm_target.shape[0] + 1))
surf = hfer_ax.pcolormesh(tmp_x, tmp_y, self.norm_target, cmap=plt.cm.hot, vmax=50, vmin=0)
if 'ei_channel_onset' in dir(self):
hfer_ax.plot(self.hfer_target_start + self.ei_channel_onset, np.arange(len(self.ei_channel_onset)) + 0.5,
'ko')
hfer_ax.set_xticks(np.arange(self.hfer_target_start, self.hfer_target_start + self.norm_target.shape[1], 2000))
hfer_ax.set_xticklabels(np.rint(np.arange(self.hfer_target_start, self.hfer_target_start + self.norm_target.shape[1],
2000) / float(self.fs)).astype(np.int16))
hfer_ax.set_xlabel('time(s)')
hfer_ax.set_ylabel('channels')
hfer_fig.canvas.mpl_connect('button_press_event', self.hfer_press_func)
# colorbar
color_bar_ax = hfer_fig.add_axes([0.85, 0.1, 0.02, 0.8])
plt.colorbar(surf, cax=color_bar_ax, orientation='vertical')
plt.show()
# press hfer to show original signal and spectrogram
def hfer_press_func(self, e):
chosen_elec_index = int(e.ydata) # int(round(e.ydata))
# compute spectrogram
elec_name = self.disp_ch_names[chosen_elec_index]
raw_data_indx = self.disp_ch_names.index(elec_name)
tmp_origin_edf_data = self.tmp_origin_remainData
tmp_data = tmp_origin_edf_data[raw_data_indx, self.hfer_target_start:self.hfer_target_end]
tmp_time_target = np.linspace(self.hfer_target_start/self.fs,self.hfer_target_end/self.fs,
int((self.hfer_target_end-self.hfer_target_start)))
fig = plt.figure('signal')
ax1 = fig.add_axes([0.2, 0.6, 0.6, 0.3])
ax1.cla()
ax1.set_title(elec_name + ' signal')
if self.data_fomat == 1:
tmp_data_plot = tmp_data*1000
elif self.data_fomat == 0:
tmp_data_plot = tmp_data/1000
ax1.plot(tmp_time_target, tmp_data_plot)
ax1.set_xlabel('time(s)')
ax1.set_ylabel('signal(mV)')
ax1.set_xlim(tmp_time_target[0], tmp_time_target[-1])
ax1_ymax = np.abs(tmp_data_plot).max()
ax1.set_ylim([-ax1_ymax, ax1_ymax])
# ax2
ax2 = fig.add_axes([0.2, 0.15, 0.6, 0.3])
ax2.cla()
ax2.set_title(elec_name + ' spectrogram')
f, t, sxx = spectrogram(x=tmp_data, fs=int(self.fs), nperseg=int(0.5 * self.fs),
noverlap=int(0.9 * 0.5 * self.fs), nfft=1024, mode='magnitude')
sxx = (sxx - np.mean(sxx, axis=1, keepdims=True)) / np.std(sxx, axis=1, keepdims=True)
sxx = gaussian_filter(sxx, sigma=2)
spec_time = np.linspace(t[0] + tmp_time_target[0], t[-1] + tmp_time_target[0], sxx.shape[1])
spec_f_max = 300
spec_f_nums = int(len(f) * spec_f_max / f.max())
spec_f = np.linspace(0, spec_f_max, spec_f_nums)
spec_sxx = sxx[:spec_f_nums, :]
spec_time, spec_f = np.meshgrid(spec_time, spec_f)
surf = ax2.pcolormesh(spec_time, spec_f, spec_sxx, cmap=plt.cm.hot, vmax=2, vmin=-0.8, shading='auto')
ax2.set_xlabel('time(s)')
ax2.set_ylabel('frequency(hz)')
ax2.set_ylim((0, spec_f_max))
ax2.set_xlim(tmp_time_target[0], tmp_time_target[-1])
position = fig.add_axes([0.85, 0.15, 0.02, 0.3])
cb = plt.colorbar(surf, cax=position)
plt.show()
def ei_plot_xw_func(self):
ei_mu = np.mean(self.ei_ei)
ei_std = np.std(self.ei_ei)
self.ei_thresh = ei_mu + ei_std
self.ei_ei_fig = plt.figure('ei')
ei_ei_ax = self.ei_ei_fig.add_subplot(111)
ei_hfer_fig = plt.figure('hfer')
ei_hfer_ax = ei_hfer_fig.add_subplot(111)
ei_onset_rank_fig = plt.figure('onset')
ei_onset_rank_ax = ei_onset_rank_fig.add_subplot(111)
ei_data = np.stack([self.ei_hfer, self.ei_onset_rank], axis=0)
title_data = ['High frequency Energy Coefficient', 'Time Coefficient']
print(len(ei_data))
ei_axes = [ei_hfer_ax, ei_onset_rank_ax]
ei_ei_ax.bar(range(len(self.ei_ei)), self.ei_ei)
ei_ei_ax.set_title('High Frequency Epileptogenicity Index')
ei_ind = list(np.squeeze(np.where(self.ei_ei > self.ei_thresh)))
print(ei_ind)
for ind in ei_ind:
print(ind)
ei_ei_ax.text(ind-0.8, self.ei_ei[ind]+0.01, self.disp_ch_names[ind], fontsize=8, color='k')
ei_ei_ax.plot(np.arange(len(self.ei_ei)), self.ei_thresh * np.ones(len(self.ei_ei)), 'r--')
for i in range(len(ei_data)):
ei_axes[i].bar(range(len(ei_data[i])), ei_data[i])
ei_axes[i].set_title(title_data[i])
self.ei_ei_fig.canvas.mpl_connect('button_press_event', self.ei_press_func)
plt.show()
def ei_press_func(self, e):
if e.button == 1:
chosen_elec_index = int(round(e.xdata))
# compute spectrum
elec_name = self.disp_ch_names[chosen_elec_index]
raw_data_indx = self.disp_ch_names.index(elec_name)
tmp_origin_edf_data = self.tmp_origin_remainData
tmp_data = tmp_origin_edf_data[raw_data_indx, self.ei_target_start:self.ei_target_end]
tmp_time_target = np.linspace(self.ei_target_start/self.fs, self.ei_target_end/self.fs,
int((self.ei_target_end - self.ei_target_start)))
fig = plt.figure('signal')
ax1 = fig.add_axes([0.2, 0.6, 0.6, 0.3])
ax1.cla()
ax1.set_title(elec_name + ' signal')
if self.data_fomat == 1:
tmp_data_plot = tmp_data * 1000
elif self.data_fomat == 0:
tmp_data_plot = tmp_data/1000
ax1.plot(tmp_time_target, tmp_data_plot)
ax1.set_xlabel('time(s)')
ax1.set_ylabel('signal(mV)')
ax1.set_xlim(tmp_time_target[0], tmp_time_target[-1])
ax1_ymax = np.abs(tmp_data_plot).max()
ax1.set_ylim([-ax1_ymax, ax1_ymax])
# ax2
ax2 = fig.add_axes([0.2, 0.15, 0.6, 0.3])
ax2.cla()
ax2.set_title(elec_name + ' spectrogram')
f, t, sxx = spectrogram(x=tmp_data, fs=int(self.fs), nperseg=int(0.5 * self.fs),
noverlap=int(0.9 * 0.5 * self.fs), nfft=1024, mode='magnitude')
sxx = (sxx - np.mean(sxx, axis=1, keepdims=True)) / np.std(sxx, axis=1, keepdims=True)
sxx = gaussian_filter(sxx, sigma=2)
spec_time = np.linspace(t[0] + tmp_time_target[0], t[-1] + tmp_time_target[0], sxx.shape[1])
spec_f_max = 300
spec_f_nums = int(len(f) * spec_f_max / f.max())
spec_f = np.linspace(0, spec_f_max, spec_f_nums)
spec_sxx = sxx[:spec_f_nums, :]
spec_time, spec_f = np.meshgrid(spec_time, spec_f)
surf = ax2.pcolormesh(spec_time, spec_f, spec_sxx, cmap=plt.cm.hot, vmax=2, vmin=-0.8, shading='auto')
ax2.set_xlabel('time(s)')
ax2.set_ylabel('frequency(hz)')
ax2.set_ylim((0, spec_f_max))
ax2.set_xlim(tmp_time_target[0], tmp_time_target[-1])
position = fig.add_axes([0.85, 0.15, 0.02, 0.3])
cb = plt.colorbar(surf, cax=position)
plt.show()
elif e.button == 3:
self.ei_thresh = e.ydata
print(self.ei_thresh)
self.ei_ei_fig.clf()
ei_ei_ax = self.ei_ei_fig.add_axes([0.1, 0.1, 0.75, 0.8])
# ei_ei_ax = plt.axes()
ei_ei_ax.bar(range(len(self.ei_ei)), self.ei_ei)
ei_ei_ax.set_title('High Frequency Epileptogenicity Index')
ei_ind = list(np.squeeze(np.where(self.ei_ei > self.ei_thresh)))
print(ei_ind)
for ind in ei_ind:
print(ind)
ei_ei_ax.text(ind - 0.8, self.ei_ei[ind] + 0.01, self.disp_ch_names[ind], fontsize=8, color='k')
ei_ei_ax.plot(np.arange(len(self.ei_ei)), self.ei_thresh * np.ones(len(self.ei_ei)), 'r--')
axthresh = plt.axes([0.9, 0.1, 0.02, 0.8])
plt.show()
# full band computation
def fullband_computation_func(self):
self.fullband_button.setEnabled(False)
self.fullband_base_start = int(self.baseline_pos[0] * self.fs)
self.fullband_base_end = int(self.baseline_pos[1] * self.fs)
self.fullband_target_start = int(self.target_pos[0] * self.fs)
self.fullband_target_end = int(self.target_pos[1] * self.fs)
self.fullband_target = self.tmp_origin_remainData[:, self.fullband_target_start:self.fullband_target_end]
QMessageBox.information(self, '', 'full band computation starting, please wait')
self.fullband_thread = fullband_computation_thread(parent=self, raw_signal=self.fullband_target, ei=self.ei_ei,
fs=self.fs)
self.fullband_thread.fullband_done_sig.connect(self.fullband_plot_func)
self.fullband_thread.start()
# full band plot function
def fullband_plot_func(self, fullband_res):
QMessageBox.information(self, '', 'fullband computation done')
self.fullband_button.setEnabled(True)
self.spec_pca = fullband_res[0]
self.fullband_labels = fullband_res[1]
self.fullband_ind = fullband_res[2]
chs_labels = np.array(self.disp_ch_names)[self.fullband_ind]
print('electrodes:', chs_labels)
fullband_fig = plt.figure('full_band')
fullband_ax = fullband_fig.add_subplot(111)
fullband_fig.canvas.mpl_connect('button_press_event', self.fullband_press_func)
fullband_ax.scatter(self.spec_pca[:, 0], self.spec_pca[:, 1], alpha=0.8, c=self.fullband_labels)
for ind in self.fullband_ind:
fullband_ax.text(self.spec_pca[ind, 0], self.spec_pca[ind, 1], self.disp_ch_names[ind],
fontsize=8, color='k')
plt.show()
def fullband_press_func(self, e):
pos_x = e.xdata
pos_y = e.ydata
distance = np.sum((np.array(self.spec_pca[:, 0:2]) - np.array([pos_x, pos_y])) ** 2, axis=-1)
chosen_elec_index = np.argmin(distance)
elec_name = self.disp_ch_names[chosen_elec_index]
raw_data_indx = self.disp_ch_names.index(elec_name)
tmp_origin_edf_data = self.tmp_origin_remainData
tmp_data = tmp_origin_edf_data[raw_data_indx, self.fullband_target_start:self.fullband_target_end]
tmp_time_target = np.linspace(self.fullband_target_start / self.fs, self.fullband_target_end / self.fs,
int((self.fullband_target_end - self.fullband_target_start)))
fig = plt.figure('signal')
ax1 = fig.add_axes([0.2, 0.6, 0.6, 0.3])
ax1.cla()
ax1.set_title(elec_name + ' signal')
if self.data_fomat == 1:
tmp_data_plot = tmp_data*1000
elif self.data_fomat == 0:
tmp_data_plot = tmp_data/1000
ax1.plot(tmp_time_target, tmp_data_plot)
ax1.set_xlabel('time(s)')
ax1.set_ylabel('signal(mV)')
ax1.set_xlim(tmp_time_target[0], tmp_time_target[-1])
ax1_ymax = np.abs(tmp_data_plot).max()
ax1.set_ylim([-ax1_ymax, ax1_ymax])
# ax2
ax2 = fig.add_axes([0.2, 0.15, 0.6, 0.3])
ax2.cla()
ax2.set_title(elec_name + ' spectrogram')
f, t, sxx = spectrogram(x=tmp_data, fs=int(self.fs), nperseg=int(0.5 * self.fs),
noverlap=int(0.9 * 0.5 * self.fs), nfft=1024, mode='magnitude')
sxx = (sxx - np.mean(sxx, axis=1, keepdims=True)) / np.std(sxx, axis=1, keepdims=True)
#!/usr/bin/env python
"""
This is the module for producing predstorm plots.
Author: <NAME>, <NAME>, <NAME>, Austria
started May 2019, last update May 2019
Python 3.7
Issues:
- ...
To-dos:
- ...
Future steps:
- ...
"""
import os
import sys
import copy
import logging
import logging.config
import numpy as np
import pdb
import seaborn as sns
import scipy.signal as signal
import matplotlib.dates as mdates
from matplotlib.dates import date2num, num2date, DateFormatter
import matplotlib.pyplot as plt
import matplotlib.colors as mcolors
from matplotlib.patches import Polygon
from datetime import datetime, timedelta
from glob import iglob
import json
import urllib
from .config import plotting as pltcfg
logger = logging.getLogger(__name__)
# =======================================================================================
# --------------------------- PLOTTING FUNCTIONS ----------------------------------------
# =======================================================================================
def plot_solarwind_and_dst_prediction(DSCOVR_data, STEREOA_data, DST_data, DSTPRED_data, newell_coupling=None, dst_label='Dst Temerin & Li 2002', past_days=3.5, future_days=7., verification_mode=False, timestamp=None, times_3DCORE=[], times_nans={}, outfile='predstorm_real.png', **kwargs):
"""
Plots solar wind variables, past from DSCOVR and future/predicted from STEREO-A.
Total B-field and Bz (top), solar wind speed (second), particle density (third)
and Dst (fourth) from Kyoto and model prediction.
Parameters
==========
DSCOVR_data : list[minute data, hourly data]
DSCOVR data in different time resolutions.
STEREOA_data : list[minute data, hourly data]
STEREO-A data in different time resolutions.
DST_data : predstorm_module.SatData
Kyoto Dst
DSTPRED_data : predstorm_module.SatData
Dst predicted by PREDSTORM.
dst_label : str (default='Dst Temerin & Li 2002')
Label describing the Dst prediction method being plotted.
past_days : float (default=3.5)
Number of days in the past to plot.
future_days : float (default=7.)
Number of days into the future to plot.
lw : int (default=1)
Linewidth for plotting functions.
fs : int (default=11)
Font size for all text in plot.
ms : int (default=5)
Marker size for markers in plot.
figsize : tuple(float=width, float=height) (default=(14,12))
Figure size (in inches) for output file.
verification_mode : bool (default=False)
If True, verification mode will produce a plot of the predicted Dst
for model verification purposes.
timestamp : datetime obj
Time for 'now' label in plot.
Returns
=======
plt.savefig : .png file
File saved to XXX
"""
figsize = kwargs.get('figsize', pltcfg.figsize)
lw = kwargs.get('lw', pltcfg.lw)
fs = kwargs.get('fs', pltcfg.fs)
date_fmt = kwargs.get('date_fmt', pltcfg.date_fmt)
c_dst = kwargs.get('c_dst', pltcfg.c_dst)
c_dis = kwargs.get('c_dis', pltcfg.c_dis)
c_ec = kwargs.get('c_ec', pltcfg.c_ec)
c_sta = kwargs.get('c_sta', pltcfg.c_sta)
c_sta_dst = kwargs.get('c_sta_dst', pltcfg.c_sta_dst)
c_btot = kwargs.get('c_btot', pltcfg.c_btot)
c_bx = kwargs.get('c_bx', pltcfg.c_bx)
c_by = kwargs.get('c_by', pltcfg.c_by)
c_bz = kwargs.get('c_bz', pltcfg.c_bz)
ms_dst = kwargs.get('c_dst', pltcfg.ms_dst)
fs_legend = kwargs.get('fs_legend', pltcfg.fs_legend)
fs_ylabel = kwargs.get('fs_legend', pltcfg.fs_ylabel)
fs_title = kwargs.get('fs_title', pltcfg.fs_title)
# Set style:
sns.set_context(pltcfg.sns_context)
sns.set_style(pltcfg.sns_style)
# Make figure object:
fig=plt.figure(1,figsize=figsize)
axes = []
# Set data objects:
stam, sta = STEREOA_data
dism, dis = DSCOVR_data
dst = DST_data
dst_pred = DSTPRED_data
text_offset = past_days # days (for 'fast', 'intense', etc.)
# For the minute data, check which are the intervals to show for STEREO-A until end of plot
i_fut = np.where(np.logical_and(stam['time'] > dism['time'][-1], \
stam['time'] < dism['time'][-1]+future_days))[0]
if timestamp == None:
timestamp = datetime.utcnow()
timeutc = mdates.date2num(timestamp)
if newell_coupling == None:
n_plots = 4
else:
n_plots = 5
plotstart = timeutc - past_days
plotend = timeutc + future_days - 3./24.
# SUBPLOT 1: Total B-field and Bz
# -------------------------------
ax1 = fig.add_subplot(n_plots,1,1)
axes.append(ax1)
# Total B-field and Bz (DSCOVR)
plst = 2
plt.plot_date(dism['time'][::plst], dism['btot'][::plst],'-', c=c_btot, label='$B_{tot}$', linewidth=lw)
plt.plot_date(dism['time'][::plst], dism['bx'][::plst],'-', c=c_bx, label='$B_x$', linewidth=lw)
plt.plot_date(dism['time'][::plst], dism['by'][::plst],'-', c=c_by, label='$B_y$', linewidth=lw)
plt.plot_date(dism['time'][::plst], dism['bz'][::plst],'-', c=c_bz, label='$B_z$', linewidth=lw)
# STEREO-A minute resolution data with timeshift
plt.plot_date(stam['time'][i_fut], stam['btot'][i_fut], '-', c=c_btot, alpha=0.5, linewidth=0.5)
plt.plot_date(stam['time'][i_fut], stam['br'][i_fut], '-', c=c_bx, alpha=0.5, linewidth=0.5)
plt.plot_date(stam['time'][i_fut], stam['bt'][i_fut], '-', c=c_by, alpha=0.5, linewidth=0.5)
plt.plot_date(stam['time'][i_fut], stam['bn'][i_fut], '-', c=c_bz, alpha=0.5, linewidth=0.5)
# Indicate 0 level for Bz
plt.plot_date([plotstart,plotend], [0,0],'--k', alpha=0.5, linewidth=1)
plt.ylabel('Magnetic field [nT]', fontsize=fs_ylabel)
# For y limits check where the maximum and minimum are for DSCOVR and STEREO taken together:
bplotmax = np.nanmax(dism['btot'])+5
bplotmin = -bplotmax
plt.ylim(bplotmin, bplotmax)
if len(times_3DCORE) > 0:
plt.annotate('flux rope (3DCORE)', xy=(times_3DCORE[0],bplotmax-(bplotmax-bplotmin)*0.25),
xytext=(times_3DCORE[0]+0.05,bplotmax-(bplotmax-bplotmin)*0.95), color='gray', fontsize=14)
if 'stereo' in stam.source.lower():
pred_source = 'STEREO-Ahead Beacon'
elif 'dscovr' in stam.source.lower() or 'noaa' in stam.source.lower():
pred_source = '27-day SW-Recurrence Model (NOAA)'
plt.title('L1 real time solar wind from NOAA SWPC for '+ datetime.strftime(timestamp, "%Y-%m-%d %H:%M")+
' UT & {}'.format(pred_source), fontsize=fs_title)
# SUBPLOT 2: Solar wind speed
# ---------------------------
ax2 = fig.add_subplot(n_plots,1,2)
axes.append(ax2)
# Plot solar wind speed (DSCOVR):
plt.plot_date(dism['time'][::plst], dism['speed'][::plst],'-', c='black', label='speed',linewidth=lw)
plt.ylabel('Speed $\mathregular{[km \\ s^{-1}]}$', fontsize=fs_ylabel)
stam_speed_filt = signal.savgol_filter(stam['speed'],11,1)
if 'speed' in times_nans:
stam_speed_filt = np.ma.array(stam_speed_filt)
for times in times_nans['speed']:
stam_speed_filt = np.ma.masked_where(np.logical_and(stam['time'] > times[0], stam['time'] < times[1]), stam_speed_filt)
# Plot STEREO-A data with timeshift and savgol filter
plt.plot_date(stam['time'][i_fut], stam_speed_filt[i_fut],'-',
c='black', alpha=0.5, linewidth=lw, label='speed {}'.format(stam.source))
# Add speed levels:
pltcfg.plot_speed_lines(xlims=[plotstart, plotend])
# For y limits check where the maximum and minimum are for DSCOVR and STEREO taken together:
vplotmax=np.nanmax(np.concatenate((dism['speed'],stam_speed_filt[i_fut])))+100
vplotmin=np.nanmin(np.concatenate((dism['speed'],stam_speed_filt[i_fut]))-50)
plt.ylim(vplotmin, vplotmax)
plt.annotate('now', xy=(timeutc,vplotmax-(vplotmax-vplotmin)*0.25), xytext=(timeutc+0.05,vplotmax-(vplotmax-vplotmin)*0.25), color='k', fontsize=14)
# SUBPLOT 3: Solar wind density
# -----------------------------
ax3 = fig.add_subplot(n_plots,1,3)
axes.append(ax3)
stam_density_filt = signal.savgol_filter(stam['density'],5,1)
if 'density' in times_nans:
stam_density_filt = np.ma.array(stam_density_filt)
for times in times_nans['density']:
stam_density_filt = np.ma.masked_where(np.logical_and(stam['time'] > times[0], stam['time'] < times[1]), stam_density_filt)
# Plot solar wind density:
plt.plot_date(dism['time'], dism['density'],'-k', label='density L1',linewidth=lw)
plt.ylabel('Density $\mathregular{[ccm^{-3}]}$',fontsize=fs_ylabel)
# For y limits check where the maximum and minimum are for DSCOVR and STEREO taken together:
plt.ylim([0,np.nanmax(np.nanmax(np.concatenate((dism['density'],stam_density_filt[i_fut])))+10)])
#plot STEREO-A data with timeshift and savgol filter
plt.plot_date(stam['time'][i_fut], stam_density_filt[i_fut],
'-', c='black', alpha=0.5, linewidth=lw, label='density {}'.format(stam.source))
# SUBPLOT 4: Actual and predicted Dst
# -----------------------------------
ax4 = fig.add_subplot(n_plots,1,4)
axes.append(ax4)
# Observed Dst Kyoto (past):
plt.plot_date(dst['time'], dst['dst'],'o', c=c_dst, label='Dst observed',markersize=ms_dst)
plt.ylabel('Dst [nT]', fontsize=fs_ylabel)
dstplotmax = np.nanmax(np.concatenate((dst['dst'], dst_pred['dst'])))+20
dstplotmin = np.nanmin(np.concatenate((dst['dst'], dst_pred['dst'])))-20
if dstplotmin > -100: # Low activity (normal)
plt.ylim([-100, dstplotmax + 30])
else: # High activity
plt.ylim([dstplotmin, dstplotmax])
# Plot predicted Dst
dst_pred_past = dst_pred['time'] < date2num(timestamp)
plt.plot_date(dst_pred['time'][dst_pred_past], dst_pred['dst'][dst_pred_past], '-', c=c_sta_dst, label=dst_label, markersize=3, linewidth=1)
plt.plot_date(dst_pred['time'][~dst_pred_past], dst_pred['dst'][~dst_pred_past], '-', c=c_sta_dst, alpha=0.5, markersize=3, linewidth=1)
# Add prediction-interval error bars (values derived as described in the link below):
# Errors calculated using https://machinelearningmastery.com/prediction-intervals-for-machine-learning/
error_l1 = 5.038
error_l5 = 12.249
error_pers = 13.416
ih_fut = np.where(dst_pred['time'] > dis['time'][-1])[0]
ih_past = np.arange(0, ih_fut[0]+1)
# Error bars for data from L1:
plt.fill_between(dst_pred['time'][ih_past], dst_pred['dst'][ih_past]-error_l1, dst_pred['dst'][ih_past]+error_l1,
alpha=0.1, facecolor=c_sta_dst,
label=r'prediction interval +/- 1 & 2 $\sigma$ (68% and 95% significance)')
plt.fill_between(dst_pred['time'][ih_past], dst_pred['dst'][ih_past]-2*error_l1, dst_pred['dst'][ih_past]+2*error_l1,
alpha=0.1, facecolor=c_sta_dst)
# Error bars for data from L5/STEREO:#
plt.fill_between(dst_pred['time'][ih_fut], dst_pred['dst'][ih_fut]-error_l5, dst_pred['dst'][ih_fut]+error_l5,
alpha=0.1, facecolor=c_sta_dst)
plt.fill_between(dst_pred['time'][ih_fut], dst_pred['dst'][ih_fut]-2*error_l5, dst_pred['dst'][ih_fut]+2*error_l5,
alpha=0.1, facecolor=c_sta_dst)
# Label plot with geomagnetic storm levels
pltcfg.plot_dst_activity_lines(xlims=[plotstart, plotend])
# SUBPLOT 5: Newell Coupling
# --------------------------
if newell_coupling != None:
ax5 = fig.add_subplot(n_plots,1,5)
axes.append(ax5)
# Plot solar wind density:
ec_past = newell_coupling['time'] < date2num(timestamp)
avg_newell_coupling = newell_coupling.get_weighted_average('ec')
plt.plot_date(newell_coupling['time'][ec_past], avg_newell_coupling[ec_past]/4421., '-', color=c_ec, # past
label='Newell coupling 4h weighted mean',linewidth=1.5)
plt.plot_date(newell_coupling['time'][~ec_past], avg_newell_coupling[~ec_past]/4421., '-', color=c_ec, # future
alpha=0.5, linewidth=1.5)
plt.ylabel('Newell Coupling / 4421\n$\mathregular{[(km/s)^{4/3} nT^{2/3}]}$',fontsize=fs_ylabel)
# For y limits check where the maximum and minimum are for DSCOVR and STEREO taken together:
plt.ylim([0,np.nanmax(avg_newell_coupling/4421.)*1.2])
# Indicate level of interest (Ec/4421 = 1.0)
plt.plot_date([plotstart,plotend], [1,1],'--k', alpha=0.5, linewidth=1)
# GENERAL FORMATTING
# ------------------
for ax in axes:
ax.set_xlim([plotstart,plotend])
ax.tick_params(axis="x", labelsize=fs)
ax.tick_params(axis="y", labelsize=fs)
ax.legend(loc=2,ncol=4,fontsize=fs_legend)
# Dates on x-axes:
myformat = mdates.DateFormatter(date_fmt)
ax.xaxis.set_major_formatter(myformat)
# Vertical line for NOW:
ax.plot_date([timeutc,timeutc],[-2000,100000],'-k', linewidth=2)
# Indicate where prediction comes from 3DCORE:
if len(times_3DCORE) > 0:
ax.plot_date([times_3DCORE[0],times_3DCORE[0]],[-2000,100000], color='gray', linewidth=1, linestyle='--')
ax.plot_date([times_3DCORE[-1],times_3DCORE[-1]],[-2000,100000], color='gray', linewidth=1, linestyle='--')
# Liability text:
pltcfg.group_info_text()
pltcfg.liability_text()
#save plot
if not verification_mode:
plot_label = 'realtime'
else:
plot_label = 'verify'
filename = os.path.join('results','predstorm_v1_{}_stereo_a_plot_{}.png'.format(
plot_label, datetime.strftime(timestamp, "%Y-%m-%d-%H_%M")))
filename_eps = filename.replace('png', 'eps')
if not verification_mode:
plt.savefig(outfile)
logger.info('Real-time plot saved as {}!'.format(outfile))
#if not server: # Just plot and exit
# plt.show()
# sys.exit()
plt.savefig(filename)
logger.info('Plot saved as png:\n'+ filename)
def plot_solarwind_science(DSCOVR_data, STEREOA_data, verification_mode=False, timestamp=None, past_days=7, future_days=7, plot_step=20, outfile='predstorm_science.png', **kwargs):
"""
Plots solar wind variables, past from DSCOVR and future/predicted from STEREO-A.
Total B-field and Bz (top), solar wind speed (second), particle density (third)
and Dst (fourth) from Kyoto and model prediction.
Parameters
==========
DSCOVR_data : list[minute data, hourly data]
DSCOVR data in different time resolutions.
STEREOA_data : list[minute data, hourly data]
STEREO-A data in different time resolutions.
lw : int (default=1)
Linewidth for plotting functions.
fs : int (default=11)
Font size for all text in plot.
ms : int (default=5)
Marker size for markers in plot.
figsize : tuple(float=width, float=height) (default=(14,12))
Figure size (in inches) for output file.
verification_mode : bool (default=False)
If True, verification mode will produce a plot of the predicted Dst
for model verification purposes.
timestamp : datetime obj
Time for 'now' label in plot.
Returns
=======
plt.savefig : .png file
File saved to XXX
"""
figsize = kwargs.get('figsize', pltcfg.figsize)
lw = kwargs.get('lw', pltcfg.lw)
fs = kwargs.get('fs', pltcfg.fs)
date_fmt = kwargs.get('date_fmt', pltcfg.date_fmt)
c_dst = kwargs.get('c_dst', pltcfg.c_dst)
c_dis = kwargs.get('c_dis', pltcfg.c_dis)
c_ec = kwargs.get('c_ec', pltcfg.c_ec)
c_sta = kwargs.get('c_sta', pltcfg.c_sta)
c_sta_dst = kwargs.get('c_sta_dst', pltcfg.c_sta_dst)
ms_dst = kwargs.get('c_dst', pltcfg.ms_dst)
fs_legend = kwargs.get('fs_legend', pltcfg.fs_legend)
fs_ylabel = kwargs.get('fs_legend', pltcfg.fs_ylabel)
fs_title = kwargs.get('fs_title', pltcfg.fs_title)
# Set style:
sns.set_context(pltcfg.sns_context)
sns.set_style(pltcfg.sns_style)
# Make figure object:
fig = plt.figure(1,figsize=(20,8))
axes = []
# Set data objects:
stam, sta = STEREOA_data
dism, dis = DSCOVR_data
# For the minute data, check which are the intervals to show for STEREO-A until end of plot
sta_index_future=np.where(np.logical_and(stam['time'] > dism['time'][-1], \
stam['time'] < dism['time'][-1]+future_days))[0]
if timestamp == None:
timestamp = datetime.utcnow()
timeutc = mdates.date2num(timestamp)
n_plots = 3
plst = plot_step
plotstart = timeutc - past_days
plotend = timeutc + future_days
# SUBPLOT 1: Total B-field and Bz
# -------------------------------
ax1 = fig.add_subplot(n_plots,1,1)
axes.append(ax1)
# Total B-field and Bz (DSCOVR)
plt.plot_date(dism['time'][::plst], dism['btot'][::plst],'-', c='black', label='B', linewidth=lw)
plt.plot_date(dism['time'][::plst], dism['bx'][::plst],'-', c='teal', label='Bx', linewidth=lw)
plt.plot_date(dism['time'][::plst], dism['by'][::plst],'-', c='orange', label='By', linewidth=lw)
plt.plot_date(dism['time'][::plst], dism['bz'][::plst],'-', c='purple', label='Bz', linewidth=lw)
# STEREO-A minute resolution data with timeshift
plt.plot_date(stam['time'][sta_index_future], stam['btot'][sta_index_future],
'-', c='black', alpha=0.5, linewidth=0.5)
plt.plot_date(stam['time'][sta_index_future], stam['br'][sta_index_future],
'-', c='teal', alpha=0.5, linewidth=0.5)
plt.plot_date(stam['time'][sta_index_future], stam['bt'][sta_index_future],
'-', c='orange', alpha=0.5, linewidth=0.5)
plt.plot_date(stam['time'][sta_index_future], stam['bn'][sta_index_future],
'-', c='purple', alpha=0.5, linewidth=0.5)
# Indicate 0 level for Bz
plt.plot_date([plotstart,plotend], [0,0],'--k', alpha=0.5, linewidth=1)
plt.ylabel('Magnetic field [nT]', fontsize=fs_ylabel)
# For y limits check where the maximum and minimum are for DSCOVR and STEREO taken together:
bplotmax=np.nanmax(np.concatenate((dism['btot'],stam['btot'][sta_index_future])))+5
bplotmin=np.nanmin(np.concatenate((dism['bz'],stam['bn'][sta_index_future]))-5)
plt.ylim((-13, 13))
if 'stereo' in stam.source.lower():
pred_source = 'STEREO-Ahead Beacon'
elif 'dscovr' in stam.source.lower() or 'noaa' in stam.source.lower():
pred_source = '27-day SW-Recurrence Model (NOAA)'
plt.title('L1 real time solar wind from NOAA SWPC for '+ datetime.strftime(timestamp, "%Y-%m-%d %H:%M")+ ' UT & {}'.format(pred_source), fontsize=fs_title)
# SUBPLOT 2: Solar wind speed
# ---------------------------
ax2 = fig.add_subplot(n_plots,1,2)
axes.append(ax2)
# Plot solar wind speed (DSCOVR):
plt.plot_date(dism['time'][::plst], dism['speed'][::plst],'-', c='black', label='speed',linewidth=lw)
plt.ylabel('Speed $\mathregular{[km \\ s^{-1}]}$', fontsize=fs_ylabel)
# Plot STEREO-A data with timeshift and savgol filter
plt.plot_date(stam['time'][sta_index_future],signal.savgol_filter(stam['speed'][sta_index_future],11,1),'-',
c='black', alpha=0.5, linewidth=lw)
# Add speed levels:
pltcfg.plot_speed_lines(xlims=[plotstart, plotend])
# For y limits check where the maximum and minimum are for DSCOVR and STEREO taken together:
vplotmax=np.nanmax(np.concatenate((dism['speed'],signal.savgol_filter(stam['speed'][sta_index_future],11,1))))+100
vplotmin=np.nanmin(np.concatenate((dism['speed'],signal.savgol_filter(stam['speed'][sta_index_future],11,1)))-50)
plt.ylim(vplotmin, vplotmax)
plt.annotate('now', xy=(timeutc,vplotmax-(vplotmax-vplotmin)*0.25), xytext=(timeutc+0.05,vplotmax-(vplotmax-vplotmin)*0.25), color='k', fontsize=14)
# SUBPLOT 3: Solar wind density
# -----------------------------
ax3 = fig.add_subplot(n_plots,1,3)
axes.append(ax3)
# Plot solar wind density:
plt.plot_date(dism['time'][::plst], dism['density'][::plst],'-k', label='density',linewidth=lw)
plt.ylabel('Density $\mathregular{[ccm^{-3}]}$',fontsize=fs_ylabel)
# For y limits check where the maximum and minimum are for DSCOVR and STEREO taken together:
plt.ylim([0,np.nanmax(np.nanmax(np.concatenate((dism['density'],stam['density'][sta_index_future])))+10)])
#plot STEREO-A data with timeshift and savgol filter
plt.plot_date(stam['time'][sta_index_future], signal.savgol_filter(stam['density'][sta_index_future],5,1),
'-', c='black', alpha=0.5, linewidth=lw)
# GENERAL FORMATTING
# ------------------
for ax in axes:
ax.set_xlim([plotstart,plotend])
ax.tick_params(axis="x", labelsize=fs)
ax.tick_params(axis="y", labelsize=fs)
ax.legend(loc=2,ncol=4,fontsize=fs_legend)
# Dates on x-axes:
myformat = mdates.DateFormatter(date_fmt)
ax.xaxis.set_major_formatter(myformat)
# Vertical line for NOW:
ax.plot_date([timeutc,timeutc],[-2000,100000],'-k', linewidth=2)
# Liability text:
pltcfg.group_info_text()
pltcfg.liability_text()
#save plot
if not verification_mode:
plot_label = 'realtime'
else:
plot_label = 'verify'
if not verification_mode:
plt.savefig(outfile)
logger.info('Real-time plot saved as {}!'.format(outfile))
def plot_solarwind_pretty(sw_past, sw_future, dst, newell_coupling, timestamp):
"""Uses the package mplcyberpunk to make a simpler and more visually appealing plot.
TO-DO:
- Implement weighted average smoothing on Newell Coupling."""
import mplcyberpunk
plt.style.use("cyberpunk")
c_speed = (0.58, 0.404, 0.741)
c_dst = (0.031, 0.969, 0.996)
c_ec = (0.961, 0.827, 0)
alpha_fut = 0.5
fig, (ax1, ax2, ax3) = plt.subplots(3, figsize=(17,9), sharex=True)
time_past = dst['time'] <= date2num(timestamp)
time_future = dst['time'] >= date2num(timestamp)
# Plot data:
ax1.plot_date(sw_past['time'], sw_past['speed'], '-', c=c_speed, label="Solar wind speed [km/s]")
ax1.plot_date(sw_future['time'], sw_future['speed'], '-', c=c_speed, alpha=alpha_fut)
ax2.plot_date(dst['time'][time_past], dst['dst'][time_past], '-', c=c_dst, label="$Dst$ [nT]")
ax2.plot_date(dst['time'][time_future], dst['dst'][time_future], '-', c=c_dst, alpha=alpha_fut)
avg_newell_coupling = newell_coupling.get_weighted_average('ec')
ax3.plot_date(newell_coupling['time'][time_past], avg_newell_coupling[time_past]/4421., '-', c=c_ec, label="Newell Coupling\n[nT]")
ax3.plot_date(newell_coupling['time'][time_future], avg_newell_coupling[time_future]/4421., '-', c=c_ec, alpha=alpha_fut)
mplcyberpunk.add_glow_effects(ax1)
mplcyberpunk.add_glow_effects(ax2)
mplcyberpunk.add_glow_effects(ax3)
# Add labels:
props = dict(boxstyle='round', facecolor='silver', alpha=0.2)
# place a text box in upper left in axes coords
ax1.text(0.01, 0.95, "Solar wind speed [km/s]", transform=ax1.transAxes, fontsize=14,
verticalalignment='top', bbox=props)
ax2.text(0.01, 0.95, "Predicted $Dst$ [nT]", transform=ax2.transAxes, fontsize=14,
verticalalignment='top', bbox=props)
ax3.text(0.01, 0.95, 'Newell Coupling / 4421 $\mathregular{[(km/s)^{4/3} nT^{2/3}]}$', transform=ax3.transAxes, fontsize=14,
verticalalignment='top', bbox=props)
pltcfg.plot_dst_activity_lines(xlims=[dst['time'][0], dst['time'][-1]], ax=ax2, color='silver')
pltcfg.plot_speed_lines(xlims=[dst['time'][0], dst['time'][-1]], ax=ax1, color='silver')
# Add vertical lines for 'now' time:
print_time_lines = True
for ax in [ax1, ax2, ax3]:
# Add a line denoting "now"
ax.axvline(x=timestamp, linewidth=2, color='silver')
# Add buffer to top of plots so that labels don't overlap with data:
ax_ymin, ax_ymax = ax.get_ylim()
text_adj = (ax_ymax-ax_ymin)*0.17
ax.set_ylim((ax_ymin, ax_ymax + text_adj))
# Add lines for future days:
ax_ymin, ax_ymax = ax.get_ylim()
text_adj = (ax_ymax-ax_ymin)*0.15
for t_day in [1,2,3,4]:
t_days_timestamp = timestamp+timedelta(days=t_day)
ax.axvline(x=t_days_timestamp, ls='--', linewidth=0.7, color='silver')
if print_time_lines:
ax.annotate('now', xy=(timestamp, ax_ymax-text_adj), xytext=(timestamp+timedelta(hours=2.5),
ax_ymax-text_adj*1.03), color='silver', fontsize=14)
ax.annotate('+{} days'.format(t_day), xy=(t_days_timestamp, ax_ymax-text_adj), xytext=(t_days_timestamp+timedelta(hours=2),
ax_ymax-text_adj*1.03), color='silver', fontsize=10)
print_time_lines = False
# Formatting:
tick_date = num2date(dst['time'][0]).replace(hour=0, minute=0, second=0, microsecond=0)
ax3.set_xticks([tick_date + timedelta(days=n) for n in range(1,15)])
ax3.set_xlim([dst['time'][0], dst['time'][-1]])
myformat = DateFormatter('%a\n%b %d')
ax3.xaxis.set_major_formatter(myformat)
ax1.tick_params(axis='both', which='major', labelsize=14)
ax2.tick_params(axis='both', which='major', labelsize=14)
ax3.tick_params(axis='both', which='major', labelsize=14)
plt.subplots_adjust(hspace=0.)
ax1.set_title("Helio4Cast Geomagnetic Activity Forecast, {} UTC".format(timestamp.strftime("%Y-%m-%d %H:%M")), pad=20)
pltcfg.group_info_text_small()
plt.savefig("predstorm_pretty.png")
# To cut the final version:
# convert predstorm_pretty.png -crop 1420x1000+145+30 predstorm_pretty_cropped.png
def plot_stereo_dscovr_comparison(stam, dism, dst, timestamp=None, look_back=20, outfile=None, **kwargs):
"""Plots the last days of STEREO-A and DSCOVR data for comparison alongside
the predicted and real Dst.
Parameters
==========
stam : predstorm.SatData
Object containing minute STEREO-A data
dism : predstorm.SatData
Object containing minute DSCOVR data.
dst : predstorm.SatData
Object containing Kyoto Dst data.
timestamp : datetime obj
Time for last datapoint in plot.
look_back : float (default=20)
Number of days in the past to plot.
**kwargs : ...
See config.plotting for variables that can be tweaked.
Returns
=======
plt.savefig : .png file
File saved to XXX
"""
if timestamp == None:
timestamp = datetime.utcnow()
if outfile == None:
outfile = 'sta_dsc_comparison_{}.png'.format(datetime.strftime(timestamp, "%Y-%m-%dT%H:%M"))
figsize = kwargs.get('figsize', pltcfg.figsize)
lw = kwargs.get('lw', pltcfg.lw)
fs = kwargs.get('fs', pltcfg.fs)
date_fmt = kwargs.get('date_fmt', pltcfg.date_fmt)
c_dst = kwargs.get('c_dst', pltcfg.c_dst)
c_dis = kwargs.get('c_dis', pltcfg.c_dis)
c_sta = kwargs.get('c_sta', pltcfg.c_sta)
c_sta_dst = kwargs.get('c_sta_dst', pltcfg.c_sta_dst)
ms_dst = kwargs.get('c_dst', pltcfg.ms_dst)
fs_legend = kwargs.get('fs_legend', pltcfg.fs_legend)
fs_ylabel = kwargs.get('fs_legend', pltcfg.fs_ylabel)
# READ DATA:
# ----------
# TODO: It would be faster to read archived hourly data rather than interped minute data...
logger.info("plot_stereo_dscovr_comparison: Reading satellite data")
# Get estimate of time diff:
stam.shift_time_to_L1()
sta = stam.make_hourly_data()
sta.interp_nans()
dis = dism.make_hourly_data()
dis.interp_nans()
# CALCULATE PREDICTED DST:
# ------------------------
sta.convert_RTN_to_GSE().convert_GSE_to_GSM()
dst_pred = sta.make_dst_prediction()
# PLOT:
# -----
# Set style:
sns.set_context(pltcfg.sns_context)
sns.set_style(pltcfg.sns_style)
plotstart = timestamp - timedelta(days=look_back)
plotend = timestamp
# Make figure object:
fig = plt.figure(1,figsize=figsize)
axes = []
# SUBPLOT 1: Total B-field and Bz
# -------------------------------
ax1 = fig.add_subplot(411)
axes.append(ax1)
plt.plot_date(dis['time'], dis['bz'], '-', c=c_dis, linewidth=lw, label='DSCOVR')
plt.plot_date(sta['time'], sta['bz'], '-', c=c_sta, linewidth=lw, label='STEREO-A')
# Indicate 0 level for Bz
plt.plot_date([plotstart,plotend], [0,0],'--k', alpha=0.5, linewidth=1)
plt.ylabel('Magnetic field Bz [nT]', fontsize=fs_ylabel)
# For y limits check where the maximum and minimum are for DSCOVR and STEREO taken together:
bplotmax=np.nanmax(np.concatenate((dis['bz'], sta['bz'])))+5
bplotmin=np.nanmin(np.concatenate((dis['bz'], sta['bz'])))-5
import yaml
import torch
import os
from glob import glob
import numpy as np
class Namespace:
def __init__(self, **kwargs):
self.__dict__.update(kwargs)
def load_yaml(filename):
with open(filename, 'r') as stream:
try:
data = yaml.safe_load(stream)
except yaml.YAMLError as exc:
print(exc)
data = None
return data
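# Example usage (hypothetical config path; a minimal sketch, not part of the original pipeline):
#   cfg = load_yaml('configs/train.yaml')   # parsed dict, or None if the YAML is malformed
#   lr = cfg['lr'] if cfg else 1e-3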
def save_checkpoint(state, checkpoint_dir, epoch, n_ckpt=10):
torch.save(state, os.path.join(checkpoint_dir, "epoch{:0>4d}.pth.tar".format(epoch)))
if epoch - n_ckpt >= 0:
oldest_ckpt = os.path.join(checkpoint_dir, "epoch{:0>4d}.pth.tar".format(epoch - n_ckpt))
if os.path.isfile(oldest_ckpt):
os.remove(oldest_ckpt)
def get_last_checkpoint(checkpoint_dir):
all_ckpt = glob(os.path.join(checkpoint_dir, 'epoch*.pth.tar'))
if all_ckpt:
all_ckpt = sorted(all_ckpt)
return all_ckpt[-1]
else:
return ''
def Time2FrameNumber(t, ori_fps, fps=10):
""" function to convert segment annotations given in seconds to frame numbers
input:
ori_fps: is the original fps of the video
fps: is the fps that we are using to extract frames from the video
num_frames: is the number of frames in the video (under fps)
t: is the time (in seconds) that we want to convert to frame number
output:
numf: the frame number corresponding to the time t of a video encoded at fps
"""
ori2fps_ratio = int(ori_fps/fps)
ori_numf = t*ori_fps
numf = int(ori_numf / ori2fps_ratio)
return numf
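# Worked example (illustrative numbers): a timestamp t = 12.4 s in a video encoded at
# ori_fps = 30 and re-extracted at fps = 10 gives
#   ori2fps_ratio = int(30 / 10) = 3
#   ori_numf      = 12.4 * 30   = 372.0
#   numf          = int(372.0 / 3) = 124
# i.e. Time2FrameNumber(12.4, 30, 10) == 124.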
def RemoveDuplicates(a):
""" function to remove duplicate steps """
filtered = []
keep_ids = []
nums = a.shape[0]
for i in range(nums):
if a[i] in filtered:
continue
else:
filtered.append(a[i])
keep_ids.append(i)
filtered = torch.stack(filtered)
keep_ids = torch.tensor(keep_ids)
return filtered, keep_ids
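# Example (assuming a 1-D tensor of step ids):
#   RemoveDuplicates(torch.tensor([4, 4, 7, 9, 7]))
#   -> (tensor([4, 7, 9]), tensor([0, 2, 3]))
# i.e. the first occurrence of each step is kept together with its original index.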
def MergeConsec(a, a_st, a_ed):
""" merge consecutibe steps"""
# find consecutive steps
a_old = 10000
merge_ids = []
mids = []
merge_st = []
mst = []
for i in range(a.shape[0]):
if a[i] == a_old:
mst.append(a[i-1])
mst.append(a[i])
mids.append(i-1)
mids.append(i)
else:
merge_ids.append(mids)
merge_st.append(mst)
mids = []
mst = []
a_old = a[i]
if i == a.shape[0]-1:
merge_ids.append(mids)
merge_st.append(mst)
# remove empty entries from list
merge_ids = list(filter(None, merge_ids))
merge_st = list(filter(None, merge_st))
# merge consec start and end times
for i in range(len(merge_ids)):
a_st[merge_ids[i][-1]] = a_st[merge_ids[i][0]]
a_ed[merge_ids[i][0]] = a_ed[merge_ids[i][-1]]
return a_st, a_ed
def VidList2Batch(samples, VID_LEN=224):
""" create a batch of videos of the same size from input sequences """
# create data needed for training
vids = []
batch_size = len(samples)
for b in range(batch_size):
numf = samples[b]['frame_features'].shape[0]
unpadded_vid = samples[b]['frame_features'].T
# if video is shorter than desired length ==> PAD
if numf < VID_LEN:
pad = torch.nn.ConstantPad1d((0, VID_LEN-numf), 0)
vids.append(pad(unpadded_vid))
# if video is longer than desired length ==> STRIDED SAMPLING
elif numf > VID_LEN:
stride = int(numf//VID_LEN)
pad = unpadded_vid[:,::stride]
vids.append(pad[:,:VID_LEN])
else:
pad = unpadded_vid
vids.append(pad)
vids = torch.stack(vids, dim=0)
return vids
def Steps2Batch(steps, num_steps):
""" create a list of lists of the steps """
st = 0
batched_steps = []
for i in range(len(num_steps)):
ed = st+ num_steps[i]
batched_steps.append(steps[st:ed,:])
st = ed
return batched_steps
def cosine_sim(x, z):
cos_sim_fn = torch.nn.CosineSimilarity(dim=1)
return cos_sim_fn(x[..., None], z.T[None, ...])
def neg_l2_dist(x, z, device):
"""Computes pairwise distances between all rows of x and z."""
#return -1*torch.cdist(x,z,2)
norm1 = torch.sum(torch.square(x), dim=-1)
norm1 = torch.reshape(norm1, [-1, 1])
norm2 = torch.sum(torch.square(z), dim=-1)
norm2 = torch.reshape(norm2, [1, -1])
# Max to ensure matmul doesn't produce anything negative due to floating
# point approximations.
dist = -1*torch.maximum(
norm1 + norm2 - torch.tensor([2.0]).to(device) * torch.matmul(x, z.T), torch.tensor([0.0]).to(device))
return dist
def linear_sim(x, z):
return x @ z.T
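# Shape sanity check for the three pairwise similarity functions above
# (a minimal sketch with random features; sizes are illustrative):
#   x = torch.randn(8, 512)   # e.g. 8 frame features
#   z = torch.randn(5, 512)   # e.g. 5 step features
#   cosine_sim(x, z).shape                          # torch.Size([8, 5])
#   linear_sim(x, z).shape                          # torch.Size([8, 5])
#   neg_l2_dist(x, z, torch.device('cpu')).shape    # torch.Size([8, 5])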
def whitening(frame_features, step_features, stats_file):
""" do data whitening """
# load dataset stats
data_stats = np.load(stats_file, allow_pickle=True)
vis_mean = data_stats.item().get('vis_mean')
vis_std = data_stats.item().get('vis_std')
lang_mean = data_stats.item().get('lang_mean')
lang_std = data_stats.item().get('lang_std')
frame_features = (frame_features - vis_mean) / vis_std
step_features = (step_features - lang_mean) / lang_std
return frame_features, step_features
def unique_softmax(sim, labels, gamma=1, dim=0):
assert sim.shape[0] == labels.shape[0]
labels = labels.detach().cpu().numpy()
unique_labels, unique_index, unique_inverse_index = np.unique(
labels, return_index=True, return_inverse=True)
unique_sim = sim[unique_index]
unique_softmax_sim = torch.nn.functional.softmax(unique_sim / gamma, dim=0)
softmax_sim = unique_softmax_sim[unique_inverse_index]
return softmax_sim
def framewise_accuracy(frame_assignment, sample, use_unlabeled=False):
""" calculate framewise accuracy as done in COIN """
# convert start and end times into clip-level labels
num_steps = sample['num_steps'].numpy()
num_frames = sample['num_frames'].numpy()
# non-step frames/clips are assigned label = -1
gt_assignment = -np.ones((num_frames,), dtype=np.int32)
# convert start and end times to clip/frame -wise labels
for s in range(num_steps):
st_ed = np.arange(sample['step_starts'][s],sample['step_ends'][s]+1)
gt_assignment[st_ed] = s #sample['step_ids'][s]
# to discount unlabeled frames in gt
if not use_unlabeled:
unlabled = np.count_nonzero(gt_assignment == -1)
import numpy as np
from .utils import aryule_levinson, arburg
from logging import getLogger
logger = getLogger('ChangeFinder')
class SDAR_1D:
def __init__(self, r, k, is_yule=True):
"""Train a AR(k) model by using the SDAR algorithm (1d points only).
Args:
r (float): Discounting parameter.
k (int): Order of the AR model.
is_yule (bool): Estimate the AR model by solving the Yule-Walker equations, or not.
If not, estimate it using Burg's method.
"""
self.r = r
self.k = k
self.is_yule = is_yule
# initialize the parameters
self.mu = self.sigma = 0.0
self.c = np.zeros(self.k + 1)
def update(self, x, xs):
"""Update the current AR model.
Args:
x (float): A new 1d point (t).
xs (numpy array): `k` past points (..., t-k, ..., t-1).
Returns:
float: Latest PDF for the given series.
"""
assert xs.size >= self.k, 'size of xs must be greater or equal to the order of the AR model.'
# estimate mu
self.mu = (1 - self.r) * self.mu + self.r * x
if self.is_yule:
# update c (coefficients of the Yule-Walker equation)
self.c[0] = (1 - self.r) * self.c[0] + self.r * (x - self.mu) * (x - self.mu) # c_0: x_t = x_{t-j}
self.c[1:] = (1 - self.r) * self.c[1:] + self.r * (x - self.mu) * (xs[::-1][:self.k] - self.mu)
# a_1, ..., a_k
a = aryule_levinson(self.c, self.k)
else:
a = arburg(np.append(x, xs[::-1][:self.k]), self.k)
# estimate x
x_hat = np.dot(a, (xs[::-1][:self.k] - self.mu)) + self.mu
# estimate sigma
self.sigma = (1 - self.r) * self.sigma + self.r * (x - x_hat) ** 2
# compute and return the value of probability density function
if self.sigma == 0.0:
return 0.0
numerator = np.exp(-0.5 * (x - x_hat) ** 2 / self.sigma)
denominator = (2 * np.pi) ** 0.5 * (self.sigma) ** 0.5
return numerator / denominator
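# Minimal usage sketch for SDAR_1D (illustrative values; `update` relies on the
# aryule_levinson / arburg helpers imported from .utils above):
#   sdar = SDAR_1D(r=0.05, k=3)
#   xs = np.array([0.1, -0.2, 0.05])   # the k most recent points (t-3, t-2, t-1)
#   p = sdar.update(0.3, xs)           # PDF value of the new point under the AR(3) model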
class ChangeFinder:
def __init__(self, r, k, T1, T2, is_yule=True, is_logloss=True):
"""ChangeFinder.
Args:
r (float): Discounting parameter.
k (int): Order of the AR model (i.e. consider a AR(k) process).
T1 (int): Window size for the simple moving average of outlier scores.
T2 (int): Window size to compute a change point score.
is_yule (bool): Estimate the AR model by solving the Yule-Walker equations, or not.
If not, estimate it using Burg's method.
is_logloss (bool): Compute anomaly scores based on LogLoss or the Hellinger distance.
"""
assert k > 0, 'k must be 1 or more.'
self.r = r
self.k = k
self.T1 = T1
self.T2 = T2
self.xs = np.zeros(k)
import jax.numpy as jnp
from jax import grad, vmap, hessian
from jax.config import config
config.update("jax_enable_x64", True)
# numpy
import numpy as onp
from numpy import random
import argparse
import logging
import datetime
from time import time
import os
# solving -grad(a*grad u) + alpha u^m = f
def get_parser():
parser = argparse.ArgumentParser(description='NonLinElliptic equation GP solver')
parser.add_argument("--freq_a", type=float, default = 1.0)
parser.add_argument("--alpha", type=float, default = 1.0)
parser.add_argument("--m", type = int, default = 3)
parser.add_argument("--dim", type = int, default = 2)
parser.add_argument("--kernel", type=str, default="Matern_7half", choices=["gaussian","inv_quadratics","Matern_3half","Matern_5half","Matern_7half","Matern_9half","Matern_11half"])
parser.add_argument("--sigma-scale", type = float, default = 0.25)
# sigma = args.sigma_scale * sqrt(dim)
parser.add_argument("--nugget", type = float, default = 1e-10)
parser.add_argument("--GNsteps", type = int, default = 6)
parser.add_argument("--logroot", type=str, default='./logs/')
parser.add_argument("--randomseed", type=int, default=1)
parser.add_argument("--num_exp", type=int, default=1)
args = parser.parse_args()
return args
def get_GNkernel_train(x,y,wx0,wx1,wxg,wy0,wy1,wyg,d,sigma):
# wx0 * delta_x + wxg * nabla delta_x + wx1 * Delta delta_x
return wx0*wy0*kappa(x,y,d,sigma) + wx0*wy1*Delta_y_kappa(x,y,d,sigma) + wy0*wx1*Delta_x_kappa(x,y,d,sigma) + wx1*wy1*Delta_x_Delta_y_kappa(x,y,d,sigma) + wx0*D_wy_kappa(x,y,d,sigma,wyg) + wy0*D_wx_kappa(x,y,d,sigma,wxg) + wx1*Delta_x_D_wy_kappa(x,y,d,sigma,wyg) + wy1*D_wx_Delta_y_kappa(x,y,d,sigma,wxg) + D_wx_D_wy_kappa(x,y,d,sigma,wxg,wyg)
def get_GNkernel_train_boundary(x,y,wy0,wy1,wyg,d,sigma):
return wy0*kappa(x,y,d,sigma) + wy1*Delta_y_kappa(x,y,d,sigma) + D_wy_kappa(x,y,d,sigma,wyg)
def get_GNkernel_val_predict(x,y,wy0,wy1,wyg,d,sigma):
return wy0*kappa(x,y,d,sigma) + wy1*Delta_y_kappa(x,y,d,sigma) + D_wy_kappa(x,y,d,sigma,wyg)
def get_GNkernel_val_predict_Delta(x,y,wy0,wy1,wyg,d,sigma):
return wy0*Delta_x_kappa(x,y,d,sigma) + wy1*Delta_x_Delta_y_kappa(x,y,d,sigma) + Delta_x_D_wy_kappa(x,y,d,sigma,wyg)
def assembly_Theta(X_domain, X_boundary, w0, w1, wg, sigma):
# X_domain, dim: N_domain*d;
# w0 col vec: coefs of Diracs, dim: N_domain;
# w1 coefs of Laplacians, dim: N_domain
N_domain,d = onp.shape(X_domain)
N_boundary,_ = onp.shape(X_boundary)
Theta = onp.zeros((N_domain+N_boundary,N_domain+N_boundary))
XdXd0 = onp.reshape(onp.tile(X_domain,(1,N_domain)),(-1,d))
XdXd1 = onp.tile(X_domain,(N_domain,1))
XbXd0 = onp.reshape(onp.tile(X_boundary,(1,N_domain)),(-1,d))
XbXd1 = onp.tile(X_domain,(N_boundary,1))
XbXb0 = onp.reshape(onp.tile(X_boundary,(1,N_boundary)),(-1,d))
XbXb1 = onp.tile(X_boundary,(N_boundary,1))
arr_wx0 = onp.reshape(onp.tile(w0,(1,N_domain)),(-1,1))
arr_wx1 = onp.reshape(onp.tile(w1,(1,N_domain)),(-1,1))
arr_wxg = onp.reshape(onp.tile(wg,(1,N_domain)),(-1,d))
arr_wy0 = onp.tile(w0,(N_domain,1))
arr_wy1 = onp.tile(w1,(N_domain,1))
arr_wyg = onp.tile(wg,(N_domain,1))
arr_wy0_bd = onp.tile(w0,(N_boundary,1))
arr_wy1_bd = onp.tile(w1,(N_boundary,1))
arr_wyg_bd = onp.tile(wg,(N_boundary,1))
val = vmap(lambda x,y,wx0,wx1,wxg,wy0,wy1,wyg: get_GNkernel_train(x,y,wx0,wx1,wxg,wy0,wy1,wyg,d,sigma))(XdXd0,XdXd1,arr_wx0,arr_wx1,arr_wxg,arr_wy0,arr_wy1,arr_wyg)
Theta[:N_domain,:N_domain] = onp.reshape(val, (N_domain,N_domain))
val = vmap(lambda x,y,wy0,wy1,wyg: get_GNkernel_train_boundary(x,y,wy0,wy1,wyg,d,sigma))(XbXd0,XbXd1,arr_wy0_bd,arr_wy1_bd,arr_wyg_bd)
Theta[N_domain:,:N_domain] = onp.reshape(val, (N_boundary,N_domain))
Theta[:N_domain,N_domain:] = onp.transpose(onp.reshape(val, (N_boundary,N_domain)))
val = vmap(lambda x,y: kappa(x,y,d,sigma))(XbXb0, XbXb1)
Theta[N_domain:,N_domain:] = onp.reshape(val, (N_boundary, N_boundary))
return Theta
def assembly_Theta_value_predict(X_infer, X_domain, X_boundary, w0, w1, wg, sigma):
N_infer, d = onp.shape(X_infer)
N_domain, _ = onp.shape(X_domain)
N_boundary, _ = onp.shape(X_boundary)
Theta = onp.zeros((2*N_infer,N_domain+N_boundary))
XiXd0 = onp.reshape(onp.tile(X_infer,(1,N_domain)),(-1,d))
XiXd1 = onp.tile(X_domain,(N_infer,1))
XiXb0 = onp.reshape(onp.tile(X_infer,(1,N_boundary)),(-1,d))
XiXb1 = onp.tile(X_boundary,(N_infer,1))
arr_wy0 = onp.tile(w0,(N_infer,1))
arr_wy1 = onp.tile(w1,(N_infer,1))
arr_wyg = onp.tile(wg,(N_infer,1))
val = vmap(lambda x,y,wy0,wy1,wyg: get_GNkernel_val_predict(x,y,wy0,wy1,wyg,d,sigma))(XiXd0,XiXd1,arr_wy0,arr_wy1,arr_wyg)
Theta[:N_infer,:N_domain] = onp.reshape(val, (N_infer,N_domain))
val = vmap(lambda x,y: kappa(x,y,d,sigma))(XiXb0, XiXb1)
Theta[:N_infer,N_domain:] = onp.reshape(val, (N_infer,N_boundary))
val = vmap(lambda x,y,wy0,wy1,wyg: get_GNkernel_val_predict_Delta(x,y,wy0,wy1,wyg,d,sigma))(XiXd0,XiXd1,arr_wy0,arr_wy1,arr_wyg)
Theta[N_infer:,:N_domain] = onp.reshape(val, (N_infer,N_domain))
val = vmap(lambda x,y: Delta_x_kappa(x,y,d,sigma))(XiXb0, XiXb1)
Theta[N_infer:,N_domain:] = onp.reshape(val, (N_infer,N_boundary))
return Theta
def GPsolver(X_domain, X_boundary, X_test, sigma, nugget, sol_init, GN_step = 4):
N_domain, d = onp.shape(X_domain)
sol = sol_init
rhs_f = vmap(f)(X_domain)[:,onp.newaxis]
bdy_g = vmap(g)(X_boundary)[:,onp.newaxis]
wg = -vmap(grad_a)(X_domain) #size?
w1 = -vmap(a)(X_domain)[:,onp.newaxis]
time_begin = time()
for i in range(GN_step):
w0 = alpha*m*(sol**(m-1))
Theta_train = assembly_Theta(X_domain, X_boundary, w0, w1, wg, sigma)
Theta_test = assembly_Theta_value_predict(X_domain, X_domain, X_boundary, w0, w1, wg, sigma)
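# Gauss-Newton linearization of the nonlinear term: alpha*u^m ~ alpha*m*sol^(m-1)*u - alpha*(m-1)*sol^m.
# The linear coefficient enters w0 above; the known remainder alpha*(m-1)*sol^m is added to the right-hand side below.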
rhs = rhs_f + alpha*(m-1)*(sol**m)
rhs = onp.concatenate((rhs, bdy_g), axis = 0)
sol = Theta_test[:N_domain,:] @ (onp.linalg.solve(Theta_train + nugget*onp.diag(onp.diag(Theta_train)),rhs))
total_mins = (time() - time_begin) / 60
logging.info(f'[Timer] GP iteration {i+1}/{GN_step}, finished in {total_mins:.2f} minutes')
Theta_test = assembly_Theta_value_predict(X_test, X_domain, X_boundary, w0, w1, wg, sigma)
result_test = Theta_test @ (onp.linalg.solve(Theta_train + nugget*onp.diag(onp.diag(Theta_train)),rhs))
N_infer, d = onp.shape(X_test)
sol_test = result_test[:N_infer]
Delta_sol_test = result_test[N_infer:]
return sol, sol_test, Delta_sol_test
# def sample_points(N_domain, N_boundary, d, choice = 'random'):
# X_domain = onp.zeros((N_domain,d))
# X_boundary = onp.zeros((N_boundary,d))
# X_domain = onp.random.randn(N_domain,d) # N_domain*d
# X_domain /= onp.linalg.norm(X_domain, axis=1)[:,onp.newaxis] # the divisor is of N_domain*1
# random_radii = onp.random.rand(N_domain,1) ** (1/d)
# X_domain *= random_radii
# X_boundary = onp.random.randn(N_boundary,d)
# X_boundary /= onp.linalg.norm(X_boundary, axis=1)[:,onp.newaxis]
# return X_domain, X_boundary
def sample_points(N_domain, N_boundary, d, choice = 'random'):
x1l = 0.0
x1r = 1.0
x2l = 0.0
x2r = 1.0
#(x,y) in [x1l,x1r]*[x2l,x2r] default = [0,1]*[0,1]
# interior nodes
X_domain = onp.concatenate((random.uniform(x1l, x1r, (N_domain, 1)), random.uniform(x2l, x2r, (N_domain, 1))), axis = 1)
N_boundary_per_bd = int(N_boundary/4)
X_boundary = onp.zeros((N_boundary_per_bd*4, 2))
# bottom face
X_boundary[0:N_boundary_per_bd, 0] = random.uniform(x1l, x1r, N_boundary_per_bd)
X_boundary[0:N_boundary_per_bd, 1] = x2l
# right face
X_boundary[N_boundary_per_bd:2*N_boundary_per_bd, 0] = x1r
X_boundary[N_boundary_per_bd:2*N_boundary_per_bd, 1] = random.uniform(x2l, x2r, N_boundary_per_bd)
# top face
X_boundary[2*N_boundary_per_bd:3*N_boundary_per_bd, 0] = random.uniform(x1l, x1r, N_boundary_per_bd)
X_boundary[2*N_boundary_per_bd:3*N_boundary_per_bd, 1] = x2r
# left face
X_boundary[3*N_boundary_per_bd:4*N_boundary_per_bd, 1] = random.uniform(x2l, x2r, N_boundary_per_bd)
X_boundary[3*N_boundary_per_bd:4*N_boundary_per_bd, 0] = x1l
return X_domain, X_boundary
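# Example (illustrative counts): 1000 interior collocation points in the unit square and
# 200 points spread evenly over its four edges:
#   X_domain, X_boundary = sample_points(1000, 200, d=2)
#   # X_domain.shape == (1000, 2), X_boundary.shape == (200, 2)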
def logger(args, level = 'INFO'):
log_root = args.logroot + 'NonVarLinElliptic_rate'
log_name = 'dim' + str(args.dim) + '_kernel' + str(args.kernel)
logdir = os.path.join(log_root, log_name)
os.makedirs(logdir, exist_ok=True)
log_para = 's' + str(args.sigma_scale) + str(args.nugget).replace(".","") + '_fa' + str(args.freq_a) + '_cos' + '_nexp' + str(args.num_exp)
date = str(datetime.datetime.now())
log_base = date[date.find("-"):date.rfind(".")].replace("-", "").replace(":", "").replace(" ", "_")
filename = log_para + '_' + log_base + '.log'
logging.basicConfig(level=logging.__dict__[level],
format="%(asctime)s [%(levelname)s] %(message)s",
handlers=[
logging.FileHandler(logdir+'/'+filename),
logging.StreamHandler()]
)
return logdir+'/'+filename
def set_random_seeds(args):
random_seed = args.randomseed
random.seed(random_seed)
## get argument parser
args = get_parser()
filename = logger(args, level = 'INFO')
logging.info(f'argument is {args}')
def a(x):
# return jnp.exp(jnp.sin(jnp.sum(args.freq_a * jnp.cos(x))))
return 1.0
def grad_a(x):
return grad(a)(x)
freq = 600
arr_s = [4,5,6,7,8,9]
# arr_s = [4]
num_s = onp.size(arr_s)
# -*- coding: utf-8 -*-
"""
Created on Nov 04 2020
@author: <NAME>
@supervisor: <NAME>
Calculate distance for VVV RR Lyrae.
"""
import os
import sys
import copy
import math
import numpy as np
import pandas as pd
from scipy.stats import norm
from astropy.coordinates import SkyCoord
from astropy import units as u
import matplotlib
import matplotlib.mlab as mlab
import matplotlib.pyplot as plt
from matplotlib.ticker import LogFormatter
from matplotlib.patches import Rectangle
from scipy.optimize import curve_fit
from scipy.optimize import leastsq
from scipy.signal import find_peaks
import scipy
# Use LaTex fonts
from matplotlib import rc
rc('text', usetex=True)
plt.rcParams.update({'font.size': 12})
# use comma as the decimal separator (Brazilian notation)
import locale
locale.setlocale(locale.LC_ALL, 'pt_BR.UTF-8')
locale.setlocale(locale.LC_NUMERIC, 'pt_BR.UTF-8')
import matplotlib as mpl
mpl.rcParams['axes.formatter.use_locale'] = True
# my library
sys.path.append('/home/botan/OneDrive/Doutorado/VVV_DATA/my_modules/')
import math_functions
import red_clump_tools as rct
class RRLTools(object):
def __init__(self,gc_distance):
self.gc_distance = gc_distance
self.path = '/home/botan/OneDrive/Doutorado/VVV_DATA'
def LCmagKs(self):
# calculate mean Ks magnitude
tiles = sorted(os.listdir(f'{self.path}/data/psf_ts/'))
for tile in tiles:
print(f'Working on tile {tile}. Wait...')
chips = [_[:-3] for _ in os.listdir(f'{self.path}/data/psf_ts/{tile}/chips/') if _.endswith('.ts')]
for chip in chips:
chipData = pd.read_csv(f'{self.path}/data/psf_ts/{tile}/chips/{chip}.ts',index_col='ID')
magCols = [_ for _ in chipData.columns if _[:3] == 'MAG']
errCols = [_ for _ in chipData.columns if _[:3] == 'ERR']
err_msk = (chipData[errCols] > 0.2).values
nEpoch = (~chipData[errCols].mask(err_msk).isnull()).sum(axis=1)
ks_mag = chipData[magCols].mask(err_msk).sum(axis=1) / nEpoch
ks_err = np.sqrt((chipData[errCols].mask(err_msk)**2).sum(axis=1)) / nEpoch
for star in self.all_dat.index:
if star[:-20] == chip:
self.all_dat.loc[star,'lc_mean'] = ks_mag.loc[star]
self.all_dat.loc[star,'lc_mean_err'] = ks_err.loc[star]
print(' --> Done')
self.all_dat.to_csv(f'{self.path}/data/all_variables_match_vsx_ogle_gaia_viva2.csv',sep=',')
def psf_color(self):
tiles = sorted(os.listdir(f'{self.path}/data/psf_ts/'))
for tile in tiles:
print(f'Working on tile {tile}. Wait...')
chips = [_.split('.')[0] for _ in os.listdir(f'{self.path}/data/psf_ts/{tile}/chips/') if _.endswith('.ts')]
for chip in chips:
chipData = pd.read_csv(f'{self.path}/data/psf_ts/{tile}/chips/{chip}.ts',index_col='ID')
magCols = [_ for _ in chipData.columns if _.split("_")[0] == 'mag']
errCols = [_ for _ in chipData.columns if _.split("_")[0] == 'er']
for star in self.all_dat.index:
if star[:-20] == chip:
self.all_dat.loc[star,magCols] = chipData.loc[star,magCols]
self.all_dat.loc[star,errCols] = chipData.loc[star,errCols]
print(' --> Done')
cols = ['RA', 'DEC', 'mag_Ks', 'mag_J','mag_Z', 'mag_Y','lc_mean', 'lc_mean_err',
'mag_H', 'er_Z', 'er_Y', 'er_J', 'er_H', 'er_Ks', 'period', 'amplitude', 'dup_id1',
'dup_id2', 'dup_id3', 'OGLE_ID', 'OGLE_Type', 'OGLE_Subtype', 'OGLE_I',
'OGLE_V', 'OGLE_P', 'OGLE_A', 'VSX_ID', 'VSX_Name', 'VSX_Type', 'VSX_P',
'GAIA_ID', 'GAIA_AngDist', 'GAIA_Parallax', 'GAIA_Parallax_ERROR',
'GAIA_PMRA', 'GAIA_PMRA_ERROR', 'GAIA_PMDEC', 'GAIA_PMDEC_ERROR',
'gal_l', 'gal_b', 'rest', 'b_rest_x',
'B_rest_xa', 'rlen', 'ResFlag', 'vivaID', 'vivaP']
#self.all_dat.to_csv(f'{self.path}/data/all_variables_match_vsx_ogle_gaia_viva2.csv',sep=',')
return self.all_dat[cols]
def read_data(self):
tiles = sorted(os.listdir(f'{self.path}/data/psf_ts/'))
duplicates = pd.read_csv(f'{self.path}/data/chip_overlap_ids.csv',index_col=0)
rrl_fitparams = []
for tile in tiles:
path = f'{self.path}/data/psf_ts/{tile}/lc_plots/short_period/pos_visual_inspection'
rrl_fitparams.append(pd.read_csv(f'{path}/{tile}_rrlyr_bona_parameters.csv',sep=',',index_col='ID'))
rrl_fitparams = pd.concat(rrl_fitparams)
magCols = [_ for _ in rrl_fitparams.columns if _[:3] == 'mag']
errCols = [_ for _ in rrl_fitparams.columns if _[:3] == 'er_']
id2drop = []
for _ in rrl_fitparams.index:
if _ in duplicates.index:
for col in duplicates.columns:
star2drop = duplicates.loc[_,col]
if star2drop not in id2drop:
id2drop.append(star2drop)
self.rrl_ids = [_ for _ in rrl_fitparams.index if _ not in id2drop]
self.rrl_fitparams = rrl_fitparams.loc[self.rrl_ids].fillna(-99)
self.BEAM_extintion = pd.read_csv(f'{self.path}/data/all_variables_extintion_.csv',index_col='ID')
self.extintion3D = pd.read_csv(f'{self.path}/3D_Extintion_Map/table1jk.csv')
self.all_dat = pd.read_csv(f'{self.path}/data/all_variables_match_vsx_ogle_gaia_viva2_.csv',index_col='ID')
def Fe_abundance(self,period,mode=1):
# mode = 1: Sarajedini (2006) https://ui.adsabs.harvard.edu/abs/2006AJ....132.1361S/abstract
# mode = 2: Feast (2010) https://ui.adsabs.harvard.edu/abs/2010MNRAS.408L..76F/abstract
if mode==1:
FeH = -7.82*np.log10(period) - 3.43
sigma = 0.45
if mode==2:
FeH = -5.62*np.log10(period) - 2.81
sigma = 0.42
return FeH,sigma
def metallicity_from_Fe_abundance(self,abundance,sigma,alpha=0.3):
z = 10**(abundance + np.log10(0.638*10**alpha + 0.362) - 1.765)
z_sigma = 10**(abundance + np.log10(0.638*10**alpha + 0.362) - 1.765) * np.log(10) * sigma
return z,z_sigma
def M_Ks(self,period,metallicity,offset=0):
abs_ks = - 0.6365 - 2.347*np.log10(period) + 0.1747*np.log10(metallicity) + offset
# this theoretical absolute magnitude has errors below the survey photometric precision.
# Thus it has been ignored.
return abs_ks
def M_Ks2(self,period,FeH,offset=0):
# This is the PL relationship (equation 5) from Muraveva et al. (2015) AJ 807:127
abs_ks = -1.27 - 2.73*np.log10(period) + 0.03*FeH + offset
return abs_ks
def M_H(self,period,metallicity,offset=0):
abs_h = - 0.5539 - 2.302*np.log10(period) + 0.1781*np.log10(metallicity) + offset
return abs_h
def M_J(self,period,metallicity,offset=0):
abs_j = - 0.2361 - 1.830*np.log10(period) + 0.1886*np.log10(metallicity) + offset
return abs_j
def M_Y(self,period,metallicity,offset=0):
abs_y = 0.0090 - 1.467*np.log10(period) + 0.1966*np.log10(metallicity) + offset
return abs_y
def M_Z(self,period,metallicity,offset=0):
abs_z = 0.1570 - 1.247*np.log10(period) + 0.2014*np.log10(metallicity) + offset
return abs_z
def extintion(self, magA, magAerr, magB, magBerr, abs_magA, abs_magB):
#color excess
EJKs = (magA - magB) - (abs_magA - abs_magB)
sigma = abs(np.sqrt(magAerr**2 + magBerr**2))
return EJKs,sigma
def reddenig(self,extintion,extintionSigma,redIndex):
red = redIndex*extintion
sigma = abs(redIndex*extintionSigma)
return red,sigma
def red_free_mag(self,mag,err,red_mag,red_err):
mag_0 = mag - red_mag
sigma = abs(np.sqrt(err**2 + red_err**2))
return mag_0,sigma
def dist_module(self,Ks_0,ErrKs,AbsKs):
dist = 10**(1 + (Ks_0 - AbsKs)/5) #in parsec
sigma = abs(2**(1 + (Ks_0 - AbsKs)/5) * 5**((Ks_0 - AbsKs)/5) * np.log(10) * ErrKs)
return dist,sigma
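# Worked example for the distance modulus above (illustrative numbers): a dereddened
# mean magnitude Ks_0 = 15.0 with M_Ks = -0.5 gives
#   d = 10**(1 + (15.0 - (-0.5)) / 5) = 10**4.1 ~ 12.6 kpc.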
def calc_distances_color(self,lcmean,lcmean_err,MagKs,ErrKs,MagJ,ErrJ,MagH,ErrH,
beam_E_JK,beam_E_JK_err,period,
abundanceMode=1,magoffset=0):
# MagKs is the Ks mean magnitude from light curve
# MagJ and MagH are the magnitudes from color campain
FeH, FeH_sigma = self.Fe_abundance(period, mode=abundanceMode)
Z, Z_sigma = self.metallicity_from_Fe_abundance(FeH,FeH_sigma,alpha=0.3)
AbsKs = self.M_Ks(period,Z,offset=magoffset)
#AbsKs = self.M_Ks2(period,FeH,offset=magoffset)
AbsJ = self.M_J(period,Z,offset=magoffset)
AbsH = self.M_H(period,Z,offset=magoffset)
E_HKs = np.nan
E_HKS_sigma = np.nan
E_JKs = np.nan
E_JKS_sigma = np.nan
if MagKs > 0:
if MagJ > 0:
# if the J magnitude is available we calculate the extinction
# from the difference between observed and intrinsic magnitude
redIndex = 0.689 # Cardelli
#redIndex = 0.398#0.464# 0.689 Alonso-García (2017) https://iopscience.iop.org/article/10.3847/2041-8213/aa92c3
#redIndex = 0.575 # mine, obtained by RC in tile b309
E_JKs, E_JKS_sigma = self.extintion(MagJ,ErrJ,MagKs,ErrKs,AbsJ,AbsKs)
AKs, AKs_sigma = self.reddenig(E_JKs,E_JKS_sigma,redIndex=redIndex)
extintion_flag = 1
else:
if MagH > 0:
# if J is missing but H is not, we calculate the extinction
# from the difference between observed and intrinsic magnitude
redIndex = 1.888 # Cardelli
#redIndex = 1.30#1.888 Alonso-García (2017) https://iopscience.iop.org/article/10.3847/2041-8213/aa92c3
#redIndex = 1.04 # mine, obtained by RC in tile b309
E_HKs, E_HKS_sigma = self.extintion(MagH,ErrH,MagKs,ErrKs,AbsH,AbsKs)
AKs, AKs_sigma = self.reddenig(E_HKs,E_HKS_sigma,redIndex=redIndex)
extintion_flag = 2
else:
# if both J and H magnitudes are missing we use BEAM.
redIndex = 0.689 # Cardelli
#redIndex = 0.575 # mine, obtained by RC in tile b309
E_JKs = beam_E_JK
E_JKS_sigma = beam_E_JK_err
AKs,AKs_sigma = self.reddenig(E_JKs,E_JKS_sigma,redIndex=redIndex)
extintion_flag = 3
else:
# if both J and H magnitudes are missing we use BEAM.
redIndex = 0.689 # Cardelli
#redIndex = 0.575 # mine, obtained by RC in tile b309
E_JKs = beam_E_JK
E_JKS_sigma = beam_E_JK_err
AKs,AKs_sigma = self.reddenig(E_JKs,E_JKS_sigma,redIndex=redIndex)
extintion_flag = 3
Ks_0, Ks_0_err = self.red_free_mag(lcmean,lcmean_err,AKs,AKs_sigma)
dist, dist_err = self.dist_module(Ks_0,Ks_0_err,AbsKs)
return [dist, dist_err, FeH, FeH_sigma, Z, Z_sigma, AbsKs, AbsJ, AbsH, E_HKs, E_HKS_sigma, E_JKs, E_JKS_sigma, AKs, AKs_sigma, extintion_flag]
def calc_distances_BEAM(self,lcmean,lcmean_err,MagKs,ErrKs,MagJ,ErrJ,MagH,ErrH,
beam_E_JK,beam_E_JK_err,period,
abundanceMode=1,magoffset=0):
# MagKs is the Ks mean magnitude from light curve
# MagJ and MagH are the magnitudes from color campain
FeH, FeH_sigma = self.Fe_abundance(period, mode=abundanceMode)
Z, Z_sigma = self.metallicity_from_Fe_abundance(FeH,FeH_sigma,alpha=0.3)
AbsKs = self.M_Ks(period,Z,offset=magoffset)
#AbsKs = self.M_Ks2(period,FeH,offset=magoffset)
AbsJ = self.M_J(period,Z,offset=magoffset)
AbsH = self.M_H(period,Z,offset=magoffset)
E_HKs = np.nan
E_HKS_sigma = np.nan
E_JKs = np.nan
E_JKS_sigma = np.nan
redIndex = 0.689 # Cardelli
#redIndex = 0.575 # mine, obtained by RC in tile b309
E_JKs = beam_E_JK
E_JKS_sigma = beam_E_JK_err
AKs,AKs_sigma = self.reddenig(E_JKs,E_JKS_sigma,redIndex=redIndex)
extintion_flag = 3
Ks_0, Ks_0_err = self.red_free_mag(lcmean,lcmean_err,AKs,AKs_sigma)
dist, dist_err = self.dist_module(Ks_0,Ks_0_err,AbsKs)
return [dist, dist_err, FeH, FeH_sigma, Z, Z_sigma, AbsKs, AbsJ, AbsH, E_HKs, E_HKS_sigma, E_JKs, E_JKS_sigma, AKs, AKs_sigma, extintion_flag]
def cartezian_projections(self,d,gal_l,gal_b):
dx = d*np.cos(math.radians(gal_b))*np.cos(math.radians(gal_l))
rx = dx - self.gc_distance
ry = d*np.cos(math.radians(gal_b))*np.sin(math.radians(gal_l))
rz = d*np.sin(math.radians(gal_b))
return rx,ry,rz
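# Note: rx is re-centred on the Galactic centre (the Sun sits at x = gc_distance along
# l = 0), while ry and rz point toward l = 90 deg and the North Galactic Pole, respectively.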
def get_distance(self,magoffset,campain='variability',method='color'):
# setting campain to 'variability' uses the light-curve mean for Ks
# setting campain to 'color' uses Ks from the color campaign
# setting method to 'color' uses colors and BEAM to get the extinction
# setting method to 'BEAM' uses only BEAM to get the extinction
dist_table = pd.DataFrame()
for star in self.rrl_ids:
#if self.rrl_fitparams.loc[star].mag_Ks != -99:
ra = self.all_dat.loc[star,'RA']
dec = self.all_dat.loc[star,'DEC']
gal_l = self.all_dat.loc[star,'gal_l']
gal_b = self.all_dat.loc[star,'gal_b']
period = 1./self.rrl_fitparams.loc[star].Freq
if campain == 'variability':
lcmean = self.all_dat.loc[star].lc_mean
lcmean_err = self.all_dat.loc[star].lc_mean_err
elif campain == 'color':
lcmean = self.all_dat.loc[star].mag_Ks
lcmean_err = self.all_dat.loc[star].er_Ks
else:
raise ValueError(f'{campain} is not a valid setting.')
MagKs = self.all_dat.loc[star].mag_Ks
ErrKs = self.all_dat.loc[star].er_Ks
MagH = self.all_dat.loc[star].mag_H
ErrH = self.all_dat.loc[star].er_H
MagJ = self.all_dat.loc[star].mag_J
ErrJ = self.all_dat.loc[star].er_J
beam_E_JK = self.BEAM_extintion.loc[star].E_JK
beam_E_JK_err = self.BEAM_extintion.loc[star].SigmaE_JK
if method=='color':
params = self.calc_distances_color(lcmean,lcmean_err,MagKs,ErrKs,MagJ,ErrJ,MagH,ErrH,
beam_E_JK,beam_E_JK_err,period,
abundanceMode=1,magoffset=magoffset)
if method=='BEAM':
params = self.calc_distances_BEAM(lcmean,lcmean_err,MagKs,ErrKs,MagJ,ErrJ,MagH,ErrH,
beam_E_JK,beam_E_JK_err,period,
abundanceMode=1,magoffset=magoffset)
x,y,z = self.cartezian_projections(params[0],gal_l,gal_b)
cols = ['RA','DEC','gal_l','gal_b','x','y','z',
'VVVtype','distance','distanceSigma',
'[Fe/H]','[Fe/H]_err','Z','Z_err',
'M_Ks','M_J','M_H','E(H-Ks)','E(H-KS)_err',
'E(J-Ks)','E(J-KS)_err',
'AKs','AKs_err','ExtintionFlag']
dist_table.loc[star,cols] = [ra,dec,gal_l,gal_b,x,y,z]+['RRL']+params
return dist_table
#======== RED CLUMP PEAKS ========#
def get_RC_peaks(self,Rv):
rc_tools = rct.RedClump(Rv)
rc_peaks = rc_tools.find_RC_peaks(plot=False,show=False)
return rc_peaks
def plot_RC_CMD(self,Rv):
rc_tools = rct.RedClump(Rv)
rc_cmd = rc_tools.red_clump_inclination(method='2gaussian',plotHist=False)
if __name__ == "__main__":
import importlib
importlib.reload(sys.modules['math_functions'])
path = '/home/botan/OneDrive/Doutorado/VVV_DATA'
GC_dist = 8178 # +- 13 pc https://www.aanda.org/articles/aa/full_html/2019/05/aa35656-19/aa35656-19.html
d = RRLTools(gc_distance=GC_dist)
d.read_data()
rrl_ids = d.rrl_ids
rrl_dat = d.all_dat.loc[rrl_ids]
distances = d.get_distance(magoffset=0,campain='variability',method='color')
distances_BEAM = d.get_distance(magoffset=0,campain='variability',method='BEAM')
ogle_bulge_rrl = pd.read_csv('/home/botan/OneDrive/Doutorado/VVV_DATA/ogle_iv_bulge/dat/RRab.csv',index_col=0)
ogle_dat = pd.read_csv('/home/botan/OneDrive/Doutorado/VVV_DATA/data/ogle_dat.csv',index_col=0)
ogle_rrl = ogle_dat[ogle_dat.Subtype == 'RRab']
rrl_names = {}
for n,star in enumerate(rrl_ids):
rrl_names[star]= '%s-RRL-%04d'%(star.split('_')[0],n)
# Red Clump distance, color and magnitude peaks.
Rv = 0.689
rc_peaks_path = f'{path}/data/red_clump_peaks_{Rv}.csv'
if not os.path.exists(rc_peaks_path):
Rc_peaks = d.get_RC_peaks(Rv=Rv)
Rc_peaks.to_csv(rc_peaks_path)
else:
Rc_peaks = pd.read_csv(rc_peaks_path, index_col=0)
# Red Clump Peaks cartesian projection
for tile in Rc_peaks.index:
gal_l = Rc_peaks.loc[tile,'tile_central_l']
gal_b = Rc_peaks.loc[tile,'tile_central_b']
RC_dist = Rc_peaks.loc[tile,'RC_peak1_dist']
RC_dist_err = Rc_peaks.loc[tile,'RC_peak1_dist_sigma']
rx,ry,rz = d.cartezian_projections(RC_dist,gal_l,gal_b)
rxErr,ryErr,rzErr = d.cartezian_projections(RC_dist_err,gal_l,gal_b)
# globular clusters from: https://www.physics.mcmaster.ca/~harris/mwgc.dat
# clusters 'NAME': (l, b, d(kpc), r_c(arcmin), r_h(arcmin), [Fe/H])
glob_clusters = {'NGC 6540':(3.28510,-3.31300,5.3,0.03,-999,-1.35),
'NGC 6544' :(5.83815,-2.20354,3.0,0.05,1.21,-1.40),
'NGC 6553' :(5.25024,-3.02296,6.0,0.53,1.03,-0.18),
'Djorg 2' :(2.76357,-2.50848,6.3,0.33,1.05,-0.65),
'Terzan 9' :(3.60314,-1.98878,7.1,0.03,0.78,-1.05),
'Terzan 10':(4.42075,-1.86289,5.8,0.90,1.55,-1.00)}
glob_cluster_df = pd.DataFrame(glob_clusters,index=['l','b','d','r_c','r_d','[Fe/H]']).T
glob_cluster_df.to_csv('glob_cluster.csv')
EB_data = pd.read_csv(f'/home/botan/OneDrive/Doutorado/VVV_DATA/data/ecl_dat.csv', index_col='ID')
BEAM_extintion = pd.read_csv(f'/home/botan/OneDrive/Doutorado/VVV_DATA/data/all_variables_extintion_.csv', index_col='ID')
# match with globular clusters
def sep_2d(l_cat, b_cat, l_targ, b_targ):
sep = np.sqrt((l_targ-l_cat)**2 + (b_targ-b_cat)**2)
return sep
def match(rfactor=3):
match_ids = pd.DataFrame()
for cluster in glob_clusters.keys():
l_cat = glob_clusters[cluster][0]
b_cat = glob_clusters[cluster][1]
r_h = glob_clusters[cluster][4]*u.arcmin
if r_h < 0:
r_h = glob_clusters[cluster][3]*u.arcmin
id_list = {}
for star in rrl_ids:
l_targ = rrl_dat.loc[star,'gal_l']
b_targ = rrl_dat.loc[star,'gal_b']
dist2d = sep_2d(l_cat, b_cat, l_targ, b_targ)*u.deg
if dist2d < r_h.to('deg') * rfactor:
id_list[star]= [dist2d.value / r_h.to('deg').value, 'RRL']
for star in EB_data.index:
l_targ = EB_data.loc[star,'gal_l']
b_targ = EB_data.loc[star,'gal_b']
dist2d = sep_2d(l_cat, b_cat, l_targ, b_targ)*u.deg
if dist2d < r_h.to('deg') * rfactor:
id_list[star]= [dist2d.value / r_h.to('deg').value, 'EB']
for n,id in enumerate(list(id_list.keys())):
match_ids.loc[id,['cluster','sep_factor','n','type']] = [cluster,round(id_list[id][0],1),f'{int(n+1)}',id_list[id][1]]
return match_ids
# make a table for rrl inside globular cluster:
# flag 0 : known RRL
# flag 1 : New RRL
matches = match()
for _ in matches.index:
if matches.loc[_,'type'] == 'RRL':
matches.loc[_,'OID'] = rrl_names[_]
matches.loc[_,'OGLE_ID'] = rrl_dat.loc[_,'OGLE_ID']
matches.loc[_,['d','d_err','[Fe/H]','[Fe/H]_err','E(J-Ks)','E(J-KS)_err','E(H-Ks)','E(H-KS)_err','ExtintionFlag']] = distances.loc[_,['distance','distanceSigma','[Fe/H]', '[Fe/H]_err','E(J-Ks)','E(J-KS)_err','E(H-Ks)','E(H-KS)_err','ExtintionFlag']].values
matches.loc[_,['RA','DEC','gal_l','gal_b','period','amplitude','mag_Ks','er_Ks','mag_J','er_J','mag_H','er_H']] = rrl_dat.loc[_,['RA','DEC','gal_l', 'gal_b','period', 'amplitude','lc_mean','lc_mean_err','mag_J','er_J','mag_H','er_H']].values
matches.loc[_,['J-Ks']] = rrl_dat.loc[_,'mag_J'] - rrl_dat.loc[_,'mag_Ks']
matches.loc[_,['H-Ks']] = rrl_dat.loc[_,'mag_H'] - rrl_dat.loc[_,'mag_Ks']
if matches.loc[_,'type'] == 'EB':
matches.loc[_,['OID','OGLE_ID','RA','DEC','gal_l','gal_b','mag_Ks','er_Ks']] = EB_data.loc[_,['OID','OGLE_ID','RA','DEC','gal_l','gal_b','lc_mean','lc_mean_err']].values
matches.loc[_,['E(J-Ks)','E(J-KS)_err']] = BEAM_extintion.loc[_,['E_JK','SigmaE_JK']].values
matches.loc[_,['period']] = d.all_dat.loc[_,'period']*2
matches.loc[_,['mag_J','er_J','mag_H','er_H']] = d.all_dat.loc[_,['mag_J','er_J','mag_H','er_H']]
matches.loc[_,['J-Ks']] = d.all_dat.loc[_,'mag_J'] - d.all_dat.loc[_,'mag_Ks']
matches.loc[_,['H-Ks']] = d.all_dat.loc[_,'mag_H'] - d.all_dat.loc[_,'mag_Ks']
matches.loc[_,'ExtintionFlag'] = 3
matches.to_csv('rrl_match_glob_clusters.csv',sep='\t')
cluster_distances = {} # distance, sigma_stat, sigma_syst
# Culster distances based on RRL distances:
for cluster in ['NGC 6544', 'Djorg 2', 'Terzan 9', 'Terzan 10']:
ids_cluster = matches.index[matches.cluster == cluster]
msk = matches.loc[ids_cluster,'type'] == 'RRL'
if cluster == 'NGC 6544':
ids_rrl = [_ for _ in ids_cluster[msk] if _ != ids_cluster[msk][3]]
elif cluster == 'Terzan 10':
ids_rrl = [_ for _ in ids_cluster[msk] if _ != 'b308_3_z_14_k_270.76699_-26.09268']
else:
ids_rrl = [_ for _ in ids_cluster[msk]]
if cluster == 'Terzan 9':
cluster_distances[cluster] = ( matches.loc[ids_rrl,'d'].mean(),
matches.loc[ids_rrl,'d'].std(),
np.sqrt(np.power(matches.loc[ids_rrl,'d_err'],2).sum())/(len(ids_rrl)),
matches.loc[ids_rrl,'period'].mean())
else:
cluster_distances[cluster] = ( matches.loc[ids_rrl,'d'].mean(),
matches.loc[ids_rrl,'d'].std(),
np.sqrt(np.power(matches.loc[ids_rrl,'d_err'],2).sum())/(len(ids_rrl)-1),
matches.loc[ids_rrl,'period'].mean())
# Globular Cluster CMD
# axes indices, xlim
plot_par = {'NGC 6544' :([0,0],[0.40,1.8]),
'Djorg 2' :([0,1],[0.25,1.4]),
'Terzan 9' :([1,0],[0.37,1.9]),
'Terzan 10':([1,1],[0.25,2.0])}
extintion = distances['E(J-Ks)'].mean()
reddening = extintion * 0.689
xlabel='(J-Ks)'
ckey = 'mag_J'
arrow_xpos = 0.4
EB_data = pd.read_csv(f'/home/botan/OneDrive/Doutorado/VVV_DATA/data/ecl_dat.csv', index_col='ID')
font_size = 11
plt.rcParams.update({'font.size': font_size})
fig, axes = plt.subplots(2, 2, figsize=(7,7))
fig.subplots_adjust(wspace=0,hspace=0)
for cluster in [_ for _ in list(glob_clusters.keys()) if _ != 'NGC 6553']:
r_h = glob_clusters[cluster][4]*u.arcmin.to('deg')
if r_h < 0:
r_h = glob_clusters[cluster][3]*u.arcmin.to('deg')
rfactor = 3
tiles = []
for _ in matches[matches.cluster == cluster].index:
if _[:4] not in tiles:
tiles.append(_[:4])
for tile in tiles:
tileData = []
chips = [_[:-3] for _ in os.listdir(f'/home/botan/OneDrive/Doutorado/VVV_DATA/data/psf_ts/{tile}/chips/') if _.endswith('.ts')]
for chip in chips:
chipData = pd.read_csv(f'/home/botan/OneDrive/Doutorado/VVV_DATA/data/psf_ts/{tile}/chips/{chip}.ts',index_col='ID')
tileData.append(chipData)
tileData = pd.concat(tileData)
tileData = tileData.drop_duplicates()
color = tileData.mag_J - tileData.mag_Ks
icrs = SkyCoord(ra=tileData.RA, dec=tileData.DEC,unit=(u.deg, u.deg))
gal = icrs.galactic
l_cat = glob_clusters[cluster][0]
b_cat = glob_clusters[cluster][1]
l_targ = gal.l.deg
b_targ = gal.b.deg
dist2d = sep_2d(l_cat, b_cat, l_targ, b_targ)
cluster_ids = tileData.index[dist2d < rfactor*r_h]
mag_cols = [_ for _ in tileData.columns if _[:3] == 'MAG']
err_cols = [_ for _ in tileData.columns if _[:3] == 'ERR']
Ks_mag = tileData.loc[cluster_ids,mag_cols].mean(axis=1)
Ks_err = np.sqrt((tileData.loc[cluster_ids,err_cols]**2).sum(axis=1))/((~tileData.loc[cluster_ids,err_cols].isna()).sum(axis=1) -1 )
# mag and color inside cluster 3xr_h
c_color = color.loc[cluster_ids]
mask = ~c_color.isnull()
c_color = c_color[mask]
c_Ks_mag = Ks_mag[mask]
c_Ks_err = Ks_err[mask]
# mag and color from 2d matched RRL
c_rrl_ids = [_ for _ in matches.index if _[:4] == tile and matches.loc[_,"cluster"] == cluster and matches.loc[_,"type"] == 'RRL']
c_rrl_mag = rrl_dat.loc[c_rrl_ids,'lc_mean']
c_rrl_color = rrl_dat.loc[c_rrl_ids,'mag_J'] - rrl_dat.loc[c_rrl_ids,'mag_Ks']
# mag and color from 2d matched EBs
EB_ids = [_ for _ in matches.index if _[:4] == tile and matches.loc[_,"cluster"] == cluster and matches.loc[_,"type"] == 'EB']
EB_mag = EB_data.loc[EB_ids,'lc_mean']
EB_color = EB_data.loc[EB_ids,'mag_J'] - EB_data.loc[EB_ids,'mag_Ks']
ax1 = plot_par[cluster][0][0]
ax2 = plot_par[cluster][0][1]
#fig, axes = plt.subplots(1, 1, figsize=(7,7))
axes[ax1,ax2].scatter( c_color,
c_Ks_mag,
marker='.',
c='dimgray',
s=10,
alpha=.1,)
axes[ax1,ax2].scatter( c_rrl_color,
c_rrl_mag,
marker='^',
c='red',
label='RRL',
s=30)
for _ in c_rrl_ids:
axes[ax1,ax2].text( s=f'{matches.n[_]}',
x=c_rrl_color.loc[_],
y=c_rrl_mag.loc[_] - 0.6,
c='red',
ha='center',
va='top',
weight="bold")
axes[ax1,ax2].scatter( EB_color,
EB_mag,
marker='s',
c='blue',
label='BE',
s=30)
for _ in EB_ids:
axes[ax1,ax2].text( s=f'{matches.n[_]}',
x=EB_color.loc[_],
y=EB_mag.loc[_] + 0.6,
c='blue',
ha='center',
va='bottom',
weight="bold")
if ax1==0 and ax2==1:
axes[ax1,ax2].legend()
axes[ax1,ax2].set_xlim(0.25,1.99)
axes[ax1,ax2].set_ylim(11.1,17.9)
axes[ax1,ax2].set_xlabel(r'$\mathrm{%s}$'%xlabel)
axes[ax1,ax2].set_ylabel(r'$\mathrm{K_s\ [mag]}$')
axes[ax1,ax2].invert_yaxis()
# reddening vector
axes[ax1,ax2].annotate("", xy=(arrow_xpos+extintion, 11.4+reddening),
xytext=(arrow_xpos, 11.4),
arrowprops=dict(arrowstyle="->", color='r'))
axes[ax1,ax2].text(0.5,0.04, f'{tile} | {cluster}',c='k',ha='center',transform=axes[ax1,ax2].transAxes)
for ax in fig.get_axes():
ax.label_outer()
plt.savefig(f'CMD_GC_{ckey}.png',dpi=200,bbox_inches='tight',pad_inches=0.05)
plt.show()
# b309 Red Clump Dust Lane
plt.rcParams.update({'font.size': 13})
tiles = sorted(os.listdir(f'{path}/data/psf_ts/'))
all_dat = pd.read_csv(f'/home/botan/OneDrive/Doutorado/VVV_DATA/data/all_variables_match_vsx_ogle_gaia.csv',index_col='ID')
all_var_extintion = pd.read_csv(f'/home/botan/OneDrive/Doutorado/VVV_DATA/data/all_variables_extintion.csv',index_col='ID')
AKs = 0.689 * all_var_extintion.E_JK #http://mill.astro.puc.cl/BEAM/coffinfo.php
tileData = []
tile = 'b309'
chips = [_[:-3] for _ in os.listdir(f'{path}/data/psf_ts/{tile}/chips/') if _.endswith('.ts')]
for chip in chips:
chipData = pd.read_csv(f'{path}/data/psf_ts/{tile}/chips/{chip}.ts',index_col='ID')
tileData.append(chipData[['RA','DEC','mag_J','mag_Ks']])
tileData = pd.concat(tileData)
tileData = tileData.drop_duplicates()
icrs = SkyCoord(ra=tileData.RA, dec=tileData.DEC,unit=(u.deg, u.deg))
gal = icrs.galactic
l = gal.l.deg
b = gal.b.deg
tileData.loc[tileData.index, 'gal_l'] = l
tileData.loc[tileData.index, 'gal_b'] = b
color = tileData.mag_J - tileData.mag_Ks
msk = ~color.isnull()
mag = tileData.mag_Ks
mag = mag[msk]
color = color[msk]
#RC selection
rc_msk1 = ((color > 0.8) & (color < 1.35) & (mag < 14.5))
rc_msk2 = ((color > 1.35) & (mag < 14.5))
bins=(400,300)
cmap = copy.copy(mpl.cm.get_cmap("jet"))# plt.cm.jet
cmap.set_bad('w', 1.)
cmap_multicolor = copy.copy(mpl.cm.get_cmap("jet")) # plt.cm.jet
cmap_multicolor.set_bad('w', 1.)
N, xedges, yedges = np.histogram2d(color,mag,bins=bins)
fig, axes = plt.subplots(2,3, figsize=(9,6), gridspec_kw={'width_ratios': [10,10,2],'hspace':0.2,'wspace':0.1})
img = axes[0,0].imshow(np.log10(N.T),
origin='lower',
extent=[xedges[0], xedges[-1], yedges[0], yedges[-1]],
aspect='auto',
interpolation='nearest',
cmap=cmap)
axes[0,0].invert_yaxis()
axes[0,0].set_xlabel(r'$\mathrm{(J-K_s)}$')
axes[0,0].set_ylabel(r'$\mathrm{K_s \ [mag]}$')
axes[0,0].xaxis.set_label_position('top')
left, bottom, width, height = (0.8, mag.min(), color.max()-0.8, 14.5-mag.min())
rect = plt.Rectangle((left, bottom), width, height,
facecolor="black", alpha=0.3)
axes[0,0].add_patch(rect)
axes[0,0].vlines(x=1.35,ymin=mag.min(), ymax=14.5, color='k',linestyles='dashed',lw=1)
axes[0,0].set_xlim(0.01,2.8)
axes[0,0].tick_params(top=True, bottom=False, left=True, right=False, labelleft=True, labelright=False, labelbottom=False, labeltop=True)
indexes = [_ for _ in all_dat.index if _ in AKs.index and _[:4] == 'b309']
color_morm = AKs.loc[indexes]/AKs.loc[indexes].max()
axes[0,1].scatter(all_dat.loc[indexes].gal_l, all_dat.loc[indexes].gal_b,
c=1-color_morm, marker='s', s=50, lw = 0,
cmap='inferno', alpha=.75)
axes[0,1].invert_xaxis()
axes[0,1].set_xlim(tileData['gal_l'].max(),tileData['gal_l'].min())
axes[0,1].set_ylim(tileData['gal_b'].min(),tileData['gal_b'].max())
axes[0,1].tick_params(top=True, bottom=False, left=False, right=True, labelleft=False, labelright=True, labelbottom=False, labeltop=True)
axes[0,1].set_xlabel(r'$l\ \mathrm{[graus]}$')
axes[0,1].set_ylabel(r'$b\ \mathrm{[graus]}$')
axes[0,1].xaxis.set_label_position('top')
axes[0,1].yaxis.set_label_position('right')
#colorbar
a = np.array([[AKs.loc[indexes].max(),AKs.loc[indexes].min()]])
cmap = plt.get_cmap('inferno').reversed()
cax = plt.axes([0.91, 0.56, 0.01, 0.3])
img = axes[0,2].imshow(a, cmap=cmap)
axes[0,2].set_visible(False)
cbar = plt.colorbar(img, orientation="vertical", cax=cax)
cbar.ax.set_ylabel('$A_{Ks}$', rotation=90)
bins=(150,150)
cmap = copy.copy(mpl.cm.get_cmap('inferno'))# plt.cm.jet
cmap.set_bad('w', 1.)
cmap_multicolor = copy.copy(mpl.cm.get_cmap('inferno')) # plt.cm.jet
cmap_multicolor.set_bad('w', 1.)
N, xedges, yedges = np.histogram2d(tileData['gal_l'].loc[color[rc_msk1].index],tileData['gal_b'].loc[color[rc_msk1].index],bins=bins)
axes[1,0].imshow(np.log10(N.T),
origin='lower',
extent=[xedges[0], xedges[-1], yedges[0], yedges[-1]],
aspect='auto',
interpolation='nearest',
cmap=cmap)
axes[1,0].invert_xaxis()
axes[1,0].set_xlim(tileData['gal_l'].max(),tileData['gal_l'].min())
axes[1,0].set_ylim(tileData['gal_b'].min(),tileData['gal_b'].max())
axes[1,0].text(0.5,1.05, r'$\mathrm{0,8 < (J-K_s) < 1,35}$',c='k',ha='center',transform=axes[1,0].transAxes)
axes[1,0].tick_params(top=False, bottom=True, left=True, right=False, labelleft=True, labelright=False, labelbottom=True, labeltop=False)
axes[1,0].set_xlabel(r'$l\ \mathrm{[graus]}$')
axes[1,0].set_ylabel(r'$b\ \mathrm{[graus]}$')
N, xedges, yedges = np.histogram2d(tileData['gal_l'].loc[color[rc_msk2].index],tileData['gal_b'].loc[color[rc_msk2].index],bins=bins)
img = axes[1,1].imshow(np.log10(N.T),
origin='lower',
extent=[xedges[0], xedges[-1], yedges[0], yedges[-1]],
aspect='auto',
interpolation='nearest',
cmap=cmap)
axes[1,1].invert_xaxis()
axes[1,1].set_xlim(tileData['gal_l'].max(),tileData['gal_l'].min())
axes[1,1].set_ylim(tileData['gal_b'].min(),tileData['gal_b'].max())
axes[1,1].text(0.5,1.05, r'$\mathrm{(J-K_s) > 1,35}$',c='k',ha='center',transform=axes[1,1].transAxes)
axes[1,1].tick_params(top=False, bottom=True, left=False, right=True, labelleft=False, labelright=True, labelbottom=True, labeltop=False)
axes[1,1].set_xlabel(r'$l\ \mathrm{[graus]}$')
axes[1,1].set_ylabel(r'$b\ \mathrm{[graus]}$')
axes[1,1].yaxis.set_label_position('right')
cbar_ax = plt.axes([0.91, 0.14, 0.01, 0.3])
cb = fig.colorbar(img,
ticks=[0, 1, 2, 3],
format=r'$10^{%i}$',
shrink=0.6 ,
cax=cbar_ax)
cb.set_label(r'n\'{u}mero por pixel',rotation=90)
axes[0,2].set_visible(False)
axes[1,2].set_visible(False)
plt.savefig('b309_RC_dust_lane.png',dpi=300,bbox_inches = 'tight',pad_inches=0.05)
plt.show()
# RRL Bailey diagram and histogram of periods
# <NAME> (Navarrete 2015, ZOROTOVIC, 2010)
def OoI_curve(period):
A_V = -2.627 - 22.046*np.log10(period) - 30.876*(np.log10(period))**2 # ZOROTOVIC, 2010
A_OoI = 0.32*np.power(A_V,2./3) # conversion to Ks mag Navarrete 2015
return A_OoI
def OoII_curve(period):
A_J = 0.064 - 2.481*np.log10(period) +10.345*(np.log10(period))**3 #Navarrete 2015
A_OoII = np.power(A_J/2.6,2./3) # conversion to Ks mag Navarrete 2015
return A_OoII
def OoI_ogle(period):
#A_V = -2.627 - 22.046*np.log10(period) - 30.876*(np.log10(period))**2 # ZOROTOVIC, 2010
#A_I = A_V/1.6
A_I = -1.64 - 13.78*np.log10(period) - 19.30*(np.log10(period))**2 # Kunder 2013 ; https://iopscience.iop.org/article/10.1088/0004-6256/145/2/33
return A_I
def OoII_ogle(period):
#_V = -2.627 - 22.046*(np.log10(period)-0.03) - 30.876*(np.log10(period)-0.03)**2 # ZOROTOVIC, 2010
#_I = A_V/1.6
A_I = -0.89 - 11.46*(np.log10(period)) - 19.30*(np.log10(period))**2 # Kunder 2013 ; https://iopscience.iop.org/article/10.1088/0004-6256/145/2/33
return A_I
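# Illustrative sketch (added): evaluating the Oosterhoff loci on a small period grid
# shows how an observed (P, amplitude) pair is compared against the OoI and OoII
# sequences in the Bailey diagram below. The grid is arbitrary and only for demonstration.
_p_demo = np.linspace(0.45, 0.65, 5)
_amp_OoI_Ks = OoI_curve(_p_demo)     # expected Ks-band amplitude along the OoI locus
_amp_OoII_Ks = OoII_curve(_p_demo)   # expected Ks-band amplitude along the OoII locus
_amp_OoI_I = OoI_ogle(_p_demo)       # same loci in the OGLE I band
_amp_OoII_I = OoII_ogle(_p_demo)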
import matplotlib.gridspec as gridspec
from matplotlib import cm
from matplotlib.colors import Normalize
from scipy.interpolate import interpn
p1 = np.linspace(0.4,0.7,100)
p2 = np.linspace(0.55,0.9,100)
periods = 1./d.rrl_fitparams.Freq
amplitudes = d.rrl_fitparams.Amplitude
fig = plt.figure(figsize=[7,7],tight_layout=True)
gs = gridspec.GridSpec(2, 2, height_ratios=[2,1.5])
ax1 = fig.add_subplot(gs[0,0])
cmap = copy.copy(mpl.cm.get_cmap("viridis"))# plt.cm.jet
cmap.set_bad('w', 1.)
cmap_multicolor = copy.copy(mpl.cm.get_cmap("viridis")) # plt.cm.jet
cmap_multicolor.set_bad('w', 1.)
bins=(30,30)
N, xedges, yedges = np.histogram2d(periods,amplitudes,bins=bins)
Z = interpn( ( 0.5*(xedges[1:] + xedges[:-1]) , 0.5*(yedges[1:]+yedges[:-1]) ) , N , np.vstack([periods,amplitudes]).T , method = "splinef2d", bounds_error = False)
#To be sure to plot all data
Z[np.where(np.isnan(Z))] = 0.0
# Sort the points by density, so that the densest points are plotted last
idx = Z.argsort()
x, y, z = periods[idx], amplitudes[idx], Z[idx]
ax1.scatter(x,
y,
marker='.',
s=20,
c=z,
cmap=cmap,
ec='none',
alpha=1)
ax1.plot(p1,OoI_curve(p1),'r-')
ax1.plot(p2,OoII_curve(p2),'r--')
ax1.text(s=r'$\mathrm{Nossas\ RRL}$', x=0.5, y=1.02,transform=ax1.transAxes,ha='center')
ax1.set_ylabel('$\mathrm{Amplitude\ K_s\ [mag]}$')
ax1.set_xlabel('$\mathrm{P\ [dias]}$')
ax1.set_xlim(0.21,0.99)
ax1.set_ylim(0.0,1)
bins=(30,30)
N, xedges, yedges = np.histogram2d(ogle_bulge_rrl.P,ogle_bulge_rrl.A_I,bins=bins)
Z = interpn( ( 0.5*(xedges[1:] + xedges[:-1]) , 0.5*(yedges[1:]+yedges[:-1]) ) , N , np.vstack([ogle_bulge_rrl.P,ogle_bulge_rrl.A_I]).T , method = "splinef2d", bounds_error = False)
#To be sure to plot all data
Z[np.where(np.isnan(Z))] = 0.0
# Sort the points by density, so that the densest points are plotted last
idx = Z.argsort()
x, y, z = ogle_bulge_rrl.P[idx], ogle_bulge_rrl.A_I[idx], Z[idx]
ax2 = fig.add_subplot(gs[0,1])
ax2.scatter(x,
y,
marker='.',
s=10,
c=z,
cmap=cmap,
ec='none',
alpha=1)
ax2.plot(p1,OoI_ogle(p1),'r-')
ax2.plot(p2,OoII_ogle(p2),'r--')
ax2.text(s=r'$\mathrm{RRab\ do\ OGLE}$', x=0.5, y=1.02,transform=ax2.transAxes,ha='center')
ax2.set_ylabel('$\mathrm{Amplitude\ I\ [mag]}$')
ax2.set_xlabel('$\mathrm{P\ [dias]}$')
ax2.set_xlim(0.21,0.99)
ax2.set_ylim(0.0,1)
ax3 = fig.add_subplot(gs[1,:])
weights = np.ones_like(periods)
import numpy as np
from ipyatom import process_vstruct
from ipyatom.geometry3d_utils import transform_to_crystal
from scipy.spatial.ckdtree import cKDTree
import scipy.cluster.hierarchy as hcluster
def compute_vacancies(ccoords, cell, center, grid_spacing=0.2, include_periodic=True,
min_dist=3.5, remove_dups=True, ignore_vacuum=None):
""" compute vacancy positions in a unit cell
Parameters
----------
ccoords: list or numpy.array((n, 3))
cell: list or numpy.array((3, 3))
center: list or numpy.array((3,))
grid_spacing: float
include_periodic: bool
include evaluation of distances to periodic atom images
min_dist: float
minimum distance to be considered a vacancy
remove_dups: bool
remove duplicate vacancy points (by cluster centering)
ignore_vacuum: float or None
if not None, crop the search space in the c-direction to the min/max atomic position +/- ignore_vacuum
Returns
-------
"""
a, b, c = np.asarray(cell)
origin = np.asarray(center) - 0.5 * (a + b + c)
ccoords = np.asarray(ccoords)
if ignore_vacuum is not None:
fcoords = transform_to_crystal(ccoords, a, b, c, origin)
cmin, cmax = fcoords[:, 2].min() + ignore_vacuum, fcoords[:, 2].max() - ignore_vacuum
# TODO this needs to be done more rigorously (get some spurious vacancies at surfaces)
if cmin < -0.0000001:
cmin += 1
cmax += 1
cmod = abs(cmax - cmin)
if cmod < 0.1:
cmax += 0.1
cmod = abs(cmax - cmin)
#print(cmin, cmax, cmod)
else:
cmin, cmax, cmod = (0., 1., 1.)
vcoords = []
for i in np.linspace(0., 1., int(np.linalg.norm(a)/grid_spacing)):
for j in np.linspace(0., 1., int(np.linalg.norm(b)/grid_spacing)):
import numpy as np
import pytest
from scipy.stats import (bootstrap, BootstrapDegenerateDistributionWarning,
monte_carlo_test, permutation_test)
from numpy.testing import assert_allclose, assert_equal, suppress_warnings
from scipy import stats
from scipy import special
from .. import _resampling as _resampling
from scipy._lib._util import rng_integers
from scipy.optimize import root
def test_bootstrap_iv():
message = "`data` must be a sequence of samples."
with pytest.raises(ValueError, match=message):
bootstrap(1, np.mean)
message = "`data` must contain at least one sample."
with pytest.raises(ValueError, match=message):
bootstrap(tuple(), np.mean)
message = "each sample in `data` must contain two or more observations..."
with pytest.raises(ValueError, match=message):
bootstrap(([1, 2, 3], [1]), np.mean)
message = ("When `paired is True`, all samples must have the same length ")
with pytest.raises(ValueError, match=message):
bootstrap(([1, 2, 3], [1, 2, 3, 4]), np.mean, paired=True)
message = "`vectorized` must be `True` or `False`."
with pytest.raises(ValueError, match=message):
bootstrap(1, np.mean, vectorized='ekki')
message = "`axis` must be an integer."
with pytest.raises(ValueError, match=message):
bootstrap(([1, 2, 3],), np.mean, axis=1.5)
message = "could not convert string to float"
with pytest.raises(ValueError, match=message):
bootstrap(([1, 2, 3],), np.mean, confidence_level='ni')
message = "`n_resamples` must be a positive integer."
with pytest.raises(ValueError, match=message):
bootstrap(([1, 2, 3],), np.mean, n_resamples=-1000)
message = "`n_resamples` must be a positive integer."
with pytest.raises(ValueError, match=message):
bootstrap(([1, 2, 3],), np.mean, n_resamples=1000.5)
message = "`batch` must be a positive integer or None."
with pytest.raises(ValueError, match=message):
bootstrap(([1, 2, 3],), np.mean, batch=-1000)
message = "`batch` must be a positive integer or None."
with pytest.raises(ValueError, match=message):
bootstrap(([1, 2, 3],), np.mean, batch=1000.5)
message = "`method` must be in"
with pytest.raises(ValueError, match=message):
bootstrap(([1, 2, 3],), np.mean, method='ekki')
message = "`method = 'BCa' is only available for one-sample statistics"
def statistic(x, y, axis):
mean1 = np.mean(x, axis)
mean2 = np.mean(y, axis)
return mean1 - mean2
with pytest.raises(ValueError, match=message):
bootstrap(([.1, .2, .3], [.1, .2, .3]), statistic, method='BCa')
message = "'herring' cannot be used to seed a"
with pytest.raises(ValueError, match=message):
bootstrap(([1, 2, 3],), np.mean, random_state='herring')
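def _example_percentile_bootstrap():
    # Hedged sketch (added, not one of the test cases): shows the calling convention
    # that test_bootstrap_iv validates. The sample, seed, resample count and method
    # are arbitrary illustrative choices.
    rng = np.random.default_rng(0)
    sample = rng.normal(size=100)
    res = bootstrap((sample,), np.mean, confidence_level=0.9,
                    n_resamples=999, method='percentile', random_state=rng)
    return res.confidence_interval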
@pytest.mark.parametrize("method", ['basic', 'percentile', 'BCa'])
@pytest.mark.parametrize("axis", [0, 1, 2])
def test_bootstrap_batch(method, axis):
# for one-sample statistics, batch size shouldn't affect the result
np.random.seed(0)
x = np.random.rand(10, 11, 12)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
@author: <NAME>, CWI, Amsterdam
<EMAIL>
This script computes reconstructions from the full
data of one of the data sets described in
"A Cone-Beam X-Ray CT Data Collection Designed for Machine Learning" by
<NAME>, <NAME>, <NAME>,
<NAME>, <NAME>, <NAME>
"""
import numpy as np
import astra
import os
import imageio
import time
import matplotlib.pyplot as plt
import nesterov_gradient
from scipy.interpolate import RegularGridInterpolator as rgi
def rotate_astra_vec_geom(vecs, theta):
s = np.asmatrix(vecs[:,0:3])
d = np.asmatrix(vecs[:,3:6])
u = np.asmatrix(vecs[:,6:9])
v = np.asmatrix(vecs[:,9:12])
du = d + u
dv = d + v
rot_mat = np.matrix([[np.cos(theta), -np.sin(theta), 0],
[np.sin(theta), np.cos(theta), 0],
[0, 0, 1]])
s = s * rot_mat.transpose()
d = d * rot_mat.transpose()
du = du * rot_mat.transpose()
dv = dv * rot_mat.transpose()
u = du - d
v = dv - d
vecs = np.concatenate((np.asarray(s), np.asarray(d), np.asarray(u), np.asarray(v)), axis=1)
return vecs
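# Hedged usage sketch (added, illustrative only): rotating a single cone-beam geometry
# row by 90 degrees about the z-axis maps a source on the +x axis onto the +y axis and
# leaves the z-components untouched. The vector below is made up purely for demonstration.
_demo_vec = np.zeros((1, 12))
_demo_vec[0, 0] = 1.0    # source at (1, 0, 0)
_demo_vec[0, 3] = -1.0   # detector centre at (-1, 0, 0)
_demo_vec[0, 6] = 1.0    # detector u-axis along x
_demo_vec[0, 10] = 1.0   # detector v-axis y-component
_demo_rotated = rotate_astra_vec_geom(_demo_vec, np.pi / 2)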
#### user defined settings ####################################################
# select the ID of the sample you want to reconstruct
walnut_id = 1
# define a sub-sampling factor in angular direction
# (all reference reconstructions are computed with full angular resolution)
angluar_sub_sampling = 1
# select the number of voxels per mm in one direction (higher = larger res)
# volume size in one direction will be 50 * voxel_per_mm + 1
voxel_per_mm = 10
# the number of slices to be extracted will be number of voxels in one direction
# times this factor
radial_slice_fac = np.sqrt(2)
# to avoid artefacts from the radial slicing, we compute multiple iterative
# reconstructions with rotated geometries and only extract the radial slices that are close
# to 0 and 90 degrees. n_div is the number of reconstructions
n_div = 24
# we enter here some intrinsic details of the dataset needed for our reconstruction scripts
# set the variable "data_path" to the path where the dataset is stored on your own workstation
data_path = '/bigstore/felix/Walnuts/'
# set the variable "recon_path" to the path where you would like to store the
# reconstructions you compute
rad_slice_path = '/bigstore/felix/WalnutsRadialSlices/'
# set index of gpu to use
gpu_index = 3;
astra.astra.set_gpu_index(gpu_index)
print('computing Walnut', walnut_id, ',on GPU', gpu_index, flush=True)
#### general settings #########################################################
# projection index
# there are in fact 1201, but the last and first one come from the same angle
projs_idx = range(0,1200, angluar_sub_sampling)
nb_projs_orbit = len(projs_idx)
projs_name = 'scan_{:06}.tif'
dark_name = 'di000000.tif'
flat_name = ['io000000.tif', 'io000001.tif']
vecs_name = 'scan_geom_corrected.geom'
projs_rows = 972
projs_cols = 768
# transformation to apply to each image, we need to get the image from
# the way the scanner reads it out into to way described in the projection
# geometry
trafo = lambda image : np.transpose(np.flipud(image))
# size of the reconstruction volume in voxels
n_x = 50 * voxel_per_mm + 1
# size of a cubic voxel in mm
vox_sz = 1/voxel_per_mm
# number of radial slices to be extracted
n_rad = int(np.round(n_x * radial_slice_fac))
# angles of radial slices to be extracted
theta = np.linspace(0,np.pi, n_rad, False)
### set up angle division
center_angles = np.linspace(0, np.pi/2, 2*n_div+1)[1::2]
angle_div_half_width = (center_angles[1] - center_angles[0])/2 + np.pi * 10**(-16)
#### FDK reconstructions ######################################################
t_fdk = time.time();
for orbit_id in [1,2,3]: # loop over orbits
### load data #############################################################
t = time.time();
print('load data for oribit', orbit_id, flush=True)
# we add the info about walnut and orbit ID
data_path_full = os.path.join(data_path, 'Walnut{}'.format(walnut_id), 'Projections', 'tubeV{}'.format(orbit_id))
# create the numpy array which will receive projection data from tiff files
projs = np.zeros((len(projs_idx), projs_rows, projs_cols), dtype=np.float32)
# load the numpy array describing the scan geometry from file
vecs = np.loadtxt(os.path.join(data_path_full, vecs_name))
# get the positions we need
vecs = vecs[projs_idx]
# load flat-field and dark-fields
# there are two flat-field images (taken before and after acquisition), we simply average them
dark = trafo(imageio.imread(os.path.join(data_path_full, dark_name)))
flat = np.zeros((2, projs_rows, projs_cols), dtype=np.float32)
for i, fn in enumerate(flat_name):
flat[i] = trafo(imageio.imread(os.path.join(data_path_full, fn)))
flat = np.mean(flat,axis=0)
# load projection data
for i in range(len(projs_idx)):
projs[i] = trafo(imageio.imread(os.path.join(data_path_full, projs_name.format(projs_idx[i]))))
print(np.round_(time.time() - t, 3), 'sec elapsed', flush=True)
### pre-process data ######################################################
t = time.time();
print('pre-process data', flush=True)
# subtract the dark field, divide by the flat field, and take the negative log to linearize the data according to the Beer-Lambert law
projs -= dark
projs /= (flat - dark)
np.log(projs, out=projs)
np.negative(projs, out=projs)
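# Added note: with raw counts I, dark frame D and flat frame F, the Beer-Lambert law
# I = I0 * exp(-p) gives the attenuation line integral as
#     p = -log( (I - D) / (F - D) ),
# which is exactly the subtract / divide / negative-log sequence applied to projs above.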
# we need to apply some transformations to the projections to get them from
# the way the scanner reads it out into the way described in the projection
# geometry and used by ASTRA
projs = projs[::-1,...]
projs = np.transpose(projs, (1,0,2))
projs = np.ascontiguousarray(projs)
print(np.round_(time.time() - t, 3), 'sec elapsed')
### FDK reconstructions on single slices
# numpy array holding the reconstruction volume
vol_rec = np.zeros((n_x, n_x, 1), dtype=np.float32)
# we need to specify the details of the reconstruction space to ASTRA
# this is done by a "volume geometry" type of structure, in the form of a Python dictionary
# by default, ASTRA assumes a voxel size of 1, we need to scale the reconstruction space here by the actual voxel size
vol_geom = astra.create_vol_geom((n_x, 1, n_x))
vol_geom['option']['WindowMinX'] = vol_geom['option']['WindowMinX'] * vox_sz
vol_geom['option']['WindowMaxX'] = vol_geom['option']['WindowMaxX'] * vox_sz
vol_geom['option']['WindowMinY'] = vol_geom['option']['WindowMinY'] * vox_sz
vol_geom['option']['WindowMaxY'] = vol_geom['option']['WindowMaxY'] * vox_sz
vol_geom['option']['WindowMinZ'] = vol_geom['option']['WindowMinZ'] * vox_sz
vol_geom['option']['WindowMaxZ'] = vol_geom['option']['WindowMaxZ'] * vox_sz
# register both volume and projection geometries and arrays to ASTRA
vol_id = astra.data3d.link('-vol', vol_geom, vol_rec)
# construct full path for storing the results
rad_slice_path_full = os.path.join(rad_slice_path, 'Walnut{}'.format(walnut_id))
# create the directory in case it doesn't exist yet
if not os.path.exists(rad_slice_path_full):
os.makedirs(rad_slice_path_full)
for i_div in range(n_div):
t = time.time();
print('computing FDK reconstruction', i_div+1, '/', n_div, flush=True)
# rotate astra geometry
theta_rot = center_angles[i_div]
vecs_rot = rotate_astra_vec_geom(vecs, - theta_rot)
# numpy array holding the reconstruction volume
vol_rec = np.zeros((n_x, n_x, n_x), dtype=np.float32)
# we need to specify the details of the reconstruction space to ASTRA
# this is done by a "volume geometry" type of structure, in the form of a Python dictionary
# by default, ASTRA assumes a voxel size of 1, we need to scale the reconstruction space here by the actual voxel size
vol_geom = astra.create_vol_geom((n_x, n_x, n_x))
vol_geom['option']['WindowMinX'] = vol_geom['option']['WindowMinX'] * vox_sz
vol_geom['option']['WindowMaxX'] = vol_geom['option']['WindowMaxX'] * vox_sz
vol_geom['option']['WindowMinY'] = vol_geom['option']['WindowMinY'] * vox_sz
vol_geom['option']['WindowMaxY'] = vol_geom['option']['WindowMaxY'] * vox_sz
vol_geom['option']['WindowMinZ'] = vol_geom['option']['WindowMinZ'] * vox_sz
vol_geom['option']['WindowMaxZ'] = vol_geom['option']['WindowMaxZ'] * vox_sz
# we need to specify the details of the projection space to ASTRA
# this is done by a "projection geometry" type of structure, in the form of a Python dictionary
proj_geom_rot = astra.create_proj_geom('cone_vec', projs_rows, projs_cols, vecs_rot)
# register both volume and projection geometries and arrays to ASTRA
vol_id = astra.data3d.link('-vol', vol_geom, vol_rec)
proj_id = astra.data3d.link('-sino', proj_geom_rot, projs)
# finally, create an ASTRA configuration.
# this configuration dictionary setups an algorithm, a projection and a volume
# geometry and returns a ASTRA algorithm, which can be run on its own
cfg_fdk = astra.astra_dict('FDK_CUDA')
cfg_fdk['ProjectionDataId'] = proj_id
cfg_fdk['ReconstructionDataId'] = vol_id
cfg_fdk['option'] = {}
cfg_fdk['option']['ShortScan'] = False
alg_id = astra.algorithm.create(cfg_fdk)
# run FDK algorithm
astra.algorithm.run(alg_id, 1)
# release memory allocated by ASTRA structures
astra.algorithm.delete(alg_id)
astra.data3d.delete(proj_id)
astra.data3d.delete(vol_id)
print(np.round_(time.time() - t, 3), 'sec elapsed')
### extract radial slices
# set up interpolation
x_gr = np.linspace(-1.0, 1.0, n_x)
interpolator = rgi((x_gr,x_gr,x_gr), vol_rec)
x_rad = np.zeros((n_x, n_x))
y_rad = np.zeros((n_x, n_x))
z_rad = np.zeros((n_x, n_x))
for i_theta in range(n_rad):
# check if this slice should be extracted from this volume
extract_slice = abs(theta[i_theta] - theta_rot) < angle_div_half_width
extract_slice = extract_slice | (abs(theta[i_theta] - (theta_rot + np.pi/2)) < angle_div_half_width)
if extract_slice:
for i_z in range(n_x):
x_rad[:,i_z] = x_gr[i_z]
y_rad[:,i_z] = x_gr * np.cos(theta[i_theta] - theta_rot)
z_rad[:,i_z] = x_gr * -np.sin(theta[i_theta] - theta_rot)
rad_slice = (interpolator(np.vstack((x_rad.flatten(), y_rad.flatten(), z_rad.flatten())).T)).reshape([n_x,n_x])
rad_slice = np.float32(rad_slice.T)
# save slice
slice_path = os.path.join(rad_slice_path_full, 'fdk_pos{}_ass{}_vmm{}_nut{:02}_{:03}.tiff'.format(
orbit_id, angluar_sub_sampling, voxel_per_mm, walnut_id, i_theta))
imageio.imwrite(slice_path, rad_slice)
print('all FDK reconstructions completed,', np.round_(time.time() - t_fdk, 3), 'sec elapsed')
#### iterative reconstructions of combined data ###############################
### load and pre-process data #################################################
t = time.time();
print('load and pre-process data from all orbits', flush=True)
# we add the info about walnut
data_path_full = os.path.join(data_path, 'Walnut{}'.format(walnut_id), 'Projections')
# Create the numpy array which will receive projection data from tiff files
projs = np.zeros((projs_rows, 0, projs_cols), dtype=np.float32)
# And create the numpy array receiving the motor positions read from the geometry file
vecs = np.zeros((0, 12), dtype=np.float32)
# Loop over the subset of orbits we want to load at the same time
for orbit_id in [1,2,3]:
orbit_data_path = os.path.join(data_path_full, 'tubeV{}'.format(orbit_id))
# load the numpy array describing the scan geometry of the orbit from file
vecs_orbit = np.loadtxt(os.path.join(orbit_data_path, vecs_name))
# get the positions we need and write into vecs
vecs = np.concatenate((vecs, vecs_orbit[projs_idx]), axis=0)
# load flat-field and dark-fields
# there are two flat-field images (taken before and after acquisition), we simply average them
dark = trafo(imageio.imread(os.path.join(orbit_data_path, dark_name)))
flat = np.zeros((2, projs_rows, projs_cols), dtype=np.float32)
for i, fn in enumerate(flat_name):
flat[i] = trafo(imageio.imread(os.path.join(orbit_data_path, fn)))
flat = np.mean(flat,axis=0)
# load projection data directly on the big projection array
projs_orbit = np.zeros((nb_projs_orbit, projs_rows, projs_cols), dtype=np.float32)
for i in range(len(projs_idx)):
projs_orbit[i] = trafo(imageio.imread(os.path.join(orbit_data_path, projs_name.format(projs_idx[i]))))
# subtract the dark field, divide by the flat field, and take the negative log to linearize the data according to the Beer-Lambert law
projs_orbit -= dark
projs_orbit /= (flat - dark)
# take negative log
np.log(projs_orbit, out=projs_orbit)
np.negative(projs_orbit, out=projs_orbit)
#!/usr/bin/env python
#
# Copyright (c) 2018 10X Genomics, Inc. All rights reserved.
#
"""
Simple Good-Turing estimator.
Based on S implementation in
<NAME> & <NAME> (1995) Good-turing frequency estimation without tears,
Journal of Quantitative Linguistics, 2:3, 217-237, DOI: 10.1080/09296179508590051
"""
import numpy as np
import scipy.stats as sp_stats
import itertools
class SimpleGoodTuringError(Exception):
pass
def _averaging_transform(r, nr):
d = np.concatenate((np.ones(1, dtype=int), np.diff(r)))
dr = np.concatenate((
0.5 * (d[1:] + d[0:-1]),
np.array((d[-1],), dtype=float),
))
return nr.astype(float)/dr
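# Added note: for observed frequencies r_1 < r_2 < ... the transform above implements
# Gale & Sampson's averaging rule
#     Z_r = N_r / (0.5 * (r_next - r_prev)),
# spreading each frequency-of-frequencies count over the gap to its neighbours so that
# a straight line can be fitted through log(Z_r) versus log(r).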
def _rstest(r, coef):
return r * np.power(1 + 1/r, 1 + coef)
def simple_good_turing(xr, xnr):
"""Make a Simple Good-Turing estimate of the frequencies.
Args:
xr (np.array(int)): Non-zero item frequencies
xnr (np.array(int)): Non-zero frequencies of frequencies
Returns:
(rstar (np.array(float)), p0 (float)):
rstar: The adjusted non-zero frequencies
p0: The total probability of unobserved items
"""
xr = xr.astype(float)
xnr = xnr.astype(float)
xN = np.sum(xr*xnr)
# Get Linear Good-Turing estimate
xnrz = _averaging_transform(xr, xnr)
slope, intercept, _, _, _ = sp_stats.linregress(np.log(xr), np.log(xnrz))
if slope > -1:
raise SimpleGoodTuringError("The log-log slope is > -1 (%d); the SGT estimator is not applicable to these data." % slope)
xrst = _rstest(xr,slope)
xrstrel = xrst/xr
# Get traditional Good-Turing estimate
xrtry = xr == np.concatenate((xr[1:]-1, np.zeros(1)))
xrstarel = np.zeros(len(xr))
xrstarel[xrtry] = (xr[xrtry]+1) / xr[xrtry] * \
np.concatenate((xnr[1:], np.zeros(1)))[xrtry] / xnr[xrtry]
# Determine when to switch from GT to LGT estimates
tursd = np.ones(len(xr))
for i in range(len(xr)):
if xrtry[i]:
tursd[i] = float(i+2) / xnr[i] * np.sqrt(xnr[i+1] * (1 + xnr[i+1]/xnr[i]))
xrstcmbrel = np.zeros(len(xr))
useturing = True
for r in range(len(xr)):
if not useturing:
xrstcmbrel[r] = xrstrel[r]
else:
if np.abs(xrstrel[r]-xrstarel[r]) * (1+r)/tursd[r] > 1.65:
xrstcmbrel[r] = xrstarel[r]
else:
useturing = False
xrstcmbrel[r] = xrstrel[r]
# Renormalize the probabilities for observed objects
sumpraw = np.sum(xrstcmbrel * xr * xnr / xN)
xrstcmbrel = xrstcmbrel * (1 - xnr[0] / xN) / sumpraw
p0 = xnr[0]/xN
return (xr * xrstcmbrel, p0)
def sgt_proportions(frequencies):
"""Use Simple Good-Turing estimate to adjust for unobserved items
Args:
frequencies (np.array(int)): Nonzero frequencies of items
Returns:
(pstar (np.array(float)), p0 (float)):
pstar: The adjusted non-zero proportions
p0: The total probability of unobserved items
"""
if len(frequencies) == 0:
raise ValueError("Input frequency vector is empty")
if np.count_nonzero(frequencies) != len(frequencies):
raise ValueError("Frequencies must be greater than zero")
freqfreqs = np.bincount(frequencies.astype(int))
assert freqfreqs[0] == 0
use_freqs = np.flatnonzero(freqfreqs)
if len(use_freqs) < 10:
raise SimpleGoodTuringError("Too few non-zero frequency items (%d). Aborting SGT." % len(use_freqs))
rstar, p0 = simple_good_turing(use_freqs, freqfreqs[use_freqs])
# rstar contains the smoothed frequencies.
# Map each original frequency r to its smoothed rstar.
rstar_dict = dict(zip(use_freqs, rstar))
rstar_sum = np.sum(freqfreqs[use_freqs] * rstar)
rstar_i = np.fromiter((rstar_dict[f] for f in frequencies),
dtype=float, count=len(frequencies))
pstar = (1 - p0) * (rstar_i / rstar_sum)
assert np.isclose(p0 + np.sum(pstar), 1)
return (pstar, p0)
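def _example_sgt_usage():
    # Hedged usage sketch (added, not part of the estimator): given the observed count
    # of each distinct item, sgt_proportions returns smoothed per-item proportions and
    # the probability mass reserved for unseen items. The counts are arbitrary, chosen
    # only so that enough distinct frequencies exist for the SGT fit to proceed.
    counts = np.array([1] * 8 + [2] * 4 + [3] * 2 + [4, 5, 7, 10, 15, 25, 40])
    pstar, p0 = sgt_proportions(counts)
    return pstar, p0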
def test_prosody():
data = (
(1, 120),
(2, 40),
(3, 24),
(4, 13),
(5, 15),
(6, 5),
(7, 11),
(8, 2),
(9, 2),
(10, 1),
(12, 3),
(14, 2),
(15, 1),
(16, 1),
(17, 3),
(19, 1),
(20, 3),
(21, 2),
(23, 3),
(24, 3),
(25, 3),
(26, 2),
(27, 2),
(28, 1),
(31, 2),
(32, 2),
(33, 1),
(34, 2),
(36, 2),
(41, 3),
(43, 1),
(45, 3),
(46, 1),
(47, 1),
(50, 1),
(71, 1),
(84, 1),
(101, 1),
(105, 1),
(121, 1),
(124, 1),
(146, 1),
(162, 1),
(193, 1),
(199, 1),
(224, 1),
(226, 1),
(254, 1),
(257, 1),
(339, 1),
(421, 1),
(456, 1),
(481, 1),
(483, 1),
(1140, 1),
(1256, 1),
(1322, 1),
(1530, 1),
(2131, 1),
(2395, 1),
(6925, 1),
(7846, 1),
)
# Computed using R 3.5.1 w/ the Gale S code
expect_p0 = 0.003883244
expect_rstar = np.array((
0.7628079,
1.706448,
2.679796,
3.663988,
4.653366,
5.645628,
6.63966,
7.634856,
8.63086,
9.627446,
11.62182,
13.61725,
14.61524,
15.61336,
16.6116,
18.60836,
19.60685,
20.6054,
22.60264,
23.60133,
24.60005,
25.5988,
26.59759,
27.59639,
30.59294,
31.59183,
32.59073,
33.58964,
35.58751,
40.58235,
42.58035,
44.57836,
45.57738,
46.57641,
49.57351,
70.55399,
83.54229,
100.5272,
104.5237,
120.5097,
123.507,
145.4879,
161.474,
192.4472,
198.4421,
223.4205,
225.4188,
253.3947,
256.3922,
338.3218,
420.2514,
455.2215,
480.2,
482.1983,
1138.636,
1254.537,
1320.48,
1528.302,
2128.788,
2392.562,
6918.687,
7838.899,
))
xr = np.array([d[0] for d in data], dtype=int)
xnr = np.array([d[1] for d in data], dtype=int)
rstar, p0 = simple_good_turing(xr, xnr)
assert np.abs(p0 - expect_p0) < 1e-6
import sympy as sp
import numpy as np
from numpy import linalg as npla
def analyze_homo_sys_2x2(npA, spA, eigW1, eigW2, eigV1, eigV2):
t = sp.symbols('t')
C1, C2 = sp.symbols('C1 C2', real = True, constant = True)
cpKind = ''
sym_sol = None
if not np.iscomplex(eigW1):
eigV1r = sp.Array(eigV1)
eigV2r = sp.Array(eigV2)
sol1 = C1 * sp.exp(eigW1 * t) * eigV1r
sol2 = C2 * sp.exp(eigW2 * t) * eigV2r
if np.isclose(eigW1, eigW2):
if not np.allclose(npA, [[0., 0.], [0., 0.]]):
"""
author: ikhtiyor
date: 27.11.2019
this script is only designed to work with multiple gpus, i think it will work with one gpu as well ))
original :https://github.com/mcdavid109/Multi-GPU-Training/blob/master/TrainingDemo.ipynb
"""
import tensorflow as tf
from threading import Thread
import cv2
import numpy as np
class Dataset():
def __init__(self,x_paths, y_paths,y_exist, batch_size, img_height, img_width, no_of_classes=5):
self.x_paths = x_paths
self.y_paths = y_paths
self.y_exist = y_exist
self.batch_size = batch_size
self.len_data = len(x_paths)
self._make_inputs()
self.idx = -1
self.num_threads = 2
self.num_batch = self.len_data // self.batch_size + 1
self.img_height = img_height
self.img_width = img_width
self.no_of_classes = no_of_classes
self.augmentators = []
# this is only for segmentation
self.layer_idx = np.arange(self.img_height).reshape(self.img_height, 1)
self.component_idx = np.tile(np.arange(self.img_width), (self.img_height, 1))
def make_augmentators(self, augment_fc):
self.augmentators.append(augment_fc)
def _make_inputs(self):
self.inputs = tf.placeholder(shape=[self.img_height,self.img_width,3],dtype=tf.float32,name='data_x')
self.labels = tf.placeholder(shape=[self.img_height, self.img_width, self.no_of_classes],dtype=tf.int32,name='data_y')
self.line_existance_labels = tf.placeholder(tf.float32, shape=[self.no_of_classes-1], name="data_existance_y")
self.queue = tf.FIFOQueue(shapes=[[self.img_height,self.img_width,3],[self.img_height, self.img_width, self.no_of_classes], [self.no_of_classes-1]],
dtypes=[tf.float32, tf.float32, tf.float32],
shared_name="fifoqueue",capacity=self.batch_size*2)
self.enqueue_op = self.queue.enqueue([self.inputs,self.labels, self.line_existance_labels])
self._queue_close = self.queue.close(cancel_pending_enqueues=True)
def next_batch(self):
batch_x , batch_y, batch_existance_y = self.queue.dequeue_many(self.batch_size)
return batch_x, batch_y, batch_existance_y
def close_queue(self, session):
session.run(self._queue_close)
def _pre_batch_queue(self,sess,coord):
while not coord.should_stop():
self.idx += 1
index = self.idx % self.len_data
# read the next img:
img = cv2.imread(self.x_paths[index], -1)
# read existance label as well
train_existance_label= self.y_exist[index]
# read the next label:
trainId_label = cv2.imread(self.y_paths[index], -1)
for augment_fc in self.augmentators:
img, trainId_label = augment_fc((img, trainId_label))
# convert the label to onehot:
onehot_label = np.zeros((self.img_height, self.img_width, self.no_of_classes), dtype=np.float32)
"""pytest unit tests for the feature_grouper module"""
import numpy as np
import pandas as pd
from scipy.linalg import cholesky
from scipy.stats import norm
import feature_grouper
def get_test_features(covariance_matrix, num_samples):
"""
Generate num_samples drawn from a normal distribution
given a square covariance matrix.
"""
# Ensure the covariance matrix is square
shape = covariance_matrix.shape
assert len(shape) == 2
assert shape[0] == shape[1]
c = cholesky(covariance_matrix, lower=True)
xr = np.random.RandomState(11).normal(size=(shape[0], num_samples))
X = np.dot(c, xr)
return X.T
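def _example_covariance_recovery():
    # Hedged illustration (added, not one of the tests): because X = (L @ Z).T with
    # cov = L @ L.T, the empirical covariance of a sufficiently large sample should
    # approach the requested matrix. Sample size and tolerance are arbitrary choices.
    cov = np.array([[2.0, 0.5], [0.5, 1.0]])
    X = get_test_features(cov, 5000)
    return np.allclose(np.cov(X.T), cov, atol=0.2)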
def test_version():
assert feature_grouper.__version__ == "0.1.0"
def test_cluster():
"""Test that the function finds expected clusters given example data"""
# Features 1 and 2 have correlation of 0.6, both are negatively
# correlated with Feature 0
cov = np.array([[3.4, -2.75, -2.0], [-2.75, 5.5, 1.5], [-2.0, 1.5, 1.25]])
features = get_test_features(cov, 30)
clusters = feature_grouper.cluster(features, 0.1)
assert np.array_equal(clusters, np.array([1, 0, 0]))
import numpy as np
import gym
from gym import spaces
from numpy.random import default_rng
import pickle
import os
import math
import matplotlib.pyplot as plt
from PIL import Image
from gym_flp import rewards
from IPython.display import display, clear_output
import anytree
from anytree import Node, RenderTree, PreOrderIter, LevelOrderIter, LevelOrderGroupIter
'''
v0.0.3
Significant changes:
08.09.2020:
- Discrete option removed from spaces; only Box allowed
- Classes for quadratic set covering and mixed integer programming (-ish) added
- Episodic tasks: no more terminal states (exception: max. no. of trials reached)
12.10.2020:
- mip added
- fbs added
'''
class qapEnv(gym.Env):
metadata = {'render.modes': ['rgb_array', 'human']}
def __init__(self, mode=None, instance=None):
__location__ = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))
self.DistanceMatrices, self.FlowMatrices = pickle.load(open(os.path.join(__location__,'discrete', 'qap_matrices.pkl'), 'rb'))
self.transport_intensity = None
self.instance = instance
self.mode = mode
while not (self.instance in self.DistanceMatrices.keys() or self.instance in self.FlowMatrices.keys() or self.instance in ['Neos-n6', 'Neos-n7', 'Brewery']):
print('Available Problem Sets:', self.DistanceMatrices.keys())
self.instance = input('Pick a problem:').strip()
self.D = self.DistanceMatrices[self.instance]
self.F = self.FlowMatrices[self.instance]
# Determine problem size relevant for much stuff in here:
self.n = len(self.D[0])
# Action space has two option:
# 1) Define as Box with shape (1, 2) and allow values to range from 1 through self.n
# 2) Define as Discrete with x = 1+((n^2-n)/2) actions (one half of matrix + 1 value from diagonal) --> Omit "+1" to obtain range from 0 to x!
# self.action_space = spaces.Box(low=-1, high=6, shape=(1,2), dtype=np.int) # Doubles complexity of the problem as it allows the identical action (1,2) and (2,1)
self.action_space = spaces.Discrete(int((self.n**2-self.n)*0.5)+1)
# If you are using images as input, the input values must be in [0, 255] as the observation is normalized (dividing by 255 to have values in [0, 1]) when using CNN policies.
if self.mode == "rgb_array":
self.observation_space = spaces.Box(low = 0, high = 255, shape=(1, self.n, 3), dtype = np.uint8) # Image representation
elif self.mode == 'human':
self.observation_space = spaces.Box(low=1, high = self.n, shape=(self.n,), dtype=np.float32)
self.states = {} # Create an empty dictionary where states and their respective reward will be stored for future reference
self.actions = self.pairwiseExchange(self.n)
# Initialize Environment with empty state and action
self.action = None
self.state = None
self.internal_state = None
#Initialize moving target to incredibly high value. To be updated if reward obtained is smaller.
self.movingTargetReward = np.inf
self.MHC = rewards.mhc.MHC() # Create an instance of class MHC in module mhc.py from package rewards
def reset(self):
state = default_rng().choice(range(1,self.n+1), size=self.n, replace=False)
#MHC, self.TM = self.MHC.compute(self.D, self.F, state)
self.internal_state = state.copy()
return state
def step(self, action):
# Create new State based on action
fromState = self.internal_state.copy()
swap = self.actions[action]
fromState[swap[0]-1], fromState[swap[1]-1] = fromState[swap[1]-1], fromState[swap[0]-1]
newState = fromState.copy()
#MHC, self.TM = self.MHC.compute(self.D, self.F, current_permutation)
MHC, self.TM = self.MHC.compute(self.D, self.F, newState)
if self.mode == 'human':
self.states[tuple(fromState)] = MHC
if self.movingTargetReward == np.inf:
self.movingTargetReward = MHC
#reward = self.movingTargetReward - MHC
reward = -1 if MHC > self.movingTargetReward else 10
self.movingTargetReward = MHC if MHC < self.movingTargetReward else self.movingTargetReward
if self.mode == "rgb_array":
rgb = np.zeros((1,self.n,3), dtype=np.uint8)
sources = np.sum(self.TM, axis = 1)
sinks = np.sum(self.TM, axis = 0)
R = np.array((fromState-np.min(fromState))/(np.max(fromState)-np.min(fromState))*255).astype(int)
G = np.array((sources-np.min(sources))/(np.max(sources)-np.min(sources))*255).astype(int)
B = np.array((sinks-np.min(sinks))/(np.max(sinks)-np.min(sinks))*255).astype(int)
for i, s in enumerate(fromState):
rgb[0:1, i] = [R[s-1], G[s-1], B[s-1]]
newState = np.array(rgb)
self.state = newState.copy()
self.internal_state = fromState.copy()
return newState, reward, False, {}
def render(self, mode=None):
if self.mode == "human":
SCALE = 1 # Scale size of pixels for displayability
img_h, img_w = SCALE, (len(self.internal_state))*SCALE
data = np.zeros((img_h, img_w, 3), dtype=np.uint8)
sources = np.sum(self.TM, axis = 1)
sinks = np.sum(self.TM, axis = 0)
R = np.array((self.internal_state-np.min(self.internal_state))/(np.max(self.internal_state)-np.min(self.internal_state))*255).astype(int)
G = np.array((sources-np.min(sources))/(np.max(sources)-np.min(sources))*255).astype(int)
B = np.array((sinks-np.min(sinks))/(np.max(sinks)-np.min(sinks))*255).astype(int)
for i, s in enumerate(self.internal_state):
data[0*SCALE:1*SCALE, i*SCALE:(i+1)*SCALE] = [R[s-1], G[s-1], B[s-1]]
img = Image.fromarray(data, 'RGB')
if self.mode == 'rgb_array':
img = Image.fromarray(self.state, 'RGB')
plt.imshow(img)
plt.axis('off')
plt.show()
return img
def close(self):
pass
def pairwiseExchange(self, x):
actions = [(i,j) for i in range(1,x) for j in range(i+1,x+1) if not i==j]
actions.append((1,1))
return actions
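# Hedged usage sketch (added, illustrative only): a minimal interaction loop for qapEnv,
# assuming the 'Neos-n6' instance and the 'human' observation mode are available in the
# bundled qap_matrices.pkl. Guarded so it only runs when this module is executed directly.
if __name__ == "__main__":
    _demo_env = qapEnv(mode='human', instance='Neos-n6')
    _obs = _demo_env.reset()
    for _ in range(5):
        _action = _demo_env.action_space.sample()
        _obs, _reward, _done, _info = _demo_env.step(_action)
    _demo_env.close()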
class fbsEnv(gym.Env):
metadata = {'render.modes': ['rgb_array', 'human']}
def __init__(self, mode=None, instance = None):
__location__ = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))
self.problems, self.FlowMatrices, self.sizes, self.LayoutWidths, self.LayoutLengths = pickle.load(open(os.path.join(__location__,'continual', 'cont_instances.pkl'), 'rb'))
self.mode = mode
self.instance = instance
while not (self.instance in self.FlowMatrices.keys() or self.instance in ['Brewery']):
print('Available Problem Sets:', self.FlowMatrices.keys())
self.instance = input('Pick a problem:').strip()
self.F = self.FlowMatrices[self.instance]
self.n = self.problems[self.instance]
self.AreaData = self.sizes[self.instance]
# Obtain size data: FBS needs a length and area
self.beta, self.l, self.w, self.a, self.min_side_length = getAreaData(self.AreaData) #Investigate available area data and compute missing values if needed
'''
Nomenclature:
W --> Width of Plant (y coordinate)
L --> Length of Plant (x coordinate)
w --> Width of facility/bay (x coordinate)
l --> Length of facility/bay (y coordinate)
A --> Area of Plant
a --> Area of facility
Point of origin analoguous to numpy indexing (top left corner of plant)
beta --> aspect ratios (as alpha is reserved for learning rate)
'''
#if self.l is None or self.w is None:
# self.l = np.random.randint(max(self.min_side_length, np.min(self.a)/self.min_side_length), max(self.min_side_length, np.min(self.a)/self.min_side_length), size=(self.n,))
# self.l = np.sqrt(self.A/self.aspect_ratio)
# self.w = np.round(self.a/self.l)
# Check if there are Layout Dimensions available, if not provide enough (sqrt(a)*1.5)
if self.instance in self.LayoutWidths.keys() and self.instance in self.LayoutLengths.keys():
self.L = int(self.LayoutLengths[self.instance]) # We need both values to be integers for converting into image
self.W = int(self.LayoutWidths[self.instance])
else:
self.A = np.sum(self.a)
# Design a squared plant layout
self.L = int(round(math.sqrt(self.A),0)) # We want the plant dimensions to be integers to fit them into an image
self.W = self.L
# Design a layout with l = 1,5 * w
#self.L = divisor(int(self.A))
#self.W = self.A/self.L
# These values need to be set manually, e.g. acc. to data from literature. Following Eq. 1 in Ulutas & Kulturel-Konak (2012), the minimum side length can be determined by assuming the smallest facility will occupy a bay on its own.
self.aspect_ratio = int(max(self.beta)) if not self.beta is None else 1
self.min_length = np.min(self.a) / self.L
self.min_width = np.min(self.a) / self.W
# We define minimum side lengths to be 1 in order to be displayable in array
self.min_length = 1
self.min_width = 1
self.action_space = spaces.Discrete(5) #Taken from doi:10.1016/j.engappai.2020.103697
self.actions = {0: 'Randomize', 1: 'Bit Swap', 2: 'Bay Exchange', 3: 'Inverse', 4: 'Idle'}
#self.state_space = spaces.Box(low=1, high = self.n, shape=(self.n,), dtype=np.int)
self.bay_space = spaces.Box(low=0, high = 1, shape=(self.n,), dtype=np.int) # binary vector indicating bay breaks (i = 1 means last facility in bay)
self.state = None
self.permutation = None # Permutation of all n facilities, read from top to bottom
self.bay = None
self.done = False
self.MHC = rewards.mhc.MHC()
if self.mode == "rgb_array":
self.observation_space = spaces.Box(low = 0, high = 255, shape= (self.W, self.L,3), dtype = np.uint8) # Image representation
elif self.mode == "human":
observation_low = np.tile(np.array([0,0,self.min_length,self.min_width],dtype=int), self.n)
observation_high = np.tile(np.array([self.W, self.L, self.W, self.L], dtype=int), self.n)
self.observation_space = spaces.Box(low=observation_low, high=observation_high, dtype = int) # Vector representation of coordinates
else:
print("Nothing correct selected")
def reset(self):
# 1. Get a random permutation and bays
self.permutation, self.bay = self.sampler()
# 2. Last position in bay break vector has to be 1 by default.
self.bay[-1] = 1
self.fac_x, self.fac_y, self.fac_b, self.fac_h = self.getCoordinates()
self.D = getDistances(self.fac_x, self.fac_y)
reward, self.TM = self.MHC.compute(self.D, self.F, self.permutation[:])
self.state = self.constructState(self.fac_x, self.fac_y, self.fac_b, self.fac_h, self.n)
return self.state
def constructState(self, x, y, l, w, n):
# Construct state
state_prelim = np.zeros((4*n,), dtype=float)
# Script for Da-Tacos cover song identification from Feature Fused Matrices
#Importing
import librosa
import numpy as np
import scipy
from scipy.spatial.distance import pdist, squareform
from scipy.interpolate import interp2d
from scipy.sparse.csgraph import laplacian
from scipy.spatial.distance import directed_hausdorff
from scipy.cluster import hierarchy
from scipy.linalg import eigh
from scipy.ndimage import median_filter
from sklearn.metrics import average_precision_score
from sklearn.preprocessing import normalize
import cv2
from sklearn import metrics
import dill
import sys
import glob
import os
import random
import json
import deepdish as dd
#change matplotlib backend to save rendered plots correctly on linux
import matplotlib as mpl
mpl.use('Agg')
from matplotlib import pyplot as plt
# #--supress warnings--#
# import warnings
# warnings.filterwarnings("ignore")
#---Load metadata---#
with open('/home/ismir/Documents/ISMIR/Datasets/da-tacos/da-tacos_benchmark_subset_metadata.json') as f:
benchmark_metadata = json.load(f)
#---Segmentation parameters---#
rs_size = 128
kmin = 8
kmax = 12
#---Counters---#
count = 0
W_count=0
P_count = 0
#---Loading limits---#
min_covers = 5 #load works for which there are at least min_covers performances
max_covers = 5 #stop loading performances if over max_covers per work
max_works = 15
#---Storage---#
all_sets = []
#all_shapeDNAs = []
all_WP = []
y = []
#for all Works
for W in benchmark_metadata.keys():
if len(benchmark_metadata[W].keys()) >= min_covers: #if it contains at least 5 covers
P_count = 0
#for all performances
for P in benchmark_metadata[W].keys():
P_count += 1
#Computations
try:
SSM = dd.io.load("/home/ismir/Documents/ISMIR/Datasets/da-tacosSSMs/StructureLaplacian_datacos_crema_" + P + ".h5")['WFused']
except:
print("Couldn't load " + P + ".")
continue
N = dd.io.load("/home/ismir/Documents/ISMIR/Datasets/da-tacosSSMs/StructureLaplacian_datacos_crema_" + P + ".h5")['N']
#Construct square matrix from flattened upper triangle
A = np.zeros((N,N))
iN = np.triu_indices(N) #return indices for upper-triangle of (N,N) matrix
for i in range(len(SSM)):
A[iN[0][i]][iN[1][i]] = SSM[i]
B = np.transpose(A)
square_SSM = A+B
#Resample
SSM_ds = cv2.resize(square_SSM, (rs_size,rs_size))
#Compute the Laplacian
L = laplacian(SSM_ds, normed=True)
#Laplacian eigenvalues and eigenvectors
evals, evecs = eigh(L)
# #Shape DNA
# shapeDNA = evals[:30]
# all_shapeDNAs.append(shapeDNA)
#Hierarchical structure
evecs = median_filter(evecs, size=(9, 1))
Cnorm = np.cumsum(evecs**2, axis=1)**0.5
# #temporary replacement for bug
# a_min_value = 3.6934424e-08
# Cnorm[Cnorm == 0.0] = a_min_value
# if (np.isnan(np.sum(Cnorm))):
# print("WOOOOOAH")
dist_set = []
for k in range(kmin, kmax):
X = evecs[:, :k] / Cnorm[:, k-1:k]
distance = squareform(pdist(X, metric='euclidean'))
dist_set.append(distance)
all_sets.append(dist_set)
y.append(W)
#append W and P
all_WP.append([W, P])
#plt.matshow()
#plt.colorbar()
#plt.show()
if (P_count >=max_covers):
break
W_count +=1
sys.stdout.write("\rLoading %i works." % W_count)
sys.stdout.flush()
if (W_count >= max_works):
break
all_sets = np.asarray(all_sets)
file_no = all_sets.shape[0]
# all_shapeDNAs = np.asarray(all_shapeDNAs)
print("\nLoaded Da-TACOS SMMs.")
print("Data shape:", all_sets.shape)
#------------#
#-Formatting-#
#------------#
all_flat = [] #kmax-kmin sets each with a flattened matrix
all_merged = [] #single concatenated vector with all flattened matrices
all_shingled2 = [] #shingle adjacent pairs of flat approximations
all_shingled3 = [] #shingle adjacent triples of flat approximations
#traverse songs
for f in range(file_no):
#formatting
flat_approximations = []
merged_approximations = np.empty((0))
for j in range(kmax-kmin):
flat_approximations.append(all_sets[f][j].flatten())
merged_approximations = np.concatenate((merged_approximations, flat_approximations[j]))
all_flat.append(np.asarray(flat_approximations))
import numpy as np
import pytest
import astropy.units as u
from astropy.nddata import StdDevUncertainty
from astropy.tests.helper import quantity_allclose
from ..spectra import Spectrum1D, SpectralRegion
from ..manipulation import extract_region
from .spectral_examples import simulated_spectra
def test_region_simple(simulated_spectra):
np.random.seed(42)
spectrum = simulated_spectra.s1_um_mJy_e1
uncertainty = StdDevUncertainty(0.1*np.random.random(len(spectrum.flux))*u.mJy)
spectrum.uncertainty = uncertainty
region = SpectralRegion(0.6*u.um, 0.8*u.um)
sub_spectrum = extract_region(spectrum, region)
sub_spectrum_flux_expected = np.array(
[1605.71612173, 1651.41650744, 2057.65798618, 2066.73502361, 1955.75832537,
1670.52711471, 1491.10034446, 1637.08084112, 1471.28982259, 1299.19484483,
1423.11195734, 1226.74494917, 1572.31888312, 1311.50503403, 1474.05051673,
1335.39944397, 1420.61880528, 1433.18623759, 1290.26966668, 1605.67341284,
1528.52281708, 1592.74392861, 1568.74162534, 1435.29407808, 1536.68040935,
1157.33825995, 1136.12679394, 999.92394692, 1038.61546167, 1011.60297294])
assert np.allclose(sub_spectrum.flux.value, sub_spectrum_flux_expected)
def test_region_ghz(simulated_spectra):
spectrum = Spectrum1D(flux=simulated_spectra.s1_um_mJy_e1,
spectral_axis=simulated_spectra.s1_um_mJy_e1.frequency)
region = SpectralRegion(374740.5725*u.GHz, 499654.09666667*u.GHz)
sub_spectrum = extract_region(spectrum, region)
sub_spectrum_flux_expected = np.array(
[1605.71612173, 1651.41650744, 2057.65798618, 2066.73502361, 1955.75832537,
1670.52711471, 1491.10034446, 1637.08084112, 1471.28982259, 1299.19484483,
1423.11195734, 1226.74494917, 1572.31888312, 1311.50503403, 1474.05051673,
1335.39944397, 1420.61880528, 1433.18623759, 1290.26966668, 1605.67341284,
1528.52281708, 1592.74392861, 1568.74162534, 1435.29407808, 1536.68040935,
1157.33825995, 1136.12679394, 999.92394692, 1038.61546167, 1011.60297294])
assert np.allclose(sub_spectrum.flux.value, sub_spectrum_flux_expected)
def test_region_simple_check_ends(simulated_spectra):
np.random.seed(42)
spectrum = Spectrum1D(spectral_axis=np.linspace(1, 25, 25)*u.um, flux=np.random.random(25)*u.Jy)
region = SpectralRegion(8*u.um, 15*u.um)
sub_spectrum = extract_region(spectrum, region)
assert sub_spectrum.spectral_axis.value[0] == 8
assert sub_spectrum.spectral_axis.value[-1] == 15
def test_region_empty(simulated_spectra):
np.random.seed(42)
# Region past upper range of spectrum
spectrum = Spectrum1D(spectral_axis=np.linspace(1, 25, 25)*u.um, flux=np.random.random(25)*u.Jy)
region = SpectralRegion(28*u.um, 30*u.um)
sub_spectrum = extract_region(spectrum, region)
assert sub_spectrum is None
# Region below lower range of spectrum
spectrum = Spectrum1D(spectral_axis=np.linspace(1, 25, 25)*u.um, flux=np.random.random(25)*u.Jy)
region = SpectralRegion(0.1*u.um, 0.3*u.um)
sub_spectrum = extract_region(spectrum, region)
assert sub_spectrum is None
# Region below lower range of spectrum and upper range in the spectrum.
    spectrum = Spectrum1D(spectral_axis=np.linspace(1, 25, 25)*u.um, flux=np.random.random(25)*u.Jy)
import time
import numpy as np
from scipy.spatial.distance import cdist
from sklearn.datasets import make_blobs
import lithopsext
from lithopsext.datasets.array import PartitionedArray
import lithops
# lithops.utils.setup_lithops_logger(log_level='DEBUG')
N_POINTS = 200
N_DIM = 2
K = 4
def kmeans(points, k, max_iterations, compute_group):
np.random.seed(42)
# get random initial centroids
    labels, centroids = np.zeros(N_DIM), np.random.rand(k, N_DIM)
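    # A plain-NumPy/SciPy Lloyd iteration (illustrative sketch only; the lithops-based version
    # would distribute the assignment step via compute_group, whose API is not shown here):
    #   for _ in range(max_iterations):
    #       labels = np.argmin(cdist(points, centroids), axis=1)
    #       centroids = np.array([points[labels == i].mean(axis=0) for i in range(k)])
    #   return labels, centroids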
import colorsys
import os
import time
import numpy as np
import tensorflow as tf
from PIL import ImageDraw, ImageFont
from tensorflow.keras.layers import Input, Lambda
from tensorflow.keras.models import Model
from nets.yolo import yolo_body
from utils.utils import cvtColor, get_classes, preprocess_input, resize_image, show_config
from utils.utils_bbox import DecodeBox
class YOLO(object):
_defaults = {
#--------------------------------------------------------------------------#
        #   To run prediction with your own trained model you must change model_path and classes_path!
        #   model_path points to the weights file in the logs folder; classes_path points to the txt under model_data.
        #
        #   After training, the logs folder holds several weights files; pick one with a low validation loss.
        #   A lower validation loss does not guarantee a higher mAP; it only means those weights generalize better on the validation set.
        #   If a shape mismatch occurs, also check the model_path and classes_path used during training.
#--------------------------------------------------------------------------#
"model_path" : 'model_data/yolox_s.h5',
"classes_path" : 'model_data/coco_classes.txt',
#---------------------------------------------------------------------#
        #   Input image size; must be a multiple of 32.
#---------------------------------------------------------------------#
"input_shape" : [640, 640],
#---------------------------------------------------------------------#
        #   Which YoloX version to use: tiny, s, m, l, x
#---------------------------------------------------------------------#
"phi" : 's',
#---------------------------------------------------------------------#
        #   Only prediction boxes with a score greater than the confidence threshold are kept
#---------------------------------------------------------------------#
"confidence" : 0.5,
#---------------------------------------------------------------------#
        #   nms_iou threshold used for non-maximum suppression
#---------------------------------------------------------------------#
"nms_iou" : 0.3,
#---------------------------------------------------------------------#
        #   Maximum number of prediction boxes
#---------------------------------------------------------------------#
"max_boxes" : 100,
#---------------------------------------------------------------------#
        #   Controls whether letterbox_image is used for a distortion-free resize of the input image;
        #   repeated tests showed that disabling letterbox_image and resizing directly works better.
#---------------------------------------------------------------------#
"letterbox_image" : True,
}
@classmethod
def get_defaults(cls, n):
if n in cls._defaults:
return cls._defaults[n]
else:
return "Unrecognized attribute name '" + n + "'"
#---------------------------------------------------#
    # Initialize YOLO
#---------------------------------------------------#
def __init__(self, **kwargs):
self.__dict__.update(self._defaults)
for name, value in kwargs.items():
setattr(self, name, value)
#---------------------------------------------------#
        # Get the classes and the number of prior (anchor) boxes
#---------------------------------------------------#
self.class_names, self.num_classes = get_classes(self.classes_path)
#---------------------------------------------------#
        # Set a different color for each class when drawing boxes
#---------------------------------------------------#
hsv_tuples = [(x / self.num_classes, 1., 1.) for x in range(self.num_classes)]
self.colors = list(map(lambda x: colorsys.hsv_to_rgb(*x), hsv_tuples))
self.colors = list(map(lambda x: (int(x[0] * 255), int(x[1] * 255), int(x[2] * 255)), self.colors))
self.generate()
show_config(**self._defaults)
#---------------------------------------------------#
    # Load the model
#---------------------------------------------------#
def generate(self):
model_path = os.path.expanduser(self.model_path)
assert model_path.endswith('.h5'), 'Keras model or weights must be a .h5 file.'
self.model = yolo_body([None, None, 3], num_classes = self.num_classes, phi = self.phi)
self.model.load_weights(self.model_path)
print('{} model, and classes loaded.'.format(model_path))
#---------------------------------------------------------#
        # The DecodeBox function post-processes the raw predictions;
        # post-processing includes decoding, non-maximum suppression, score thresholding, etc.
#---------------------------------------------------------#
self.input_image_shape = Input([2,],batch_size=1)
inputs = [*self.model.output, self.input_image_shape]
outputs = Lambda(
DecodeBox,
output_shape = (1,),
name = 'yolo_eval',
arguments = {
'num_classes' : self.num_classes,
'input_shape' : self.input_shape,
'confidence' : self.confidence,
'nms_iou' : self.nms_iou,
'max_boxes' : self.max_boxes,
'letterbox_image' : self.letterbox_image
}
)(inputs)
self.yolo_model = Model([self.model.input, self.input_image_shape], outputs)
@tf.function
def get_pred(self, image_data, input_image_shape):
out_boxes, out_scores, out_classes = self.yolo_model([image_data, input_image_shape], training=False)
return out_boxes, out_scores, out_classes
#---------------------------------------------------#
    # Detect objects in an image
#---------------------------------------------------#
def detect_image(self, image, crop = False, count = False):
#---------------------------------------------------------#
        #   Convert the image to RGB here to avoid errors when a grayscale image is passed in.
        #   The code only supports prediction on RGB images; every other image type is converted to RGB.
#---------------------------------------------------------#
image = cvtColor(image)
#---------------------------------------------------------#
        #   Add gray bars to the image for a distortion-free resize;
        #   a direct resize can also be used for detection.
#---------------------------------------------------------#
image_data = resize_image(image, (self.input_shape[1], self.input_shape[0]), self.letterbox_image)
#---------------------------------------------------------#
        #   Add the batch_size dimension and normalize
#---------------------------------------------------------#
image_data = np.expand_dims(preprocess_input(np.array(image_data, dtype='float32')), 0)
#---------------------------------------------------------#
        #   Feed the image into the network for prediction!
#---------------------------------------------------------#
input_image_shape = np.expand_dims(np.array([image.size[1], image.size[0]], dtype='float32'), 0)
out_boxes, out_scores, out_classes = self.get_pred(image_data, input_image_shape)
print('Found {} boxes for {}'.format(len(out_boxes), 'img'))
#---------------------------------------------------------#
        #   Set the font and the bounding-box line thickness
#---------------------------------------------------------#
font = ImageFont.truetype(font='model_data/simhei.ttf', size=np.floor(3e-2 * image.size[1] + 0.5).astype('int32'))
thickness = int(max((image.size[0] + image.size[1]) // np.mean(self.input_shape), 1))
#---------------------------------------------------------#
        #   Count detections per class
#---------------------------------------------------------#
if count:
print("top_label:", out_classes)
classes_nums = np.zeros([self.num_classes])
for i in range(self.num_classes):
num = np.sum(out_classes == i)
if num > 0:
print(self.class_names[i], " : ", num)
classes_nums[i] = num
print("classes_nums:", classes_nums)
#---------------------------------------------------------#
        #   Whether to crop out the detected targets
#---------------------------------------------------------#
if crop:
for i, c in list(enumerate(out_boxes)):
top, left, bottom, right = out_boxes[i]
top = max(0, np.floor(top).astype('int32'))
left = max(0, np.floor(left).astype('int32'))
bottom = min(image.size[1], np.floor(bottom).astype('int32'))
right = min(image.size[0], np.floor(right).astype('int32'))
dir_save_path = "img_crop"
if not os.path.exists(dir_save_path):
os.makedirs(dir_save_path)
crop_image = image.crop([left, top, right, bottom])
crop_image.save(os.path.join(dir_save_path, "crop_" + str(i) + ".png"), quality=95, subsampling=0)
print("save crop_" + str(i) + ".png to " + dir_save_path)
#---------------------------------------------------------#
        #   Draw boxes and labels on the image
#---------------------------------------------------------#
for i, c in list(enumerate(out_classes)):
predicted_class = self.class_names[int(c)]
box = out_boxes[i]
score = out_scores[i]
top, left, bottom, right = box
top = max(0, np.floor(top).astype('int32'))
left = max(0, np.floor(left).astype('int32'))
bottom = min(image.size[1], np.floor(bottom).astype('int32'))
right = min(image.size[0], np.floor(right).astype('int32'))
label = '{} {:.2f}'.format(predicted_class, score)
draw = ImageDraw.Draw(image)
label_size = draw.textsize(label, font)
label = label.encode('utf-8')
print(label, top, left, bottom, right)
if top - label_size[1] >= 0:
text_origin = np.array([left, top - label_size[1]])
else:
text_origin = np.array([left, top + 1])
for i in range(thickness):
draw.rectangle([left + i, top + i, right - i, bottom - i], outline=self.colors[c])
draw.rectangle([tuple(text_origin), tuple(text_origin + label_size)], fill=self.colors[c])
draw.text(text_origin, str(label,'UTF-8'), fill=(0, 0, 0), font=font)
del draw
return image
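    # Example usage (sketch; 'street.jpg' is an illustrative path, not part of the repo):
    #   yolo = YOLO()
    #   from PIL import Image
    #   image = Image.open('street.jpg')
    #   r_image = yolo.detect_image(image, crop=False, count=True)
    #   r_image.show()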
def get_FPS(self, image, test_interval):
#---------------------------------------------------------#
        #   Convert the image to RGB here to avoid errors when a grayscale image is passed in.
        #   The code only supports prediction on RGB images; every other image type is converted to RGB.
#---------------------------------------------------------#
image = cvtColor(image)
#---------------------------------------------------------#
        #   Add gray bars to the image for a distortion-free resize;
        #   a direct resize can also be used for detection.
#---------------------------------------------------------#
image_data = resize_image(image, (self.input_shape[1], self.input_shape[0]), self.letterbox_image)
#---------------------------------------------------------#
        #   Add the batch_size dimension and normalize
#---------------------------------------------------------#
        image_data = np.expand_dims(preprocess_input(np.array(image_data, dtype='float32')), 0)
import sympy
import numpy as np
import lie_operator
import jet
import bases
import combinatorics
class normal_form(object):
"""A normal form of an autonomous vector field :math:`f:\\mathbb{R}^n\\rightarrow\\mathbb{R}^m`.
Arguments
---------
f : callable
function that accepts ``n`` arguments and returns tuple of length ``m`` numbers, corresponding to mathematical function :math:`f:\\mathbb{R}^n\\rightarrow\\mathbb{R}^m`
x : number if ``n==1`` or tuple of length ``n`` if ``n>=1``
center about which normal form is computed
k : int
maximum degree of normal form
Attributes
----------
n : int
dimension of domain of :math:`f`
m : int
dimension of codomain of :math:`f`
jet : ``normal_forms.jet.jet``
series representation of normal form
L1 : ``normal_forms.lie_operator.lie_operator``
fundamental operator of the normal form, Lie bracket with the linear term :math:`f_1(x)=f'(x)x`, that is :math:`L_{f_1}(\cdot) = [f_1,\cdot]`, see ``normal_forms.lie_operator.lie_operator``
g : list of ``k-1`` ``sympy.Matrix(m,1)`` objects
        generators, i.e. homogeneous :math:`j^{th}` degree :math:`m`-dimensional polynomial vector fields :math:`g_j` for :math:`j\geq2` used to carry out a sequence of near-identity transformations :math:`e^{L_{g_j}}` of :math:`f`
L : ``normal_forms.lie_operator.lie_operator``
Lie operators :math:`L_{g_j}` of the generators in ``g``, see ``normal_forms.lie_operator.lie_operator``
eqv : list of shape ``(k-1,2,.,.)``
coefficients and ``sympy.Matrix(m,1)`` object representation of normal form equivariant vector fields
fun : sympy.Matrix(m,1) object
symbolic representation of normal form
"""
def __init__(self, f, x, k, f_args=None):
self.f = f
self.x = x
self.k = k
if np.array(x).shape == ():
n, x = 1, [x]
else:
n = len(x)
# call to f
if f_args is None:
f_eval = f(*x)
else:
f_eval = f(*(list(x) + list(f_args)))
if np.array(f_eval).shape == ():
m = 1
else:
# call to f
m = len(f_eval)
self.m = m
self.n = n
# list of symbolic variables
var = sympy.symarray('x', (n, ))
# polynomial basis
pb = bases.poly_basis(var)
# vector basis
vb = bases.vf_basis(pb, m)
# k-jet of f centered at x
# call to f
self.jet = jet.jet(f, x, k, f_args, var, pb)
# fundamental operator of normal form theory, Lie bracket with f'
self.L1 = lie_operator.lie_operator(self.jet.fun_deg[1], var, 1, pb, vb)
# work space of coefficients
n_terms = combinatorics.simplicial_list(n, k)
wrk = [[np.zeros(m * n_terms[i + j + 1]) for j in range(k - i)]
for i in range(k)]
# initialize first row of workspace as k-jet
for j in range(k):
wrk[0][j] = np.concatenate(self.jet.coeff[j + 1])
# generators
g = []
# Lie brackets with generators
L = []
# equivariant vector fields
eqv = []
# list of factorials
fac = combinatorics.factorial_list(k)
# algorithm based on Murdock
for deg in range(2, k + 1):
# update workspace and solve for generator
for j, l in enumerate(L):
wrk[1][deg - 2] += l[deg - 1 - j].dot(wrk[0][deg - 2 - j])
f_coeff = np.zeros(m * n_terms[deg])
for i in range(deg):
f_coeff += wrk[i][deg - 1 - i] / fac[i]
g_coeff = np.linalg.lstsq(self.L1[deg], f_coeff)[0]
# normal form coefficients
h_coeff = f_coeff - self.L1[deg].dot(g_coeff)
# represent normal form term in L1.T nullspace basis
            u, s, v = np.linalg.svd(self.L1[deg])
# To import required modules:
import numpy as np
import time
import os
import sys
import matplotlib
import matplotlib.cm as cm #for color maps
import matplotlib.pyplot as plt
from matplotlib.gridspec import GridSpec #for specifying plot attributes
from matplotlib import ticker #for setting contour plots to log scale
import scipy.integrate #for numerical integration
import scipy.misc #for factorial function
from scipy.special import erf #error function, used in computing CDF of normal distribution
import scipy.interpolate #for interpolation functions
import corner #corner.py package for corner plots
#matplotlib.rc('text', usetex=True)
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.realpath(__file__)))))
from src.functions_general import *
from src.functions_compare_kepler import *
from src.functions_load_sims import *
from src.functions_plot_catalogs import *
from src.functions_plot_params import *
from src.functions_compute_RVs import *
savefigures = False
savefigures_directory = '/Users/hematthi/Documents/GradSchool/Research/ExoplanetsSysSim_Clusters/Figures/Model_Optimization/AMD_system/Split_stars/Singles_ecc/Params11_KS/Distribute_AMD_per_mass/durations_norm_circ_singles_multis_GF2020_KS/Best_models/GP_best_models/RV/'
save_name = 'Maximum_AMD_model'
##### To load the underlying populations:
loadfiles_directory = '/Users/hematthi/Documents/GradSchool/Research/ACI/Simulated_Data/AMD_system/Split_stars/Singles_ecc/Params11_KS/Distribute_AMD_per_mass/durations_norm_circ_singles_multis_GF2020_KS/GP_med/'
run_number = ''
N_sim, cos_factor, P_min, P_max, radii_min, radii_max = read_targets_period_radius_bounds(loadfiles_directory + 'periods%s.out' % run_number)
param_vals_all = read_sim_params(loadfiles_directory + 'periods%s.out' % run_number)
sssp_per_sys, sssp = compute_summary_stats_from_cat_phys(file_name_path=loadfiles_directory, run_number=run_number, load_full_tables=True)
##### To compute and plot RV semi-amplitudes:
n_bins = 50
lw = 2 #linewidth
alpha = 0.2
afs = 20 #axes labels font size
tfs = 20 #text labels font size
lfs = 16 #legend labels font size
##### To compute RV semi-amplitudes:
loadfiles_directory = '/Users/hematthi/Documents/GradSchool/Research/ACI/Simulated_Data/AMD_system/Split_stars/Singles_ecc/Params11_KS/Distribute_AMD_per_mass/durations_norm_circ_singles_multis_GF2020_KS/GP_best_models/'
runs = 100
# Aggregate = 1% of the points from each catalog:
factor = 5
pr_all_aggregate = []
pr_obs_aggregate = []
Kr_all_aggregate = []
Kr_obs_aggregate = []
pr_min, pr_max = 3./10., 300./5.
pr_bins = np.logspace(np.log10(pr_min), np.log10(pr_max), n_bins+1)
pr_bins_mid = (pr_bins[:-1] + pr_bins[1:])/2.
pr_counts_obs = []
pr_counts_obs_above1 = [] # K_max/K_[5-10]d > 1
pr_counts_obs_below1 = [] # K_2ndmax/K_[5-10]d < 1
Kr_bins = np.logspace(-3., 3., n_bins+1)
Kr_bins_mid = (Kr_bins[:-1] + Kr_bins[1:])/2.
Kr_counts_all = []
Kr_counts_obs = []
# To store some other statistics:
f_all_kr_above1 = [] # fractions of all 5-10d planets where K_max/K_[5-10]d > 1
f_obs_kr_above1 = [] # fractions of observed 5-10d planets where K_max/K_[5-10]d > 1
f_obs_kr_above1_given_pr_above1 = [] # fractions of observed 5-10d planets where K_max/K_[5-10]d > 1 given that the K_max or K_2ndmax planet is outer
f_obs_kr_above1_given_pr_below1 = [] # fractions of observed 5-10d planets where K_max/K_[5-10]d > 1 given that the K_max or K_2ndmax planet is inner
f_obs_pr_above1_given_kr_above1 = [] # fractions of observed 5-10d planets where P_Kmax is an outer planet to P_[5-10]d given that K_max/K_[5-10]d > 1
f_obs_pr_above1_given_kr_below1 = [] # fractions of observed 5-10d planets where P_Kmax is an outer planet to P_[5-10]d given that K_2ndmax/K_[5-10]d < 1
for i in range(runs): #range(1,runs+1)
run_number = i+1
print(i)
N_sim_i = read_targets_period_radius_bounds(loadfiles_directory + 'periods%s.out' % run_number)[0]
param_vals_i = read_sim_params(loadfiles_directory + 'periods%s.out' % run_number)
sssp_per_sys_i, sssp_i = compute_summary_stats_from_cat_phys(file_name_path=loadfiles_directory, run_number=run_number, load_full_tables=True)
# For multi-planet systems where at least one planet is in the period range:
det_inrange_all = []
P_inrange_all = []
Rp_inrange_all = []
Mp_inrange_all = []
K_K_max_inrange_all = []
K_max_or_2ndmax_all = [] # either K_max (when planet in range is not the max) or the 2nd K_max (when planet in range is the max); also includes repeats, for systems where more than one planet is in the range
P_K_max_or_2ndmax_all = [] # period of either the planet with K_max (when planet in range is not the max) or of the planet with the 2nd K_max (when planet in range is the max)
# For single systems where the planet is in the period range:
P_inrange_singles = []
Rp_inrange_singles = []
Mp_inrange_singles = []
K_max_singles = []
for i,P_sys in enumerate(sssp_per_sys_i['P_all']):
det_sys = sssp_per_sys_i['det_all'][i]
Mp_sys = sssp_per_sys_i['mass_all'][i][P_sys > 0]
Rp_sys = sssp_per_sys_i['radii_all'][i][P_sys > 0]
e_sys = sssp_per_sys_i['e_all'][i][P_sys > 0]
incl_sys = sssp_per_sys_i['incl_all'][i][P_sys > 0]
P_sys = P_sys[P_sys > 0]
n_pl = len(P_sys)
if any((P_sys > 5.) & (P_sys < 10.)):
if n_pl == 1: # singles
K_single = rv_K(Mp_sys, P_sys, Mstar=sssp_i['Mstar_all'][i])
P_inrange_singles.append(P_sys[0])
Rp_inrange_singles.append(Rp_sys[0])
Mp_inrange_singles.append(Mp_sys[0])
K_max_singles.append(K_single[0])
else: # multi-planet systems
j_inrange = np.arange(len(P_sys))[(P_sys > 5.) & (P_sys < 10.)]
#print(i, ': planets in [5,10]d = ', len(j_inrange))
#K_sys = rv_K(Mp_sys, P_sys, Mstar=sssp_i['Mstar_all'][i])
K_sys = rv_K(Mp_sys, P_sys, e=e_sys, i=incl_sys, Mstar=sssp_i['Mstar_all'][i])
idsort_K_sys = np.argsort(K_sys)
K_max, K_2ndmax = K_sys[idsort_K_sys[-1]], K_sys[idsort_K_sys[-2]]
P_K_max, P_K_2ndmax = P_sys[idsort_K_sys[-1]], P_sys[idsort_K_sys[-2]]
K_K_max_sys = K_sys/K_max
for j in j_inrange:
det_inrange_all.append(det_sys[j])
P_inrange_all.append(P_sys[j])
Rp_inrange_all.append(Rp_sys[j])
Mp_inrange_all.append(Mp_sys[j])
#K_max_all.append(K_max)
if K_K_max_sys[j] == 1:
K_K_max_inrange_all.append(K_max/K_2ndmax)
K_max_or_2ndmax_all.append(K_2ndmax)
P_K_max_or_2ndmax_all.append(P_K_2ndmax)
else:
K_K_max_inrange_all.append(K_K_max_sys[j])
K_max_or_2ndmax_all.append(K_max)
P_K_max_or_2ndmax_all.append(P_K_max)
det_inrange_all = np.array(det_inrange_all)
P_inrange_all = np.array(P_inrange_all)
Rp_inrange_all = np.array(Rp_inrange_all)
Mp_inrange_all = np.array(Mp_inrange_all)
K_K_max_inrange_all = np.array(K_K_max_inrange_all)
K_max_or_2ndmax_all = np.array(K_max_or_2ndmax_all)
P_K_max_or_2ndmax_all = np.array(P_K_max_or_2ndmax_all)
P_inrange_singles = np.array(P_inrange_singles)
Rp_inrange_singles = np.array(Rp_inrange_singles)
Mp_inrange_singles = np.array(Mp_inrange_singles)
K_max_singles = np.array(K_max_singles)
# Collect a sample of points from each catalog:
kr_all = 1./K_K_max_inrange_all
pr_all = P_K_max_or_2ndmax_all/P_inrange_all
n_all, n_obs = len(kr_all), np.sum(det_inrange_all == 1)
pr_all_aggregate = pr_all_aggregate + list(pr_all[:int(n_all*factor/runs)])
pr_obs_aggregate = pr_obs_aggregate + list(pr_all[det_inrange_all == 1][:int(n_obs*factor/runs)])
Kr_all_aggregate = Kr_all_aggregate + list(kr_all[:int(n_all*factor/runs)])
Kr_obs_aggregate = Kr_obs_aggregate + list(kr_all[det_inrange_all == 1][:int(n_obs*factor/runs)])
# Compute the histograms:
counts, bins = np.histogram(pr_all[det_inrange_all == 1], bins=pr_bins)
pr_counts_obs.append(counts)
counts, bins = np.histogram(pr_all[(kr_all > 1.) & (det_inrange_all == 1)], bins=pr_bins)
pr_counts_obs_above1.append(counts)
counts, bins = np.histogram(pr_all[(kr_all < 1.) & (det_inrange_all == 1)], bins=pr_bins)
pr_counts_obs_below1.append(counts)
counts, bins = np.histogram(kr_all, bins=Kr_bins)
Kr_counts_all.append(counts/float(np.sum(counts)))
counts, bins = np.histogram(kr_all[det_inrange_all == 1], bins=Kr_bins)
Kr_counts_obs.append(counts/float(np.sum(counts)))
# Compute other statistics:
n_kr_obs = len(kr_all[det_inrange_all == 1])
n_kr_obs_above1 = np.sum(kr_all[det_inrange_all == 1] > 1.)
n_kr_obs_below1 = np.sum(kr_all[det_inrange_all == 1] < 1.)
n_pr_obs_above1 = np.sum(pr_all[det_inrange_all == 1] > 1.)
n_pr_obs_below1 = np.sum(pr_all[det_inrange_all == 1] < 1.)
n_kr_obs_above1_pr_obs_above1 = np.sum(kr_all[(det_inrange_all == 1) & (pr_all > 1.)] > 1.)
n_kr_obs_above1_pr_obs_below1 = np.sum(kr_all[(det_inrange_all == 1) & (pr_all < 1.)] > 1.)
n_pr_obs_above1_kr_obs_above1 = np.sum(pr_all[(det_inrange_all == 1) & (kr_all > 1.)] > 1.)
n_pr_obs_above1_kr_obs_below1 = np.sum(pr_all[(det_inrange_all == 1) & (kr_all < 1.)] > 1.)
f_all_kr_above1.append(np.sum(kr_all > 1.)/len(kr_all))
f_obs_kr_above1.append(n_kr_obs_above1/n_kr_obs)
f_obs_kr_above1_given_pr_above1.append(n_kr_obs_above1_pr_obs_above1/n_pr_obs_above1)
f_obs_kr_above1_given_pr_below1.append(n_kr_obs_above1_pr_obs_below1/n_pr_obs_below1)
f_obs_pr_above1_given_kr_above1.append(n_pr_obs_above1_kr_obs_above1/n_kr_obs_above1)
f_obs_pr_above1_given_kr_below1.append(n_pr_obs_above1_kr_obs_below1/n_kr_obs_below1)
pr_all_aggregate = np.array(pr_all_aggregate)
pr_obs_aggregate = np.array(pr_obs_aggregate)
Kr_all_aggregate = np.array(Kr_all_aggregate)
Kr_obs_aggregate = np.array(Kr_obs_aggregate)
pr_counts_obs = np.array(pr_counts_obs)
pr_counts_obs_above1 = np.array(pr_counts_obs_above1)
pr_counts_obs_below1 = np.array(pr_counts_obs_below1)
Kr_counts_all = np.array(Kr_counts_all)
Kr_counts_obs = np.array(Kr_counts_obs)
f_all_kr_above1 = np.array(f_all_kr_above1)
f_obs_kr_above1 = np.array(f_obs_kr_above1)
f_obs_kr_above1_given_pr_above1 = np.array(f_obs_kr_above1_given_pr_above1)
f_obs_kr_above1_given_pr_below1 = np.array(f_obs_kr_above1_given_pr_below1)
f_obs_pr_above1_given_kr_above1 = np.array(f_obs_pr_above1_given_kr_above1)
f_obs_pr_above1_given_kr_below1 = np.array(f_obs_pr_above1_given_kr_below1)
pr_counts_obs_qtls = np.zeros((n_bins,3))
pr_counts_obs_above1_qtls = np.zeros((n_bins,3))
pr_counts_obs_below1_qtls = np.zeros((n_bins,3))
Kr_counts_all_qtls = np.zeros((n_bins,3))
Kr_counts_obs_qtls = np.zeros((n_bins,3))
for b in range(n_bins):
pr_counts_obs_qtls[b] = np.quantile(pr_counts_obs[:,b], [0.16,0.5,0.84])
pr_counts_obs_above1_qtls[b] = np.quantile(pr_counts_obs_above1[:,b], [0.16,0.5,0.84])
pr_counts_obs_below1_qtls[b] = np.quantile(pr_counts_obs_below1[:,b], [0.16,0.5,0.84])
Kr_counts_all_qtls[b] = np.quantile(Kr_counts_all[:,b], [0.16,0.5,0.84])
Kr_counts_obs_qtls[b] = np.quantile(Kr_counts_obs[:,b], [0.16,0.5,0.84])
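# Each *_qtls array now holds the 16th/50th/84th percentiles of the per-catalog histogram
# counts in every bin, e.g. for plotting a credible band around the median histogram.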
##### To compute some statistics and plot RV semi-amplitudes:
q16, q50, q84 = np.quantile(f_all_kr_above1, [0.16,0.5,0.84])
print('Fraction of all 5-10d planets where another planet has a larger K: %s_{%s}^{+%s}' % (np.round(q50,3), np.round(q16-q50,3), np.round(q84-q50,3)))
#!/usr/bin/env python3
import numpy as np
import os
import pytest
import torch
from botorch.exceptions import BotorchTensorDimensionError
from botorch.utils.containers import TrainingData
from scipy.optimize import Bounds
from bayesopt4ros.data_handler import DataHandler
@pytest.fixture(params=[1, 3, 10])
def test_data(request):
"""Set up a simple dataset to test the DataHandler class. The dimensionality
of the input data is specified by the fixture parameters."""
dim, n = request.param, 1000
x = torch.rand(n, dim) * 10 - 5
y = 3 + 0.5 * torch.randn(n, 1)
return TrainingData(Xs=x, Ys=y)
def test_data_handling(test_data):
dim = test_data.Xs.shape[1]
bounds = Bounds(lb=-5 * np.ones((dim,)), ub=5 * np.ones((dim,)))
# Using initilizer for setting data
dh = DataHandler(x=test_data.Xs, y=test_data.Ys)
x, y = dh.get_xy()
np.testing.assert_array_equal(x, test_data.Xs)
np.testing.assert_array_equal(y, test_data.Ys)
d = dh.get_xy(as_dict=True)
np.testing.assert_array_equal(d["train_inputs"], test_data.Xs)
np.testing.assert_array_equal(d["train_targets"], test_data.Ys)
# Using setter for setting data
dh = DataHandler(bounds)
np.testing.assert_equal(dh.n_data, 0)
dh.set_xy(x=test_data.Xs, y=test_data.Ys)
x, y = dh.get_xy()
np.testing.assert_array_equal(x, test_data.Xs)
np.testing.assert_array_equal(y, test_data.Ys)
d = dh.get_xy(as_dict=True)
np.testing.assert_array_equal(d["train_inputs"], test_data.Xs)
np.testing.assert_array_equal(d["train_targets"], test_data.Ys)
def test_adding_data(test_data):
dim = test_data.Xs.shape[1]
# Single data point
dh = DataHandler(x=test_data.Xs, y=test_data.Ys)
x_new, y_new = torch.rand(1, dim), torch.randn(1, 1)
dh.add_xy(x=x_new, y=y_new)
x, y = dh.get_xy()
np.testing.assert_array_equal(x, torch.cat((test_data.Xs, x_new)))
np.testing.assert_array_equal(y, torch.cat((test_data.Ys, y_new)))
np.testing.assert_equal(dh.n_data, test_data.Xs.shape[0] + 1)
np.testing.assert_equal(len(dh), test_data.Xs.shape[0] + 1)
# Multiple data points
dh = DataHandler(x=test_data.Xs, y=test_data.Ys)
x_new, y_new = torch.rand(10, dim), torch.randn(10, 1)
dh.add_xy(x=x_new, y=y_new)
x, y = dh.get_xy()
np.testing.assert_array_equal(x, torch.cat((test_data.Xs, x_new)))
np.testing.assert_array_equal(y, torch.cat((test_data.Ys, y_new)))
    np.testing.assert_equal(dh.n_data, test_data.Xs.shape[0] + 10)
import astropy.units as u
import numpy as np
from lofti_gaia.loftitools import *
from lofti_gaia.cFunctions import calcOFTI_C
#from loftitools import *
import pickle
import time
import matplotlib.pyplot as plt
# Astroquery throws some warnings we can ignore:
import warnings
warnings.filterwarnings("ignore")
'''This module obtains measurements from Gaia EDR3 (Gaia DR2 is also available as a secondary option) and runs through the LOFTI Gaia/OFTI
wide stellar binary orbit fitting technique.
'''
class Fitter(object):
'''Initialize the Fitter object for the binary system, and compute observational constraints
to be used in the orbit fit. User must provide Gaia source ids, tuples of mass estimates for
both objects, specify the number of desired orbits in posterior sample. Fit will be
for object 2 relative to object 1.
Attributes are tuples of (value,uncertainty) unless otherwise indicated. Attributes
with astropy units are retrieved from Gaia archive, attributes without units are
computed from Gaia values. All relative values are for object 2 relative to object 1.
Args:
sourceid1, sourceid2 (int): Gaia source ids for the two objects, fit will be for motion of \
object 2 relative to object 1
        mass1, mass2 (tuple, flt): tuple of mass estimates for objects 1 and 2, of the form (value, uncertainty)
Norbits (int): Number of desired orbits in posterior sample. Default = 100000
results_filename (str): Filename for fit results files. If none, results will be written to files \
named FitResults.yr.mo.day.hr.min.s
astrometry (dict): User-supplied astrometric measurements. Must be dictionary or table or pandas dataframe with\
column names "sep,seperr,pa,paerr,dates" or "ra,raerr,dec,decerr,dates". May be same as the rv table. \
Sep, deltaRA, and deltaDEC must be in arcseconds, PA in degrees, dates in decimal years. \
Default = None
user_rv (dict): User-supplied radial velocity measurements. Must be dictionary or table or pandas dataframe with\
column names "rv,rverr,rv_dates". May be same as the astrometry table. Default = None.
catalog (str): name of Gaia catalog to query. Default = 'gaiaedr3.gaia_source'
ruwe1, ruwe2 (flt): RUWE value from Gaia archive
ref_epoch (flt): reference epoch in decimal years. For Gaia DR2 this is 2015.5, for Gaia EDR3 it is 2016.0
plx1, plx2 (flt): parallax from Gaia in mas
RA1, RA2 (flt): right ascension from Gaia; RA in deg, uncertainty in mas
Dec1, Dec2 (flt): declination from Gaia; Dec in deg, uncertainty in mas
pmRA1, pmRA2 (flt): proper motion in RA in mas yr^-1 from Gaia
pmDec1, pmDec2 (flt): proper motion in DEC in mas yr^-1 from Gaia
rv1, rv2 (flt, optional): radial velocity in km s^-1 from Gaia
rv (flt, optional): relative RV of 2 relative to 1, if both are present in Gaia
plx (flt): weighted mean parallax for the binary system in mas
distance (flt): distance of system in pc, computed from Gaia parallax using method \
of Bailer-Jones et. al 2018.
deltaRA, deltaDec (flt): relative separation in RA and Dec directions, in mas
pmRA, pmDec (flt): relative proper motion in RA/Dec directions in km s^-1
sep (flt): total separation vector in mas
        pa (flt): position angle of separation vector in degrees from North
sep_au (flt): separation in AU
sep_km (flt): separation in km
total_vel (flt): total velocity vector in km s^-1. If RV is available for both, \
this is the 3d velocity vector; if not it is just the plane of sky velocity.
total_planeofsky_vel (flt): total velocity in the plane of sky in km s^-1. \
In the absence of RV this is equivalent to the total velocity vector.
deltaGmag (flt): relative contrast in Gaia G magnitude. Does not include uncertainty.
        inflateProperMotionError (flt): an optional factor to multiply the default Gaia proper motion error by.
Written by <NAME>, 2020
'''
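    # Example (sketch; the source ids and masses below are placeholders, not real measurements):
    #   fitter = Fitter(sourceid1=..., sourceid2=..., mass1=(1.0, 0.05), mass2=(0.5, 0.05), Norbits=100000)
    #   orbitfit = FitOrbit(fitter)
    #   results = orbitfit.results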
def __init__(self, sourceid1, sourceid2, mass1, mass2, Norbits = 100000, \
results_filename = None,
astrometry = None,
user_rv = None,
catalog = 'gaiaedr3.gaia_source',
inflateProperMotionError=1
):
self.sourceid1 = sourceid1
self.sourceid2 = sourceid2
try:
self.mass1 = mass1[0]
self.mass1err = mass1[1]
self.mass2 = mass2[0]
self.mass2err = mass2[1]
self.mtot = [self.mass1 + self.mass2, np.sqrt((self.mass1err**2) + (self.mass2err**2))]
except:
raise ValueError('Masses must be tuples of (value,error), ex: mass1 = (1.0,0.05)')
self.Norbits = Norbits
if not results_filename:
self.results_filename = 'FitResults.'+time.strftime("%Y.%m.%d.%H.%M.%S")+'.txt'
self.stats_filename = 'FitResults.Stats.'+time.strftime("%Y.%m.%d.%H.%M.%S")+'.txt'
else:
self.results_filename = results_filename
self.stats_filename = results_filename+'.Stats.txt'
self.astrometry = False
# check if user supplied astrometry:
if astrometry is not None:
# if so, set astrometric flag to True:
self.astrometry = True
# store observation dates:
self.astrometric_dates = astrometry['dates']
# if in sep/pa, convert to ra/dec:
if 'sep' in astrometry:
try:
astr_ra = [MonteCarloIt([astrometry['sep'][i],astrometry['seperr'][i]]) * \
np.sin(np.radians(MonteCarloIt([astrometry['pa'][i],astrometry['paerr'][i]]))) \
for i in range(len(astrometry['sep']))]
astr_dec = [MonteCarloIt([astrometry['sep'][i],astrometry['seperr'][i]]) * \
np.cos(np.radians(MonteCarloIt([astrometry['pa'][i],astrometry['paerr'][i]]))) \
for i in range(len(astrometry['sep']))]
self.astrometric_ra = np.array([
[np.mean(astr_ra[i]) for i in range(len(astrometry['sep']))],
[np.std(astr_ra[i]) for i in range(len(astrometry['sep']))]
])
self.astrometric_dec = np.array([
[np.mean(astr_dec[i]) for i in range(len(astrometry['sep']))],
[np.std(astr_dec[i]) for i in range(len(astrometry['sep']))]
])
except:
raise ValueError('Astrometry keys not recognized. Please provide dictionary or table or pandas dataframe with\
column names "sep,seperr,pa,paerr,dates" or "ra,raerr,dec,decerr,dates"')
elif 'ra' in astrometry:
# else store the ra/dec as attributes:
try:
self.astrometric_ra = np.array([astrometry['ra'], astrometry['raerr']])
self.astrometric_dec = np.array([astrometry['dec'], astrometry['decerr']])
except:
raise ValueError('Astrometry keys not recognized. Please provide dictionary or table or pandas dataframe with\
column names "sep,seperr,pa,paerr,dates" or "ra,raerr,dec,decerr,dates"')
else:
raise ValueError('Astrometry keys not recognized. Please provide dictionary or table or pandas dataframe with\
column names "sep,seperr,pa,paerr,dates" or "ra,raerr,dec,decerr,dates"')
# Check if user supplied rv:
self.use_user_rv = False
if user_rv is not None:
# set user rv flag to true:
self.use_user_rv = True
try:
# set attributes; multiply rv by -1 due to difference in coordinate systems:
self.user_rv = np.array([user_rv['rv']*-1,user_rv['rverr']])
self.user_rv_dates = np.array(user_rv['rv_dates'])
except:
raise ValueError('RV keys not recognized. Please use column names "rv,rverr,rv_dates"')
self.catalog = catalog
# Get Gaia measurements, compute needed constraints, and add to object:
self.PrepareConstraints(catalog=self.catalog,inflateFactor=inflateProperMotionError)
def edr3ToICRF(self,pmra,pmdec,ra,dec,G):
''' Corrects for biases in proper motion. The function is from https://arxiv.org/pdf/2103.07432.pdf
Args:
pmra,pmdec (float): proper motion
ra, dec (float): right ascension and declination
G (float): G magnitude
Written by <NAME>, 2021
'''
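        # The published correction applies only to bright sources (G < 13); fainter sources
        # are returned unchanged below.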
if G>=13:
return pmra , pmdec
import numpy as np
def sind(x):
return np.sin(np.radians(x))
def cosd(x):
return np.cos(np.radians(x))
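        # Magnitude bins and spin corrections: columns are Gmin, Gmax, omegaX, omegaY, omegaZ
        # (the omega components are in micro-arcsec/yr, hence the /1000 conversion to mas/yr below).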
table1="""
0.0 9.0 9.0 9.5 9.5 10.0 10.0 10.5 10.5 11.0 11.0 11.5 11.5 11.75 11.75 12.0 12.0 12.25 12.25 12.5 12.5 12.75 12.75 13.0
18.4 33.8 -11.3 14.0 30.7 -19.4 12.8 31.4 -11.8 13.6 35.7 -10.5 16.2 50.0 2.1 19.4 59.9 0.2 21.8 64.2 1.0 17.7 65.6 -1.9 21.3 74.8 2.1 25.7 73.6 1.0 27.3 76.6 0.5
34.9 68.9 -2.9 """
table1 = np.fromstring(table1,sep=" ").reshape((12,5)).T
Gmin = table1[0]
Gmax = table1[1]
        #pick the appropriate omegaXYZ for the source's magnitude:
omegaX = table1[2][(Gmin<=G)&(Gmax>G)][0]
omegaY = table1[3][(Gmin<=G)&(Gmax>G)][0]
omegaZ = table1[4][(Gmin<=G)&(Gmax>G)][0]
pmraCorr = -1*sind(dec)*cosd(ra)*omegaX -sind(dec)*sind(ra)*omegaY + cosd(dec)*omegaZ
pmdecCorr = sind(ra)*omegaX -cosd(ra)*omegaY
return pmra-pmraCorr/1000., pmdec-pmdecCorr/1000.
def PrepareConstraints(self, rv=False, catalog='gaiaedr3.gaia_source', inflateFactor=1.):
        '''Retrieves parameters for both objects from the Gaia EDR3 archive and computes system attributes,
and assigns them to the Fitter object class.
Args:
rv (bool): flag for handling the presence or absence of RV measurements for both objects \
in EDR3. Gets set to True if both objects have Gaia RV measurements. Default = False
catalog (str): name of Gaia catalog to query. Default = 'gaiaedr3.gaia_source'
inflateFactor (flt): Factor by which to inflate the errors on Gaia proper motions to \
account for improper uncertainty estimates. Default = 1.0
Written by <NAME>, 2020
'''
from astroquery.gaia import Gaia
deg_to_mas = 3600000.
mas_to_deg = 1./3600000.
# Retrieve astrometric solution from Gaia EDR3
job = Gaia.launch_job("SELECT * FROM "+catalog+" WHERE source_id = "+str(self.sourceid1))
j = job.get_results()
job = Gaia.launch_job("SELECT * FROM "+catalog+" WHERE source_id = "+str(self.sourceid2))
k = job.get_results()
if catalog == 'gaiadr2.gaia_source':
# Retrieve RUWE from RUWE catalog for both sources and add to object state:
job = Gaia.launch_job("SELECT * FROM gaiadr2.ruwe WHERE source_id = "+str(self.sourceid1))
jruwe = job.get_results()
job = Gaia.launch_job("SELECT * FROM gaiadr2.ruwe WHERE source_id = "+str(self.sourceid2))
kruwe = job.get_results()
self.ruwe1 = jruwe['ruwe'][0]
self.ruwe2 = kruwe['ruwe'][0]
else:
# EDR3 contains ruwe in the main catalog:
self.ruwe1 = j['ruwe'][0]
self.ruwe2 = k['ruwe'][0]
# Check RUWE for both objects and warn if too high:
if self.ruwe1>1.2 or self.ruwe2>1.2:
print('''WARNING: RUWE for one or more of your solutions is greater than 1.2. This indicates
that the source might be an unresolved binary or experiencing acceleration
during the observation. Orbit fit results may not be trustworthy.''')
# reference epoch:
self.ref_epoch = j['ref_epoch'][0]
# parallax:
self.plx1 = [j[0]['parallax']*u.mas, j[0]['parallax_error']*u.mas]
self.plx2 = [k[0]['parallax']*u.mas, k[0]['parallax_error']*u.mas]
# RA/DEC
self.RA1 = [j[0]['ra']*u.deg, j[0]['ra_error']*mas_to_deg*u.deg]
self.RA2 = [k[0]['ra']*u.deg, k[0]['ra_error']*mas_to_deg*u.deg]
self.Dec1 = [j[0]['dec']*u.deg, j[0]['dec_error']*mas_to_deg*u.deg]
self.Dec2 = [k[0]['dec']*u.deg, k[0]['dec_error']*mas_to_deg*u.deg]
# Proper motions
pmRACorrected1,pmDecCorrected1 = self.edr3ToICRF(j[0]['pmra'],j[0]['pmdec'],j[0]['ra'],j[0]['dec'],j[0]["phot_g_mean_mag"])
pmRACorrected2,pmDecCorrected2 = self.edr3ToICRF(k[0]['pmra'],k[0]['pmdec'],k[0]['ra'],k[0]['dec'],k[0]["phot_g_mean_mag"])
self.pmRA1 = [pmRACorrected1*u.mas/u.yr, j[0]['pmra_error']*u.mas/u.yr*inflateFactor]
self.pmRA2 = [pmRACorrected2*u.mas/u.yr, k[0]['pmra_error']*u.mas/u.yr*inflateFactor]
self.pmDec1 = [pmDecCorrected1*u.mas/u.yr, j[0]['pmdec_error']*u.mas/u.yr*inflateFactor]
self.pmDec2 = [pmDecCorrected2*u.mas/u.yr, k[0]['pmdec_error']*u.mas/u.yr*inflateFactor]
# See if both objects have RV's in DR2:
if catalog == 'gaiaedr3.gaia_source':
key = 'dr2_radial_velocity'
error_key = 'dr2_radial_velocity_error'
elif catalog == 'gaiadr2.gaia_source':
key = 'radial_velocity'
error_key = 'radial_velocity_error'
if type(k[0][key]) == np.float64 and type(j[0][key]) == np.float64 or type(k[0][key]) == np.float32 and type(j[0][key]) == np.float32:
rv = True
self.rv1 = [j[0][key]*u.km/u.s,j[0][error_key]*u.km/u.s]
self.rv2 = [k[0][key]*u.km/u.s,k[0][error_key]*u.km/u.s]
rv1 = MonteCarloIt(self.rv1)
rv2 = MonteCarloIt(self.rv2)
self.rv = [ -np.mean(rv2-rv1) , np.std(rv2-rv1) ] # km/s
            # negative to reflect change in coordinate system from RV measurements to lofti
# pos RV = towards observer in this coord system
else:
self.rv = [0,0]
# weighted mean of parallax values:
plx = np.average([self.plx1[0].value,self.plx2[0].value], weights = [self.plx1[1].value,self.plx2[1].value])
plxerr = np.max([self.plx1[1].value,self.plx2[1].value])
self.plx = [plx,plxerr] # mas
self.distance = distance(*self.plx) # pc
# Compute separations of component 2 relative to 1:
r1 = MonteCarloIt(self.RA1)
r2 = MonteCarloIt(self.RA2)
d1 = MonteCarloIt(self.Dec1)
d2 = MonteCarloIt(self.Dec2)
ra = (r2*deg_to_mas - r1*deg_to_mas) * np.cos(np.radians(np.mean([self.Dec1[0].value,self.Dec2[0].value])))
dec = ((d2 - d1)*u.deg).to(u.mas).value
self.deltaRA = [np.mean(ra),np.std(ra)] # mas
self.deltaDec = [np.mean(dec),np.std(dec)] # mas
# compute relative proper motion:
pr1 = MonteCarloIt(self.pmRA1)
pr2 = MonteCarloIt(self.pmRA2)
pd1 = MonteCarloIt(self.pmDec1)
pd2 = MonteCarloIt(self.pmDec2)
pmRA = [np.mean(pr2 - pr1), np.std(pr2-pr1)] # mas/yr
pmDec = [np.mean(pd2 - pd1), np.std(pd2 - pd1)] # mas/yr
self.pmRA = masyr_to_kms(pmRA,self.plx) # km/s
self.pmDec = masyr_to_kms(pmDec,self.plx) # km/s
# Compute separation/position angle:
r, p = to_polar(r1,r2,d1,d2)
self.sep = tuple([np.mean(r).value, np.std(r).value]) # mas
self.pa = tuple([np.mean(p).value, np.std(p).value]) # deg
self.sep_au = tuple([((self.sep[0]/1000)*self.distance[0]), ((self.sep[1]/1000)*self.distance[0])])
self.sep_km = tuple([ self.sep_au[0]*u.au.to(u.km) , self.sep_au[1]*u.au.to(u.km)])
# compute total velocities:
if rv:
self.total_vel = [ add_in_quad([self.pmRA[0],self.pmDec[0],self.rv[0]]) ,
add_in_quad([self.pmRA[1],self.pmDec[1],self.rv[1]]) ] # km/s
self.total_planeofsky_vel = [ add_in_quad([self.pmRA[0],self.pmDec[0]]) ,
add_in_quad([self.pmRA[1],self.pmDec[1]]) ] # km/s
else:
self.total_vel = [ add_in_quad([self.pmRA[0],self.pmDec[0]]) ,
add_in_quad([self.pmRA[1],self.pmDec[1]]) ] # km/s
self.total_planeofsky_vel = self.total_vel.copy() # km/s
# compute deltamag:
self.deltaGmag = j[0]['phot_g_mean_mag'] - k[0]['phot_g_mean_mag']
class FitOrbit(object):
''' Object for performing an orbit fit. Takes attributes from Fitter class.
ex: orbits = FitOrbit(fitterobject)
Args:
fitterobject (Fitter object): Fitter object initialized from the Fitter class
write_stats (bool): If True, write out summary statistics of orbit sample at \
conclusion of fit. Default = True.
write_results (bool): If True, write out the fit results to a pickle file \
in addition to the text file created during the fit. Default = True.
deltaRA, deltaDec (flt): relative separation in RA and Dec directions, in mas
pmRA, pmDec (flt): relative proper motion in RA/Dec directions in km s^-1
rv (flt, optional): relative RV of 2 relative to 1, if both are present in Gaia EDR3
mtot_init (flt): initial total system mass in Msun from user input
distance (flt): distance of system in pc, computed from Gaia parallax using method of Bailer-Jones et. al 2018.
sep (flt): separation vector in mas
        pa (flt): position angle of separation vector in degrees from North
ref_epoch (flt): epoch of the measurement, 2016.0 for Gaia EDR3 and 2015.5 for Gaia DR2.
Norbits (int): number of desired orbit samples
write_stats (bool): if True, write summary of sample statistics to human-readable file at end of run. Default = True
write_results (bool): if True, write out current state of sample orbits in pickle file in periodic intervals during \
run, and again at the end of the run. RECOMMENDED. Default = True
results_filename (str): name of file for saving pickled results to disk. If not supplied, \
            default name is FitResults.y.mo.d.h.m.s.pkl, saved in same directory as fit was run.
stats_filename (str): name of file for saving human-readable file of stats of sample results. If not supplied, \
            default name is FitResults.Stats.y.mo.d.h.m.s.pkl, saved in same directory as fit was run.
run_time (flt): run time for the last fit. astropy units object
Written by <NAME>, 2020
'''
def __init__(self, fitterobject, write_stats = True, write_results = True, python_version=False, \
use_pm_cross_term = False, corr_coeff = None):
# establish fit parameters:
self.deltaRA = fitterobject.deltaRA
self.deltaDec = fitterobject.deltaDec
self.pmRA = fitterobject.pmRA
self.pmDec = fitterobject.pmDec
self.rv = fitterobject.rv
self.mtot_init = fitterobject.mtot
self.distance = fitterobject.distance
self.sep = fitterobject.sep
self.pa = fitterobject.pa
self.ref_epoch = fitterobject.ref_epoch
self.Norbits = fitterobject.Norbits
self.write_results = write_results
self.write_stats = write_stats
self.results_filename = fitterobject.results_filename
self.stats_filename = fitterobject.stats_filename
self.astrometry = fitterobject.astrometry
if self.astrometry:
self.astrometric_ra = fitterobject.astrometric_ra
self.astrometric_dec = fitterobject.astrometric_dec
self.astrometric_dates = fitterobject.astrometric_dates
self.use_user_rv = fitterobject.use_user_rv
if self.use_user_rv:
self.user_rv = fitterobject.user_rv
self.user_rv_dates = fitterobject.user_rv_dates
# run orbit fitter:
self.fitorbit(python_fitOFTI=python_version, use_pm_cross_term = use_pm_cross_term, corr_coeff = corr_coeff)
def fitorbit(self, save_results_every_X_loops = 100, python_fitOFTI=False, use_pm_cross_term = False, corr_coeff = None):
'''Run the OFTI fitting run on the Fitter object. Called when FitOrbit object
is created.
Args:
save_results_every_X_loops (int): on every Xth loop, save status of the \
orbit sample arrays to a pickle file, if write_results = True (Default)
python_fitOFTI (bool): If True, fit using python only without using C Kepler's equation solver. Default = False
use_pm_cross_term (bool): If True, include the proper motion correlation cross term in the Chi^2 computation \
Default = False
Written by <NAME>, 2020
'''
# write header:
print('Saving orbits in',self.results_filename)
k = open(self.results_filename, 'w')
output_file_header = '# sma [arcsec] period [yrs] orbit phase t_0 [yr] ecc incl [deg]\
argp [deg] lan [deg] m_tot [Msun] dist [pc] chi^2 ln(prob) ln(randn)'
k.write(output_file_header + "\n")
k.close()
import time as tm
########### Perform initial run to get initial chi-squared: #############
# Draw random orbits:
#parameters = a,T,const,to,e,i,w,O,m1,dist
numSamples = 10000
parameters_init = draw_samples(numSamples, self.mtot_init, self.distance, self.ref_epoch)
# Compute positions and velocities:
if(python_fitOFTI):
X,Y,Z,Xdot,Ydot,Zdot,Xddot,Yddot,Zddot,parameters=calc_OFTI(parameters_init,self.ref_epoch,self.sep,self.pa)
else:
returnArray = np.zeros((19,numSamples))
returnArray = calcOFTI_C(parameters_init,self.ref_epoch,self.sep,self.pa,returnArray.copy())
X,Y,Z,Xdot,Ydot,Zdot,Xddot,Yddot,Zddot = returnArray[0:9]
parameters = returnArray[9:]
# Compute chi squared:
if self.rv[0] != 0:
model = np.array([Y,X,Ydot,Xdot,Zdot])
data = np.array([self.deltaRA, self.deltaDec, self.pmRA, self.pmDec, self.rv])
else:
model = np.array([Y,X,Ydot,Xdot])
data = np.array([self.deltaRA, self.deltaDec, self.pmRA, self.pmDec])
chi2 = ComputeChi2(data,model)
if use_pm_cross_term:
chi2 -= ( 2 * corr_coeff * (data[2][0] - model[2]) * (data[3][0] - model[3]) ) / (data[2][1] * data[3][1])
if self.astrometry:
p = parameters.copy()
a,T,const,to,e,i,w,O,m1,dist = p[0],p[1],p[2],p[3],p[4],p[5],p[6],p[7],p[8],p[9]
chi2_astr = np.zeros(10000)
# Calculate predicted positions at astr observation dates for each orbit:
for j in range(self.astrometric_ra.shape[1]):
# for each date, compute XYZ for each 10000 trial orbit. We can
# skip scale and rotate because that was accomplished in the calc_OFTI call above.
X1,Y1,Z1,E1 = calc_XYZ(a,T,to,e,i,w,O,self.astrometric_dates[j])
# Place astrometry into data array where: data[0][0]=ra obs, data[0][1]=ra err, etc:
data = np.array([self.astrometric_ra[:,j], self.astrometric_dec[:,j]])
                # place corresponding predicted positions at that date for each trial orbit in arcsec:
model = np.array([Y1*1000,X1*1000])
# compute chi2 for trial orbits at that date and add to the total chi2 sum:
chi2_astr += ComputeChi2(data,model)
chi2 = chi2 + chi2_astr
if self.use_user_rv:
p = parameters.copy()
a,T,const,to,e,i,w,O,m1,dist = p[0],p[1],p[2],p[3],p[4],p[5],p[6],p[7],p[8],p[9]
chi2_rv = np.zeros(10000)
for j in range(self.user_rv.shape[1]):
# compute ecc anomaly at that date:
X1,Y1,Z1,E1 = calc_XYZ(a,T,to,e,i,w,O,self.user_rv_dates[j])
# compute velocities at that ecc anom:
Xdot,Ydot,Zdot = calc_velocities(a,T,to,e,i,w,O,dist,E1)
# compute chi2:
chi2_rv += ComputeChi2(np.array([self.user_rv[:,j]]),np.array([Zdot]))
chi2 = chi2 + chi2_rv
print('inital chi min',np.nanmin(chi2))
self.chi_min = np.nanmin(chi2)
# Accept/reject:
accepted, lnprob, lnrand = AcceptOrReject(chi2,self.chi_min)
# count number accepted:
number_orbits_accepted = np.size(accepted)
# tack on chi2, log probability, log random unif number to parameters array:
parameters = np.concatenate((parameters,chi2[None,:],lnprob[None,:],lnrand[None,:]), axis = 0)
# transpose:
parameters=np.transpose(parameters)
# write results to file:
k = open(self.results_filename, 'a')
for params in parameters[accepted]:
string = ' '.join([str(p) for p in params])
k.write(string + "\n")
k.close()
###### start loop ########
# initialize:
loop_count = 0
start=tm.time()
while number_orbits_accepted < self.Norbits:
# Draw random orbits:
numSamples = 10000
parameters_init = draw_samples(numSamples, self.mtot_init, self.distance, self.ref_epoch)
# Compute positions and velocities and new parameters array with scaled and rotated values:
if(python_fitOFTI):
X,Y,Z,Xdot,Ydot,Zdot,Xddot,Yddot,Zddot,parameters=calc_OFTI(parameters_init,self.ref_epoch,self.sep,self.pa)
else:
returnArray = np.zeros((19,numSamples))
returnArray = calcOFTI_C(parameters_init,self.ref_epoch,self.sep,self.pa,returnArray.copy())
X,Y,Z,Xdot,Ydot,Zdot,Xddot,Yddot,Zddot = returnArray[0:9]
parameters = returnArray[9:]
returnArray = None
# compute chi2 for orbits using Gaia observations:
if self.rv[0] != 0:
model = np.array([Y,X,Ydot,Xdot,Zdot])
data = np.array([self.deltaRA, self.deltaDec, self.pmRA, self.pmDec, self.rv])
else:
model = np.array([Y,X,Ydot,Xdot])
data = np.array([self.deltaRA, self.deltaDec, self.pmRA, self.pmDec])
chi2 = ComputeChi2(data,model)
if use_pm_cross_term:
                chi2 -= ( 2 * corr_coeff * (data[2][0] - model[2]) * (data[3][0] - model[3]) ) / (data[2][1] * data[3][1])
# add user astrometry if given:
if self.astrometry:
p = parameters.copy()
a,T,const,to,e,i,w,O,m1,dist = p[0],p[1],p[2],p[3],p[4],p[5],p[6],p[7],p[8],p[9]
chi2_astr = np.zeros(10000)
# Calculate predicted positions at astr observation dates for each orbit:
for j in range(self.astrometric_ra.shape[1]):
# for each date, compute XYZ for each 10000 trial orbit. We can
# skip scale and rotate because that was accomplished in the calc_OFTI call above.
X1,Y1,Z1,E1 = calc_XYZ(a,T,to,e,i,w,O,self.astrometric_dates[j])
# Place astrometry into data array where: data[0][0]=ra obs, data[0][1]=ra err, etc:
data = np.array([self.astrometric_ra[:,j], self.astrometric_dec[:,j]])
                    # place corresponding predicted positions at that date for each trial orbit:
model = np.array([Y1*1000,X1*1000])
# compute chi2 for trial orbits at that date and add to the total chi2 sum:
chi2_astr += ComputeChi2(data,model)
chi2 = chi2 + chi2_astr
# add user rv if given:
if self.use_user_rv:
p = parameters.copy()
a,T,const,to,e,i,w,O,m1,dist = p[0],p[1],p[2],p[3],p[4],p[5],p[6],p[7],p[8],p[9]
chi2_rv = np.zeros(10000)
for j in range(self.user_rv.shape[1]):
# compute ecc anomaly at that date:
X1,Y1,Z1,E1 = calc_XYZ(a,T,to,e,i,w,O,self.user_rv_dates[j])
# compute velocities at that ecc anom:
Xdot,Ydot,Zdot = calc_velocities(a,T,to,e,i,w,O,dist,E1)
# compute chi2:
chi2_rv += ComputeChi2(np.array([self.user_rv[:,j]]),np.array([Zdot]))
chi2 = chi2 + chi2_rv
# Accept/reject:
accepted, lnprob, lnrand = AcceptOrReject(chi2,self.chi_min)
if np.size(accepted) == 0:
pass
else:
# count num accepted
p = parameters.copy()
a,T,const,to,e,i,w,O,m1,dist = p[0],p[1],p[2],p[3],p[4],p[5],p[6],p[7],p[8],p[9]
sampleResults = calc_XYZ(a,T,to,e,i/180*np.pi,w/180*np.pi,O/180*np.pi,2016.0)
number_orbits_accepted += np.size(accepted)
parameters = np.concatenate((parameters,chi2[None,:],lnprob[None,:],lnrand[None,:]), axis = 0)
parameters=np.transpose(parameters)
k = open(self.results_filename, 'a')
for params in parameters[accepted]:
string = ' '.join([str(p) for p in params])
k.write(string + "\n")
k.close()
if np.nanmin(chi2) < self.chi_min:
# If there is a new min chi2:
self.chi_min = np.nanmin(chi2)
#print('found new chi min:',self.chi_min)
# re-evaluate to accept/reject with new chi_min:
if number_orbits_accepted != 0:
dat = np.loadtxt(open(self.results_filename,"r"),delimiter=' ',ndmin=2)
lnprob = -(dat[:,10]-self.chi_min)/2.0
dat[:,11] = lnprob
accepted_retest = np.where(lnprob > dat[:,12])
q = open(self.results_filename, 'w')
q.write(output_file_header + "\n")
for data in dat[accepted_retest]:
string = ' '.join([str(d) for d in data])
q.write(string + "\n")
q.close()
dat2 = np.loadtxt(open(self.results_filename,"r"),delimiter=' ',ndmin=2)
number_orbits_accepted=dat2.shape[0]
loop_count += 1
#print('loop count',loop_count)
update_progress(number_orbits_accepted,self.Norbits)
# one last accept/reject with final chi_min value:
dat = np.loadtxt(open(self.results_filename,"r"),delimiter=' ',ndmin=2)
lnprob = -(dat[:,10]-self.chi_min)/2.0
dat[:,11] = lnprob
accepted_retest = np.where(lnprob > dat[:,12])
q = open(self.results_filename, 'w')
q.write(output_file_header + "\n")
for data in dat[accepted_retest]:
string = ' '.join([str(d) for d in data])
q.write(string + "\n")
q.close()
# when finished, upload results and store in object:
dat = np.loadtxt(open(self.results_filename,"r"),delimiter=' ',ndmin=2)
number_orbits_accepted=dat.shape[0]
print('Final Norbits:', number_orbits_accepted)
        # initialise results object and store accepted orbits:
if self.rv[0] != 0:
self.results = Results(orbits = dat, limit_lan = False, limit_aop = False)
else:
self.results = Results(orbits = dat, limit_lan = True, limit_aop = False)
self.results.Update(self.results.orbits)
# pickle dump the results attribute:
if self.write_results:
self.results.SaveResults(self.results_filename.replace(".txt", ".pkl"), write_text_file = False)
stop = tm.time()
self.results.run_time = (stop - start)*u.s
# compute stats and write to file:
self.results.stats = Stats(orbits = self.results.orbits, write_to_file = self.write_stats, filename = self.stats_filename)
class Results(object):
'''A class for storing and manipulating the results of the orbit fit.
Args:
orbits (Norbits x 13 array): array of accepted orbits from \
OFTI fit in the same order as the following attributes
sma (1 x Norbits array): semi-major axis in arcsec
period (1 x Norbits array): period in years
orbit_fraction (1 x Norbits array): fraction of orbit past periastron \
            passage the observation (2016) occurred on. Values: [0,1)
t0 (1 x Norbits array): date of periastron passage in decimal years
ecc (1 x Norbits array): eccentricity
inc (1 x Norbits array): inclination relative to plane of the sky in deg
        aop (1 x Norbits array): argument of periastron in deg
lan (1 x Norbits array): longitude of ascending node in deg
mtot (1 x Norbits array): total system mass in Msun
distance (1 x Norbits array): distance to system in parsecs
chi2 (1 x Norbits array): chi^2 value for the orbit
lnprob (1 x Norbits array): log probability of orbit
lnrand (1 x Norbits array): log of random "dice roll" for \
orbit acceptance
limit_aop, limit_lan (bool): In the absence of radial velocity info, \
there is a degeneracy between arg of periastron and long of ascending \
node. Common practice is to limit one to the interval [0,180] deg. \
By default, lofti limits lan to this interval if rv = False. The user can \
choose to limit aop instead by setting limit_aop = True, limit_lan = False. \
The orbits[:,6] (aop) and orbits[:,7] (lan) arrays preserve the original values. \
Written by <NAME>, 2020
'''
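    # Example (sketch; the pickle filename is illustrative):
    #   results = Results()
    #   results.LoadResults('FitResults.2021.01.01.12.00.00.pkl')
    #   fig = results.PlotHists()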
def __init__(self, orbits = [], limit_aop = False, limit_lan = True):
self.orbits = orbits
self.limit_lan = limit_lan
self.limit_aop = limit_aop
def Update(self, orbits):
'''Take elements of the "orbits" attribute and populate
the orbital element attributes
Args:
orbits (arr): orbits array from Results class
Written by <NAME>, 2020
'''
self.sma = orbits[:,0]
self.period = orbits[:,1]
self.orbit_fraction = orbits[:,2]
self.t0 = orbits[:,3]
self.ecc = orbits[:,4]
self.inc = orbits[:,5]
self.aop = orbits[:,6]
if self.limit_aop:
self.aop = limit_to_180deg(self.aop)
self.lan = orbits[:,7] % 360
if self.limit_lan:
self.lan = limit_to_180deg(self.lan)
self.mtot = orbits[:,8]
self.distance = orbits[:,9]
self.chi2 = orbits[:,10]
self.lnprob = orbits[:,11]
self.lnrand = orbits[:,12]
def SaveResults(self, filename, write_text_file = False, text_filename = None):
'''Save the orbits and orbital parameters attributes in a pickle file
Args:
filename (str): filename for pickle file
write_text_file (bool): if True, also write out the accepted orbits to a \
human readable text file
            text_filename (str): if write_text_file = True, specify filename for text file
Written by <NAME>, 2020
'''
pickle.dump(self, open( filename, "wb" ) )
# write results to file:
if write_text_file:
k = open(text_filename, 'a')
for params in self.orbits:
string = ' '.join([str(p) for p in params])
k.write(string + "\n")
k.close()
def LoadResults(self, filename, append = False):
'''Read in the orbits and orbital parameters attributes from a pickle file
Args:
filename (str): filename of pickle file to load
append (bool): if True, append read in orbit samples to another Results \
object. Default = False.
Written by <NAME>, 2020
'''
results_in = pickle.load( open( filename, "rb" ) )
if append == False:
self.orbits = results_in.orbits
self.Update(self.orbits)
else:
self.orbits = np.vstack((self.orbits,results_in.orbits))
self.Update(self.orbits)
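    # Example round trip (sketch; the file names are placeholders): persist one run
    # and later reload it, optionally stacking a second run on top.
    #     results.SaveResults('run1.pkl')
    #     combined = Results()
    #     combined.LoadResults('run1.pkl')
    #     combined.LoadResults('run2.pkl', append=True)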
# plotting results:
def PlotHists(self):
'''Plot 1-d histograms of orbital elements 'sma','ecc','inc','aop','lan','t0' from fit results.
Written by <NAME>, 2020
'''
        if len(self.sma) < 50:
bins = 50
else:
bins = 'fd'
fig = plt.figure(figsize=(30, 5.5))
params = np.array([self.sma,self.ecc,self.inc,self.aop,self.lan,self.t0])
names = np.array(['sma','ecc','inc','aop','lan','t0'])
for i in range(len(params)):
ax = plt.subplot2grid((1,len(params)), (0,i))
plt.hist(params[i],bins=bins,edgecolor='none',alpha=0.8)
plt.tick_params(axis='both', left=False, top=False, right=False, bottom=True, \
labelleft=False, labeltop=False, labelright=False, labelbottom=True)
plt.xticks(rotation=45, fontsize = 20)
plt.xlabel(names[i], fontsize = 25)
plt.tight_layout()
return fig
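    # Example (sketch): after Update() has been called,
    #     fig = results.PlotHists()
    #     fig.savefig('element_hists.png')   # output filename is illustrative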
def PlotOrbits(self, color = True, colorbar = True, ref_epoch = 2016.0, size = 100, plot3d = False, cmap = 'viridis',xlim=False,ylim=False):
'''Plot a random selection of orbits from the sample in the plane of the sky.
Args:
color (bool): if True, plot orbit tracks using a colormap scale to orbit fraction (phase) \
past observation date (2015.5). If False, orbit tracks will be black. Default = True
colorbar (bool): if True and color = True, plot colorbar for orbit phase
            ref_epoch (flt): reference epoch for drawing orbits. Default = 2016.0
            size (int): Number of orbits to plot. Default = 100
plot3d (bool): If True, return a plot of orbits in 3D space. Default = False
cmap (str): colormap for orbit phase plot
Written by <NAME>, 2020
'''
# Random selection of orbits to plot:
if len(self.sma) > size:
# if there are more orbits than desired size, randomly select orbits from
# the posterior sample:
ind = np.random.choice(range(0,len(self.sma)),replace=False,size=size)
else:
# if there are fewer orbits than desired size, take all of them:
ind = np.random.choice(range(0,len(self.sma)),replace=False,size=len(self.sma))
from numpy import tan, arctan, sqrt, cos, sin, arccos
# label for colormap axis:
colorlabel = 'Phase'
# create figure:
fig = plt.figure(figsize = (7.5, 6.))
plt.grid(ls=':')
# invert X axis for RA:
plt.gca().invert_xaxis()
if plot3d:
# Make 3d axis object:
ax = fig.add_subplot(111, projection='3d')
# plot central star:
ax.scatter(0,0,0,color='orange',marker='*',s=300,zorder=10)
ax.set_zlabel('Z (")',fontsize=20)
else:
# plot central star:
plt.scatter(0,0,color='orange',marker='*',s=300,zorder=10)
# For each orbit in the random selection from the posterior samples:
for a,T,to,e,i,w,O in zip(self.sma[ind],self.period[ind],self.t0[ind],self.ecc[ind],np.radians(self.inc[ind]),\
np.radians(self.aop[ind]),np.radians(self.lan[ind])):
# define an array of times along orbit:
times = np.linspace(ref_epoch,ref_epoch+T,5000)
X,Y,Z = np.array([]),np.array([]),np.array([])
E = np.array([])
# Compute X,Y,Z positions for each time:
for t in times:
n = (2*np.pi)/T
M = n*(t-to)
nextE = [danby_solve(eccentricity_anomaly, varM,vare, 0.001) for varM,vare in zip([M],[e])]
                E = np.append(E, nextE)
import numpy as np
from math import floor, ceil
import cv2
from skimage import filters
# Reflect each image about the horizontal axis (flip rows) for the whole batch.
def reflect_x_image(image): # okay for 0, 8 (maybe 1)
    # slice reversal is fully vectorised and keeps the (N, 28, 28) shape
    result = image[:, ::-1, :]
    return result
# Reflect each image about the vertical axis (flip columns) for the whole batch.
def reflect_y_image(image, axis=None): # okay for 0, 1, 8
    # `axis` is kept for backwards compatibility with existing callers but is unused
    result = image[:, :, ::-1]
    return result
# Todo: make it work
def rotate_cv_image(image):
angle = np.random.randint(-20,20,1)
image_center = tuple(np.array(image.shape[1::-1]) / 2)
rot_mat = cv2.getRotationMatrix2D(image_center, angle, 1.0)
result = cv2.warpAffine(image, rot_mat, image.shape[1::-1], flags=cv2.INTER_LINEAR)
return result
# Todo: vectorize
def rotate_image(image):
N = len(image)
angle = np.random.randint(-20,20,N)
theta = angle/180*np.pi
rot = []
for i in range(N):
        rot.append(np.array([[np.cos(theta[i]), -np.sin(theta[i])],[np.sin(theta[i]), np.cos(theta[i])]]))
new_img = np.zeros((N,28,28))
for i in range(28):
for j in range(28):
data = image[:,i,j]
new_idx = np.round(np.array([i,j])@rot).astype(int)
            if np.all((new_idx[:,0] >= 0) & (new_idx[:,0] < 28) & (new_idx[:,1] >= 0) & (new_idx[:,1] < 28)):
                # pair image k with its own rotated target coordinates
                new_img[np.arange(N), new_idx[:,0], new_idx[:,1]] = data
result = new_img
return result
# Todo: make it work
def translate_image(image):
N = len(image)
translation = []
for i in range(N):
translation.append(np.random.randint(1,7,(N,2)))
new_img = np.zeros((N,28,28))
for i in range(28):
for j in range(28):
data = image[:,i,j]
new_idx = np.array([N,i,j]) + translation
if np.all((new_idx[:,0]< 28) & (new_idx[:,1]<28)):
new_img[:,new_idx[:,0],new_idx[:,1]] = data
result = new_img
return result
# Todo: make it work
def crop_image(image): # How do we resize after crop??
pixels_crop = np.random.randint(1, 4)
result = image[pixels_crop:-pixels_crop, pixels_crop:-pixels_crop]
return result
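# One possible answer to the resize question above (a sketch, not part of the
# original pipeline): crop a single 28x28 image and interpolate it back to its
# original size with OpenCV so downstream shapes stay consistent.
def crop_and_resize_image(image, size=28):
    pixels_crop = np.random.randint(1, 4)
    cropped = image[pixels_crop:-pixels_crop, pixels_crop:-pixels_crop]
    # cv2.resize takes (width, height); bilinear interpolation keeps strokes smooth
    return cv2.resize(cropped, (size, size), interpolation=cv2.INTER_LINEAR)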
# Todo: vectorize
def sqeeze_image(image):
N = len(image)
frac = np.random.randint(70,100,N)/100
mat = []
for i in range(N):
mat.append(frac[i]*np.array([[1,0],[0,1]]))
new_imgs = np.zeros((N,28,28))
for i in range(28):
for j in range(28):
data = image[:,i,j]
new_idx = np.round(np.array([i,j])@mat).astype(int)
            if np.all((new_idx[:,0] < 28) & (new_idx[:,1] < 28)):
                new_imgs[np.arange(N), new_idx[:,0], new_idx[:,1]] = data
    result = new_imgs
    return result
# Use the filter on lots of things and save the data and generate plots
import numpy as np
import h5py
import scipy.sparse
import scipy.io
from constants import *
import ipdb
import sys
import cPickle as pickle
flen = DEE
flen_2 = 3
dt = EPSILON
st = 0.75 #kind of equivalent to sigma
root = '/home/bjkomer/deep_learning/DeepSLAM/'
prefix = 'conf_mat_smush_full_'
dname = "dataset"
res_dict = {}
ground_truth = scipy.io.loadmat('GroundTruth_Eynsham_40meters.mat')['ground_truth']
def main():
if len(sys.argv) == 2:
fnames = read_file(sys.argv[1])
else:
fnames = read_file('all.txt')
sys.argv.append('all.txt') # for file name consistency
#fname = '/home/bjkomer/deep_learning/DeepSLAM/conf_mat_smush_full_googlenet_inception_4b-output.h5'
#fname = '/home/bjkomer/deep_learning/DeepSLAM/conf_mat_avg.h5'
results = []
avg_matrix = None
for fname in fnames:
print(fname)
h5f = h5py.File(root + fname.rstrip(), 'r')
#conf_matrix = h5f[dname][:]
if ('conf_mat' in fname) and ('full' not in fname):
conf_matrix = h5f[dname][:] # Only the train vs test data
else:
conf_matrix = h5f[dname][0:4789, 4789:9575] # Only the train vs test data
if avg_matrix is None:
avg_matrix = conf_matrix
else:
avg_matrix += conf_matrix
h5f.close()
precision, recall, f1 = filter_mat(conf_matrix)
b_precision, b_recall, b_f1 = filter_boost_mat(conf_matrix)
results.append((fname.rstrip(), precision, recall, f1))
res_dict[fname.rstrip()] = (precision, recall, f1, b_precision, b_recall, b_f1)
print("")
print("averaging")
precision, recall, f1 = filter_mat(avg_matrix)
b_precision, b_recall, b_f1 = filter_boost_mat(avg_matrix)
avg_name = 'average_' + sys.argv[1][:-4]
results.append((avg_name.rstrip(), precision, recall, f1))
res_dict[avg_name.rstrip()] = (precision, recall, f1, b_precision, b_recall, b_f1)
pickle.dump(res_dict, open("filter_res_"+sys.argv[1][:-4]+".p", "wb"))
def read_file(file_name):
with open(file_name, 'rb') as f:
ret = f.readlines()
return ret
def filter_mat(test_matrix):
# grab the testing matrix from the confusion matrix
#test_matrix = conf_matrix[0:4789, 4789:9575]
# the min score is the best match
b = np.argmin(test_matrix, axis=0)
# Percentage of top matches used in the vibration calculation, allows the occasional outlier
inlier_fraction = 5/6.0
p = np.zeros(b.size)
matches = np.zeros(int(b.size - flen + flen_2))
max_diff = 0
for i in range(0, b.size - flen):
match_index = int(i + flen_2)
vibrations = np.abs( np.diff(b[i:i + flen]) )
sorted_vib = np.sort(vibrations)
        max_diff = np.max(sorted_vib[ 0 : int(np.round(inlier_fraction * flen)) ])
# Copyright 2016 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Various function to manipulate graphs for computing distances.
"""
import skimage.morphology
import numpy as np
import networkx as nx
import itertools
import logging
import graph_tool as gt
import graph_tool.topology
import graph_tool.generation
import src.utils as utils
# Compute shortest path from all nodes to or from all source nodes
def get_distance_node_list(gtG, source_nodes, direction, weights=None):
gtG_ = gt.Graph(gtG)
v = gtG_.add_vertex()
if weights is not None:
weights = gtG_.edge_properties[weights]
for s in source_nodes:
e = gtG_.add_edge(s, int(v))
if weights is not None:
weights[e] = 0.
if direction == 'to':
dist = gt.topology.shortest_distance(
gt.GraphView(gtG_, reversed=True), source=gtG_.vertex(int(v)),
target=None, weights=weights)
elif direction == 'from':
dist = gt.topology.shortest_distance(
gt.GraphView(gtG_, reversed=False), source=gtG_.vertex(int(v)),
target=None, weights=weights)
dist = np.array(dist.get_array())
dist = dist[:-1]
if weights is None:
dist = dist - 1
return dist
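# Example (sketch): weighted geodesic distance from every node to a single goal
# vertex on a graph whose edges carry a 'wts' property (names are illustrative):
#     dists = get_distance_node_list(gtG, source_nodes=[goal_id], direction='to',
#                                    weights='wts')
# Passing weights=None returns hop counts instead of weighted distances.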
# Functions for semantically labelling nodes in the traversal graph.
def generate_lattice(sz_x, sz_y):
"""Generates a lattice with sz_x vertices along x and sz_y vertices along y
direction Each of these vertices is step_size distance apart. Origin is at
(0,0). """
g = gt.generation.lattice([sz_x, sz_y])
x, y = np.meshgrid(np.arange(sz_x), np.arange(sz_y))
x = np.reshape(x, [-1, 1]);
y = np.reshape(y, [-1, 1]);
nodes = np.concatenate((x, y), axis=1)
return g, nodes
def add_diagonal_edges(g, nodes, sz_x, sz_y, edge_len):
offset = [sz_x + 1, sz_x - 1]
for o in offset:
s = np.arange(nodes.shape[0] - o - 1)
t = s + o
ind = np.all(np.abs(nodes[s, :] - nodes[t, :]) == np.array([[1, 1]]), axis=1)
s = s[ind][:, np.newaxis]
t = t[ind][:, np.newaxis]
st = np.concatenate((s, t), axis=1)
for i in range(st.shape[0]):
e = g.add_edge(st[i, 0], st[i, 1], add_missing=False)
g.ep['wts'][e] = edge_len
def convert_traversible_to_graph(traversible, ff_cost=1., fo_cost=1.,
oo_cost=1., connectivity=4):
assert (connectivity == 4 or connectivity == 8)
sz_x = traversible.shape[1]
sz_y = traversible.shape[0]
g, nodes = generate_lattice(sz_x, sz_y)
# Assign costs.
edge_wts = g.new_edge_property('float')
g.edge_properties['wts'] = edge_wts
wts = np.ones(g.num_edges(), dtype=np.float32)
edge_wts.get_array()[:] = wts
if connectivity == 8:
add_diagonal_edges(g, nodes, sz_x, sz_y, np.sqrt(2.))
se = np.array([[int(e.source()), int(e.target())] for e in g.edges()])
s_xy = nodes[se[:, 0]]
t_xy = nodes[se[:, 1]]
s_t = np.ravel_multi_index((s_xy[:, 1], s_xy[:, 0]), traversible.shape)
t_t = np.ravel_multi_index((t_xy[:, 1], t_xy[:, 0]), traversible.shape)
s_t = traversible.ravel()[s_t]
t_t = traversible.ravel()[t_t]
wts = np.zeros(g.num_edges(), dtype=np.float32)
wts[np.logical_and(s_t == True, t_t == True)] = ff_cost
wts[np.logical_and(s_t == False, t_t == False)] = oo_cost
wts[np.logical_xor(s_t, t_t)] = fo_cost
edge_wts = g.edge_properties['wts']
for i, e in enumerate(g.edges()):
edge_wts[e] = edge_wts[e] * wts[i]
# d = edge_wts.get_array()*1.
# edge_wts.get_array()[:] = d*wts
return g, nodes
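# Example (sketch): build a 4-connected lattice over a small free-space mask and
# read back the per-edge costs; nodes[i] is the (x, y) pixel of graph vertex i.
#     traversible = np.ones((5, 7), dtype=bool)   # True = free space
#     g, nodes = convert_traversible_to_graph(traversible, connectivity=4)
#     edge_costs = g.edge_properties['wts'].get_array()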
def label_nodes_with_class(nodes_xyt, class_maps, pix):
"""
Returns:
class_maps__: one-hot class_map for each class.
node_class_label: one-hot class_map for each class, nodes_xyt.shape[0] x n_classes
"""
# Assign each pixel to a node.
selem = skimage.morphology.disk(pix)
class_maps_ = class_maps * 1.
for i in range(class_maps.shape[2]):
class_maps_[:, :, i] = skimage.morphology.dilation(class_maps[:, :, i] * 1, selem)
class_maps__ = np.argmax(class_maps_, axis=2)
class_maps__[np.max(class_maps_, axis=2) == 0] = -1
# For each node pick out the label from this class map.
x = np.round(nodes_xyt[:, [0]]).astype(np.int32)
y = np.round(nodes_xyt[:, [1]]).astype(np.int32)
ind = np.ravel_multi_index((y, x), class_maps__.shape)
node_class_label = class_maps__.ravel()[ind][:, 0]
# Convert to one hot versions.
class_maps_one_hot = np.zeros(class_maps.shape, dtype=np.bool)
  node_class_label_one_hot = np.zeros((node_class_label.shape[0], class_maps.shape[2]), dtype=np.bool)
import numpy as np
import os
import re
import requests
import sys
import time
from netCDF4 import Dataset
import pandas as pd
from bs4 import BeautifulSoup
from tqdm import tqdm
# setup constants used to access the data from the different M2M interfaces
BASE_URL = 'https://ooinet.oceanobservatories.org/api/m2m/' # base M2M URL
SENSOR_URL = '12576/sensor/inv/' # Sensor Information
# setup access credentials
AUTH = ['OOIAPI-853A3LA6QI3L62', '<KEY>']
def M2M_Call(uframe_dataset_name, start_date, end_date):
options = '?beginDT=' + start_date + '&endDT=' + end_date + '&format=application/netcdf'
r = requests.get(BASE_URL + SENSOR_URL + uframe_dataset_name + options, auth=(AUTH[0], AUTH[1]))
if r.status_code == requests.codes.ok:
data = r.json()
else:
return None
# wait until the request is completed
print('Waiting for OOINet to process and prepare data request, this may take up to 20 minutes')
url = [url for url in data['allURLs'] if re.match(r'.*async_results.*', url)][0]
check_complete = url + '/status.txt'
with tqdm(total=400, desc='Waiting') as bar:
for i in range(400):
r = requests.get(check_complete)
bar.update(1)
if r.status_code == requests.codes.ok:
bar.n = 400
bar.last_print_n = 400
bar.refresh()
                elapsed = (i * 3) / 60  # compute here so the elapsed time is defined even on an immediately successful check
                print('\nrequest completed in %f minutes.' % elapsed)
break
else:
time.sleep(3)
elapsed = (i * 3) / 60
return data
def M2M_Files(data, tag=''):
"""
Use a regex tag combined with the results of the M2M data request to collect the data from the THREDDS catalog.
    The matching NetCDF files are returned as a list for later collection (e.g. with M2M_Data).
:param data: JSON object returned from M2M data request with details on where the data is to be found for download
:param tag: regex tag to use in discriminating the data files, so we only collect the correct ones
    :return: list of NetCDF files in the THREDDS catalog that match the tag
"""
# Create a list of the files from the request above using a simple regex as a tag to discriminate the files
url = [url for url in data['allURLs'] if re.match(r'.*thredds.*', url)][0]
files = list_files(url, tag)
return files
def list_files(url, tag=''):
"""
Function to create a list of the NetCDF data files in the THREDDS catalog created by a request to the M2M system.
:param url: URL to user's THREDDS catalog specific to a data request
:param tag: regex pattern used to distinguish files of interest
:return: list of files in the catalog with the URL path set relative to the catalog
"""
page = requests.get(url).text
soup = BeautifulSoup(page, 'html.parser')
pattern = re.compile(tag)
return [node.get('href') for node in soup.find_all('a', text=pattern)]
def M2M_Data(nclist,variables):
thredds = 'https://opendap.oceanobservatories.org/thredds/dodsC/ooi/'
#nclist is going to contain more than one url eventually
for jj in range(len(nclist)):
url=nclist[jj]
url=url[25:]
dap_url = thredds + url + '#fillmismatch'
openFile = Dataset(dap_url,'r')
for ii in range(len(variables)):
dum = openFile.variables[variables[ii].name]
variables[ii].data = np.append(variables[ii].data, dum[:].data)
tmp = variables[0].data/60/60/24
time_converted = pd.to_datetime(tmp, unit='D', origin=pd.Timestamp('1900-01-01'))
return variables, time_converted
class var(object):
def __init__(self):
"""A Class that generically holds data with a variable name
and the units as attributes"""
self.name = ''
self.data = np.array([])
self.units = ''
def __repr__(self):
return_str = "name: " + self.name + '\n'
return_str += "units: " + self.units + '\n'
return_str += "data: size: " + str(self.data.shape)
return return_str
class structtype(object):
def __init__(self):
""" A class that imitates a Matlab structure type
"""
self._data = []
def __getitem__(self, index):
"""implement index behavior in the struct"""
if index == len(self._data):
self._data.append(var())
return self._data[index]
def __len__(self):
return len(self._data)
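# Example workflow (sketch, assuming valid credentials in AUTH and that the chosen
# stream exists; the stream path and date strings below are illustrative only):
#     var_list = ...  # variable list for the chosen stream, built via M2M_URLs below
#     data = M2M_Call('CE02SHSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument',
#                     '2019-01-01T00:00:00.000Z', '2019-02-01T00:00:00.000Z')
#     files = M2M_Files(data, '.*METBK.*\\.nc$')
#     var_list, times = M2M_Data(files, var_list)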
def M2M_URLs(platform_name,node,instrument_class,method):
var_list = structtype()
#MOPAK
if platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/SBD17/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD11/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD11/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/SBD17/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD11/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD11/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSPM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSPM/SBS01/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#METBK
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m'
var_list[10].units = 'W/m'
var_list[11].units = 'mm'
var_list[12].units = 'W/m'
var_list[13].units = 'W/m'
var_list[14].units = 'W/m'
var_list[15].units = 'W/m'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m'
var_list[10].units = 'W/m'
var_list[11].units = 'mm'
var_list[12].units = 'W/m'
var_list[13].units = 'W/m'
var_list[14].units = 'W/m'
var_list[15].units = 'W/m'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m'
var_list[10].units = 'W/m'
var_list[11].units = 'mm'
var_list[12].units = 'W/m'
var_list[13].units = 'W/m'
var_list[14].units = 'W/m'
var_list[15].units = 'W/m'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m'
var_list[10].units = 'W/m'
var_list[11].units = 'mm'
var_list[12].units = 'W/m'
var_list[13].units = 'W/m'
var_list[14].units = 'W/m'
var_list[15].units = 'W/m'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
#FLORT
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/SBD17/06-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/SBD17/06-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID27/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID27/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID27/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID27/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSPM/WFP01/04-FLORTK000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
#FDCHP
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'FDCHP' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD12/08-FDCHPA000/telemetered/fdchp_a_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#DOSTA
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID27/04-DOSTAD000/telemetered/dosta_abcdjm_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID27/04-DOSTAD000/telemetered/dosta_abcdjm_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID27/04-DOSTAD000/telemetered/dosta_abcdjm_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID27/04-DOSTAD000/telemetered/dosta_abcdjm_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD37/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD37/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD37/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD37/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSPM/WFP01/02-DOFSTK000/telemetered/dofst_k_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dofst_k_oxygen_l2'
var_list[2].name = 'dofst_k_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'Hz'
var_list[3].units = 'dbar'
#ADCP
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID26/01-ADCPTA000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID26/01-ADCPTC000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID26/01-ADCPTA000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID26/01-ADCPTC000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD35/04-ADCPTM000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD35/04-ADCPTM000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD35/04-ADCPTC000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD35/04-ADCPSJ000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
#ZPLSC
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD37/07-ZPLSCC000/telemetered/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD37/07-ZPLSCC000/telemetered/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD37/07-ZPLSCC000/telemetered/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD37/07-ZPLSCC000/telemetered/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD37/07-ZPLSCC000/recovered_host/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD37/07-ZPLSCC000/recovered_host/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD37/07-ZPLSCC000/recovered_host/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD37/07-ZPLSCC000/recovered_host/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#WAVSS
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_statistics'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_statistics'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_statistics'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_statistics'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
#VELPT
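# VELPT (Single Point Velocity Meter): buoy and NSIF branches map eastward/northward/upward
# velocity plus heading, roll, pitch, temperature and pressure in the instrument's raw
# deci-degree / centi-degree / mbar fields.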
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/SBD17/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD11/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD11/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/SBD17/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD11/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD11/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID26/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID26/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID26/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID26/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
#PCO2W
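# PCO2W (pCO2 Water): thermistor temperature (degC) and seawater pCO2 (uatm) from the
# NSIF and MFN packages.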
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD35/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD35/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD35/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD35/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
#PHSEN
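# PHSEN (Seawater pH): thermistor temperature and seawater pH from the NSIF and MFN packages.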
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID26/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID26/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID26/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID26/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD35/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD35/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD35/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD35/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
#SPKIR
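# SPKIR (Spectral Irradiance): downwelling irradiance vector (uW cm-2 nm-1) from the NSIF packages.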
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID26/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID26/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID26/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID26/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
#PRESF
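# PRESF (Seafloor Pressure): absolute seafloor pressure and seawater temperature from the
# MFN tide-measurement streams.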
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD35/02-PRESFA000/telemetered/presf_abc_dcl_tide_measurement'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD35/02-PRESFA000/telemetered/presf_abc_dcl_tide_measurement'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD35/02-PRESFB000/telemetered/presf_abc_dcl_tide_measurement'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD35/02-PRESFC000/telemetered/presf_abc_dcl_tide_measurement'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
#CTDBP
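# CTDBP (pumped CTD): temperature, practical salinity, density, pressure and conductivity
# for the buoy, NSIF and MFN CTDs on each mooring.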
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD37/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/SBD17/06-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD37/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/SBD17/06-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID27/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID27/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID27/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID27/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD37/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD37/03-CTDBPE000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
#VEL3D
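# VEL3D (3-D Single Point Velocity Meter): turbulent east/north/up velocities and seawater
# pressure from the MFN-mounted instruments.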
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD35/01-VEL3DD000/telemetered/vel3d_cd_dcl_velocity_data'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD35/01-VEL3DD000/telemetered/vel3d_cd_dcl_velocity_data'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD35/01-VEL3DD000/telemetered/vel3d_cd_dcl_velocity_data'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD35/01-VEL3DD000/telemetered/vel3d_cd_dcl_velocity_data'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
#VEL3DK
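# VEL3D-K: the CE09OSPM wire-following profiler's velocity meter (east/north/up velocity,
# heading/pitch/roll, CTD pressure); the profiler CTDPF branch is grouped here as well.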
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSPM/WFP01/01-VEL3DK000/telemetered/vel3d_k_wfp_stc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_k_eastward_velocity'
var_list[2].name = 'vel3d_k_northward_velocity'
var_list[3].name = 'vel3d_k_upward_velocity'
var_list[4].name = 'vel3d_k_heading'
var_list[5].name = 'vel3d_k_pitch'
var_list[6].name = 'vel3d_k_roll'
var_list[7].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'ddegrees'
var_list[5].units = 'ddegrees'
var_list[6].units = 'ddegrees'
var_list[7].units = 'dbar'
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSPM/WFP01/03-CTDPFK000/telemetered/ctdpf_ckl_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'ctdpf_ckl_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdpf_ckl_seawater_pressure'
var_list[5].name = 'ctdpf_ckl_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
#PCO2A
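# PCO2A (pCO2 Air-Sea): surface-water and atmospheric pCO2 plus the derived CO2 flux
# from the buoy packages.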
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD12/04-PCO2AA000/telemetered/pco2a_a_dcl_instrument_water'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD12/04-PCO2AA000/telemetered/pco2a_a_dcl_instrument_water'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD12/04-PCO2AA000/telemetered/pco2a_a_dcl_instrument_water'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD12/04-PCO2AA000/telemetered/pco2a_a_dcl_instrument_water'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
#PARAD
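# PARAD (Photosynthetically Available Radiation): profiler PAR and co-located CTD pressure.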
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSPM/WFP01/05-PARADK000/telemetered/parad_k__stc_imodem_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_k_par'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
#OPTAA
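# OPTAA (Absorption Spectrophotometer): only the time coordinate is mapped for these streams.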
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID27/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID27/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID27/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID27/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD37/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD37/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD37/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD37/01-OPTAAC000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#NUTNR
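# NUTNR (Nitrate, SUNA): raw and salinity-corrected nitrate concentration (umol/L) from the
# NSIF sensors.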
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/07-NUTNRB000/telemetered/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID26/07-NUTNRB000/telemetered/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID26/07-NUTNRB000/telemetered/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/07-NUTNRB000/telemetered/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID26/07-NUTNRB000/telemetered/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID26/07-NUTNRB000/telemetered/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
##
#MOPAK
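# MOPAK (3-Axis Motion Pack): recovered-host accelerometer streams; only the time coordinate
# is mapped.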
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/SBD17/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD11/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD11/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/SBD17/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD11/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD11/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSPM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSPM/SBS01/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#METBK
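# METBK (Bulk Meteorology Instrument Package): recovered-host surface met data, including SST,
# conductivity, winds, barometric pressure, air temperature, humidity, radiation, precipitation
# and the minute-averaged bulk flux products.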
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD11/06-METBKA000/recovered_host/metbk_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = 'percent'
var_list[9].units = 'W m-2'
var_list[10].units = 'W m-2'
var_list[11].units = 'mm'
var_list[12].units = 'W m-2'
var_list[13].units = 'W m-2'
var_list[14].units = 'W m-2'
var_list[15].units = 'W m-2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD11/06-METBKA000/recovered_host/metbk_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = 'percent'
var_list[9].units = 'W m-2'
var_list[10].units = 'W m-2'
var_list[11].units = 'mm'
var_list[12].units = 'W m-2'
var_list[13].units = 'W m-2'
var_list[14].units = 'W m-2'
var_list[15].units = 'W m-2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD11/06-METBKA000/recovered_host/metbk_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = 'percent'
var_list[9].units = 'W m-2'
var_list[10].units = 'W m-2'
var_list[11].units = 'mm'
var_list[12].units = 'W m-2'
var_list[13].units = 'W m-2'
var_list[14].units = 'W m-2'
var_list[15].units = 'W m-2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD11/06-METBKA000/recovered_host/metbk_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = 'percent'
var_list[9].units = 'W m-2'
var_list[10].units = 'W m-2'
var_list[11].units = 'mm'
var_list[12].units = 'W m-2'
var_list[13].units = 'W m-2'
var_list[14].units = 'W m-2'
var_list[15].units = 'W m-2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
#FLORT
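# Three-channel fluorometer (FLORT): seawater scattering coefficient, chlorophyll-a, CDOM,
# total volume scattering, and optical backscatter.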
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/SBD17/06-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/SBD17/06-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID27/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID27/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID27/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID27/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
#FDCHP
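# Direct covariance flux package (FDCHP): only the time coordinate is requested from this stream.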
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'FDCHP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD12/08-FDCHPA000/recovered_host/fdchp_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#DOSTA
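# Dissolved oxygen optode (DOSTA): NSIF streams carry estimated and temperature-corrected oxygen plus
# optode temperature; MFN streams carry dissolved_oxygen and the optode oxygen value.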
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/03-DOSTAD000/recovered_host/dosta_abcdjm_ctdbp_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID27/04-DOSTAD000/recovered_host/dosta_abcdjm_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID27/04-DOSTAD000/recovered_host/dosta_abcdjm_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/03-DOSTAD000/recovered_host/dosta_abcdjm_ctdbp_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID27/04-DOSTAD000/recovered_host/dosta_abcdjm_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID27/04-DOSTAD000/recovered_host/dosta_abcdjm_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD37/03-DOSTAD000/recovered_host/dosta_abcdjm_ctdbp_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD37/03-DOSTAD000/recovered_host/dosta_abcdjm_ctdbp_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD37/03-DOSTAD000/recovered_host/dosta_abcdjm_ctdbp_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD37/03-DOSTAD000/recovered_host/dosta_abcdjm_ctdbp_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
#ADCP
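# Acoustic Doppler current profiler (ADCP, earth coordinates): bin depths, heading/pitch/roll,
# and eastward/northward/upward seawater velocities.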
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID26/01-ADCPTA000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID26/01-ADCPTC000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID26/01-ADCPTA000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID26/01-ADCPTC000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD35/04-ADCPTM000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD35/04-ADCPTM000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD35/04-ADCPTC000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD35/04-ADCPSJ000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
#WAVSS
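# Surface wave statistics (WAVSS_Stats): zero crossings, wave height and period statistics,
# and mean wave direction/spread.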
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_statistics_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_statistics_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_statistics_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_statistics_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
#VELPT
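# Single-point velocity meter (VELPT): velocity components, attitude in deci-degrees, temperature, and pressure.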
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/SBD17/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD11/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD11/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredHost':
#uframe_dataset_name = 'CE06ISSM/RID16/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
uframe_dataset_name = 'CE06ISSM/RID16/04-VELPTA000/recovered_host/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD11/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD11/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID26/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID26/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID26/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID26/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
#PCO2W
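# Seawater pCO2 (PCO2W): thermistor temperature and pco2_seawater.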
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'PCO2W' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/05-PCO2WB000/recovered_host/pco2w_abc_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD35/05-PCO2WB000/recovered_host/pco2w_abc_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'PCO2W' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/05-PCO2WB000/recovered_host/pco2w_abc_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD35/05-PCO2WB000/recovered_host/pco2w_abc_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD35/05-PCO2WB000/recovered_host/pco2w_abc_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD35/05-PCO2WB000/recovered_host/pco2w_abc_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
#PHSEN
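# Seawater pH (PHSEN): thermistor temperature and phsen_abcdef_ph_seawater.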
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID26/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID26/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID26/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID26/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD35/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD35/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD35/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD35/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
#SPKIR
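# Multispectral downwelling irradiance vector (uW cm-2 nm-1)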
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/08-SPKIRB000/recovered_host/spkir_abj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID26/08-SPKIRB000/recovered_host/spkir_abj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID26/08-SPKIRB000/recovered_host/spkir_abj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/08-SPKIRB000/recovered_host/spkir_abj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID26/08-SPKIRB000/recovered_host/spkir_abj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID26/08-SPKIRB000/recovered_host/spkir_abj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
#PRESF
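# Seafloor pressure (tide) streams: absolute seafloor pressure (dbar) and seawater temperature (degC)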
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD35/02-PRESFA000/recovered_host/presf_abc_dcl_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD35/02-PRESFA000/recovered_host/presf_abc_dcl_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD35/02-PRESFB000/recovered_host/presf_abc_dcl_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD35/02-PRESFC000/recovered_host/presf_abc_dcl_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
#CTDBP
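# Pumped CTD streams: temperature, practical salinity, density, pressure, and conductivity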
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD37/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/SBD17/06-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD37/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/SBD17/06-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID27/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID27/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID27/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID27/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD37/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD37/03-CTDBPE000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
#VEL3D
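# Point velocity meter streams: eastward/northward/upward turbulent velocities plus seawater pressure (mbar)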
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD35/01-VEL3DD000/recovered_host/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD35/01-VEL3DD000/recovered_host/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD35/01-VEL3DD000/recovered_host/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD35/01-VEL3DD000/recovered_host/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
#PCO2A
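# Air-sea pCO2 streams: partial pressure of CO2 in surface seawater and in the atmosphere, plus the derived CO2 flux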
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD12/04-PCO2AA000/recovered_host/pco2a_a_dcl_instrument_water_recovered'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD12/04-PCO2AA000/recovered_host/pco2a_a_dcl_instrument_water_recovered'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD12/04-PCO2AA000/recovered_host/pco2a_a_dcl_instrument_water_recovered'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD12/04-PCO2AA000/recovered_host/pco2a_a_dcl_instrument_water_recovered'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
#OPTAA
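# Absorption/attenuation spectrophotometer streams: only the time coordinate is requested here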
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID27/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID27/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID27/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID27/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD37/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD37/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD37/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD37/01-OPTAAC000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#NUTNR
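# Nitrate (SUNA) streams: raw and salinity-corrected nitrate concentrations (umol/L)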
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/07-NUTNRB000/recovered_host/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID26/07-NUTNRB000/recovered_host/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID26/07-NUTNRB000/recovered_host/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/07-NUTNRB000/recovered_host/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID26/07-NUTNRB000/recovered_host/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID26/07-NUTNRB000/recovered_host/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
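#CTDBP - recovered-instrument (and recovered-WFP) CTD streams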
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/RID16/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD37/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/SBD17/06-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/RID16/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD37/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/SBD17/06-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE02SHSM/RID27/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/RID27/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSSM/RID27/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/RID27/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/MFD37/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/MFD37/03-CTDBPE000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'RecoveredWFP':
uframe_dataset_name = 'CE09OSPM/WFP01/03-CTDPFK000/recovered_wfp/ctdpf_ckl_wfp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdpf_ckl_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdpf_ckl_seawater_pressure'
var_list[5].name = 'ctdpf_ckl_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
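#ADCP - recovered-instrument earth-coordinate velocity streams with bin depths and instrument attitude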
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE02SHSM/RID26/01-ADCPTA000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSSM/RID26/01-ADCPTC000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/RID26/01-ADCPTA000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/RID26/01-ADCPTC000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD35/04-ADCPTM000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD35/04-ADCPTM000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/MFD35/04-ADCPTC000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/MFD35/04-ADCPSJ000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
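#ZPLSC - bio-acoustic sonar echogram streams; only the time coordinate is requested here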
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD37/07-ZPLSCC000/recovered_inst/zplsc_echogram_data'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD37/07-ZPLSCC000/recovered_inst/zplsc_echogram_data'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/MFD37/07-ZPLSCC000/recovered_inst/zplsc_echogram_data'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/MFD37/07-ZPLSCC000/recovered_inst/zplsc_echogram_data'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
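#VELPT - recovered-instrument single-point velocity meter streams: velocity components, attitude, temperature, and pressure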
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/SBD17/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE02SHSM/SBD11/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSSM/SBD11/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/SBD17/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/SBD11/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/SBD11/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/RID16/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE02SHSM/RID26/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSSM/RID26/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/RID16/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/RID26/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/RID26/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'VEL3D' and method == 'RecoveredWFP':
uframe_dataset_name = 'CE09OSPM/WFP01/01-VEL3DK000/recovered_wfp/vel3d_k_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_k_eastward_velocity'
var_list[2].name = 'vel3d_k_northward_velocity'
var_list[3].name = 'vel3d_k_upward_velocity'
var_list[4].name = 'vel3d_k_heading'
var_list[5].name = 'vel3d_k_pitch'
var_list[6].name = 'vel3d_k_roll'
var_list[7].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'ddegrees'
var_list[5].units = 'ddegrees'
var_list[6].units = 'ddegrees'
var_list[7].units = 'dbar'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD35/01-VEL3DD000/recovered_inst/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
        var_list[1].data = np.array([])
from __future__ import division
from __future__ import print_function
import numpy as np
import scipy.sparse as sp
import pickle as pkl
import os
import h5py
import pandas as pd
import pdb
from data_utils import load_data, map_data, download_dataset
def normalize_features(feat):
    degree = np.asarray(feat.sum(1)).flatten()
# set zeros to inf to avoid dividing by zero
degree[degree == 0.] = np.inf
degree_inv = 1. / degree
degree_inv_mat = sp.diags([degree_inv], [0])
feat_norm = degree_inv_mat.dot(feat)
if feat_norm.nnz == 0:
print('ERROR: normalized adjacency matrix has only zero entries!!!!!')
        exit()
return feat_norm
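# Minimal usage sketch (values are illustrative; this helper is not part of the
# original pipeline): after normalize_features every nonzero row sums to 1.
def _demo_normalize_features():
    feat = sp.csr_matrix(np.array([[1., 1., 0.],
                                   [0., 2., 2.]]))
    feat_norm = normalize_features(feat)
    # feat_norm.toarray() == [[0.5, 0.5, 0.], [0., 0.5, 0.5]]
    return feat_norm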
def normalize_edge_features_3D(feat):
sum= feat.sum(2)
degree=sum.reshape(-1)
# set zeros to inf to avoid dividing by zero
degree[degree == 0.] = np.inf
degree_inv = 1. / degree
degree_inv_mat = sp.diags([degree_inv], [0])
feat_r=np.reshape(feat,((feat.shape[0]*feat.shape[1]), feat.shape[2]))
feat_norm = degree_inv_mat.dot(feat_r)
    if np.count_nonzero(feat_norm) == 0:
        print('ERROR: normalized adjacency matrix has only zero entries!!!!!')
        exit()
feat_norm=feat_norm.reshape(feat.shape[0],feat.shape[1],feat.shape[2])
return feat_norm
def normalize_edge_features_2D(feat):
sum= feat.sum(1)
degree=sum.reshape(-1)
# set zeros to inf to avoid dividing by zero
degree[degree == 0.] = np.inf
degree_inv = 1. / degree
degree_inv_mat = sp.diags([degree_inv], [0])
feat_norm = degree_inv_mat.dot(feat)
    if np.count_nonzero(feat_norm) == 0:
        print('ERROR: normalized adjacency matrix has only zero entries!!!!!')
        exit()
return feat_norm
def normalize_edge_features_3Dto_2D(feat):
prob_r = [0.2, 0.3, 0.5, 0.7, 0.8]
    adj_tot = [np.sum(adj, axis=2) for adj in feat]
    # rebuild the list so each rating level is actually scaled by its prior weight
    adjacencies_prioritize = [adj * prob_r[i] for i, adj in enumerate(adj_tot)]
adj_sp = [sp.csr_matrix(adj) for adj in adjacencies_prioritize]
adj_sp = globally_normalize_bipartite_adjacency(adj_sp)
return adj_sp
def load_matlab_file(path_file, name_field):
"""
load '.mat' files
inputs:
path_file, string containing the file path
name_field, string containig the field name (default='shape')
warning:
'.mat' files should be saved in the '-v7.3' format
"""
db = h5py.File(path_file, 'r')
ds = db[name_field]
try:
if 'ir' in ds.keys():
data = np.asarray(ds['data'])
ir = np.asarray(ds['ir'])
jc = np.asarray(ds['jc'])
out = sp.csc_matrix((data, ir, jc)).astype(np.float32)
except AttributeError:
# Transpose in case is a dense matrix because of the row- vs column- major ordering between python and matlab
out = np.asarray(ds).astype(np.float32).T
db.close()
return out
def preprocess_user_item_features(u_features, v_features):
"""
Creates one big feature matrix out of user features and item features.
Stacks item features under the user features.
"""
zero_csr_u = sp.csr_matrix((u_features.shape[0], v_features.shape[1]), dtype=u_features.dtype) #121 x 1232
zero_csr_v = sp.csr_matrix((v_features.shape[0], u_features.shape[1]), dtype=v_features.dtype) # 1232 x 121
u_features = sp.hstack([u_features, zero_csr_u], format='csr') # 121 x 121 stack 121 x 1232= 121 x [121 + 1232]
v_features = sp.hstack([zero_csr_v, v_features], format='csr') # 1232 x 121 stack 1232 x 1232= 1232 x [121 + 1232]
return u_features, v_features
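# Minimal usage sketch (sizes are illustrative; this helper is not part of the
# original pipeline): user and item features end up block-diagonal in one shared
# feature space of width num_users + num_items.
def _demo_preprocess_user_item_features():
    u_feat = sp.identity(3, format='csr')   # 3 users
    v_feat = sp.identity(4, format='csr')   # 4 items
    u_full, v_full = preprocess_user_item_features(u_feat, v_feat)
    # u_full.shape == (3, 7), v_full.shape == (4, 7)
    return u_full.shape, v_full.shape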
def globally_normalize_bipartite_adjacency(adjacencies, verbose=False, symmetric=True):
""" Globally Normalizes set of bipartite adjacency matrices """
#a=isinstance(adjacencies,list) #true
if verbose:
print('Symmetrically normalizing bipartite adj')
# degree_u and degree_v are row and column sums of adj+I
adj_tot = np.sum(adj for adj in adjacencies)
degree_u = np.asarray(adj_tot.sum(1)).flatten()
degree_v = np.asarray(adj_tot.sum(0)).flatten()
# set zeros to inf to avoid dividing by zero
degree_u[degree_u == 0.] = np.inf
degree_v[degree_v == 0.] = np.inf
degree_u_inv_sqrt = 1. / np.sqrt(degree_u) # 1 /sqroot degree of u
degree_v_inv_sqrt = 1. / np.sqrt(degree_v) # 1 /sqroot degree of v
degree_u_inv_sqrt_mat = sp.diags([degree_u_inv_sqrt], [0])
degree_v_inv_sqrt_mat = sp.diags([degree_v_inv_sqrt], [0])
degree_u_inv = degree_u_inv_sqrt_mat.dot(degree_u_inv_sqrt_mat)
if symmetric:
#print("yes sym") called for ml _100k
adj_norm = [degree_u_inv_sqrt_mat.dot(adj).dot(degree_v_inv_sqrt_mat) for adj in adjacencies]
else:
adj_norm = [degree_u_inv.dot(adj) for adj in adjacencies]
return adj_norm
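# Minimal usage sketch (values are illustrative; this helper is not part of the
# original pipeline): each rating-level adjacency A_r is rescaled as
# D_u^-1/2 . A_r . D_v^-1/2, with degrees taken from the summed adjacency.
def _demo_globally_normalize_bipartite_adjacency():
    adj_r1 = sp.csr_matrix(np.array([[1., 0.], [0., 1.]]))
    adj_r2 = sp.csr_matrix(np.array([[0., 1.], [1., 0.]]))
    adj_norm = globally_normalize_bipartite_adjacency([adj_r1, adj_r2])
    # every user/item degree is 2, so each nonzero entry becomes 0.5
    return [a.toarray() for a in adj_norm]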
def globally_normalize_tripartite_adjacency_matrix(adjacencies, verbose=False, symmetric=True):
""" Globally Normalizes set of bipartite adjacency matrices """
# a=isinstance(adjacencies,list) #true
if verbose:
print('Symmetrically normalizing bipartite adj')
# degree_u and degree_v are row and column sums of adj+I
adjacencies_uv_c= [np.reshape(adj,(adj.shape[0]*adj.shape[1], adj.shape[2])) for adj in adjacencies]
print(f"adjacencies_uv_c.shape[0] {adjacencies_uv_c[0].shape}")
adj_tot = np.sum(adj for adj in adjacencies)
adj_tot_uv_c=np.reshape(adj_tot, (adj_tot.shape[0]*adj_tot.shape[1], adj_tot.shape[2]))
adj_tot_t=np.transpose(adj_tot, (1,0,2))
adj_tot_vu_c = np.reshape(adj_tot_t, (adj_tot_t.shape[0] * adj_tot_t.shape[1], adj_tot_t.shape[2]))
degree_uv_c = np.asarray(adj_tot_uv_c.sum(1)).flatten()
degree_vu_c = np.asarray(adj_tot_vu_c.sum(1)).flatten()
# set zeros to inf to avoid dividing by zero
degree_uv_c[degree_uv_c == 0.] = np.inf
degree_vu_c[degree_vu_c == 0.] = np.inf
degree_uv_inv_sqrt = 1. / np.sqrt(degree_uv_c) # 1 /sqroot degree of u
degree_vu_inv_sqrt = 1. / np.sqrt(degree_vu_c) # 1 /sqroot degree of v
degree_uv_inv_sqrt_mat = sp.diags([degree_uv_inv_sqrt], [0])
degree_vu_inv_sqrt_mat = sp.diags([degree_vu_inv_sqrt], [0])
degree_uv_inv = degree_uv_inv_sqrt_mat.dot(degree_uv_inv_sqrt_mat)
degree_vu_inv = degree_vu_inv_sqrt_mat.dot(degree_vu_inv_sqrt_mat)
if symmetric:
# print("yes sym") called for ml _100k
adj_norm_uv_c = [degree_uv_inv_sqrt_mat.dot(adj) for adj in adjacencies_uv_c]
adj_norm_u_v_c=[np.reshape(adj, (adjacencies[0].shape[0],adjacencies[0].shape[1], adjacencies[0].shape[2])) for adj in adj_norm_uv_c]
adj_norm_v_u_c = [np.transpose(adj, (1, 0, 2)) for adj in adj_norm_u_v_c]
adj_norm_vu_c=[np.reshape(adj, (adj.shape[0]*adj.shape[1], adj.shape[2])) for adj in adj_norm_v_u_c]
adj_norm_vu_c = [degree_vu_inv_sqrt_mat.dot(adj) for adj in adj_norm_vu_c]
adj_norm = [np.reshape(adj,(adjacencies[0].shape[0], adjacencies[0].shape[1], adjacencies[0].shape[2])) for adj in adj_norm_vu_c]
else:
adj_norm = [degree_uv_inv.dot(adj) for adj in adjacencies]
print(f"adj_normc {adj_norm[0].shape}")
return adj_norm
def user_context_adjacency(adjacencies):
""" Find importance of context for users
giving high probability to context with rating 5
"""
print(f"I am user_context_adjacency {type(adjacencies)} adjacencies[0].shape {adjacencies[0].shape}")
adj_tot = np.sum(adj for adj in adjacencies)
deg_u=np.sum(adj_tot, axis = 1)
deg_u = np.sum(deg_u, axis=1)
print(f"degree_u {deg_u.shape}")
# set zeros to inf to avoid dividing by zero
deg_u[deg_u == 0.] = np.inf
degree_u_inv_sqrt = 1. / np.sqrt(deg_u)
degree_u_inv_sqrt_mat = sp.diags([degree_u_inv_sqrt], [0])
adju_c=[np.sum(adj, axis=1) for adj in adjacencies]
adju_c_norm = [degree_u_inv_sqrt_mat.dot(adj) for adj in adju_c]
    # normalize this matrix by dividing by the square root of the degree
#print(f"degree_u_inv_sqrt_mat shape {degree_u_inv_sqrt_mat.shape} {degree_u_inv_sqrt_mat}")
prob_r=[0.2,0.3,0.5,0.7,0.8]
    # rebuild the list so each rating level is actually scaled by its prior weight
    adjacencies_prioritize = [adj * prob_r[i] for i, adj in enumerate(adju_c_norm)]
adju_c_imp=np.sum(adj for adj in adjacencies_prioritize)
#adjacencies_temp_tot = np.sum(adj for adj in adjacencies_temp)
return adju_c_imp
def item_context_adjacency(adjacencies):
""" Find importance of context for items
giving high probability to context with rating 5
"""
adj_tot = np.sum(adj for adj in adjacencies)
deg_v = np.sum(adj_tot, axis=1)
deg_v = np.sum(deg_v, axis=1)
# set zeros to inf to avoid dividing by zero
deg_v[deg_v == 0.] = np.inf
degree_v_inv_sqrt = 1. / np.sqrt(deg_v)
degree_v_inv_sqrt_mat = sp.diags([degree_v_inv_sqrt], [0])
adjv_c = [np.sum(adj, axis=1) for adj in adjacencies]
adjv_c_norm = [degree_v_inv_sqrt_mat.dot(adj) for adj in adjv_c]
    # normalize this matrix by dividing by the square root of the degree
prob_r = [0.2, 0.3, 0.5, 0.7, 0.8]
    # rebuild the list so each rating level is actually scaled by its prior weight
    adjacencies_prioritize = [adj * prob_r[i] for i, adj in enumerate(adjv_c_norm)]
adjv_c_imp = np.sum(adj for adj in adjacencies_prioritize)
# adjacencies_temp_tot = np.sum(adj for adj in adjacencies_temp)
return adjv_c_imp
def user_context_fromedge(adjacency):
""" Find importance of context for users
giving high probability to context with rating 5
"""
deg_u = np.sum(adjacency, axis=1)
deg_u = np.sum(deg_u, axis=1)
print(f"degree_u {deg_u.shape}")
# set zeros to inf to avoid dividing by zero
deg_u[deg_u == 0.] = np.inf
degree_u_inv_sqrt = 1. / np.sqrt(deg_u)
degree_u_inv_sqrt_mat = sp.diags([degree_u_inv_sqrt], [0])
adju_c = np.sum(adjacency, axis=1)
adju_c_norm = degree_u_inv_sqrt_mat.dot(adju_c)
    # normalize this matrix by dividing by the square root of the degree
return adju_c_norm
def item_context_fromedge(adjacency):
""" Find importance of context for users
giving high probability to context with rating 5
"""
deg_v = np.sum(adjacency, axis=1)
deg_v = np.sum(deg_v, axis=1)
# set zeros to inf to avoid dividing by zero
deg_v[deg_v == 0.] = np.inf
degree_v_inv_sqrt = 1. / np.sqrt(deg_v)
degree_v_inv_sqrt_mat = sp.diags([degree_v_inv_sqrt], [0])
adjv_c = np.sum(adjacency, axis=1)
adjv_c_norm = degree_v_inv_sqrt_mat.dot(adjv_c)
# adjacencies_temp_tot = np.sum(adj for adj in adjacencies_temp)
return adjv_c_norm
def sparse_to_tuple(sparse_mx):
""" change of format for sparse matrix. This format is used
for the feed_dict where sparse matrices need to be linked to placeholders
representing sparse matrices. """
if not sp.isspmatrix_coo(sparse_mx):
sparse_mx = sparse_mx.tocoo()
coords = np.vstack((sparse_mx.row, sparse_mx.col)).transpose()
values = sparse_mx.data
shape = sparse_mx.shape
return coords, values, shape
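# Minimal usage sketch (values are illustrative; this helper is not part of the
# original pipeline): sparse_to_tuple turns a sparse matrix into the
# (coords, values, shape) triple expected by sparse placeholders.
def _demo_sparse_to_tuple():
    mx = sp.csr_matrix(np.array([[0., 3.], [4., 0.]]))
    coords, values, shape = sparse_to_tuple(mx)
    # coords == [[0, 1], [1, 0]], values == [3., 4.], shape == (2, 2)
    return coords, values, shape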
def create_trainvaltest_split(dataset, seed=1234, testing=False, datasplit_path=None, datasplit_from_file=False,
verbose=True, rating_map=None, post_rating_map=None, ratio=1.0):
"""
Splits data set into train/val/test sets from full bipartite adjacency matrix. Shuffling of dataset is done in
load_data function.
For each split computes 1-of-num_classes labels. Also computes training
adjacency matrix.
"""
if datasplit_from_file and os.path.isfile(datasplit_path):
print('Reading dataset splits from file...')
with open(datasplit_path, 'rb') as f:
num_users, num_items, u_nodes, v_nodes, ratings, u_features, v_features = pkl.load(f)
if verbose:
print('Number of users = %d' % num_users)
print('Number of items = %d' % num_items)
print('Number of links = %d' % ratings.shape[0])
print('Fraction of positive links = %.4f' % (float(ratings.shape[0]) / (num_users * num_items),))
else:
print(f"I am preprocessing {dataset} ")
num_users, num_items, u_nodes, v_nodes, ratings, u_features, v_features, edge_feactures,sim_users = load_data(dataset, seed=seed,
verbose=verbose)
with open(datasplit_path, 'wb') as f:
pkl.dump([num_users, num_items, u_nodes, v_nodes, ratings, u_features, v_features], f)
if rating_map is not None:
for i, x in enumerate(ratings):
ratings[i] = rating_map[x]
neutral_rating = -1
rating_dict = {r: i for i, r in enumerate(np.sort(np.unique(ratings)).tolist())}
labels = np.full((num_users, num_items), neutral_rating, dtype=np.int32)
labels[u_nodes, v_nodes] = np.array([rating_dict[r] for r in ratings])
labels = labels.reshape([-1])
# number of test and validation edges
num_test = int(np.ceil(ratings.shape[0] * 0.1))
if dataset == 'ml_100k':
num_val = int(np.ceil(ratings.shape[0] * 0.9 * 0.05))
else:
num_val = int(np.ceil(ratings.shape[0] * 0.9 * 0.05))
num_train = ratings.shape[0] - num_val - num_test
pairs_nonzero = np.array([[u, v] for u, v in zip(u_nodes, v_nodes)])
idx_nonzero = np.array([u * num_items + v for u, v in pairs_nonzero])
train_idx = idx_nonzero[0:int(num_train * ratio)]
val_idx = idx_nonzero[num_train:num_train + num_val]
test_idx = idx_nonzero[num_train + num_val:]
train_pairs_idx = pairs_nonzero[0:int(num_train * ratio)]
val_pairs_idx = pairs_nonzero[num_train:num_train + num_val]
test_pairs_idx = pairs_nonzero[num_train + num_val:]
u_test_idx, v_test_idx = test_pairs_idx.transpose()
u_val_idx, v_val_idx = val_pairs_idx.transpose()
u_train_idx, v_train_idx = train_pairs_idx.transpose()
# create labels
train_labels = labels[train_idx]
val_labels = labels[val_idx]
test_labels = labels[test_idx]
if testing:
u_train_idx = np.hstack([u_train_idx, u_val_idx])
v_train_idx = np.hstack([v_train_idx, v_val_idx])
train_labels = np.hstack([train_labels, val_labels])
# for adjacency matrix construction
train_idx = np.hstack([train_idx, val_idx])
class_values = np.sort(np.unique(ratings))
# make training adjacency matrix
rating_mx_train = np.zeros(num_users * num_items, dtype=np.float32)
if post_rating_map is None:
rating_mx_train[train_idx] = labels[train_idx].astype(np.float32) + 1.
else:
rating_mx_train[train_idx] = np.array([post_rating_map[r] for r in class_values[labels[train_idx]]]) + 1.
rating_mx_train = sp.csr_matrix(rating_mx_train.reshape(num_users, num_items))
return u_features, v_features, rating_mx_train, train_labels, u_train_idx, v_train_idx, \
val_labels, u_val_idx, v_val_idx, test_labels, u_test_idx, v_test_idx, class_values
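# Minimal sketch of the index convention used by create_trainvaltest_split above
# (and the context-aware variant below); values are illustrative. A (user, item)
# pair maps to one position in the flattened labels array via u * num_items + v.
def _demo_flat_index(num_items=5, u=2, v=3):
    flat = u * num_items + v              # 13 for the defaults
    assert (flat // num_items, flat % num_items) == (u, v)
    return flat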
def create_trainvaltest_split_Context( LDcall=False, dataset='LDOS', seed=1234, testing=False, datasplit_path=None, datasplit_from_file=False,
verbose=True, rating_map=None, post_rating_map=None, ratio=1.0):
"""
Splits data set into train/val/test sets from full bipartite adjacency matrix. Shuffling of dataset is done in
load_data function.
For each split computes 1-of-num_classes labels. Also computes training
adjacency matrix.
"""
edge_f_mx_train=None
u_features=v_features=rating_mx_train=train_labels=u_train_idx=v_train_idx=None
val_labels=u_val_idx=v_val_idx=test_labels=u_test_idx=v_test_idx=class_values=None
if datasplit_from_file and os.path.isfile(datasplit_path):
print('Reading dataset splits from file...')
with open(datasplit_path, 'rb') as f:
num_users, num_items, u_nodes, v_nodes, ratings, u_features, v_features, edge_features,edge_features_list_f = pkl.load(f)
if verbose:
print('Number of users = %d' % num_users)
print('Number of items = %d' % num_items)
print('Number of links = %d' % ratings.shape[0])
print('Fraction of positive links = %.4f' % (float(ratings.shape[0]) / (num_users * num_items),))
else:
print("split() in processig calling load() in data_utils")
num_users, num_items, u_nodes, v_nodes, ratings, u_features, v_features, edge_features,edge_features_list_f,sim_users,rating_dict = load_data(dataset, seed=seed,
verbose=verbose)
with open(datasplit_path, 'wb') as f:
pkl.dump([num_users, num_items, u_nodes, v_nodes, ratings, u_features, v_features, edge_features], f)
if rating_map is not None:
for i, x in enumerate(ratings):
ratings[i] = rating_map[x]
neutral_rating = -1
    rating_dict = {r: i for i, r in enumerate(np.sort(np.unique(ratings)).tolist())}  # e.g. {1: 0, 2: 1, 3: 2, 4: 3, 5: 4}
labels = np.full((num_users, num_items), neutral_rating, dtype=np.int32)
labels[u_nodes, v_nodes] = np.array([rating_dict[r] for r in ratings]) #(121, 1232)
a=np.array([rating_dict[r] for r in ratings]) #(121, 1232)
labels = labels.reshape([-1]) #(149072,)
edge_f = np.full((num_users, num_items, edge_features.shape[2]),0, dtype=np.float32)
for u,v in zip(u_nodes,v_nodes):
edge_f[u, v, :]=edge_features[u, v, :]
edge_f=np.reshape(edge_f,((edge_f.shape[0]*edge_f.shape[1]),edge_f.shape[2]))
ind = np.array(np.where(edge_f != 0)).T # (u v c) #non zero indices
#edge_f[u_nodes, v_nodes] = np.array(np.array([edge_features_d[i] for i in range(2096)])) # (121, 1232)
# number of test and validation edges
num_test = int(np.ceil(ratings.shape[0] * 0.1))
if dataset == 'ml_100k':
num_val = int(np.ceil(ratings.shape[0] * 0.9 * 0.05))
else:
num_val = int(np.ceil(ratings.shape[0] * 0.9 * 0.05))
num_train = ratings.shape[0] - num_val - num_test
print("*****************************Splitting()****************************************")
print(f"num_test {num_test} num_val {num_val} num_train {num_train} ")
pairs_nonzero = np.array([[u, v] for u, v in zip(u_nodes, v_nodes)])
idx_nonzero = np.array([u * num_items + v for u, v in pairs_nonzero])
train_idx = idx_nonzero[0:int(num_train * ratio)]
val_idx = idx_nonzero[num_train:num_train + num_val]
test_idx = idx_nonzero[num_train + num_val:]
#train_idx 0 - 1962.0 val_idx 1962 - 2066 test_idx 2066
train_pairs_idx = pairs_nonzero[0:int(num_train * ratio)]
val_pairs_idx = pairs_nonzero[num_train:num_train + num_val]
test_pairs_idx = pairs_nonzero[num_train + num_val:]
u_test_idx, v_test_idx = test_pairs_idx.transpose()
u_val_idx, v_val_idx = val_pairs_idx.transpose()
u_train_idx, v_train_idx = train_pairs_idx.transpose()
# create labels
train_labels = labels[train_idx]
val_labels = labels[val_idx]
test_labels = labels[test_idx]
print(f"train_labels {len(train_labels)} val_labels{len(val_labels)} test_labels {len(test_labels)}")
train_edge_features_list_f=edge_features_list_f[0:len(train_labels)]
val_edge_features_list_f=edge_features_list_f[len(train_labels):len(train_labels)+len(val_labels)]
test_edge_features_list_f=edge_features_list_f[len(train_labels)+len(val_labels):]
print(f"train {len(train_edge_features_list_f)} val {len(val_edge_features_list_f)} test {len(test_edge_features_list_f)}")
train_edge_f=edge_f[train_idx , :]
val_edge_f=edge_f[val_idx, : ]
test_edge_f=edge_f[test_idx, : ]
testing=False
if testing:
u_train_idx = np.hstack([u_train_idx, u_val_idx])
v_train_idx = np.hstack([v_train_idx, v_val_idx])
train_labels = np.hstack([train_labels, val_labels])
train_edge_f = np.concatenate([train_edge_f, val_edge_f]) #train_labels (2066,) train_edge_f(2066, 49)
# for adjacency matrix construction
train_idx = np.hstack([train_idx, val_idx])
class_values = np.sort(np.unique(ratings))
# make training adjacency matrix
rating_mx_train = np.zeros(num_users * num_items, dtype=np.float32)
edge_f_mx_train = np.zeros([num_users*num_items, edge_features.shape[2]], dtype=np.float32)
edge_f_mx_test=np.zeros([num_users*num_items, edge_features.shape[2]], dtype=np.float32)
edge_f_mx_val = np.zeros([num_users * num_items, edge_features.shape[2]], dtype=np.float32)
if post_rating_map is None:
rating_mx_train[train_idx] = labels[train_idx].astype(np.float32) + 1.
edge_f_mx_train[train_idx, : ]=edge_f[train_idx, :].astype(np.float32)
edge_f_mx_test[test_idx, :] = edge_f[test_idx, :].astype(np.float32)
edge_f_mx_val[val_idx, :] = edge_f[val_idx, :].astype(np.float32)
else:
rating_mx_train[train_idx] = np.array([post_rating_map[r] for r in class_values[labels[train_idx]]]) + 1.
rating_mx_train = sp.csr_matrix(rating_mx_train.reshape(num_users, num_items))
edge_f_mx_train=edge_f_mx_train.reshape(num_users,num_items,edge_features.shape[2])
edge_f_mx_test = edge_f_mx_test.reshape(num_users, num_items, edge_features.shape[2])
edge_f_mx_val = edge_f_mx_val.reshape(num_users, num_items, edge_features.shape[2])
"""
train_edge_f = sp.csr_matrix(train_edge_f)
test_edge_f = sp.csr_matrix(test_edge_f)
val_edge_f = sp.csr_matrix(val_edge_f)
"""
print(f"***************************************************************************")
if (u_features is not None) and (v_features is not None):
print(f"u_features-sp.csr-matrix {u_features.shape} v_features-sp.csr-matrix {v_features.shape}")
print(f" Train: rating_mx_trainsp.csr-matrix {rating_mx_train.shape} edge_f_mx_train-nparray {edge_f_mx_train.shape} train_edge_f-nparray {train_edge_f.shape} train_labels-1DT {train_labels.shape} u_train_idx-1DT {u_train_idx.shape} v_train_idx-1DT {v_train_idx.shape}")
print(f"Validation : val_labels-1DT {val_labels.shape} edge_f_mx_val-nparray {edge_f_mx_val.shape} val_edge_f-nparray {val_edge_f.shape} u_val_idx-1D {u_val_idx.shape} v_val_idx-1DT {v_val_idx.shape}")
print(f"Testing: test_labels-1DT {test_labels.shape} edge_f_mx_test-nparray {edge_f_mx_test.shape} test_edge_f-nparray {test_edge_f.shape} u_test_idx-1DT {u_test_idx.shape} v_test_idx-1Dt {v_test_idx.shape}")
print(f" class_values {class_values}")
print(f"******************************************************************************")
return u_features, v_features, rating_mx_train, edge_f_mx_train, train_edge_features_list_f, train_labels, u_train_idx, v_train_idx, \
val_labels, edge_f_mx_val, val_edge_features_list_f, u_val_idx, v_val_idx, test_labels, edge_f_mx_test, test_edge_features_list_f,u_test_idx, v_test_idx, class_values, sim_users, rating_dict
def load_data_monti(dataset, testing=False, rating_map=None, post_rating_map=None):
"""
Loads data from Monti et al. paper.
if rating_map is given, apply this map to the original rating matrix
if post_rating_map is given, apply this map to the processed rating_mx_train without affecting the labels
"""
path_dataset = 'raw_data/' + dataset + '/training_test_dataset.mat'
M = load_matlab_file(path_dataset, 'M')
if rating_map is not None:
M[np.where(M)] = [rating_map[x] for x in M[np.where(M)]]
Otraining = load_matlab_file(path_dataset, 'Otraining')
Otest = load_matlab_file(path_dataset, 'Otest')
num_users = M.shape[0]
num_items = M.shape[1]
if dataset == 'flixster':
Wrow = load_matlab_file(path_dataset, 'W_users')
Wcol = load_matlab_file(path_dataset, 'W_movies')
u_features = Wrow
v_features = Wcol
elif dataset == 'douban':
Wrow = load_matlab_file(path_dataset, 'W_users')
u_features = Wrow
v_features = np.eye(num_items)
elif dataset == 'yahoo_music':
Wcol = load_matlab_file(path_dataset, 'W_tracks')
u_features = np.eye(num_users)
v_features = Wcol
u_nodes_ratings = np.where(M)[0]
v_nodes_ratings = np.where(M)[1]
ratings = M[np.where(M)]
u_nodes_ratings, v_nodes_ratings = u_nodes_ratings.astype(np.int64), v_nodes_ratings.astype(np.int32)
ratings = ratings.astype(np.float64)
u_nodes = u_nodes_ratings
v_nodes = v_nodes_ratings
print('number of users = ', len(set(u_nodes)))
print('number of item = ', len(set(v_nodes)))
neutral_rating = -1 # int(np.ceil(np.float(num_classes)/2.)) - 1
# assumes that ratings_train contains at least one example of every rating type
rating_dict = {r: i for i, r in enumerate(np.sort(np.unique(ratings)).tolist())}
labels = np.full((num_users, num_items), neutral_rating, dtype=np.int32)
labels[u_nodes, v_nodes] = np.array([rating_dict[r] for r in ratings])
for i in range(len(u_nodes)):
assert (labels[u_nodes[i], v_nodes[i]] == rating_dict[ratings[i]])
labels = labels.reshape([-1])
# number of test and validation edges
num_train = np.where(Otraining)[0].shape[0]
num_test = np.where(Otest)[0].shape[0]
    num_val = int(np.ceil(num_train * 0.2))
from numpy import pi, dot, exp, einsum
import numpy as np
class PopulationMonteCarlo(object):
"""
A Population Monte Carlo (PMC) sampler,
which combines expectation-maximization and
importance sampling
This code follows the notation and methodolgy in
http://arxiv.org/pdf/0903.0837v1.pdf
"""
def __init__(self, posterior, n, start, sigma, pool=None, quiet=False, student=False, nu=2.0):
"""
posterior: the posterior function
n: number of components to use in the mixture
start: estimated mean of the distribution
sigma: estimated covariance matrix
pool (optional): an MPI or multiprocessing worker pool
"""
self.posterior = posterior
mu = np.random.multivariate_normal(start, sigma, size=n)
if student:
self.components = [StudentsTComponent(1.0/n, m, sigma, nu) for m in mu]
else:
self.components = [GaussianComponent(1.0/n, m, sigma) for m in mu]
self.pool = pool
self.quiet=quiet #not currently used
def sample(self, n, update=True, do_kill=True):
"Draw a sample from the Gaussian mixture and update the mixture"
self.kill_count = n*1./len(self.components)/50.
self.kill = [False for c in self.components]
#draw sample from current mixture
component_index, x = self.draw(n)
#calculate likelihoods
if self.pool is None:
samples = list(map(self.posterior, x))
else:
samples = self.pool.map(self.posterior, x)
post = np.array([s[0] for s in samples])
extra = [s[1] for s in samples]
post[np.isnan(post)] = -np.inf
#update components
log_weights = self.update_components(x, post, update, do_kill)
return x, post, extra, component_index, log_weights
def draw(self, n):
"Draw a sample from the Gaussian mixture"
A = [m.alpha for m in self.components]
A = np.array(A)
A/=A.sum()
#Components to draw from
N = np.arange(len(self.components))
C = np.random.choice(N, size=n, replace=True, p=A)
for i in N:
count = np.sum(C==i)
if count<self.kill_count:
self.kill[i] = True
print("Component %d less than kill count (%d < %d)" % (i, count, self.kill_count))
x = np.array([self.components[c].sample() for c in C])
return C, x
def update_components(self, x, log_post, update, do_kill):
"Equations 13-16 of arxiv.org 0903.0837v1"
#x #n_sample*n_dim
log_Aphi = np.array([np.log(m.alpha) + m.log_phi(x) for m in self.components]) #n-component * n_sample
Aphi = np.array([m.alpha*m.phi(x) for m in self.components]) #n-component * n_sample
post = np.exp(log_post)
w = post/Aphi.sum(0) #n_sample
logw = log_post - np.log(Aphi.sum(0))
if not update:
return logw
w_norm = w/w.sum() #n_sample
logw_norm = np.log(w_norm)
entropy = -(w_norm*logw_norm).sum()
perplexity = np.exp(entropy) / len(x)
print("Perplexity = ", perplexity)
Aphi[np.isnan(Aphi)] = 0.0
w_norm[np.isnan(w_norm)] = 0.0
A = [m.alpha for m in self.components]
#rho_top = einsum('i,ij->ij', A, phi) #n_component * n_sample
rho_bottom = Aphi.sum(0) #n_sample
rho = [rho_t/rho_bottom for rho_t in Aphi]
for d,(m,rho_d) in enumerate(zip(self.components, rho)):
try:
m.update(w_norm, x, rho_d)
except np.linalg.LinAlgError as error:
print("Component not fitting the data very well", d, str(error))
self.kill[d] = True
if do_kill:
self.components = [c for c,kill in zip(self.components,self.kill) if not kill]
print("%d components remain" % len(self.components))
return logw
class GaussianComponent(object):
"""
A single Gaussian component of the mixture model.
"""
def __init__(self,alpha, mu, sigma):
self.set(alpha, mu, sigma)
def set(self, alpha, mu, sigma):
"Set the parameters of this distribution component"
self.alpha = alpha
self.mu = mu
ndim = len(self.mu)
self.sigma = sigma
self.sigma_inv = np.linalg.inv(self.sigma)
self.A = (2*pi)**(-ndim/2.0) * np.linalg.det(self.sigma)**-0.5
self.logA = np.log(self.A)
def update(self, w_norm, x, rho_d):
"Update the parameters according to the samples and rho values"
alpha = dot(w_norm, rho_d) #scalar
if not alpha>0:
raise np.linalg.LinAlgError("alpha = %f"%alpha)
mu = einsum('i,ij,i->j',w_norm, x, rho_d) / alpha #scalar
delta = x-mu #n_sample * n_dim
print("Updating to mu = ", mu)
        # weighted covariance update, normalized by alpha in the same way as mu
        sigma = einsum('i,ij,ik,i->jk', w_norm, delta, delta, rho_d) / alpha
        self.set(alpha, mu, sigma)
import torch
import torch.nn.functional as F
import numpy as np
def RMSE(X_train, X_imp, M):
"""
X_train: original data
X_imp: imputation
M: missing indicator matrix
"""
    return np.sqrt(np.sum((X_train - X_imp) ** 2 * (1 - M)) / np.sum(1 - M))
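# Minimal usage sketch (values are illustrative): only entries with M == 0
# (i.e. the imputed positions) contribute to the error.
def _demo_rmse():
    X_train = np.array([[1.0, 2.0], [3.0, 4.0]])
    X_imp = np.array([[1.0, 2.5], [3.0, 4.0]])
    M = np.array([[1, 0], [1, 1]])    # 0 marks a missing/imputed entry
    return RMSE(X_train, X_imp, M)    # sqrt(0.25 / 1) == 0.5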
# -*- mode: python; coding: utf-8 -*-
# Copyright 2012-2018 <NAME> <<EMAIL>> and collaborators.
# Licensed under the MIT License.
"""Model data with least-squares fitting
This module provides tools for fitting models to data using least-squares
optimization.
"""
from __future__ import absolute_import, division, print_function
__all__ = 'ModelBase Model ComposedModel PolynomialModel ScaleModel'.split()
import numpy as np
try:
# numpy 1.7
import numpy.polynomial.polynomial as npoly
except ImportError:
import numpy.polynomial as npoly
from six import get_function_code
from six.moves import range, reduce
from . import binary_type, text_type
class Parameter(object):
"""Information about a parameter in a least-squares model.
These data may only be obtained after solving least-squares problem. These
objects reference information from their parent objects, so changing the
parent will alter the apparent contents of these objects.
"""
def __init__(self, owner, index):
self._owner = owner
self._index = index
def __repr__(self):
return '<Parameter "%s" (#%d) of %s>' % (self.name, self._index, self._owner)
@property
def index(self): # make this read-only
"The parameter's index in the Model's arrays."
return self._index
@property
def name(self):
"The parameter's name."
return self._owner.pnames[self._index]
@property
def value(self):
"The parameter's value."
return self._owner.params[self._index]
@property
def uncert(self):
"The uncertainty in :attr:`value`."
return self._owner.puncerts[self._index]
@property
def uval(self):
"Accesses :attr:`value` and :attr:`uncert` as a :class:`pwkit.msmt.Uval`."
from .msmt import Uval
return Uval.from_norm(self.value, self.uncert)
class ModelBase(object):
"""An abstract base class holding data and a model for least-squares fitting.
The models implemented in this module all derive from this class and so
inherit the attributes and methods described below.
A :class:`Parameter` data structure may be obtained by indexing this
object with either the parameter's numerical index or its name. I.e.::
m = Model(...).solve(...)
p = m['slope']
print(p.name, p.value, p.uncert, p.uval)
"""
data = None
"The data to be modeled; an *n*-dimensional Numpy array."
invsigma = None
"Data weights: 1/ฯ for each data point."
params = None
"After fitting, a Numpy ndarray of solved model parameters."
puncerts = None
"After fitting, a Numpy ndarray of 1ฯ uncertainties on the model parameters."
pnames = None
"A list of textual names for the parameters."
covar = None
"""After fitting, the variance-covariance matrix representing the parameter
uncertainties.
"""
mfunc = None
"""After fitting, a callable function evaluating the model fixed at best params.
The resulting function may or may not take arguments depending on the particular
kind of model being evaluated.
"""
mdata = None
"After fitting, the modeled data at the best parameters."
chisq = None
"After fitting, the ฯยฒ of the fit."
rchisq = None
"After fitting, the reduced ฯยฒ of the fit, or None if there are no degrees of freedom."
resids = None
"After fitting, the residuals: ``resids = data - mdata``."
def __init__(self, data, invsigma=None):
self.set_data(data, invsigma)
def set_data(self, data, invsigma=None):
"""Set the data to be modeled.
Returns *self*.
"""
        self.data = np.array(data, dtype=float, ndmin=1)
if invsigma is None:
self.invsigma = np.ones(self.data.shape)
else:
            i = np.array(invsigma, dtype=float)
self.invsigma = np.broadcast_arrays(self.data, i)[1] # allow scalar invsigma
if self.invsigma.shape != self.data.shape:
raise ValueError('data values and inverse-sigma values must have same shape')
return self
def print_soln(self):
"""Print information about the model solution."""
lmax = reduce(max,(len(x) for x in self.pnames), len('r chi sq'))
if self.puncerts is None:
for pn, val in zip(self.pnames, self.params):
print('%s: %14g' % (pn.rjust(lmax), val))
else:
for pn, val, err in zip(self.pnames, self.params, self.puncerts):
frac = abs(100. * err / val)
print('%s: %14g +/- %14g (%.2f%%)' % (pn.rjust(lmax), val, err, frac))
if self.rchisq is not None:
print('%s: %14g' % ('r chi sq'.rjust(lmax), self.rchisq))
elif self.chisq is not None:
print('%s: %14g' % ('chi sq'.rjust(lmax), self.chisq))
else:
print('%s: unknown/undefined' % ('r chi sq'.rjust(lmax)))
return self
def make_frozen_func(self, params):
"""Return a data-generating model function frozen at the specified parameters.
As with the :attr:`mfunc` attribute, the resulting function may or may
not take arguments depending on the particular kind of model being
evaluated.
"""
raise NotImplementedError()
def __getitem__(self, key):
if isinstance(key, binary_type):
# If you're not using the unicode_literals __future__, things get
# annoying really quickly without this.
key = text_type(key)
if isinstance(key, int):
idx = key
if idx < 0 or idx >= len(self.pnames):
raise ValueError('illegal parameter number %d' % key)
elif isinstance(key, text_type):
try:
idx = self.pnames.index(key)
except ValueError:
raise ValueError('no such parameter named "%s"' % key)
else:
raise ValueError('illegal parameter key %r' % key)
return Parameter(self, idx)
def plot(self, modelx, dlines=False, xmin=None, xmax=None,
ymin=None, ymax=None, **kwargs):
"""Plot the data and model (requires `omega`).
This assumes that `data` is 1D and that `mfunc` takes one argument
that should be treated as the X variable.
"""
import omega as om
modelx = np.asarray(modelx)
if modelx.shape != self.data.shape:
raise ValueError('modelx and data arrays must have same shape')
modely = self.mfunc(modelx)
sigmas = self.invsigma**-1 # TODO: handle invsigma = 0
vb = om.layout.VBox(2)
vb.pData = om.quickXYErr(modelx, self.data, sigmas,
'Data', lines=dlines, **kwargs)
vb[0] = vb.pData
vb[0].addXY(modelx, modely, 'Model')
vb[0].setYLabel('Y')
vb[0].rebound(False, True)
vb[0].setBounds(xmin, xmax, ymin, ymax)
vb[1] = vb.pResid = om.RectPlot()
        vb[1].defaultField.xaxis = vb[0].defaultField.xaxis  # share the X axis with the data panel
vb[1].addXYErr(modelx, self.resids, sigmas, None, lines=False)
vb[1].setLabels('X', 'Residuals')
vb[1].rebound(False, True)
# ignore Y values since residuals are on different scale:
vb[1].setBounds(xmin, xmax)
vb.setWeight(0, 3)
return vb
def show_cov(self):
"Show the parameter covariance matrix with `pwkit.ndshow_gtk3`."
# would be nice: labels with parameter names (hard because this is
# ndshow, not omegaplot)
from .ndshow_gtk3 import view
view(self.covar, title='Covariance Matrix')
def show_corr(self):
"Show the parameter correlation matrix with `pwkit.ndshow_gtk3`."
from .ndshow_gtk3 import view
d = np.diag(self.covar) ** -0.5
corr = self.covar * d[np.newaxis,:] * d[:,np.newaxis]
view(corr, title='Correlation Matrix')
class Model(ModelBase):
"""Models data with a generic nonlinear optimizer
Basic usage is::
def func(p1, p2, x):
simulated_data = p1 * x + p2
return simulated_data
x = [1, 2, 3]
data = [10, 14, 15.8]
mdl = Model(func, data, args=(x,)).solve(guess).print_soln()
The :class:`Model` constructor can take an optional argument ``invsigma``
after ``data``; it specifies *inverse sigmas*, **not** inverse *variances*
(the usual statistical weights), for the data points. Since most
applications deal in sigmas, take care to write::
m = Model(func, data, 1. / uncerts) # right!
not::
m = Model(func, data, uncerts) # WRONG
    If you have zero uncertainty on a measurement, you must find a way to
express that constraint without including that measurement as part of the
``data`` vector.
"""
lm_prob = None
"""A :class:`pwkit.lmmin.Problem` instance describing the problem to be solved.
After setting up the data-generating function, you can access this item to
tune the solver.
"""
def __init__(self, simple_func, data, invsigma=None, args=()):
if simple_func is not None:
self.set_simple_func(simple_func, args)
if data is not None:
self.set_data(data, invsigma)
def set_func(self, func, pnames, args=()):
"""Set the model function to use an efficient but tedious calling convention.
The function should obey the following convention::
def func(param_vec, *args):
modeled_data = { do something using param_vec }
return modeled_data
This function creates the :class:`pwkit.lmmin.Problem` so that the
caller can futz with it before calling :meth:`solve`, if so desired.
Returns *self*.
"""
from .lmmin import Problem
self.func = func
self._args = args
self.pnames = list(pnames)
self.lm_prob = Problem(len(self.pnames))
return self
def set_simple_func(self, func, args=()):
"""Set the model function to use a simple but somewhat inefficient calling
convention.
The function should obey the following convention::
def func(param0, param1, ..., paramN, *args):
modeled_data = { do something using the parameters }
return modeled_data
Returns *self*.
"""
code = get_function_code(func)
npar = code.co_argcount - len(args)
pnames = code.co_varnames[:npar]
def wrapper(params, *args):
return func(*(tuple(params) + args))
return self.set_func(wrapper, pnames, args)
def make_frozen_func(self, params):
"""Returns a model function frozen to the specified parameter values.
Any remaining arguments are left free and must be provided when the
function is called.
For this model, the returned function is the application of
:func:`functools.partial` to the :attr:`func` property of this object.
"""
        params = np.array(params, dtype=float, ndmin=1)
from functools import partial
return partial(self.func, params)
def solve(self, guess):
"""Solve for the parameters, using an initial guess.
This uses the Levenberg-Marquardt optimizer described in
:mod:`pwkit.lmmin`.
Returns *self*.
"""
        guess = np.array(guess, dtype=float, ndmin=1)
f = self.func
args = self._args
def lmfunc(params, vec):
vec[:] = f(params, *args).flatten()
self.lm_prob.set_residual_func(self.data.flatten(),
self.invsigma.flatten(),
lmfunc, None)
self.lm_soln = soln = self.lm_prob.solve(guess)
self.params = soln.params
self.puncerts = soln.perror
self.covar = soln.covar
self.mfunc = self.make_frozen_func(soln.params)
# fvec = resids * invsigma = (data - mdata) * invsigma
self.resids = soln.fvec.reshape(self.data.shape) / self.invsigma
self.mdata = self.data - self.resids
# lm_soln.fnorm can be unreliable ("max(fnorm, fnorm1)" branch)
self.chisq = (self.lm_soln.fvec**2).sum()
if soln.ndof > 0:
self.rchisq = self.chisq / soln.ndof
return self
class PolynomialModel(ModelBase):
"""Least-squares polynomial fit.
Because this is a very specialized kind of problem, we don't need an
initial guess to solve, and we can use fast built-in numerical routines.
The output parameters are named "a0", "a1", ... and are stored in that
order in PolynomialModel.params[]. We have ``y = sum(x**i * a[i])``, so
"a2" = "params[2]" is the quadratic term, etc.
This model does *not* give uncertainties on the derived coefficients. The
as_nonlinear() method can be use to get a `Model` instance with
uncertainties.
Methods:
as_nonlinear - Return a (lmmin-based) `Model` equivalent to self.
"""
def __init__(self, maxexponent, x, data, invsigma=None):
self.maxexponent = maxexponent
        self.x = np.array(x, dtype=float, ndmin=1, copy=False, subok=True)
self.set_data(data, invsigma)
def make_frozen_func(self, params):
return lambda x: npoly.polyval(x, params)
def solve(self):
self.pnames = ['a%d' % i for i in range(self.maxexponent + 1)]
self.params = npoly.polyfit(self.x, self.data, self.maxexponent,
w=self.invsigma)
self.puncerts = None # does anything provide this? could farm out to lmmin ...
self.covar = None
self.mfunc = self.make_frozen_func(self.params)
self.mdata = self.mfunc(self.x)
self.resids = self.data - self.mdata
self.chisq = ((self.resids * self.invsigma)**2).sum()
if self.x.size > self.maxexponent + 1:
self.rchisq = self.chisq / (self.x.size - (self.maxexponent + 1))
return self
def as_nonlinear(self, params=None):
"""Return a `Model` equivalent to this object. The nonlinear solver is less
efficient, but lets you freeze parameters, compute uncertainties, etc.
If the `params` argument is provided, solve() will be called on the
returned object with those parameters. If it is `None` and this object
has parameters in `self.params`, those will be use. Otherwise, solve()
will not be called on the returned object.
"""
if params is None:
params = self.params
nlm = Model(None, self.data, self.invsigma)
nlm.set_func(lambda p, x: npoly.polyval(x, p),
self.pnames,
args=(self.x,))
if params is not None:
nlm.solve(params)
return nlm
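# Minimal usage sketch (data are illustrative): fit y = 1 + 2 x with a
# first-order polynomial; parameters come back ordered as [a0, a1].
def _demo_polynomial_model():
    x = np.array([0., 1., 2., 3.])
    data = 1. + 2. * x
    m = PolynomialModel(1, x, data).solve()
    # m.params is approximately [1., 2.]
    return m.params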
class ScaleModel(ModelBase):
"""Solve `data = m * x` for `m`."""
def __init__(self, x, data, invsigma=None):
        self.x = np.array(x, dtype=float, ndmin=1, copy=False, subok=True)
self.set_data(data, invsigma)
def make_frozen_func(self, params):
return lambda x: params[0] * x
def solve(self):
w2 = self.invsigma**2
sxx = np.dot(self.x**2, w2)
sxy = np.dot(self.x * self.data, w2)
m = sxy / sxx
uc_m = 1. / np.sqrt(sxx)
self.pnames = ['m']
self.params = np.asarray([m])
self.puncerts = np.asarray([uc_m])
self.covar = self.puncerts.reshape((1, 1))
self.mfunc = lambda x: m * x
self.mdata = m * self.x
self.resids = self.data - self.mdata
self.chisq = ((self.resids * self.invsigma)**2).sum()
self.rchisq = self.chisq / (self.x.size - 1)
return self
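# Minimal usage sketch (data are illustrative): the scale factor and its
# 1-sigma uncertainty come from the closed-form weighted solution above.
def _demo_scale_model():
    x = np.array([1., 2., 3.])
    data = 2.5 * x
    m = ScaleModel(x, data).solve()
    # m.params[0] == 2.5, m.puncerts[0] == 1 / sqrt(sum(x**2))
    return m.params[0], m.puncerts[0]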
# lmmin-based model-fitting when the model is broken down into composable
# components.
class ModelComponent(object):
npar = 0
name = None
pnames = ()
nmodelargs = 0
setguess = None
setvalue = None
setlimit = None
_accum_mfunc = None
def __init__(self, name=None):
self.name = name
def _param_names(self):
"""Overridable in case the list of parameter names needs to be
generated on the fly."""
return self.pnames
def finalize_setup(self):
"""If the component has subcomponents, this should set their `name`,
`setguess`, `setvalue`, and `setlimit` properties. It should also
set `npar` (on self) to the final value."""
pass
def prep_params(self):
"""This should make any necessary calls to `setvalue` or `setlimit`,
though in straightforward cases it should just be up to the user to
do this. If the component has subcomponents, their `prep_params`
functions should be called."""
pass
def model(self, pars, mdata):
"""Modify `mdata` based on `pars`."""
pass
def deriv(self, pars, jac):
"""Compute the Jacobian. `jac[i]` is d`mdata`/d`pars[i]`."""
pass
def extract(self, pars, perr, cov):
"""Extract fit results into the object for ease of inspection."""
self.covar = cov
def _outputshape(self, *args):
"""This is a helper for evaluating the model function at fixed parameters. To
work in the ComposedModel paradigm, we have to allocate an empty array
to hold the model output before we can fill it via the _accum_mfunc
functions. We can't do that without knowing what size it will be. That
size has to be a function of the "free" parameters to the model
function that are implicit/fixed during the fitting process. Given these "free"
parameters, _outputshape returns the shape that the output will have."""
raise NotImplementedError()
def mfunc(self, *args):
if len(args) != self.nmodelargs:
raise TypeError('model function expected %d arguments, got %d' %
(self.nmodelargs, len(args)))
result = np.zeros(self._outputshape(*args))
self._accum_mfunc(result, *args)
return result
class ComposedModel(ModelBase):
def __init__(self, component, data, invsigma=None):
if component is not None:
self.set_component(component)
if data is not None:
self.set_data(data, invsigma)
def _component_setguess(self, vals, ofs=0):
vals = np.asarray(vals)
if ofs < 0 or ofs + vals.size > self.component.npar:
raise ValueError('ofs %d, vals.size %d, npar %d' %
(ofs, vals.size, self.component.npar))
self.force_guess[ofs:ofs+vals.size] = vals
def _component_setvalue(self, cidx, val, fixed=False):
if cidx < 0 or cidx >= self.component.npar:
raise ValueError('cidx %d, npar %d' % (cidx, self.component.npar))
self.lm_prob.p_value(cidx, val, fixed=fixed)
self.force_guess[cidx] = val
def _component_setlimit(self, cidx, lower=-np.inf, upper=np.inf):
if cidx < 0 or cidx >= self.component.npar:
raise ValueError('cidx %d, npar %d' % (cidx, self.component.npar))
self.lm_prob.p_limit(cidx, lower, upper)
def set_component(self, component):
self.component = component
component.setguess = self._component_setguess
component.setvalue = self._component_setvalue
component.setlimit = self._component_setlimit
component.finalize_setup()
from .lmmin import Problem
self.lm_prob = Problem(component.npar)
self.force_guess = np.empty(component.npar)
self.force_guess.fill(np.nan)
self.pnames = list(component._param_names())
component.prep_params()
def solve(self, guess=None):
if guess is None:
guess = self.force_guess
else:
            guess = np.array(guess, dtype=float, ndmin=1, copy=True)
for i in range(self.force_guess.size):
if np.isfinite(self.force_guess[i]):
guess[i] = self.force_guess[i]
def model(pars, outputs):
outputs.fill(0)
self.component.model(pars, outputs)
self.lm_model = model
self.lm_deriv = self.component.deriv
self.lm_prob.set_residual_func(self.data, self.invsigma, model,
self.component.deriv)
self.lm_soln = soln = self.lm_prob.solve(guess)
self.params = soln.params
self.puncerts = soln.perror
self.covar = soln.covar
# fvec = resids * invsigma = (data - mdata) * invsigma
self.resids = self.lm_soln.fvec.reshape(self.data.shape) / self.invsigma
self.mdata = self.data - self.resids
# lm_soln.fnorm can be unreliable ("max(fnorm, fnorm1)" branch)
self.chisq = (self.lm_soln.fvec**2).sum()
if soln.ndof > 0:
self.rchisq = self.chisq / soln.ndof
self.component.extract(soln.params, soln.perror, soln.covar)
return self
def make_frozen_func(self):
return self.component.mfunc
def mfunc(self, *args):
return self.component.mfunc(*args)
def debug_derivative(self, guess):
"""returns (explicit, auto)"""
from .lmmin import check_derivative
return check_derivative(self.component.npar, self.data.size,
self.lm_model, self.lm_deriv, guess)
# Now specific components useful in the above framework. The general strategy
# is to err on the side of having additional parameters in the individual
# classes, and the user can call setvalue() to fix them if they're not needed.
class AddConstantComponent(ModelComponent):
npar = 1
pnames = ('value', )
nmodelargs = 0
def model(self, pars, mdata):
mdata += pars[0]
def deriv(self, pars, jac):
jac[0] = 1.
def _outputshape(self):
return()
def extract(self, pars, perr, cov):
def _accum_mfunc(res):
res += pars[0]
self._accum_mfunc = _accum_mfunc
self.covar = cov
self.f_value = pars[0]
self.u_value = perr[0]
class AddValuesComponent(ModelComponent):
"""XXX terminology between this and AddConstant is mushy."""
nmodelargs = 0
def __init__(self, nvals, name=None):
super(AddValuesComponent, self).__init__(name)
self.npar = nvals
def _param_names(self):
for i in range(self.npar):
yield 'v%d' % i
def model(self, pars, mdata):
mdata += pars
def deriv(self, pars, jac):
jac[:,:] = np.eye(self.npar)
def _outputshape(self):
return(self.npar,)
def extract(self, pars, perr, cov):
def _accum_mfunc(res):
res += pars
self._accum_mfunc = _accum_mfunc
self.covar = cov
self.f_vals = pars
self.u_vals = perr
class AddPolynomialComponent(ModelComponent):
nmodelargs = 1
def __init__(self, maxexponent, x, name=None):
super(AddPolynomialComponent, self).__init__(name)
self.npar = maxexponent + 1
        self.x = np.array(x, dtype=float, ndmin=1, copy=False, subok=True)
def _param_names(self):
for i in range(self.npar):
yield 'c%d' % i
def model(self, pars, mdata):
mdata += npoly.polyval(self.x, pars)
def deriv(self, pars, jac):
w = np.ones_like(self.x)
for i in range(self.npar):
jac[i] = w
w *= self.x
def _outputshape(self, x):
return x.shape
def extract(self, pars, perr, cov):
def _accum_mfunc(res, x):
res += npoly.polyval(x, pars)
self._accum_mfunc = _accum_mfunc
self.covar = cov
self.f_coeffs = pars
self.u_coeffs = perr
def _broadcast_shapes(s1, s2):
"""Given array shapes `s1` and `s2`, compute the shape of the array that would
result from broadcasting them together."""
n1 = len(s1)
n2 = len(s2)
n = max(n1, n2)
res = [1] * n
for i in range(n):
if i >= n1:
c1 = 1
else:
c1 = s1[n1-1-i]
if i >= n2:
c2 = 1
else:
c2 = s2[n2-1-i]
if c1 == 1:
rc = c2
elif c2 == 1 or c1 == c2:
rc = c1
else:
raise ValueError('array shapes %r and %r are not compatible' % (s1, s2))
res[n-1-i] = rc
return tuple(res)
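# Minimal sketch of the broadcasting rule implemented above (shapes are
# illustrative; this helper is not part of the original module).
def _demo_broadcast_shapes():
    assert _broadcast_shapes((3, 1), (4,)) == (3, 4)
    assert _broadcast_shapes((), (2, 5)) == (2, 5)
    # incompatible shapes such as (3,) and (4,) raise ValueError
    return True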
class SeriesComponent(ModelComponent):
"""Apply a set of subcomponents in series, isolating each from the other. This
is only valid if every subcomponent except the first is additive --
otherwise, the Jacobian won't be right."""
def __init__(self, components=(), name=None):
super(SeriesComponent, self).__init__(name)
self.components = list(components)
def add(self, component):
"""This helps, but direct manipulation of self.components should be
supported."""
self.components.append(component)
return self
def _param_names(self):
for c in self.components:
pfx = c.name + '.' if c.name is not None else ''
for p in c._param_names():
yield pfx + p
def _offset_setguess(self, ofs, npar, vals, subofs=0):
vals = np.asarray(vals)
if subofs < 0 or subofs + vals.size > npar:
raise ValueError('subofs %d, vals.size %d, npar %d' %
(subofs, vals.size, npar))
return self.setguess(vals, ofs + subofs)
def _offset_setvalue(self, ofs, npar, cidx, value, fixed=False):
if cidx < 0 or cidx >= npar:
raise ValueError('cidx %d, npar %d' % (cidx, npar))
return self.setvalue(ofs + cidx, value, fixed)
def _offset_setlimit(self, ofs, npar, cidx, lower=-np.inf, upper=np.inf):
if cidx < 0 or cidx >= npar:
raise ValueError('cidx %d, npar %d' % (cidx, npar))
return self.setlimit(ofs + cidx, lower, upper)
def finalize_setup(self):
from functools import partial
ofs = 0
self.nmodelargs = 0
for i, c in enumerate(self.components):
if c.name is None:
c.name = 'c%d' % i
c.setguess = partial(self._offset_setguess, ofs, c.npar)
c.setvalue = partial(self._offset_setvalue, ofs, c.npar)
c.setlimit = partial(self._offset_setlimit, ofs, c.npar)
c.finalize_setup()
ofs += c.npar
self.nmodelargs += c.nmodelargs
self.npar = ofs
def prep_params(self):
for c in self.components:
c.prep_params()
def model(self, pars, mdata):
ofs = 0
for c in self.components:
p = pars[ofs:ofs+c.npar]
c.model(p, mdata)
ofs += c.npar
def deriv(self, pars, jac):
ofs = 0
for c in self.components:
p = pars[ofs:ofs+c.npar]
j = jac[ofs:ofs+c.npar]
c.deriv(p, j)
ofs += c.npar
def extract(self, pars, perr, cov):
ofs = 0
for c in self.components:
n = c.npar
spar = pars[ofs:ofs+n]
serr = perr[ofs:ofs+n]
scov = cov[ofs:ofs+n,ofs:ofs+n]
c.extract(spar, serr, scov)
ofs += n
def _outputshape(self, *args):
s = ()
ofs = 0
for c in self.components:
cargs = args[ofs:ofs+c.nmodelargs]
s = _broadcast_shapes(s, c._outputshape(*cargs))
ofs += c.nmodelargs
return s
def _accum_mfunc(self, res, *args):
ofs = 0
for c in self.components:
cargs = args[ofs:ofs+c.nmodelargs]
c._accum_mfunc(res, *cargs)
ofs += c.nmodelargs
class MatMultComponent(ModelComponent):
"""Given a component yielding k**2 data points and k additional components,
each yielding n data points. The result is [A]ร[B], where A is the square
matrix formed from the first component's output, and B is the (k, n)
matrix of stacked output from the final k components.
Parameters are ordered in same way as the components named above.
"""
def __init__(self, k, name=None):
super(MatMultComponent, self).__init__(name)
self.k = k
self.acomponent = None
self.bcomponents = [None] * k
def _param_names(self):
pfx = self.acomponent.name + '.' if self.acomponent.name is not None else ''
for p in self.acomponent._param_names():
yield pfx + p
for c in self.bcomponents:
pfx = c.name + '.' if c.name is not None else ''
for p in c._param_names():
yield pfx + p
def _offset_setguess(self, ofs, npar, vals, subofs=0):
vals = | np.asarray(vals) | numpy.asarray |
PythonSum = sum
PythonAll = all
import numpy as np
from numpy import all, any, logical_and, logical_not, isscalar, inf, logical_or, logical_xor, isnan#where
from operator import gt as Greater, lt as Less, truediv as td
from FDmisc import update_mul_inf_zero, update_div_zero, where
import operator
try:
from bottleneck import nanmin, nanmax
except ImportError:
from numpy import nanmin, nanmax
arrZero = np.array(0.0)
class surf(object):
isRendered = False
__array_priority__ = 15
def __init__(self, d, c):
self.d = d # dict of variables and linear coefficients on them (probably as multiarrays)
self.c = np.asarray(c) # (multiarray of) constant(s)
value = lambda self, point: self.c + PythonSum(point[k] * v for k, v in self.d.items())
# def invert(self, ind=None):
# ind_is_None = ind is None
# C = -self.c if ind_is_None else where(ind, -self.c, self.c)
# D = dict((k,
# (-v if ind_is_None else where(ind, -v, v))) \
# for k, v in self.d.items())
# if 'd2' not in self.__dict__:
# return surf(D, C)
# from boundsurf2 import surf2
# D2 = dict((k,
# (-v if ind_is_None else where(ind, -v, v))) \
# for k, v in self.d2.items())
# return surf2(D2, D, C)
# resolve = lambda self, domain, cmp: \
# self.c + PythonSum(where(cmp(v, 0), domain[k][0], domain[k][1])*v for k, v in self.d.items())
def exclude(self, domain, oovars, cmp):
C = []
d = self.d.copy()
for v in oovars:
tmp = d.pop(v, 0.0)
if any(tmp):
D = domain[v]
C.append(where(cmp(tmp, 0), D[0], D[1])*tmp)
c = self.c + PythonSum(C)
return surf(d, c)
# split = lambda self, inds: [extract(self, ind) for ind in inds]
#self.resolve(domain, GREATER)
minimum = lambda self, domain, domain_ind = None: \
self.c +\
(PythonSum(where(v > 0,
domain[k][0], domain[k][1])*v for k, v in self.d.items()) if domain_ind is None else
PythonSum(where(v > 0,
domain[k][0][domain_ind], domain[k][1][domain_ind])*v for k, v in self.d.items()))
#self.resolve(domain, LESS)
maximum = lambda self, domain, domain_ind = None: \
self.c +\
(PythonSum(where(v < 0,
domain[k][0], domain[k][1])*v for k, v in self.d.items()) if domain_ind is None else
PythonSum(where(v < 0,
domain[k][0][domain_ind], domain[k][1][domain_ind])*v for k, v in self.d.items()))
def render(self, domain, cmp):
self.rendered = dict((k, where(cmp(v, 0), domain[k][0], domain[k][1])*v) for k, v in self.d.items())
self.resolved = PythonSum(self.rendered) + self.c
self.isRendered = True
def extract(self, ind):
# if ind.dtype == bool:
# ind = where(ind)[0]
d = dict((k, v if v.size == 1 else v[ind]) for k, v in self.d.items())
C = self.c
c = C if C.size == 1 else C[ind]
return surf(d, c)
def __add__(self, other):
if type(other) == surf:
# if other.isRendered and not self.isRendered:
# self, other = other, self
S, O = self.d, other.d
d = S.copy()
d.update(O)
for key in set(S.keys()) & set(O.keys()):
d[key] = S[key] + O[key]
return surf(d, self.c+other.c)
elif isscalar(other) or type(other) == np.ndarray:
return surf(self.d, self.c + other)
elif isinstance(other, surf):# surf2 class instance
return other + self
else:
assert 0, 'unimplemented yet'
__sub__ = lambda self, other: self.__add__(-other)
__neg__ = lambda self: surf(dict((k, -v) for k, v in self.d.items()), -self.c)
def __mul__(self, other):
isArray = type(other) == np.ndarray
if isscalar(other) or isArray:
return surf(dict((k, v*other) for k, v in self.d.items()), self.c * other)
else:
assert 0, 'unimplemented yet'
__rmul__ = __mul__
# def koeffs_mul(self, other):
# assert type(other) == surf
# S, O = self.d, other.d
# d = dict((key, S.get(key, 0.0) * O.get(key, 0.0)) for key in set(S.keys()) | set(O.keys()))
# return surf(d, 0.0)
# def __getattr__(self, attr):
# if attr == 'resolve_index':
# assert 0, 'resolve_index must be used from surf derived classes only'
# else:
# raise AttributeError('error in FD engine (class surf)')
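# Illustrative sketch (not part of the library): a surf stores the affine function
# c + sum_k d[k] * x_k of the interval variables. With plain string keys and numpy
# arrays it can be exercised directly; minimum()/maximum() pick each variable's
# lower or upper bound according to the sign of its coefficient.
def _surf_demo():
    domain = {'x': (np.array([0.0]), np.array([1.0]))}  # (lower, upper) bounds of x
    s = surf({'x': np.array([2.0])}, np.array([1.0]))   # represents 1 + 2*x
    lo = s.minimum(domain)                  # 1 + 2*0 = 1
    hi = s.maximum(domain)                  # 1 + 2*1 = 3
    val = s.value({'x': np.array([0.5])})   # 1 + 2*0.5 = 2
    return lo, hi, val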
class boundsurf(object):#object is added for Python2 compatibility
__array_priority__ = 15
isRendered = False
resolved = False
level = 1
def __init__(self, lowersurf, uppersurf, definiteRange, domain, checkType = True):
if checkType:
assert type(lowersurf) == type(uppersurf) == surf # not surf2
self.l = lowersurf
self.u = uppersurf
self.definiteRange = definiteRange
self.domain = domain
# print lowersurf.d, lowersurf.c
# print uppersurf.d, uppersurf.c
Size = lambda self: max((len(self.l.d), len(self.u.d), 1))
def _dep(self):
r = set.union(set(self.l.d.keys()), set(self.u.d.keys()))
tmp = getattr(self.l, 'd2', None)
if tmp is not None:
r |= set(tmp.keys())
tmp = getattr(self.u, 'd2', None)
if tmp is not None:
r |= set(tmp.keys())
return r
def __getattr__(self, attr):
if attr == 'dep':
self.dep =self._dep()
return self.dep # dependence oovars
else:
raise AttributeError('incorrect attribute %s for boundsurf / boundsurf2 instance' % attr)
def exclude(self, oovars):
L = self.l.exclude(self.domain, oovars, Greater)
U = self.u.exclude(self.domain, oovars, Less)
if len(L.d) != 0 or len(U.d) != 0:
return boundsurf(L, U, self.definiteRange, self.domain)
else:
return np.vstack((L.c, U.c))
def extract(self, ind):
Ind = ind if ind.dtype != bool else where(ind)[0]
definiteRange = self.definiteRange if type(self.definiteRange) == bool \
or self.definiteRange.size == 1 else self.definiteRange[ind]
return boundsurf(self.l.extract(Ind), self.u.extract(Ind), definiteRange, self.domain)
def resolve(self):
if not self.resolved:
self._resolved = np.vstack((self.l.minimum(self.domain), self.u.maximum(self.domain)))
self.resolved = True
assert self._resolved.shape[0] == 2, 'bug in FD kernel'
return self._resolved, self.definiteRange
def invert(self, ind=None):
B = self.__class__
if ind is None:
return B(-self.u, -self.l, self.definiteRange, self.domain)
# if ind.dtype != bool:
# bool_ind = np.zeros()
assert ind.dtype == bool, 'unimplemented yet'
ind_same, ind_invert = where(logical_not(ind))[0], where(ind)[0]
l1, u1 = self.l.extract(ind_same), self.u.extract(ind_same)
l2, u2 = self.l.extract(ind_invert), self.u.extract(ind_invert)
b1 = B(l1, u1, False, self.domain)
b2 = B(-u2, -l2, False, self.domain)
b = boundsurf_join((ind_same, ind_invert), (b1, b2))
b.definiteRange = self.definiteRange
# l, u = self.u.invert(ind), self.l.invert(ind)
# if ind is None:
return b
# l_unchanged, u_unchanged = self.l.extract()
def render(self):
if self.isRendered:
return
# self.l.render(self, self.domain, GREATER)
# self.u.render(self, self.domain, LESS)
self.isRendered = True
values = lambda self, point: (self.l.value(point), self.u.value(point))
isfinite = lambda self: all(np.isfinite(self.l.c)) and all(np.isfinite(self.u.c))
# TODO: handling fd.sum()
def __add__(self, other):
if isscalar(other) or (type(other) == np.ndarray and other.size == 1):
if self.l is self.u:
# TODO: mb use id() instead of "is"
tmp = self.l+other
rr = (tmp, tmp)
else:
rr = (self.l+other, self.u+other)
return boundsurf(rr[0], rr[1], self.definiteRange, self.domain)
elif type(other) == boundsurf:# TODO: replace it by type(r[0]) after dropping Python2 support
if self.l is self.u and other.l is other.u:
# TODO: mb use id() instead of "is"
tmp = self.l+other.l
rr = (tmp, tmp)
else:
rr = (self.l+other.l, self.u+other.u)
return boundsurf(rr[0], rr[1], self.definiteRange & other.definiteRange, self.domain)
elif type(other) == np.ndarray:
assert other.shape[0] == 2, 'unimplemented yet'
L = self.l + other[0]
if self.l is self.u and np.array_equal(other[0], other[1]):
# may be from fixed variables
U = L
else:
U = self.u+other[1]
return boundsurf(L, U, self.definiteRange, self.domain)
elif isinstance(other, boundsurf): # boundsurf2
return other + self
else:
assert 0, 'unimplemented yet'
__radd__ = __add__
def __neg__(self):
l, u = self.l, self.u
if l is u:
tmp = surf(dict((k, -v) for k, v in u.d.items()), -u.c)
L, U = tmp, tmp
else:
L = surf(dict((k, -v) for k, v in u.d.items()), -u.c)
U = surf(dict((k, -v) for k, v in l.d.items()), -l.c)
return boundsurf(L, U, self.definiteRange, self.domain)
# TODO: mb rework it
__sub__ = lambda self, other: self.__add__(-other)
__rsub__ = lambda self, other: (-self).__add__(other)
def direct_sub(self, other):
self_lvl = self.level
if isinstance(other, boundsurf):
other_lvl = other.level
if self_lvl == 2 or other_lvl == 2:
from boundsurf2 import boundsurf2
B = boundsurf2
else:
B = boundsurf
L, U = other.l, other.u
return B(self.l - L, self.u - U, logical_and(self.definiteRange, other.definiteRange), self.domain)
elif type(other) == np.ndarray:
assert other.shape[0] == 2, 'probably bug'
L, U = other
B = type(self)
# definiteRange must be computed in higher level
return B(self.l - L, self.u - U, self.definiteRange, self.domain)
else:
assert 0, 'probably bug'
def __mul__(self, other, resolveSchedule = ()):
from boundsurf2 import boundsurf2
domain = self.domain
definiteRange = self.definiteRange
isArray = type(other) == np.ndarray
isBoundSurf = type(other) == boundsurf
isBoundSurf2 = type(other) == boundsurf2
if isBoundSurf:
definiteRange = logical_and(definiteRange, other.definiteRange)
R2 = other.resolve()[0] if isBoundSurf or isBoundSurf2 else other
R2_is_scalar = isscalar(R2)
if not R2_is_scalar and R2.size != 1:
assert R2.shape[0] == 2, 'bug or unimplemented yet'
R2Positive = all(R2 >= 0)
R2Negative = all(R2 <= 0)
# if not selfPositive and not selfNegative:
# assert R2Positive or R2Negative, 'bug or unimplemented yet'
if R2_is_scalar or (isArray and R2.size == 1):
if self.l is self.u:
tmp = self.l * R2
rr = (tmp, tmp)
else:
rr = (self.l * R2, self.u * R2) if R2 >= 0 else (self.u * R2, self.l * R2)
return boundsurf(rr[0], rr[1], definiteRange, domain)
R1 = self.resolve()[0]
selfPositive = all(R1 >= 0)
selfNegative = all(R1 <= 0)
if isArray:
# assert R2Positive or R2Negative, 'bug or unimplemented yet'
rr = mul_fixed_interval(self, other)
elif isBoundSurf or isBoundSurf2:
sameBounds_1 = self.l is self.u
sameBounds_2 = other.l is other.u
if self.level == other.level == 1 and sameBounds_1 and sameBounds_2 and self.b2equiv(other):
rr = b2mult_direct(self, other)
elif (((selfPositive or selfNegative) and sameBounds_1) or ((R2Positive or R2Negative) and sameBounds_2))\
and self.level == other.level == 1 and self.b2equiv(other):
Self = self if selfPositive else -self
Other = other if R2Positive else -other
r = b2mult(Self, Other)
rr = r if selfPositive == R2Positive else -r
elif (selfPositive or selfNegative) and (R2Positive or R2Negative):
Self = self if selfPositive else -self
Other = other if R2Positive else -other
if self.level == other.level == 1 and self.b2equiv(other):
r = b2mult(Self, Other)
else:
_r = Self.log() + Other.log()
if len(resolveSchedule):
_r = _r.exclude(resolveSchedule)
if type(_r) == np.ndarray:
r = np.exp(_r)
return r if selfPositive == R2Positive else -r[::-1], definiteRange
r = _r.exp()
# is definiteRange required here?
r.definiteRange = definiteRange
rr = r if selfPositive == R2Positive else -r
else:
Elems = (self, other)
rr = aux_mul_div_boundsurf(Elems, operator.mul, resolveSchedule)
# else:
# RR = R1*R2 if selfPositive and R2Positive \
# else (R1*R2)[::-1] if not selfPositive and not R2Positive\
# else R1[::-1]*R2 if not selfPositive and R2Positive\
# else R1*R2[::-1] #if selfPositive and not R2Positive
# new_l_resolved, new_u_resolved = RR
#
# l1, u1, l2, u2 = self.l, self.u, other.l, other.u
# l, u = l1.koeffs_mul(l2), u1.koeffs_mul(u2)
# l.c = new_l_resolved - l.minimum(domain)
# u.c = new_u_resolved - u.maximum(domain)
# rr = (l, u)
# return R1*other# if nanmax(R2[0])
#return 0.5 * (R1*other + R2*self)
else:
assert 0, 'bug or unimplemented yet (incorrect boundsurf.__mul__ type: %s)' % type(other)
# assert isBoundSurf2, 'bug or unimplemented yet (incorrect boundsurf.__mul__ type: %s)' % type(other)
# return other * self
R = rr if type(rr) in (boundsurf, boundsurf2) else boundsurf(rr[0], rr[1], definiteRange, domain)
R = mul_handle_nan(R, R1, R2, domain)
return R
__rmul__ = __mul__
def __div__(self, other, resolveSchedule=()):
isBoundSurf = isinstance(other, boundsurf)
assert isBoundSurf
r = aux_mul_div_boundsurf((self, other), operator.truediv, resolveSchedule)
# return r
# ind_inf_z = logical_or(logical_or(R2[0]==0, R2[1]==0), logical_or(isinf(R1[0]), isinf(R1[1])))
#(R2[0]==0) | (R2[1]==0) | (isinf(R2[0])) | (isinf(R2[1])) | (isinf(R1[0])) | isinf(R1[1])
isBoundsurf = isinstance(r, boundsurf)
rr = r.resolve()[0] if isBoundsurf else r#[0]
# import pylab, numpy
# xx = numpy.linspace(-1, 0, 1000)
# t=r.l.d.keys()[0]
# tmp=r
# pylab.plot(xx, tmp.l.d2.get(t, 0.0)*xx**2+ tmp.l.d.get(t, 0.0)*xx+ tmp.l.c, 'r')
# pylab.plot(xx, tmp.u.d2.get(t, 0.0)*xx**2+ tmp.u.d.get(t, 0.0)*xx+ tmp.u.c, 'b')
# pylab.grid()
# pylab.show()
# nans may be from other computations from a level below, although
ind_nan = logical_or(isnan(rr[0]), isnan(rr[1]))
if not any(ind_nan) or not isBoundsurf:
return r #if isBoundsurf else rr
Ind_finite = where(logical_not(ind_nan))[0]
r_finite = r.extract(Ind_finite)
ind_nan = where(ind_nan)[0]
R1 = self.resolve()[0]
R2 = other.resolve()[0]
lb1, ub1, lb2, ub2 = R1[0, ind_nan], R1[1, ind_nan], R2[0, ind_nan], R2[1, ind_nan]
tmp = np.vstack((td(lb1, lb2), td(lb1, ub2), td(ub1, lb2), td(ub1, ub2)))
R = np.vstack((nanmin(tmp, 0), nanmax(tmp, 0)))
update_div_zero(lb1, ub1, lb2, ub2, R)
b = boundsurf(surf({}, R[0]), surf({}, R[1]), False, self.domain)
r = boundsurf_join((ind_nan, Ind_finite), (b, r_finite))
definiteRange = logical_and(self.definiteRange, other.definiteRange)
r.definiteRange = definiteRange
return r
__truediv__ = __div__
# __rdiv__ = lambda self, other: other * self ** -1
# __rtruediv__ = __rdiv__
def log(self):#, domain_ind = slice(None)):
from Interval import defaultIntervalEngine
# from ooFun import oofun
# return oofun._interval_(self, domain, dtype)
# from overloads import log_interval
# return log_interval(self, self.domain, float)
ia_lvl_2_unavailable = len(self.l.d) != 1 or len(self.u.d) != 1 \
or (self.level == 2 and (len(self.l.d2) != 1 or len(self.u.d2) != 1))
is_b2 = self.level == 2
if ia_lvl_2_unavailable or is_b2:
r1 = defaultIntervalEngine(self, np.log, lambda x: 1.0/x, monotonity = 1, convexity = -1, feasLB = 0.0)[0]
else:
r1 = None
if ia_lvl_2_unavailable:
return r1
from overloads import log_b_interval
r = log_b_interval(self, r1)[0]
return r
def exp(self):#, domain_ind = slice(None)):
from Interval import defaultIntervalEngine
ia_lvl_2_unavailable = len(self.l.d) != 1 or len(self.u.d) != 1 \
or (self.level == 2 and (len(self.l.d2) != 1 or len(self.u.d2) != 1))
is_b2 = self.level == 2
if ia_lvl_2_unavailable or is_b2:
r1 = defaultIntervalEngine(self, np.exp, np.exp,
monotonity = 1, convexity = 1)[0]
else:
r1 = None
if ia_lvl_2_unavailable:
return r1
from overloads import exp_b_interval
r = exp_b_interval(self, r1, self.definiteRange, self.domain)[0]
return r
# TODO: rework it if __iadd_, __imul__ etc will be created
def copy(self):
assert '__iadd__' not in self.__dict__
assert '__imul__' not in self.__dict__
assert '__idiv__' not in self.__dict__
assert '__isub__' not in self.__dict__
return self
def b2equiv(self, other):
if len(self.l.d) > 1 or len(other.l.d) > 1 or len(self.u.d) > 1 or len(other.u.d) > 1:
return False
if len(getattr(self.l, 'd2', {})) > 1 or len(getattr(other.l, 'd2', {})) > 1 \
or len(getattr(self.u, 'd2', {})) > 1 or len(getattr(other.u, 'd2', {})) > 1:
return False
if not (set(self.l.d.keys()) == set(other.l.d.keys()) == set(self.u.d.keys()) == set(other.u.d.keys())):
return False
return True
abs = lambda self: boundsurf_abs(self)
def boundsurf_abs(b):
r, definiteRange = b.resolve()
lf, uf = r
assert lf.ndim <= 1, 'unimplemented yet'
ind_l = lf >= 0
if all(ind_l):
return b, b.definiteRange
ind_u = uf <= 0
if all(ind_u):
return -b, b.definiteRange
from Interval import defaultIntervalEngine
return defaultIntervalEngine(b, np.abs, np.sign,
monotonity = np.nan, convexity = 1,
criticalPoint = 0.0, criticalPointValue = 0.0)
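# A plain-interval sketch of the same case analysis used by boundsurf_abs above
# (illustrative only; the function itself works on boundsurf objects):
# abs([l, u]) is [l, u] when l >= 0, [-u, -l] when u <= 0, and [0, max(-l, u)]
# when the interval straddles zero.
def _interval_abs_demo(l, u):
    l, u = np.asarray(l, float), np.asarray(u, float)
    lo = np.where(l >= 0, l, np.where(u <= 0, -u, 0.0))
    hi = np.where(l >= 0, u, np.where(u <= 0, -l, np.maximum(-l, u)))
    return np.vstack((lo, hi))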
def Join(inds, arrays):
# print(type(inds), type(arrays), len(inds), len(arrays))
# print(PythonSum(ind.size for ind in inds), arrays[0].dtype)
r = np.empty(PythonSum(ind.size for ind in inds), arrays[0].dtype)
# print(r.shape, r.dtype)
for ind, arr in zip(inds, arrays):
if ind.size == 0:
continue
# print (ind.shape, arr.shape)
r[ind] = arr
return r
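# Example of Join (illustrative): values computed separately for disjoint index
# groups are merged back into a single array in the original element order.
def _join_demo():
    inds = (np.array([0, 2]), np.array([1, 3]))
    arrays = (np.array([10.0, 30.0]), np.array([20.0, 40.0]))
    return Join(inds, arrays)  # -> array([10., 20., 30., 40.])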
def surf_join(inds, S):
c = Join(inds, [s.c for s in S]) # list, not iterator!
keys = set.union(*[set(s.d.keys()) for s in S])
d = dict((k, Join(inds, [s.d.get(k, arrZero) for s in S])) for k in keys)
keys = set.union(*[set(getattr(s,'d2', {}).keys()) for s in S])
if len(keys) == 0:
return surf(d, c)
d2 = dict((k, Join(inds, [getattr(s, 'd2', {}).get(k, arrZero) for s in S])) for k in keys)
from boundsurf2 import surf2
return surf2(d2, d, c)
def boundsurf_join(inds, B):
inds = [(ind if ind.dtype != bool else where(ind)[0]) for ind in inds]
# B = [b for b in B if b is not None]
L = surf_join(inds, [b.l for b in B])
U = surf_join(inds, [b.u for b in B]) #if self.l is not self.u else L
definiteRange = True \
if PythonAll(np.array_equiv(True, b.definiteRange) for b in B)\
else Join(inds, [np.asarray(b.definiteRange) for b in B])
from boundsurf2 import boundsurf2
b = boundsurf if type(L) == type(U) == surf else boundsurf2
return b(L, U, definiteRange, B[0].domain)
#split = lambda condition1, condition2: \
# (
# where(condition1)[0],
# where(logical_and(condition2, logical_not(condition1)))[0],
# where(logical_and(logical_not(condition1), logical_not(condition2)))[0]
# )
def split(*conditions):
#Rest = np.ones_like(conditions[0]) # dtype bool
#Temporary for PyPy:
Rest = np.ones(conditions[0].shape, conditions[0].dtype)
r = []
for c in conditions:
tmp = logical_and(c, Rest)
r.append(where(tmp)[0])
Rest &= logical_not(c)
r.append(where(Rest)[0])
return r
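# Example of split (illustrative): conditions are applied in order, each element
# goes to the first group whose condition it satisfies, and the final group
# collects whatever is left.
def _split_demo():
    a = np.array([-2.0, -0.5, 0.5, 2.0])
    ind_low, ind_high, ind_rest = split(a <= -1, a >= 1)
    return ind_low, ind_high, ind_rest  # -> (array([0]), array([3]), array([1, 2]))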
Split = lambda condition1, condition2: \
(
condition1,
logical_and(condition2, logical_not(condition1)),
logical_and(logical_not(condition1), logical_not(condition2))
)
def devided_interval(inp, r, domain, dtype, feasLB = -inf, feasUB = inf):
import ooFun
lb_ub, definiteRange = inp._interval(domain, dtype, ia_surf_level = 2)
isBoundSurf = isinstance(lb_ub, boundsurf)
if not isBoundSurf:
return ooFun.oofun._interval_(r, domain, dtype)
lb_ub_resolved = lb_ub.resolve()[0]
if feasLB != -inf or feasUB != inf:
from Interval import adjustBounds
lb_ub_resolved, definiteRange = adjustBounds(lb_ub_resolved, definiteRange, feasLB, feasUB)
lb_ub.definiteRange = definiteRange
lb, ub = lb_ub_resolved
Inds = split(ub <= -0.0, lb >= 0.0)
assert len(Inds) == 3
monotonities = [r.engine_monotonity] * (len(Inds)-1) if r.engine_monotonity is not np.nan \
else r.monotonities
convexities = [r.engine_convexity] * (len(Inds)-1) if r.engine_convexity is not np.nan else r.convexities
m = PythonSum(ind_.size for ind_ in Inds)
inds, rr = [], []
from Interval import defaultIntervalEngine
for j, ind in enumerate(Inds[:-1]):
if ind.size != 0:
tmp = defaultIntervalEngine(lb_ub, r.fun, r.d, monotonity=monotonities[j], convexity=convexities[j],
feasLB = feasLB, feasUB = feasUB, domain_ind = ind if ind.size != m else slice(None))[0]
if ind.size == m:
return tmp, tmp.definiteRange
rr.append(tmp)
inds.append(ind)
_ind = Inds[-1]
if _ind.size:
if convexities == (-1, 1) and r.engine_monotonity == 1:
tmp = defaultIntervalEngine(lb_ub, r.fun, r.d, monotonity = r.engine_monotonity, convexity=-101,
feasLB = feasLB, feasUB = feasUB, domain_ind = _ind if _ind.size != m else slice(None))[0]
if _ind.size == m:
return tmp, tmp.definiteRange
elif convexities == (1, -1) and r.engine_monotonity is not np.nan:
tmp = defaultIntervalEngine(lb_ub, r.fun, r.d, monotonity = r.engine_monotonity, convexity= 9, # 10-1
feasLB = feasLB, feasUB = feasUB, domain_ind = _ind if _ind.size != m else slice(None))[0]
if _ind.size == m:
return tmp, tmp.definiteRange
else:
DefiniteRange = definiteRange if type(definiteRange) == bool or definiteRange.size == 1 \
else definiteRange[_ind]
Tmp, definiteRange3 = \
ooFun.oofun._interval_(r, domain, dtype, inputData = (lb_ub_resolved[:, _ind], DefiniteRange))
if _ind.size == m:
return Tmp, definiteRange3
tmp = boundsurf(surf({}, Tmp[0]), surf({}, Tmp[1]), definiteRange3, domain)
rr.append(tmp)
inds.append(_ind)
b = boundsurf_join(inds, rr)
return b, b.definiteRange
def aux_mul_div_boundsurf(Elems, op, resolveSchedule=()):
_r = []
_resolved = []
changeSign = False
indZ = False
definiteRange = np.array(True)
for elem in Elems:
_R = elem.resolve()[0]
lb, ub = _R
ind_positive, ind_negative, ind_z = Split(lb >= 0, ub <= 0)
not_ind_negative = logical_not(ind_negative)
changeSign = logical_xor(changeSign, ind_negative)
indZ = logical_or(indZ, ind_z)
tmp1 = elem.extract(not_ind_negative)
tmp2 = -elem.extract(ind_negative)
Tmp = boundsurf_join((not_ind_negative, ind_negative), (tmp1, tmp2))#.log()
_r.append(Tmp)
_resolved.append(_R)
definiteRange = logical_and(definiteRange, elem.definiteRange)
use_exp = True
if op == operator.mul:
if len(_r) == 2 and _r[0].level == _r[1].level == 1 and _r[0].b2equiv(_r[1]):
rr = b2mult(_r[0], _r[1])
use_exp = False
else:
rr = PythonSum(elem.log() for elem in _r)#.exp()
else:
assert op == operator.truediv and len(Elems) == 2
if 1 and _r[1].level == 1 and _r[0].b2equiv(_r[1]):
rr = b2div(_r[0], _r[1])
use_exp = False
else:
rr = (_r[0].log() - _r[1].log())#.exp()
changeSign = logical_and(changeSign, logical_not(indZ))
keepSign = logical_and(logical_not(changeSign), logical_not(indZ))
if use_exp:
if len(resolveSchedule):
rr = rr.exclude(resolveSchedule)
if type(rr) == np.ndarray:
#print('asdf')
r = np.exp(rr)
r[:, changeSign] = -r[:, changeSign][::-1]
return r#, definiteRange
# print len(rr.dep)
rr = rr.exp()
# print type(rr)
# if type(rr) != boundsurf:
# print(rr.l.d2, rr.l.d, rr.l.c, '----', rr.u.d2, rr.u.d, rr.u.c)
_rr, _inds = [], []
if any(keepSign):
_rr.append(rr.extract(keepSign))
_inds.append(keepSign)
if any(changeSign):
_rr.append(-rr.extract(changeSign))
_inds.append(changeSign)
if any(indZ):
assert len(Elems) == 2, 'unimplemented yet'
lb1, ub1 = Elems[0].resolve()[0] # R1
other_lb, other_ub = Elems[1].resolve()[0] # R2
IndZ = where(indZ)[0]
tmp_z = np.vstack((
op(lb1[IndZ], other_lb[IndZ]),
op(ub1[IndZ], other_lb[IndZ]),
op(lb1[IndZ], other_ub[IndZ]),
op(ub1[IndZ], other_ub[IndZ])
))
        l_z, u_z = nanmin(tmp_z, 0), nanmax(tmp_z, 0)
# -*- coding: utf-8 -*-
'''
Survival (toxico-dynamics) models, forward simulation and model fitting.
References
----------
[1] Jager et al. (2011). General unified threshold model of survival -
a toxicokinetic-toxicodynamic framework for ecotoxicology.
Environmental Science & Technology, 45(7), 2529-2540.
'''
import sys
import numpy as np
import pandas as pd
import scipy.integrate as sid
from scipy.special import erf
import lmfit
import matplotlib.pyplot as plt
import seaborn as sns
from tqdm import tqdm
import corner
#ODE solver settings
ATOL = 1e-9
MXSTEP = 1000
def mortality_lognormal(r, s):
'''Calculate mortality from cumulative log-normal distribution
Keyword arguments:
:param r: ratio of body burdens to cbr, summed (dimensionless)
:param s: dose-response slope (dimensionless)
:returns: mortality fraction (fraction)
'''
if r>0:
mean = 0.0
x = (np.log10(r) - mean) / (s * np.sqrt(2))
return 0.5 * (1 + erf(x))
else:
return 0.0
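# Example (illustrative): when the summed body-burden ratio equals the critical
# body residue (r = 1), the cumulative log-normal gives 50% mortality regardless
# of the slope s.
def _mortality_demo():
    return mortality_lognormal(1.0, 0.5)  # -> 0.5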
def guts_sic(y, t, ke, cd):
'''One-compartment scaled internal concentration ODE (rhs)'''
# One-compartment kinetics model for body residues
dy = ke*(cd(t) - y)
return dy
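# Illustrative check of the one-compartment kinetics (a sketch, using scipy.integrate
# imported above as sid): for a constant external concentration cd(t) = c0 the scaled
# internal concentration follows y(t) = c0 * (1 - exp(-ke * t)).
def _guts_sic_demo(ke=0.5, c0=2.0):
    t = np.linspace(0.0, 10.0, 50)
    y = sid.odeint(guts_sic, 0.0, t, args=(ke, lambda tt: c0))[:, 0]
    return np.allclose(y, c0 * (1.0 - np.exp(-ke * t)), atol=1e-4)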
def guts_sic_sd(y, t, params, cd, dy):
'''GUTS-SIC-SD: Scaled internal concentration + hazard rate survival ODE (rhs)'''
v = params
n = y.size - 1
# One-compartment kinetics model for body residues
dcv = guts_sic(y[:n], t, v['ke'], cd)
#Dose metric
    cstot = np.sum(y[:n])
'''Collection of classes and functions for loading, interpolation and processing of atomic data.
Refer also to the adas_files.py script.
'''
import numpy as np
import matplotlib.pyplot as plt
from scipy.interpolate import RectBivariateSpline, interp1d
from matplotlib import cm
import os, sys, copy
import scipy.ndimage
from scipy.linalg import svd
from scipy import constants
from . import adas_files
def get_adas_file_types():
'''Obtain a description of each ADAS file type and its meaning in the context of Aurora.
Returns
------------
dict
Dictionary with keys given by the ADAS file types and values giving a description for them.
Notes
---------
For background on ADAS generalized collisional-radiative modeling and data formats, refer to
[1]_.
References
-----------------
.. [1] Summers et al., "Ionization state, excited populations and emission of impurities
in dynamic finite density plasmas: I. The generalized collisional-radiative model for
light elements", Plasma Physics and Controlled Fusion, 48:2, 2006
'''
return {'acd':'effective recombination',
'scd':'effective ionization',
'prb':'continuum radiation',
'plt':'line radiation',
'ccd':'thermal charge exchange',
'prc':'thermal charge exchange continuum radiation',
'pls':'line radiation in the SXR range',
'prs':'continuum radiation in the SXR range',
'brs':'continuum spectral bremstrahlung',
'fis':'sensitivity in the SXR range',
'pbs':'impurity bremsstrahlung in SXR range, also included in prs files'
}
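# Example (illustrative; the file name is hypothetical): the three-letter prefix of
# an ADF11 file name selects its physical meaning.
def _adas_file_type_demo(filename='scd89_ar.dat'):
    return get_adas_file_types().get(filename[:3], 'unknown')  # -> 'effective ionization'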
class adas_file:
'''Read ADAS file in ADF11 format over the given density and temperature grids.
Note that such grids vary between files, and the species they refer to may too.
Refer to ADAS documentation for details on each file.
Parameters
----------
filepath : str
Path to location where ADAS file is located.
'''
def __init__(self, filepath):
self.filepath = filepath
self.filename=filepath.split('/')[-1]
self.file_type = self.filename[:3]
if self.file_type not in ['brs','sxr']:
self.imp = self.filename.split('_')[1].split('.')[0]
# get data
self.load()
# settings for plotting
self.n_ion = self.data.shape[0]
        self.ncol = np.ceil(np.sqrt(self.n_ion))
"""
=====================================================================
Reconstruction of the diffusion signal with the kurtosis tensor model
=====================================================================
The diffusion kurtosis model is an expansion of the diffusion tensor model
(see :ref:`example_reconst_dti`). In addition to the diffusion tensor (DT), the
diffusion kurtosis model quantifies the degree to which water diffusion in
biological tissues is non-Gaussian using the kurtosis tensor (KT)
[Jensen2005]_.
Measurements of non-Gaussian diffusion from the diffusion kurtosis model are of
interest because they can be used to characterize tissue microstructural
heterogeneity [Jensen2010]_ and to derive concrete biophysical parameters, such
as the density of axonal fibres and diffusion tortuosity [Fieremans2011]_.
Moreover, DKI can be used to resolve crossing fibers in tractography and to
obtain invariant rotational measures not limited to well-aligned fiber
populations [NetoHe2015]_.
The diffusion kurtosis model expresses the diffusion-weighted signal as:
.. math::
S(n,b)=S_{0}e^{-bD(n)+\frac{1}{6}b^{2}D(n)^{2}K(n)}
where $\mathbf{b}$ is the applied diffusion weighting (which is dependent on
the measurement parameters), $S_0$ is the signal in the absence of diffusion
gradient sensitization, $\mathbf{D(n)}$ is the value of diffusion along
direction $\mathbf{n}$, and $\mathbf{K(n)}$ is the value of kurtosis along
direction $\mathbf{n}$. The directional diffusion $\mathbf{D(n)}$ and kurtosis
$\mathbf{K(n)}$ can be related to the diffusion tensor (DT) and kurtosis tensor
(KT) using the following equations:
.. math::
D(n)=\sum_{i=1}^{3}\sum_{j=1}^{3}n_{i}n_{j}D_{ij}
and
.. math::
K(n)=\frac{MD^{2}}{D(n)^{2}}\sum_{i=1}^{3}\sum_{j=1}^{3}\sum_{k=1}^{3}
\sum_{l=1}^{3}n_{i}n_{j}n_{k}n_{l}W_{ijkl}
where $D_{ij}$ are the elements of the second-order DT, and $W_{ijkl}$ the
elements of the fourth-order KT and $MD$ is the mean diffusivity. As the DT,
KT has antipodal symmetry and thus only 15 Wijkl elements are needed to fully
characterize the KT:
.. math::
\begin{matrix} ( & W_{xxxx} & W_{yyyy} & W_{zzzz} & W_{xxxy} & W_{xxxz}
& ... \\
& W_{xyyy} & W_{yyyz} & W_{xzzz} & W_{yzzz} & W_{xxyy}
& ... \\
& W_{xxzz} & W_{yyzz} & W_{xxyz} & W_{xyyz} & W_{xyzz}
& & )\end{matrix}
In the following example we show how to fit the diffusion kurtosis model on
diffusion-weighted multi-shell datasets and how to estimate diffusion kurtosis
based statistics.
First, we import all relevant modules:
"""
import numpy as np
import matplotlib.pyplot as plt
import dipy.reconst.dki as dki
import dipy.reconst.dti as dti
import dipy.reconst.dki_micro as dki_micro
from dipy.data import fetch_cfin_multib
from dipy.data import read_cfin_dwi
from dipy.segment.mask import median_otsu
from scipy.ndimage.filters import gaussian_filter
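"""
As a small illustrative sketch of the directional diffusivity formula above, the
double contraction $D(n)=\sum_{ij}n_{i}n_{j}D_{ij}$ can be evaluated with
``numpy.einsum`` using made-up values (``D_example`` and ``n_example`` are
hypothetical, not data from this example):
"""

D_example = np.diag([1.7e-3, 0.3e-3, 0.3e-3])  # a prolate diffusion tensor (mm^2/s)
n_example = np.array([1.0, 0.0, 0.0])          # unit direction along the principal axis
adc_along_n = np.einsum('i,ij,j->', n_example, D_example, n_example)  # -> 1.7e-3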
"""
DKI requires multi-shell data, i.e. data acquired from more than one non-zero
b-value. Here, we use fetch to download a multi-shell dataset which was kindly
provided by Hansen and Jespersen (more details about the data are provided in
their paper [Hansen2016]_). The total size of the downloaded data is 192
MBytes, however you only need to fetch it once.
"""
fetch_cfin_multib()
img, gtab = read_cfin_dwi()
data = img.get_data()
affine = img.affine
"""
Function ``read_cfin_dwi`` returns img and gtab, which contain respectively
a nibabel Nifti1Image object (where the data can be extracted) and a
GradientTable object with information about the b-values and b-vectors.
Before fitting the data, we perform some data pre-processing. We first compute
a brain mask to avoid unnecessary calculations on the background of the image.
"""
maskdata, mask = median_otsu(data, 4, 2, False, vol_idx=[0, 1], dilate=1)
"""
Since the diffusion kurtosis models involves the estimation of a large number
of parameters [TaxCMW2015]_ and since the non-Gaussian components of the
diffusion signal are more sensitive to artefacts [NetoHe2012]_, it might be
favorable to suppress the effects of noise and artefacts before diffusion
kurtosis fitting. In this example the effects of noise and artefacts are
suppress by using 3D Gaussian smoothing (with a Gaussian kernel with
fwhm=1.25) as suggested by pioneer DKI studies (e.g. [Jensen2005]_,
[NetoHe2012]_). Although here the Gaussian smoothing is used so that results
are comparable to these studies, it is important to note that more advanced
noise and artifact suppression algorithms are available in DIPY_ (e.g. the
non-local means filter :ref:`example-denoise-nlmeans`).
"""
fwhm = 1.25
gauss_std = fwhm / np.sqrt(8 * np.log(2))
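"""
A sketch of how the smoothing would then be applied volume by volume (the exact
call in the full example may differ):
"""

data_smooth = np.zeros(data.shape)
for v in range(data.shape[-1]):
    data_smooth[..., v] = gaussian_filter(data[..., v], sigma=gauss_std)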
"""
CCT modelling and optimization code.
GPU (CUDA) accelerated particle tracking for cctpy beamlines.

Note: in the test code, ga32 and ga64 are defined as
ga32 = GPU_ACCELERATOR(float_number_type=GPU_ACCELERATOR.FLOAT32)
ga64 = GPU_ACCELERATOR(float_number_type=GPU_ACCELERATOR.FLOAT64, block_dim_x=512)

2021-06-17: added CPU mode

Author: ่ตตๆถฆๆ
Date: 2021-05-04
"""
# whether to run in CPU mode
from packages.beamline import Beamline
from packages.cct import CCT
from packages.magnets import *
from packages.particles import *
from packages.trajectory import Trajectory
from packages.line2s import *
from packages.local_coordinate_system import LocalCoordinateSystem
from packages.base_utils import BaseUtils
from packages.constants import *
from packages.point import *
import warnings  # since v0.1.1, deprecation warnings for outdated methods
from scipy.integrate import solve_ivp  # since v0.1.1, ODE45
import numpy
import os  # since v0.1.1, to query the number of CPU cores
import sys
import random  # since v0.1.1, random numbers
import math
import matplotlib.pyplot as plt
from typing import Callable, Dict, Generic, Iterable, List, NoReturn, Optional, Tuple, TypeVar, Union
import time  # since v0.1.1, to time computations
import multiprocessing  # since v0.1.1, parallel computation
__CPU_MODE__: bool = False
try:
import pycuda.autoinit
import pycuda.driver as drv
from pycuda.compiler import SourceModule
except ModuleNotFoundError as e:
__CPU_MODE__ = True
class GPU_ACCELERATOR:
    # CUDA floating-point type: either 32-bit or 64-bit floats can be selected;
    # the former computes faster but with lower precision.
    # With 32-bit floats the typical tracking error is about 0.05 mm and 0.01 mrad.
FLOAT32: str = "FLOAT32"
FLOAT64: str = "FLOAT64"
QS_DATA_LENGTH = 16
def __init__(self,
float_number_type: str = FLOAT32,
block_dim_x: int = 1024,
                 # at most 2000*120 current elements: if each turn is cut into 120 segments, at most 2000 turns
                 max_current_element_number: int = 2000*120,
                 max_qs_datas_length: int = 10000,  # at most 10000 QS magnets
cpu_mode: bool = False
) -> None:
"""
        Create a GPU accelerator used to speed up particle tracking along cctpy beamlines.
        A few other utilities are provided as well; they are not efficient and are meant for testing only.

        float_number_type   floating-point type, FLOAT32 or FLOAT64 (i.e. run in 32-bit or 64-bit mode),
            default 32-bit. 64-bit floats are more precise, but the computation may be 2-10 times
            slower than with 32-bit floats.
        block_dim_x         number of threads per block, default 1024; it must be a power of 2.
            With 64-bit floats a value of 1024 may fail, so 512 or smaller should be used.
            Different block_dim_x values may also affect performance.
            Conceptually a GPU is divided into a number of thread blocks, each containing several
            threads. Threads inside one block can use __shared__ memory (fast to access) and have a
            synchronization mechanism, so they can easily cooperate; there is no synchronization
            between blocks, so no inter-block communication is attempted.
        max_current_element_number   maximum number of current elements. For GPU acceleration the
            CCT data is copied to device memory as current elements. The default 2000*120 can be
            read as at most 2000 turns, each turn split into 120 segments.
        max_qs_datas_length   maximum number of QS magnets, default 10000 (rarely needs to be larger).
        cpu_mode             run in CPU mode.
"""
self.float_number_type = float_number_type
self.max_current_element_number = int(max_current_element_number)
self.max_qs_datas_length = int(max_qs_datas_length)
        self.cpu_mode: bool = __CPU_MODE__ or cpu_mode  # CPU mode is used if either flag is True
        if __CPU_MODE__:
            print("pycuda is not installed, GPU acceleration will run in CPU mode")
        elif self.cpu_mode:
            print("GPU acceleration will run in CPU mode")

        # check that block_dim_x is valid
        if block_dim_x > 1024 or block_dim_x < 0:
            raise ValueError(
                f"block_dim_x should be within [1, 1024], not {block_dim_x}")
        if block_dim_x & (block_dim_x-1) != 0:
            raise ValueError(f"block_dim_x should be a power of 2, not {block_dim_x}")
        self.block_dim_x: int = int(block_dim_x)

        # header includes
cuda_code_00_include = """
        // only stdio is included, for the standard-output function printf()
        // the CUDA built-in functions and qualifiers used below need no extra includes
#include <stdio.h>
"""
# ๅฎไนๆตฎ็น็ฑปๅ
cuda_code_01_float_type_define: str = None
if float_number_type == GPU_ACCELERATOR.FLOAT32:
cuda_code_01_float_type_define = """
// ๅฎไนไธบ 32 ไฝๆตฎ็นๆฐๆจกๅผ
#define FLOAT32
"""
self.numpy_dtype = numpy.float32
elif float_number_type == GPU_ACCELERATOR.FLOAT64:
cuda_code_01_float_type_define = """
// ๅฎไนไธบ 64 ไฝๆตฎ็นๆฐๆจกๅผ
#define FLOAT64
"""
self.numpy_dtype = numpy.float64
            if self.block_dim_x > 512:
                print(f"The GPU is set to 64-bit mode and the number of threads per block ({self.block_dim_x}) may be too large, so the kernel may fail to launch.\n" +
                      "The typical exception is pycuda._driver.LaunchError: cuLaunchKernel failed: too many resources requested for launch\n" +
                      "If this happens, reduce the number of threads per block.")
        else:
            raise ValueError(
                "float_number_type must be GPU_ACCELERATOR.FLOAT32 or GPU_ACCELERATOR.FLOAT64")
# ๅฎๅฎไน
# CUDA ไปฃ็ ๅ C ่ฏญ่จๅ ไนไธๆจกไธๆ ทใๅช่ฆๆ C/C++ ๅบ็ก๏ผๅฐฑ่ฝ็ๆ CUDA ไปฃ็
cuda_code_02_define = """
// ๆ นๆฎๅฎไน็ๆตฎ็นๆฐๆจกๅผ๏ผๅฐ FLOAT ๅฎๆฟๆขไธบ float ๆ double
#ifdef FLOAT32
#define FLOAT float
#else
#define FLOAT double
#endif
// ็ปดๅบฆ ไธ็ปด
#define DIM (3)
// ็ปดๅบฆ็ดขๅผ 0 1 2 ่กจ็คบ X Y Z๏ผ่ฟๆ ทๅฏนไธไธชๆฐ็ปๅๅผ๏ผ็่ตทๆฅๆธ
ๆฐไธไบ
#define X (0)
#define Y (1)
#define Z (2)
// ็ฒๅญๅๆฐ็ดขๅผ (px0, py1, pz2, vx3, vy4, vz5, rm6 ็ธๅฏน่ดจ้, e7 ็ต่ท้, speed8 ้็, distance9 ่ฟๅจ่ท็ฆป)
#define PARTICLE_DIM (10)
#define PX (0)
#define PY (1)
#define PZ (2)
#define VX (3)
#define VY (4)
#define VZ (5)
#define RM (6)
#define E (7)
#define SPEED (8)
#define DISTANCE (9)
// ๅ็บฟ็จๆฐ็ฎ
#define BLOCK_DIM_X ({block_dim_x})
#define QS_DATA_LENGTH (16)
#define MAX_CURRENT_ELEMENT_NUMBER ({max_current_element_number})
#define MAX_QS_DATAS_LENGTH ({max_qs_datas_length})
""".format(
block_dim_x=self.block_dim_x,
max_current_element_number=self.max_current_element_number,
max_qs_datas_length=self.max_qs_datas_length
)
# ๅ้่ฟ็ฎๅ
่ๅฝๆฐ
cuda_code_03_vct_functions = """
// ๅ้ๅไน
// ไผ ๅ
ฅ a b ret ไธไธชๆฐ็ป๏ผๅฐ a ร b ็็ปๆไผ ๅ
ฅ ret ไธญ
// ไป็ป้
่ฏปๅ
ทไฝๅฎ็ฐ๏ผๅ็ฐ ret ไธ่ฝๆฏ a ๆ่
b๏ผ่ฟๆ ทไผๅฏผ่ด็ปๆๅบ้
__device__ __forceinline__ void vct_cross(FLOAT *a, FLOAT *b, FLOAT *ret) {
ret[X] = a[Y] * b[Z] - a[Z] * b[Y];
ret[Y] = -a[X] * b[Z] + a[Z] * b[X];
ret[Z] = a[X] * b[Y] - a[Y] * b[X];
}
// ๅ้ๅๅฐๅ ๆณ
// ไผ ๅ
ฅไธคไธชๆฐ็ป a_local ๅ b๏ผๅฐ a_local + b ็็ปๆๆพๅ
ฅ a_local ไธญ
__device__ __forceinline__ void vct_add_local(FLOAT *a_local, FLOAT *b) {
a_local[X] += b[X];
a_local[Y] += b[Y];
a_local[Z] += b[Z];
}
// ๅ้ๅๅฐๅ ๆณ
// ๅฝๆฐๆไนๅไธ๏ผไฝๆฏๅฎๆ็ๆฏ 6 ็ปดๅ ๆณ
__device__ __forceinline__ void vct6_add_local(FLOAT *a_local, FLOAT *b) {
a_local[X] += b[X];
a_local[Y] += b[Y];
a_local[Z] += b[Z];
a_local[X+DIM] += b[X+DIM];
a_local[Y+DIM] += b[Y+DIM];
a_local[Z+DIM] += b[Z+DIM];
}
// ๅ้ๅ ๆณ
// ไผ ๅ
ฅ a b ret ไธไธชๆฐ็ป๏ผๅฐ a + b ็็ปๆไผ ๅ
ฅ ret ไธญ
__device__ __forceinline__ void vct_add(FLOAT *a, FLOAT *b, FLOAT *ret) {
ret[X] = a[X] + b[X];
ret[Y] = a[Y] + b[Y];
ret[Z] = a[Z] + b[Z];
}
// ๅ้ๅ ๆณ
// ๅฝๆฐๆไนๅไธ๏ผไฝๆฏๅฎๆ็ๆฏ 6 ็ปดๅ ๆณ
__device__ __forceinline__ void vct6_add(FLOAT *a, FLOAT *b, FLOAT *ret) {
ret[X] = a[X] + b[X];
ret[Y] = a[Y] + b[Y];
ret[Z] = a[Z] + b[Z];
ret[X+DIM] = a[X+DIM] + b[X+DIM];
ret[Y+DIM] = a[Y+DIM] + b[Y+DIM];
ret[Z+DIM] = a[Z+DIM] + b[Z+DIM];
}
// ๅ้*ๅธธๆฐ๏ผๅๅฐๆไฝ
__device__ __forceinline__ void vct_dot_a_v(FLOAT a, FLOAT *v) {
v[X] *= a;
v[Y] *= a;
v[Z] *= a;
}
// ๅ้*ๅธธๆฐ๏ผๅๅฐๆไฝใๅ
ญ็ปด
__device__ __forceinline__ void vct6_dot_a_v(FLOAT a, FLOAT *v) {
v[X] *= a;
v[Y] *= a;
v[Z] *= a;
v[X+DIM] *= a;
v[Y+DIM] *= a;
v[Z+DIM] *= a;
}
// ๅ้*ๅธธๆฐใ็ปๆๅๅ
ฅ ret ไธญ
__device__ __forceinline__ void vct_dot_a_v_ret(FLOAT a, FLOAT *v, FLOAT *ret) {
ret[X] = v[X] * a;
ret[Y] = v[Y] * a;
ret[Z] = v[Z] * a;
}
// ๅ้*ๅธธๆฐใๅ
ญ็ปดใ็ปๆๅๅ
ฅ ret ไธญ
__device__ __forceinline__ void vct6_dot_a_v_ret(FLOAT a, FLOAT *v, FLOAT *ret) {
ret[X] = v[X] * a;
ret[Y] = v[Y] * a;
ret[Z] = v[Z] * a;
ret[X+DIM] = v[X+DIM] * a;
ret[Y+DIM] = v[Y+DIM] * a;
ret[Z+DIM] = v[Z+DIM] * a;
}
// ๅ้ๅ
็งฏ๏ผ็ดๆฅ่ฟๅๆ ้ๅผ
__device__ __forceinline__ FLOAT vct_dot_v_v(FLOAT *v,FLOAT *w){
return v[X] * w[X] + v[Y] * w[Y] + v[Z] * w[Z];
}
// ๅ้ๆท่ด่ตๅผ๏ผๆบ src๏ผๅฎฟ des
__device__ __forceinline__ void vct_copy(FLOAT *src, FLOAT *des) {
des[X] = src[X];
des[Y] = src[Y];
des[Z] = src[Z];
}
// ๅ้ๆท่ด่ตๅผ๏ผๅ
ญ็ปด๏ผๆบ src๏ผๅฎฟ des
__device__ __forceinline__ void vct6_copy(FLOAT *src, FLOAT *des) {
des[X] = src[X];
des[Y] = src[Y];
des[Z] = src[Z];
des[X+DIM] = src[X+DIM];
des[Y+DIM] = src[Y+DIM];
des[Z+DIM] = src[Z+DIM];
}
// ๆฑๅ้้ฟๅบฆ๏ผ็ดๆฅ่ฟๅ่ฎก็ฎ็ปๆ
__device__ __forceinline__ FLOAT vct_len(FLOAT *v) {
// ๆ นๆฎ 32 ไฝ่ฟๆฏ 64 ไฝๆไธๅ็ๅฎ็ฐ
#ifdef FLOAT32
return sqrtf(v[X] * v[X] + v[Y] * v[Y] + v[Z] * v[Z]);
#else
return sqrt(v[X] * v[X] + v[Y] * v[Y] + v[Z] * v[Z]);
#endif
}
// ๅฐ็ข้ v ็ฝฎไธบ 0
__device__ __forceinline__ void vct_zero(FLOAT *v) {
v[X] = 0.0;
v[Y] = 0.0;
v[Z] = 0.0;
}
// ๆๅฐ็ข้๏ผไธ่ฌ็จไบ debug
__device__ __forceinline__ void vct_print(FLOAT *v) {
#ifdef FLOAT32
printf("%.15f, %.15f, %.15f\\n", v[X], v[Y], v[Z]);
#else
printf("%.15lf, %.15lf, %.15lf\\n", v[X], v[Y], v[Z]);
#endif
}
// ๆๅฐๅ
ญ็ปด็ข้๏ผไธ่ฌ็จไบ debug
__device__ __forceinline__ void vct6_print(FLOAT *v) {
#ifdef FLOAT32
printf("%.15f, %.15f, %.15f, %.15f, %.15f, %.15f\\n", v[X], v[Y], v[Z], v[X+DIM], v[Y+DIM], v[Z+DIM]);
#else
printf("%.15lf, %.15lf, %.15lf, %.15lf, %.15lf, %.15lf\\n", v[X], v[Y], v[Z] ,v[X+DIM], v[Y+DIM], v[Z+DIM]);
#endif
}
// ็ข้ๅๆณ๏ผ็ปๆๆพๅจ ret ไธญ
__device__ __forceinline__ void vct_sub(FLOAT *a, FLOAT *b, FLOAT *ret) {
ret[X] = a[X] - b[X];
ret[Y] = a[Y] - b[Y];
ret[Z] = a[Z] - b[Z];
}
"""
cuda_code_04_dB = """
// ่ฎก็ฎ็ตๆตๅ
ๅจ p ็นไบง็็็ฃๅบ
// ๅ
ถไธญ p0 ่กจ็คบ็ตๆตๅ
็ไฝ็ฝฎ
// kl ๅซไน่งไธ
// ่ฟๅๅผๆพๅจ ret ไธญ
//
// ๅๆฌ็ตๆตๅ
็่ฎก็ฎๅ
ฌๅผๅฆไธ๏ผ
// dB = (miu0/4pi) * Idl ร r / (r^3)
// ๅ
ถไธญ r = p - p0๏ผp0 ๆฏ็ตๆตๅ
็ไฝ็ฝฎ
//
// ๅฆๆ่่ๆๅฐไธๆฎต็ตๆต๏ผ่ตท็นs0๏ผ็ป็นs1๏ผๅไบง็็็ฃๅบไธบ
// ฮB = (miu0/4pi) * I * (s1-s2)*r / (r^3)
// ๅๆ ท็๏ผr = p - p0๏ผp0 = (s1+s2)/2
//
// ๅ ไธบ (miu0/4pi) * I * (s1-s2) ๆดไฝๅทฒ็ฅ๏ผๆไปฅๆๅ่ฎก็ฎไธบ kl
// p0 ๆๅๅทฒ็ฅ๏ผๅณ (s1+s2)/2๏ผไนๆๅ็ปๅบ
// ่ฟๆ ทๅฏไปฅๅๅฐๆ ๆไน็้ๅค่ฎก็ฎ
//
// ่กฅๅ
๏ผๅๆ ๅๆฏๅ
จๅฑๅๆ
__device__ __forceinline__ void dB(FLOAT *kl, FLOAT *p0, FLOAT *p, FLOAT *ret){
FLOAT r[DIM];
FLOAT rr;
vct_sub(p, p0, r); // r = p - p0
rr = vct_len(r); // rr = abs(r)
rr = rr*rr*rr; // rr = rr^3
vct_cross(kl, r, ret); // ret = kl ร r
vct_dot_a_v(1.0/rr, ret); // ret = (kl ร r)/(rr^3)
}
// ่ฎก็ฎๆๆ็็ตๆตๅ
ๅจ p ็นไบง็็็ฃๅบ
// number ่กจ็คบ็ตๆตๅ
ๆฐ็ฎ
// kls ๆฏ DIM = 3 ็ป่กจ็คบไธไธช kl
// p0s ๆฏ DIM = 3 ็ป่กจ็คบไธไธช p0
// shared_ret ๆฏไธไธช shared ้๏ผไฟๅญ่ฟๅๅผ
// ่ฐ็จ่ฏฅๆนๆณๅ๏ผๅบ่ฟ่กๅๆญฅๅค็ __syncthreads();
__device__ void current_element_B(FLOAT *kls, FLOAT *p0s, int number, FLOAT *p, FLOAT *shared_ret){
int tid = threadIdx.x; // 0-1023 (decide by BLOCK_DIM_X)
FLOAT db[DIM];
__shared__ FLOAT s_dbs[DIM*BLOCK_DIM_X];
vct_zero(s_dbs + tid*DIM);
// ่ฎก็ฎๆฏไธช็ตๆตๅ
ไบง็็็ฃๅบ
for(int i = tid*DIM; i < number*DIM; i += BLOCK_DIM_X*DIM){
dB(kls + i, p0s + i, p, db);
vct_add_local(s_dbs + tid*DIM, db);
}
// ่ง็บฆๆฑๅ๏ผfrom https://www.bilibili.com/video/BV15E411x7yT๏ผ
for(int step = BLOCK_DIM_X>>1; step >= 1; step>>=1){
__syncthreads(); // ๆฑๅๅๅๆญฅ
if(tid<step) vct_add_local(s_dbs + tid * DIM, s_dbs + (tid + step) * DIM);
}
if(tid == 0) vct_copy(s_dbs, shared_ret);
}
"""
cuda_code_05_QS = """
// ่ฎก็ฎ QS ๅจ p ็นไบง็็็ฃๅบ
// origin xi yi zi ๅๅซๆฏ QS ็ๅฑ้จๅๆ ็ณป
// ่ฟไธชๅฝๆฐๅช้่ฆๅ็บฟ็จ่ฎก็ฎ
__device__ __forceinline__ void magnet_at_qs(FLOAT *origin, FLOAT *xi, FLOAT *yi, FLOAT *zi,
FLOAT length, FLOAT gradient, FLOAT second_gradient, FLOAT aper_r, FLOAT *p, FLOAT* ret){
FLOAT temp1[DIM];
FLOAT temp2[DIM];
vct_sub(p, origin, temp1); // temp1 = p - origin
temp2[X] = vct_dot_v_v(xi, temp1);
temp2[Y] = vct_dot_v_v(yi, temp1);
temp2[Z] = vct_dot_v_v(zi, temp1); // ่ฟๆถ temp2 ๅฐฑๆฏๅ
จๅฑๅๆ p ็นๅจ QS ๅฑ้จๅๆ ็ณปไธญ็ๅๆ
vct_zero(ret);
if(temp2[Z]<0 || temp2[Z]>length){
return; // ๆ ็ฃๅบ
}else{
if(
temp2[X] > aper_r ||
temp2[X] < -aper_r ||
temp2[Y] > aper_r ||
temp2[Y] < -aper_r ||
#ifdef FLOAT32
sqrtf(temp2[X]*temp2[X]+temp2[Y]*temp2[Y]) > aper_r
#else
sqrt(temp2[X]*temp2[X]+temp2[Y]*temp2[Y]) > aper_r
#endif
){
return; // ๆ ็ฃๅบ
}else{
temp1[X] = gradient * temp2[Y] + second_gradient * (temp2[X] * temp2[Y]);
temp1[Y] = gradient * temp2[X] + 0.5 * second_gradient * (temp2[X] * temp2[X] - temp2[Y] * temp2[Y]);
vct_dot_a_v_ret(temp1[X], xi, ret);
vct_dot_a_v_ret(temp1[Y], yi, temp2);
vct_add_local(ret, temp2);
}
}
}
// ่ฎก็ฎ QS ๅจ p ็นไบง็็็ฃๅบ
// ไธๅฝๆฐ็ qs_data ็ๆฌ
__device__ __forceinline__ void magnet_at_qs_date(FLOAT *qs_data, FLOAT *p, FLOAT* ret){
magnet_at_qs(
qs_data, // origin
qs_data + 3, //xi
qs_data + 6, //yi
qs_data + 9, //zi
*(qs_data + 12), // len
*(qs_data + 13), // g
*(qs_data + 14), // sg
*(qs_data + 15), // aper r
p, ret
);
}
// ่ฎก็ฎๅคไธช qs ็ฃ้็็ฃๅบ๏ผๅนถ่ก่ฎก็ฎ
// ้่ฆๅๆญฅ
__device__ void magnet_at_qs_dates(FLOAT *qs_datas, int qs_number, FLOAT *p, FLOAT* shared_ret){
int tid = threadIdx.x; // 0-1023 (decide by BLOCK_DIM_X)
FLOAT db[DIM];
__shared__ FLOAT s_dbs[DIM*BLOCK_DIM_X];
vct_zero(s_dbs + tid*DIM);
// ่ฎก็ฎๆฏไธช qs ็ฃ้ไบง็็็ฃๅบ
for(int i = tid; i < qs_number; i += BLOCK_DIM_X){
magnet_at_qs_date(
qs_datas + i * QS_DATA_LENGTH, p, db
);
// printf("%d %d\\n",qs_number,tid);
// vct_print(db);
vct_add_local(s_dbs + tid*DIM, db);
}
// ่ง็บฆๆฑๅ๏ผfrom https://www.bilibili.com/video/BV15E411x7yT๏ผ
for(int step = BLOCK_DIM_X>>1; step >= 1; step>>=1){
__syncthreads(); // ๆฑๅๅๅๆญฅ
if(tid<step) vct_add_local(s_dbs + tid * DIM, s_dbs + (tid + step) * DIM);
}
if(tid == 0) vct_copy(s_dbs, shared_ret);
}
"""
cuda_code_06_magnet_at = """
// ๆดไธชๆ็บฟๅจ p ็นไบง็ๅพ็ฃๅบ๏ผๅชๆไธไธช QS ็ฃ้๏ผ๏ผ
// FLOAT *kls, FLOAT* p0s, int current_element_number ๅ CCT ็ตๆตๅ
็ธๅ
ณ
// FLOAT *qs_data ่กจ็คบ QS ็ฃ้ๆๆๅๆฐ๏ผๅๅซๆฏๅฑ้จๅๆ ็ณปๅ็นorigin,ไธไธช่ฝดxi yi zi๏ผ้ฟๅบฆ ๆขฏๅบฆ ไบ้ถๆขฏๅบฆ ๅญๅพ
// p ่กจ็คบ่ฆๆฑ็ฃๅบๅพๅ
จๅฑๅๆ ็น
// shared_ret ่กจ็คบ็ฃๅบ่ฟๅๅผ๏ผๅบ่ฏฅๆฏไธไธช __shared__๏ผ
// ๆฌๆนๆณๅทฒ็ปๅฎๆๅๆญฅไบ๏ผไธ็จ่ๅค่ฐ็จ __syncthreads();
__device__ void magnet_with_single_qs(FLOAT *kls, FLOAT* p0s, int current_element_number,
FLOAT *qs_data, FLOAT *p, FLOAT *shared_ret){
int tid = threadIdx.x;
FLOAT qs_magnet[DIM];
current_element_B(kls, p0s, current_element_number, p, shared_ret);
__syncthreads(); // ๅๅ
ๅๆญฅ
if(tid == 0){
// ่ฎก็ฎ QS ็็ฃๅบ็กฎๅฎไธ่ฝๅนถ่ก
// ไนๆฒกๆๅฟ
่ฆ่ฎฉๆฏไธช็บฟ็จ้ฝ้ๅค่ฎก็ฎไธๆฌก
// ่ฝ็ถไธคๆฌกๅๆญฅๆ็น้บป็ฆ๏ผไฝ่ณๅฐๅชๆไธไธช็บฟ็จๆๅไธ่ฟ่ก
magnet_at_qs(
qs_data, // origin
qs_data + 3, //xi
qs_data + 6, //yi
qs_data + 9, //zi
*(qs_data + 12), // len
*(qs_data + 13), // g
*(qs_data + 14), // sg
*(qs_data + 15), // aper r
p, qs_magnet
);
vct_add_local(shared_ret, qs_magnet);
}
__syncthreads(); // ๅๅ
ๅๆญฅ
}
__device__ void magnet_with_multi_qs(FLOAT *kls, FLOAT* p0s, int current_element_number,
FLOAT *qs_datas, int qs_number, FLOAT *p, FLOAT *shared_ret){
int tid = threadIdx.x;
__shared__ FLOAT s_qs_magnet[DIM];
current_element_B(kls, p0s, current_element_number, p, shared_ret);
__syncthreads(); // ๅๅ
ๅๆญฅ
// ่ฎก็ฎๅคไธช qs ็ฃ้็็ฃๅบ
magnet_at_qs_dates(qs_datas, qs_number, p, s_qs_magnet);
if(tid == 0){
vct_add_local(shared_ret, s_qs_magnet);
}
__syncthreads(); // ๅๅ
ๅๆญฅ
}
"""
cuda_code_07_runge_kutta4 = """
// runge_kutta4 ไปฃ็ ๅ cctpy ไธญ็ runge_kutta4 ไธๆจกไธๆ ท
// Y0 ๆฐ็ป้ฟๅบฆไธบ 6
// Y0 ไผๅ็ๅๅ๏ผๆขๆฏ่พๅ
ฅไนๆฏ่พๅบ
// ่ฟไธชๅฝๆฐๅ็บฟ็จ่ฟ่ก
// void (*call)(FLOAT,FLOAT*,FLOAT*) ่กจ็คบ tn Yn ๅฐ Yn+1 ็่ฝฌ็งป๏ผๅฎ้
ไฝฟ็จไธญ่ฟไผๅธฆๆดๅคๅๆฐ๏ผC ่ฏญ่จๆฒกๆ้ญๅ
๏ผ
// ๆไปฅ่ฟไธชๅฝๆฐไป
ไป
ๆฏๅๅ
__device__ void runge_kutta4(FLOAT t0, FLOAT t_end, FLOAT *Y0, void (*call)(FLOAT,FLOAT*,FLOAT*), FLOAT dt){
#ifdef FLOAT32
int number = (int)(ceilf((t_end - t0) / dt));
#else
int number = (int)(ceil((t_end - t0) / dt));
#endif
// ้ๆฐๅฎไนไบ dt
dt = (t_end - t0) / ((FLOAT)(number));
FLOAT k1[DIM*2];
FLOAT k2[DIM*2];
FLOAT k3[DIM*2];
FLOAT k4[DIM*2];
FLOAT temp[DIM*2];
for(int ignore = 0; ignore < number; ignore++){
(*call)(t0, Y0, k1);
vct6_dot_a_v_ret(dt / 2., k1, temp); // temp = dt / 2 * k1
vct6_add_local(temp, Y0); // temp = Y0 + temp
(*call)(t0 + dt / 2., temp, k2);
vct6_dot_a_v_ret(dt / 2., k2, temp); // temp = dt / 2 * k2
vct6_add_local(temp, Y0); // temp = Y0 + temp
(*call)(t0 + dt / 2., temp, k3);
vct6_dot_a_v_ret(dt, k3, temp); // temp = dt * k3
vct6_add_local(temp, Y0); // temp = Y0 + temp
(*call)(t0 + dt, temp, k4);
t0 += dt;
vct6_add(k1, k4, temp); // temp = k1 + k4
vct6_dot_a_v(2.0, k2);
vct6_dot_a_v(2.0, k3);
vct6_add(k2, k3, k1); // k1 ๅทฒ็ปๆฒก็จไบ๏ผๆไปฅ่ฃ
k1 = k2 + k3
vct6_add_local(temp, k1);
vct6_dot_a_v(dt / 6.0, temp);
vct6_add_local(Y0, temp);
// Y0 += (dt / 6) * (k1 + 2 * k2 + 2 * k3 + k4);
}
}
"""
cuda_code_08_run_only = """
// runge_kutta4_for_magnet_with_single_qs ๅฝๆฐ็จๅฐ็ๅ่ฐ
// FLOAT t0, FLOAT* Y0, FLOAT* Y1 ๅพฎๅ่ฎก็ฎ
// ๅ
ถไธญ Y = [P, V]
// FLOAT k = particle[E] / particle[RM]; // k: float = particle.e / particle.relativistic_mass
// FLOAT *kls, FLOAT* p0s, int current_element_number, ่กจ็คบๆๆ็ตๆตๅ
// FLOAT *qs_data ่กจ็คบไธไธช QS ็ฃ้
__device__ void callback_for_runge_kutta4_for_magnet_with_single_qs(
FLOAT t0, FLOAT* Y0, FLOAT* Y1, FLOAT k,
FLOAT *kls, FLOAT* p0s, int current_element_number,
FLOAT *qs_data
)
{
int tid = threadIdx.x;
__shared__ FLOAT m[DIM]; // ็ฃๅบ
magnet_with_single_qs(kls, p0s, current_element_number, qs_data, Y0, m); //Y0 ๅชไฝฟ็จๅ3้กน๏ผ่กจ็คบไฝ็ฝฎใๅทฒๅๆญฅ
if(tid == 0){ // ๅ็บฟ็จๅฎๆๅณๅฏ
// ------------ ไปฅไธไธคๆญฅ่ฎก็ฎๅ ้ๅบฆ๏ผๅๅ
ฅ Y1 + 3 ไธญ ----------
// Y0 + 3 ๆฏๅ้ๅบฆ v
// Y1 + 3 ็จไบๅญๅ ้ๅบฆ๏ผๅณ v ร m๏ผ่ฟๆฒกๆไน k = e/rm
vct_cross(Y0 + 3, m, Y1 + 3);
vct_dot_a_v(k, Y1 + 3); // ๅณ (v ร m) * a๏ผๅนถไธๆ็งฏๅญๅจ Y1 + 3 ไธญ
// ------------- ไปฅไธๆๅ้ๅบฆๅคๅถๅฐ Y1 ไธญ ------------
vct_copy(Y0 + 3, Y1); // Y0 ไธญๅไธ้กน๏ผ้ๅบฆใๅคๅถๅฐ Y1 ็ๅ3้กน
}
__syncthreads(); // ๅๅ
ๅๆญฅ
}
// ๅค qs
__device__ void callback_for_runge_kutta4_for_magnet_with_multi_qs(
FLOAT t0, FLOAT* Y0, FLOAT* Y1, FLOAT k,
FLOAT *kls, FLOAT* p0s, int current_element_number,
FLOAT *qs_datas, int qs_number
)
{
int tid = threadIdx.x;
__shared__ FLOAT m[DIM]; // ็ฃๅบ
magnet_with_multi_qs(kls, p0s, current_element_number, qs_datas, qs_number, Y0, m); //Y0 ๅชไฝฟ็จๅ3้กน๏ผ่กจ็คบไฝ็ฝฎใๅทฒๅๆญฅ
if(tid == 0){ // ๅ็บฟ็จๅฎๆๅณๅฏ
// ------------ ไปฅไธไธคๆญฅ่ฎก็ฎๅ ้ๅบฆ๏ผๅๅ
ฅ Y1 + 3 ไธญ ----------
// Y0 + 3 ๆฏๅ้ๅบฆ v
// Y1 + 3 ็จไบๅญๅ ้ๅบฆ๏ผๅณ v ร m๏ผ่ฟๆฒกๆไน k = e/rm
vct_cross(Y0 + 3, m, Y1 + 3);
vct_dot_a_v(k, Y1 + 3); // ๅณ (v ร m) * a๏ผๅนถไธๆ็งฏๅญๅจ Y1 + 3 ไธญ
// ------------- ไปฅไธๆๅ้ๅบฆๅคๅถๅฐ Y1 ไธญ ------------
vct_copy(Y0 + 3, Y1); // Y0 ไธญๅไธ้กน๏ผ้ๅบฆใๅคๅถๅฐ Y1 ็ๅ3้กน
}
__syncthreads(); // ๅๅ
ๅๆญฅ
}
// ๅไธช็ฒๅญ่ท่ธช
// runge_kutta4 ๅฝๆฐ็จไบ magnet_with_single_qs ็็ๆฌ๏ผๅณ็ฒๅญ่ท่ธช
// Y0 ๅณๆฏ [P, v] ็ฒๅญไฝ็ฝฎใ็ฒๅญ้ๅบฆ
// void (*call)(FLOAT,FLOAT*,FLOAT*,FLOAT,FLOAT*,FLOAT*,int,FLOAT*) ๆนไธบ callback_for_runge_kutta4_for_magnet_with_single_qs
// ๅ 3 ้กน FLOAT,FLOAT*,FLOAT* ๅๅฝๆฐๅๅ runge_kutta4 ๅฝๆฐไธๆ ท๏ผๅณ t0 Y0 Y1
// ็ฌฌ 4 ้กน๏ผ่กจ็คบ k = particle[E] / particle[RM]; // k: float = particle.e / particle.relativistic_mass
// ็ฌฌ 567 ้กน๏ผFLOAT*,FLOAT*,int ่กจ็คบๆๆ็ตๆตๆบ๏ผFLOAT *kls, FLOAT* p0s, int current_element_number
// ๆๅไธ้กน๏ผ่กจ็คบ qs_data
// particle ่กจ็คบ็ฒๅญ (px0, py1, pz2, vx3, vy4, vz5, rm6, e7, speed8, distance9) len = 10
/*__global__*/ __device__ void track_for_magnet_with_single_qs(FLOAT *distance, FLOAT *footstep,
FLOAT *kls, FLOAT* p0s, int *current_element_number,
FLOAT *qs_data, FLOAT *particle)
{
int tid = threadIdx.x;
FLOAT t0 = 0.0; // ๅผๅงๆถ้ดไธบ 0
FLOAT t_end = (*distance) / particle[SPEED]; // ็จๆถ = ่ท็ฆป/้็
#ifdef FLOAT32
int number = (int)(ceilf( (*distance) / (*footstep) ));
#else
int number = (int)(ceil( (*distance) / (*footstep)));
#endif
FLOAT dt = (t_end - t0) / ((FLOAT)(number));
FLOAT k = particle[E] / particle[RM]; // k: float = particle.e / particle.relativistic_mass
__shared__ FLOAT Y0[DIM*2]; // Y0 ๅณๆฏ [P, v] ็ฒๅญไฝ็ฝฎใ็ฒๅญ้ๅบฆ๏ผๅฐฑๆฏ particle ๅไธค้กน
__shared__ FLOAT k1[DIM*2];
__shared__ FLOAT k2[DIM*2];
__shared__ FLOAT k3[DIM*2];
__shared__ FLOAT k4[DIM*2];
__shared__ FLOAT temp[DIM*2];
if(tid == 0){
vct6_copy(particle, Y0); // ๅ Y0
}
for(int ignore = 0; ignore < number; ignore++){
__syncthreads(); // ๅพช็ฏๅๅๆญฅ
callback_for_runge_kutta4_for_magnet_with_single_qs(t0, Y0, k1, k, kls, p0s, *current_element_number, qs_data); // ๅทฒๅๆญฅ
if(tid == 0){
vct6_dot_a_v_ret(dt / 2., k1, temp); // temp = dt / 2 * k1
vct6_add_local(temp, Y0); // temp = Y0 + temp
}
__syncthreads();
callback_for_runge_kutta4_for_magnet_with_single_qs(t0 + dt / 2., temp, k2, k, kls, p0s, *current_element_number, qs_data);
if(tid == 0){
vct6_dot_a_v_ret(dt / 2., k2, temp); // temp = dt / 2 * k2
vct6_add_local(temp, Y0); // temp = Y0 + temp
}
__syncthreads();
callback_for_runge_kutta4_for_magnet_with_single_qs(t0 + dt / 2., temp, k3, k, kls, p0s, *current_element_number, qs_data);
if(tid == 0){
vct6_dot_a_v_ret(dt, k3, temp); // temp = dt * k3
vct6_add_local(temp, Y0); // temp = Y0 + temp
}
__syncthreads();
callback_for_runge_kutta4_for_magnet_with_single_qs(t0 + dt, temp, k4, k, kls, p0s, *current_element_number, qs_data);
t0 += dt;
if(tid == 0){
vct6_add(k1, k4, temp); // temp = k1 + k4
vct6_dot_a_v(2.0, k2);
vct6_dot_a_v(2.0, k3);
vct6_add(k2, k3, k1); // k1 ๅทฒ็ปๆฒก็จไบ๏ผๆไปฅ่ฃ
k1 = k2 + k3
vct6_add_local(temp, k1);
vct6_dot_a_v(dt / 6.0, temp);
vct6_add_local(Y0, temp);
// Y0 += (dt / 6) * (k1 + 2 * k2 + 2 * k3 + k4);
}
}
// ๅๅ particle
if(tid == 0){
vct6_copy(Y0 ,particle); // ๅ Y0
particle[DISTANCE] = *distance;
}
__syncthreads();
}
// ไธๅฝๆฐ็ global ็ๆฌ
__global__ void track_for_magnet_with_single_qs_g(FLOAT *distance, FLOAT *footstep,
FLOAT *kls, FLOAT* p0s, int *current_element_number,
FLOAT *qs_data, FLOAT *particle)
{
int tid = threadIdx.x;
FLOAT t0 = 0.0; // ๅผๅงๆถ้ดไธบ 0
FLOAT t_end = (*distance) / particle[SPEED]; // ็จๆถ = ่ท็ฆป/้็
#ifdef FLOAT32
int number = (int)(ceilf( (*distance) / (*footstep) ));
#else
int number = (int)(ceil( (*distance) / (*footstep)));
#endif
FLOAT dt = (t_end - t0) / ((FLOAT)(number));
FLOAT k = particle[E] / particle[RM]; // k: float = particle.e / particle.relativistic_mass
__shared__ FLOAT Y0[DIM*2]; // Y0 ๅณๆฏ [P, v] ็ฒๅญไฝ็ฝฎใ็ฒๅญ้ๅบฆ๏ผๅฐฑๆฏ particle ๅไธค้กน
__shared__ FLOAT k1[DIM*2];
__shared__ FLOAT k2[DIM*2];
__shared__ FLOAT k3[DIM*2];
__shared__ FLOAT k4[DIM*2];
__shared__ FLOAT temp[DIM*2];
if(tid == 0){
vct6_copy(particle, Y0); // ๅ Y0
}
for(int ignore = 0; ignore < number; ignore++){
__syncthreads(); // ๅพช็ฏๅๅๆญฅ
callback_for_runge_kutta4_for_magnet_with_single_qs(t0, Y0, k1, k, kls, p0s, *current_element_number, qs_data); // ๅทฒๅๆญฅ
if(tid == 0){
vct6_dot_a_v_ret(dt / 2., k1, temp); // temp = dt / 2 * k1
vct6_add_local(temp, Y0); // temp = Y0 + temp
}
__syncthreads();
callback_for_runge_kutta4_for_magnet_with_single_qs(t0 + dt / 2., temp, k2, k, kls, p0s, *current_element_number, qs_data);
if(tid == 0){
vct6_dot_a_v_ret(dt / 2., k2, temp); // temp = dt / 2 * k2
vct6_add_local(temp, Y0); // temp = Y0 + temp
}
__syncthreads();
callback_for_runge_kutta4_for_magnet_with_single_qs(t0 + dt / 2., temp, k3, k, kls, p0s, *current_element_number, qs_data);
if(tid == 0){
vct6_dot_a_v_ret(dt, k3, temp); // temp = dt * k3
vct6_add_local(temp, Y0); // temp = Y0 + temp
}
__syncthreads();
callback_for_runge_kutta4_for_magnet_with_single_qs(t0 + dt, temp, k4, k, kls, p0s, *current_element_number, qs_data);
t0 += dt;
if(tid == 0){
vct6_add(k1, k4, temp); // temp = k1 + k4
vct6_dot_a_v(2.0, k2);
vct6_dot_a_v(2.0, k3);
vct6_add(k2, k3, k1); // k1 ๅทฒ็ปๆฒก็จไบ๏ผๆไปฅ่ฃ
k1 = k2 + k3
vct6_add_local(temp, k1);
vct6_dot_a_v(dt / 6.0, temp);
vct6_add_local(Y0, temp);
// Y0 += (dt / 6) * (k1 + 2 * k2 + 2 * k3 + k4);
}
}
// ๅๅ particle
if(tid == 0){
vct6_copy(Y0 ,particle); // ๅ Y0
particle[DISTANCE] = *distance;
}
__syncthreads();
}
// ------------------------- ๅค qs ็ๆฌ------------------------------------------ //
// ๅไธช็ฒๅญ่ท่ธช๏ผๅค qs ็ๆฌ
/*__global__*/ __device__ void track_for_magnet_with_multi_qs(FLOAT *distance, FLOAT *footstep,
FLOAT *kls, FLOAT* p0s, int *current_element_number,
FLOAT *qs_datas, int *qs_number, FLOAT *particle)
{
int tid = threadIdx.x;
FLOAT t0 = 0.0; // ๅผๅงๆถ้ดไธบ 0
FLOAT t_end = (*distance) / particle[SPEED]; // ็จๆถ = ่ท็ฆป/้็
#ifdef FLOAT32
int number = (int)(ceilf( (*distance) / (*footstep) ));
#else
int number = (int)(ceil( (*distance) / (*footstep)));
#endif
FLOAT dt = (t_end - t0) / ((FLOAT)(number));
FLOAT k = particle[E] / particle[RM]; // k: float = particle.e / particle.relativistic_mass
__shared__ FLOAT Y0[DIM*2]; // Y0 ๅณๆฏ [P, v] ็ฒๅญไฝ็ฝฎใ็ฒๅญ้ๅบฆ๏ผๅฐฑๆฏ particle ๅไธค้กน
__shared__ FLOAT k1[DIM*2];
__shared__ FLOAT k2[DIM*2];
__shared__ FLOAT k3[DIM*2];
__shared__ FLOAT k4[DIM*2];
__shared__ FLOAT temp[DIM*2];
if(tid == 0){
vct6_copy(particle, Y0); // ๅ Y0
}
for(int ignore = 0; ignore < number; ignore++){
__syncthreads(); // ๅพช็ฏๅๅๆญฅ
callback_for_runge_kutta4_for_magnet_with_multi_qs(t0, Y0, k1, k, kls, p0s, *current_element_number, qs_datas, *qs_number); // ๅทฒๅๆญฅ
if(tid == 0){
vct6_dot_a_v_ret(dt / 2., k1, temp); // temp = dt / 2 * k1
vct6_add_local(temp, Y0); // temp = Y0 + temp
}
__syncthreads();
callback_for_runge_kutta4_for_magnet_with_multi_qs(t0 + dt / 2., temp, k2, k, kls, p0s, *current_element_number, qs_datas, *qs_number);
if(tid == 0){
vct6_dot_a_v_ret(dt / 2., k2, temp); // temp = dt / 2 * k2
vct6_add_local(temp, Y0); // temp = Y0 + temp
}
__syncthreads();
callback_for_runge_kutta4_for_magnet_with_multi_qs(t0 + dt / 2., temp, k3, k, kls, p0s, *current_element_number, qs_datas, *qs_number);
if(tid == 0){
vct6_dot_a_v_ret(dt, k3, temp); // temp = dt * k3
vct6_add_local(temp, Y0); // temp = Y0 + temp
}
__syncthreads();
callback_for_runge_kutta4_for_magnet_with_multi_qs(t0 + dt, temp, k4, k, kls, p0s, *current_element_number, qs_datas, *qs_number);
t0 += dt;
if(tid == 0){
vct6_add(k1, k4, temp); // temp = k1 + k4
vct6_dot_a_v(2.0, k2);
vct6_dot_a_v(2.0, k3);
vct6_add(k2, k3, k1); // k1 ๅทฒ็ปๆฒก็จไบ๏ผๆไปฅ่ฃ
k1 = k2 + k3
vct6_add_local(temp, k1);
vct6_dot_a_v(dt / 6.0, temp);
vct6_add_local(Y0, temp);
// Y0 += (dt / 6) * (k1 + 2 * k2 + 2 * k3 + k4);
}
}
// ๅๅ particle
if(tid == 0){
vct6_copy(Y0 ,particle); // ๅ Y0
particle[DISTANCE] = *distance;
}
__syncthreads();
}
// ไธๅฝๆฐ็ global ็ๆฌ๏ผๅค qs ็ๆฌ
__global__ void track_for_magnet_with_multi_qs_g(FLOAT *distance, FLOAT *footstep,
FLOAT *kls, FLOAT* p0s, int *current_element_number,
FLOAT *qs_datas, int *qs_number, FLOAT *particle)
{
int tid = threadIdx.x;
FLOAT t0 = 0.0; // ๅผๅงๆถ้ดไธบ 0
FLOAT t_end = (*distance) / particle[SPEED]; // ็จๆถ = ่ท็ฆป/้็
#ifdef FLOAT32
int number = (int)(ceilf( (*distance) / (*footstep) ));
#else
int number = (int)(ceil( (*distance) / (*footstep)));
#endif
FLOAT dt = (t_end - t0) / ((FLOAT)(number));
FLOAT k = particle[E] / particle[RM]; // k: float = particle.e / particle.relativistic_mass
__shared__ FLOAT Y0[DIM*2]; // Y0 ๅณๆฏ [P, v] ็ฒๅญไฝ็ฝฎใ็ฒๅญ้ๅบฆ๏ผๅฐฑๆฏ particle ๅไธค้กน
__shared__ FLOAT k1[DIM*2];
__shared__ FLOAT k2[DIM*2];
__shared__ FLOAT k3[DIM*2];
__shared__ FLOAT k4[DIM*2];
__shared__ FLOAT temp[DIM*2];
if(tid == 0){
vct6_copy(particle, Y0); // ๅ Y0
}
for(int ignore = 0; ignore < number; ignore++){
__syncthreads(); // ๅพช็ฏๅๅๆญฅ
callback_for_runge_kutta4_for_magnet_with_multi_qs(t0, Y0, k1, k, kls, p0s, *current_element_number, qs_datas, *qs_number); // ๅทฒๅๆญฅ
if(tid == 0){
vct6_dot_a_v_ret(dt / 2., k1, temp); // temp = dt / 2 * k1
vct6_add_local(temp, Y0); // temp = Y0 + temp
}
__syncthreads();
callback_for_runge_kutta4_for_magnet_with_multi_qs(t0 + dt / 2., temp, k2, k, kls, p0s, *current_element_number, qs_datas, *qs_number);
if(tid == 0){
vct6_dot_a_v_ret(dt / 2., k2, temp); // temp = dt / 2 * k2
vct6_add_local(temp, Y0); // temp = Y0 + temp
}
__syncthreads();
callback_for_runge_kutta4_for_magnet_with_multi_qs(t0 + dt / 2., temp, k3, k, kls, p0s, *current_element_number, qs_datas, *qs_number);
if(tid == 0){
vct6_dot_a_v_ret(dt, k3, temp); // temp = dt * k3
vct6_add_local(temp, Y0); // temp = Y0 + temp
}
__syncthreads();
callback_for_runge_kutta4_for_magnet_with_multi_qs(t0 + dt, temp, k4, k, kls, p0s, *current_element_number, qs_datas, *qs_number);
t0 += dt;
if(tid == 0){
vct6_add(k1, k4, temp); // temp = k1 + k4
vct6_dot_a_v(2.0, k2);
vct6_dot_a_v(2.0, k3);
vct6_add(k2, k3, k1); // k1 ๅทฒ็ปๆฒก็จไบ๏ผๆไปฅ่ฃ
k1 = k2 + k3
vct6_add_local(temp, k1);
vct6_dot_a_v(dt / 6.0, temp);
vct6_add_local(Y0, temp);
// Y0 += (dt / 6) * (k1 + 2 * k2 + 2 * k3 + k4);
}
}
// ๅๅ particle
if(tid == 0){
vct6_copy(Y0 ,particle); // ๅ Y0
particle[DISTANCE] = *distance;
}
__syncthreads();
}
"""
cuda_code_09_run_multi_particle = """
// ๅค็ฒๅญ่ท่ธช๏ผไธฒ่ก
__device__ void track_multi_particle_for_magnet_with_single_qs(FLOAT *distance, FLOAT *footstep,
FLOAT *kls, FLOAT *p0s, int *current_element_number,
FLOAT *qs_data, FLOAT *particle, int *particle_number)
{
for(int i = 0; i< (*particle_number);i++){
track_for_magnet_with_single_qs(distance, footstep, kls, p0s,
current_element_number, qs_data, particle + i * PARTICLE_DIM);
}
}
__global__ void track_multi_particle_for_magnet_with_single_qs_g(FLOAT *distance, FLOAT *footstep,
FLOAT *kls, FLOAT *p0s, int *current_element_number,
FLOAT *qs_data, FLOAT *particle, int *particle_number)
{
for(int i = 0; i< (*particle_number);i++){
track_for_magnet_with_single_qs(distance, footstep, kls, p0s,
current_element_number, qs_data, particle + i * PARTICLE_DIM);
}
}
// multi-QS version
__device__ void track_multi_particle_for_magnet_with_multi_qs(FLOAT *distance, FLOAT *footstep,
FLOAT *kls, FLOAT *p0s, int *current_element_number,
FLOAT *qs_datas, int *qs_number, FLOAT *particle, int *particle_number)
{
for(int i = 0; i< (*particle_number);i++){
track_for_magnet_with_multi_qs(distance, footstep, kls, p0s,
current_element_number, qs_datas, qs_number, particle + i * PARTICLE_DIM);
}
}
__global__ void track_multi_particle_for_magnet_with_multi_qs_g(FLOAT *distance, FLOAT *footstep,
FLOAT *kls, FLOAT *p0s, int *current_element_number,
FLOAT *qs_datas, int *qs_number, FLOAT *particle, int *particle_number)
{
for(int i = 0; i< (*particle_number);i++){
track_for_magnet_with_multi_qs(distance, footstep, kls, p0s,
current_element_number, qs_datas, qs_number, particle + i * PARTICLE_DIM);
}
}
"""
cuda_code_10_run_multi_particle_multi_beamline = """
__global__ void track_multi_particle_beamline_for_magnet_with_single_qs(FLOAT *distance, FLOAT *footstep,
FLOAT *kls, FLOAT *p0s, int *current_element_number,
FLOAT *qs_data, FLOAT *particle, int *particle_number)
{
int bid = blockIdx.x;
track_multi_particle_for_magnet_with_single_qs(
distance, // shared by all beamlines
footstep, // shared by all beamlines
kls + MAX_CURRENT_ELEMENT_NUMBER * DIM * bid,
p0s + MAX_CURRENT_ELEMENT_NUMBER * DIM * bid, // current-element parameters of this beamline group
current_element_number + bid, // number of current elements of this beamline group
qs_data + QS_DATA_LENGTH * bid, // QS parameters of this beamline group
particle + (*particle_number) * PARTICLE_DIM * bid, // particles of this beamline group
particle_number // shared by all beamlines
);
}
// multi-QS version
__global__ void track_multi_particle_beamline_for_magnet_with_multi_qs(FLOAT *distance, FLOAT *footstep,
FLOAT *kls, FLOAT *p0s, int *current_element_number,
FLOAT *qs_datas, int *qs_number, FLOAT *particle, int *particle_number)
{
int bid = blockIdx.x;
track_multi_particle_for_magnet_with_multi_qs(
distance, // shared by all beamlines
footstep, // shared by all beamlines
kls + MAX_CURRENT_ELEMENT_NUMBER * DIM * bid,
p0s + MAX_CURRENT_ELEMENT_NUMBER * DIM * bid, // current-element parameters of this beamline group
current_element_number + bid, // number of current elements of this beamline group
qs_datas + MAX_QS_DATAS_LENGTH * QS_DATA_LENGTH * bid , // QS parameters of this beamline group
qs_number + bid, // number of QS magnets of this beamline group
particle + (*particle_number) * PARTICLE_DIM * bid, // particles of this beamline group
particle_number // shared by all beamlines
);
}
"""
self.cuda_code: str = (
cuda_code_00_include +
cuda_code_01_float_type_define +
cuda_code_02_define +
cuda_code_03_vct_functions +
cuda_code_04_dB +
cuda_code_05_QS +
cuda_code_06_magnet_at +
cuda_code_07_runge_kutta4 +
cuda_code_08_run_only +
cuda_code_09_run_multi_particle +
cuda_code_10_run_multi_particle_multi_beamline
)
def print_cuda_code(self) -> None:
"""
Print the assembled CUDA code, for overall inspection.
"""
print(self.cuda_code)
def vct_length(self, p3: P3):
"""
Test helper: compute the length of a vector.
Example:
ga32 = GPU_ACCELERATOR(float_number_type=GPU_ACCELERATOR.FLOAT32)
ga64 = GPU_ACCELERATOR(float_number_type=GPU_ACCELERATOR.FLOAT64)
v = P3(1,1,1)
print(f"diff={ga32.vct_length(v) - v.length()}") # diff=-3.1087248775207854e-08
print(f"diff={ga64.vct_length(v) - v.length()}") # diff=0.0
"""
if self.cpu_mode:
return p3.length()
code = """
__global__ void vl(FLOAT* v, FLOAT* ret){
*ret = vct_len(v);
}
"""
mod = SourceModule(self.cuda_code + code)
vl = mod.get_function("vl")
ret = numpy.empty((1,), dtype=self.numpy_dtype)
vl(drv.In(p3.to_numpy_ndarry3(numpy_dtype=self.numpy_dtype)),
drv.Out(ret), grid=(1, 1, 1), block=(1, 1, 1))
return float(ret[0])
def vct_print(self, p3: P3):
"""
Test helper: print a vector.
Example:
ga32 = GPU_ACCELERATOR(float_number_type=GPU_ACCELERATOR.FLOAT32)
ga64 = GPU_ACCELERATOR(float_number_type=GPU_ACCELERATOR.FLOAT64)
v = P3(1/3, 1/6, 1/7)
ga32.vct_print(v)
ga64.vct_print(v)
>>>
0.333333343267441, 0.166666671633720, 0.142857149243355
0.333333333333333, 0.166666666666667, 0.142857142857143
"""
if self.cpu_mode:
print(p3)
return
code = """
__global__ void vp(FLOAT* v){
vct_print(v);
}
"""
mod = SourceModule(self.cuda_code + code)
vp = mod.get_function("vp")
vp(drv.In(p3.to_numpy_ndarry3(numpy_dtype=self.numpy_dtype)),
grid=(1, 1, 1), block=(1, 1, 1))
def current_element_B(self, kls: numpy.ndarray, p0s: numpy.ndarray, number: int, p: P3):
"""
Magnetic field produced at point p by the collection of current elements.
Comparison code:
cct = CCT(
local_coordinate_system=LocalCoordinateSystem.global_coordinate_system(),
big_r=1*M, small_r=30*MM, bending_angle=30, tilt_angles=[30],
winding_number=30, current=1000,
starting_point_in_ksi_phi_coordinate=P2(0,0),
end_point_in_ksi_phi_coordinate=P2(2*30*math.pi,30/180*math.pi)
)
point = P3(1,0.1,0)
print("่ฎก็ฎ็ตๆตๅ
้ๅ๏ผๅจ p ็นไบง็็็ฃๅบ")
print("CPU่ฎก็ฎ็ปๆ๏ผ",cct.magnetic_field_at(point))
# get kls and p0s, in 32-bit and 64-bit versions
kls64,p0s64=cct.global_current_elements_and_elementary_current_positions(numpy.float64)
kls32,p0s32=cct.global_current_elements_and_elementary_current_positions(numpy.float32)
# number of current elements
current_element_number = cct.total_disperse_number
print("GPU32่ฎก็ฎ็ปๆ๏ผ",ga32.current_element_B(kls32,p0s32,current_element_number,point))
print("GPU64่ฎก็ฎ็ปๆ๏ผ",ga64.current_element_B(kls64,p0s64,current_element_number,point))
"""
if self.cpu_mode:
raise NotImplementedError
code = """
__global__ void ce(FLOAT *kls, FLOAT *p0s, int* number, FLOAT *p, FLOAT *ret){
__shared__ FLOAT s_ret[DIM];
int tid = threadIdx.x;
current_element_B(kls,p0s,*number,p,s_ret);
if(tid == 0) vct_copy(s_ret, ret);
}
"""
mod = SourceModule(self.cuda_code + code)
ce = mod.get_function("ce")
ret = numpy.empty((3,), dtype=self.numpy_dtype)
ce(drv.In(kls.astype(self.numpy_dtype)),
drv.In(p0s.astype(self.numpy_dtype)),
drv.In(numpy.array([number], dtype=numpy.int32)),
drv.In(p.to_numpy_ndarry3(numpy_dtype=self.numpy_dtype)),
drv.Out(ret),
grid=(1, 1, 1), block=(self.block_dim_x, 1, 1))
return P3.from_numpy_ndarry(ret)
def magnet_at_qs(self, qs_data, p3: P3):
"""
Magnetic field produced by the QS magnet at point p.
p is a point in the global coordinate system.
Test code:
qs = QS(
local_coordinate_system=LocalCoordinateSystem(
location=P3(1,0,0),
x_direction=-P3.x_direct(),
z_direction=P3.y_direct()
),
length=0.27,
gradient=5,
second_gradient=20,
aperture_radius=100*MM
)
point = P3(0.95,0,0)
# field at the entrance center position
print("magnet_at_qs ่ฎก็ฎ qs ็ฃ้๏ผๅจ p ็นไบง็็็ฃๅบ")
print("CPU่ฎก็ฎ็ปๆ๏ผ",qs.magnetic_field_at(point))
print("GPU32่ฎก็ฎ็ปๆ๏ผ",ga32.magnet_at_qs(qs.to_numpy_array(numpy.float32),point))
print("GPU64่ฎก็ฎ็ปๆ๏ผ",ga64.magnet_at_qs(qs.to_numpy_array(numpy.float64),point))
# CPU่ฎก็ฎ็ปๆ๏ผ (0.0, 0.0, 0.27500000000000024)
# GPU32่ฎก็ฎ็ปๆ๏ผ (0.0, 0.0, 0.27500006556510925)
# GPU64่ฎก็ฎ็ปๆ๏ผ (0.0, 0.0, 0.27500000000000024)
"""
if self.cpu_mode:
raise NotImplementedError
code = """
__global__ void mq(FLOAT *qs_data, FLOAT *p, FLOAT *ret){
magnet_at_qs(
qs_data, // origin
qs_data + 3, //xi
qs_data + 6, //yi
qs_data + 9, //zi
*(qs_data + 12), // len
*(qs_data + 13), // g
*(qs_data + 14), // sg
*(qs_data + 15), // aper r
p, ret
);
}
"""
mod = SourceModule(self.cuda_code + code)
mq = mod.get_function("mq")
ret = numpy.empty((3,), dtype=self.numpy_dtype)
mq(drv.In(qs_data.astype(self.numpy_dtype)),
drv.In(p3.to_numpy_ndarry3(numpy_dtype=self.numpy_dtype)),
drv.Out(ret),
grid=(1, 1, 1), block=(1, 1, 1)
)
return P3.from_numpy_ndarry(ret)
def magnet_at_qs_date(self, qs_data, p3: P3):
"""
Magnetic field produced by the QS magnet at point p.
qs_data variant.
Test code:
qs = QS(
local_coordinate_system=LocalCoordinateSystem(
location=P3(1,0,0),
x_direction=-P3.x_direct(),
z_direction=P3.y_direct()
),
length=0.27,
gradient=5,
second_gradient=20,
aperture_radius=100*MM
)
point = P3(0.95,0,0)
print("magnet_at_qs_date ่ฎก็ฎ qs ็ฃ้๏ผๅจ p ็นไบง็็็ฃๅบ")
print("CPU่ฎก็ฎ็ปๆ๏ผ",qs.magnetic_field_at(point))
print("GPU32่ฎก็ฎ็ปๆ๏ผ",ga32.magnet_at_qs_date(qs.to_numpy_array(numpy.float32),point))
print("GPU64่ฎก็ฎ็ปๆ๏ผ",ga64.magnet_at_qs_date(qs.to_numpy_array(numpy.float64),point))
since 2021-05-06
"""
if self.cpu_mode:
raise NotImplementedError
code = """
__global__ void mq_data(FLOAT *qs_data, FLOAT *p, FLOAT *ret){
magnet_at_qs_date(qs_data, p ,ret);
}
"""
mod = SourceModule(self.cuda_code + code)
mq = mod.get_function("mq_data")
ret = numpy.empty((3,), dtype=self.numpy_dtype)
mq(drv.In(qs_data.astype(self.numpy_dtype)),
drv.In(p3.to_numpy_ndarry3(numpy_dtype=self.numpy_dtype)),
drv.Out(ret),
grid=(1, 1, 1), block=(1, 1, 1)
)
return P3.from_numpy_ndarry(ret)
def magnet_at_qs_dates(self, qss: List[QS], p: P3) -> P3:
"""
Magnetic field produced at point p by multiple QS magnets.
Verified successfully on 2021-05-06.
"""
code = """
__global__ void qss(FLOAT *qs_data, int* qs_number, FLOAT *p, FLOAT *ret){
// a __shared__ buffer must be used to receive the result here
__shared__ FLOAT s_ret[DIM];
int tid = threadIdx.x;
magnet_at_qs_dates(qs_data, *qs_number, p, s_ret);
if(tid == 0) vct_copy(s_ret, ret);
}
"""
if self.cpu_mode:
m = P3.zeros()
for qs in qss:
m += qs.magnetic_field_at(p)
return m
mod = SourceModule(self.cuda_code + code)
mq = mod.get_function("qss")
ret = numpy.empty((3,), dtype=self.numpy_dtype)
qs_datas: List[numpy.ndarray] = []
for qs in qss:
qs_datas.append(qs.to_numpy_array(self.numpy_dtype))
qs_datas: numpy.ndarray = numpy.concatenate(tuple(qs_datas))
mq(drv.In(qs_datas.astype(self.numpy_dtype)),
drv.In(numpy.array([len(qss)], dtype=numpy.int32)),
drv.In(p.to_numpy_ndarry3(numpy_dtype=self.numpy_dtype)),
drv.Out(ret),
grid=(1, 1, 1), block=(self.block_dim_x, 1, 1)
)
return P3.from_numpy_ndarry(ret)
def magnet_at_beamline_with_single_qs(self, bl: Beamline, p: P3) -> P3:
"""
Combined test of CCT and QS magnets.
Test code:
bl = (
Beamline.set_start_point(P2.origin())
.first_drift(direct=P2.x_direct(),length=1)
.append_qs(length=0.27,gradient=5,second_gradient=20,aperture_radius=100*MM)
.append_drift(length=1)
.append_dipole_cct(
big_r=1,small_r_inner=100*MM,small_r_outer=120*MM,bending_angle=45,
tilt_angles=[30],winding_number=60,current=10000
).append_drift(length=1)
)
print(" magnet_at_beamline_with_single_qs ๅไธ qs ็ beamline ็ฃๅบ่ฎก็ฎ")
point1 = P3(1.2,50*MM,0)
point2 = P3(2.3,50*MM,0)
print("CPU่ฎก็ฎ็ปๆ1๏ผ",bl.magnetic_field_at(point1))
print("GPU32่ฎก็ฎ็ปๆ1๏ผ",ga32.magnet_at_beamline_with_single_qs(bl,point1))
print("GPU64่ฎก็ฎ็ปๆ1๏ผ",ga64.magnet_at_beamline_with_single_qs(bl,point1))
print("CPU่ฎก็ฎ็ปๆ2๏ผ",bl.magnetic_field_at(point2))
print("GPU32่ฎก็ฎ็ปๆ2๏ผ",ga32.magnet_at_beamline_with_single_qs(bl,point2))
print("GPU64่ฎก็ฎ็ปๆ2๏ผ",ga64.magnet_at_beamline_with_single_qs(bl,point2))
# GPU32่ฎก็ฎ็ปๆ1๏ผ (0.0006631895666942, -9.404712182004005e-05, 0.2723771035671234)
# GPU64่ฎก็ฎ็ปๆ1๏ผ (0.000663189549448528, -9.404708930037921e-05, 0.2723771039989055)
# CPU่ฎก็ฎ็ปๆ2๏ผ (-0.021273493843574243, 0.048440921145815385, 1.0980479752081713)
# GPU32่ฎก็ฎ็ปๆ2๏ผ (-0.021273484453558922, 0.04844103008508682, 1.0980477333068848)
# GPU64่ฎก็ฎ็ปๆ2๏ผ (-0.021273493843573958, 0.04844092114581488, 1.0980479752081695)
"""
if self.cpu_mode:
return bl.magnetic_field_at(p)
code = """
__global__ void ma(FLOAT *kls, FLOAT* p0s, int* current_element_number,
FLOAT *qs_data, FLOAT *p, FLOAT *ret){
int tid = threadIdx.x;
__shared__ FLOAT shared_ret[DIM];
magnet_with_single_qs(kls, p0s, *current_element_number, qs_data, p, shared_ret);
if(tid == 0) vct_copy(shared_ret, ret);
}
"""
mod = SourceModule(self.cuda_code + code)
ma = mod.get_function('ma')
ret = numpy.empty((3,), dtype=self.numpy_dtype)
kls_list: List[numpy.ndarray] = [] # kls of every CCT coil
p0s_list: List[numpy.ndarray] = [] # p0s of every CCT coil
current_element_number = 0
qs_data = None # there is only one QS
for m in bl.magnets:
if isinstance(m, CCT):
cct = CCT.as_cct(m)
kls, p0s = cct.global_current_elements_and_elementary_current_positions(
numpy_dtype=self.numpy_dtype)
current_element_number += cct.total_disperse_number
kls_list.append(kls) # note: kls and p0s are flat 1D arrays; every three entries form one 3D vector
p0s_list.append(p0s)
elif isinstance(m, QS):
qs = QS.as_qs(m)
qs_data = qs.to_numpy_array(self.numpy_dtype)
else:
raise ValueError(f"magnet {m} cannot be GPU-accelerated")
kls_all = numpy.concatenate(tuple(kls_list))
p0s_all = numpy.concatenate(tuple(p0s_list))
ma(
drv.In(kls_all),
drv.In(p0s_all),
drv.In(numpy.array([current_element_number], dtype=numpy.int32)),
drv.In(qs_data),
drv.In(p.to_numpy_ndarry3(numpy_dtype=self.numpy_dtype)),
drv.Out(ret),
grid=(1, 1, 1), block=(self.block_dim_x, 1, 1)
)
return P3.from_numpy_ndarry(ret)
def track_one_particle_with_single_qs(self, bl: Beamline, p: RunningParticle, distance: float, footstep: float):
"""
Particle tracking: current elements + a single QS.
Test code:
# ๅๅปบ beamline ๅชๆไธไธช qs
bl = HUST_SC_GANTRY().create_second_bending_part_beamline()
# ๅๅปบ็ฒๅญ๏ผ็ๆณ็ฒๅญ๏ผ
particle = ParticleFactory.create_proton_along(bl,kinetic_MeV=215)
# ๅคๅถไธไปฝ
particle_cpu = particle.copy()
particle_gpu32 = particle.copy()
particle_gpu64 = particle.copy()
# ่ฟ่ก
footstep=100*MM
ParticleRunner.run_only(particle_cpu,bl,bl.get_length(),footstep=footstep)
ga32.track_one_particle_with_single_qs(bl,particle_gpu32,bl.get_length(),footstep=footstep)
ga64.track_one_particle_with_single_qs(bl,particle_gpu64,bl.get_length(),footstep=footstep)
print("CPU่ฎก็ฎ็ปๆ: ",particle_cpu.detailed_info())
print("GPU32่ฎก็ฎ็ปๆ: ",particle_gpu32.detailed_info())
print("GPU64่ฎก็ฎ็ปๆ: ",particle_gpu64.detailed_info())
print("GPU32่ฎก็ฎๅCPUๅฏนๆฏ: ",(particle_cpu-particle_gpu32).detailed_info())
print("GPU64่ฎก็ฎๅCPUๅฏนๆฏ: ",(particle_cpu-particle_gpu64).detailed_info())
# CPU่ฎก็ฎ็ปๆ: Particle[p=(7.409509849267735, -0.028282989447753218, 5.0076184754665586e-05), v=(1809891.9615852616, -174308430.5414393, -330480.4098605619)], rm=2.0558942080656965e-27, e=1.6021766208e-19, speed=174317774.94179922, distance=7.104727865682728]
# GPU32่ฎก็ฎ็ปๆ: Particle[p=(7.409510612487793, -0.02828289568424225, 5.0118236686103046e-05), v=(1809917.875, -174308416.0, -330476.3125)], rm=2.0558942007434142e-27, e=1.602176597458587e-19, speed=174317776.0, distance=7.104727745056152]
# GPU64่ฎก็ฎ็ปๆ: Particle[p=(7.409509849267735, -0.028282989447752843, 5.0076184754525616e-05), v=(1809891.961585234, -174308430.54143927, -330480.409860578)], rm=2.0558942080656965e-27, e=1.6021766208e-19, speed=174317774.94179922, distance=7.104727865682728]
# GPU32่ฎก็ฎๅCPUๅฏนๆฏ: Particle[p=(-7.632200578200354e-07, -9.376351096934687e-08, -4.2051931437459694e-08), v=(-25.91341473837383, -14.541439294815063, -4.097360561892856)], rm=7.322282306994799e-36, e=2.3341413164924317e-27, speed=-1.0582007765769958, distance=1.2062657539502197e-07]
# GPU64่ฎก็ฎๅCPUๅฏนๆฏ: Particle[p=(0.0, -3.7470027081099033e-16, 1.3997050046787862e-16), v=(2.7706846594810486e-08, -2.9802322387695312e-08, 1.6123522073030472e-08)], rm=0.0, e=0.0, speed=0.0, distance=0.0]
"""
if self.cpu_mode:
ParticleRunner.run_only(
p = p,
m = bl,
length = distance,
footstep = footstep
)
return
mod = SourceModule(self.cuda_code)
track = mod.get_function('track_for_magnet_with_single_qs_g')
particle: numpy.ndarray = p.to_numpy_array_data(
numpy_dtype=self.numpy_dtype)
kls_list: List[numpy.ndarray] = [] # kls of every CCT coil
p0s_list: List[numpy.ndarray] = [] # p0s of every CCT coil
current_element_number = 0
qs_data = None
for m in bl.magnets:
if isinstance(m, CCT):
cct = CCT.as_cct(m)
kls, p0s = cct.global_current_elements_and_elementary_current_positions(
numpy_dtype=self.numpy_dtype)
current_element_number += cct.total_disperse_number
kls_list.append(kls) # note: kls and p0s are flat 1D arrays; every three entries form one 3D vector
p0s_list.append(p0s)
elif isinstance(m, QS):
qs = QS.as_qs(m)
qs_data = qs.to_numpy_array(numpy_dtype=self.numpy_dtype)
else:
raise ValueError(f"magnet {m} cannot be GPU-accelerated")
kls_all = numpy.concatenate(tuple(kls_list))
p0s_all = numpy.concatenate(tuple(p0s_list))
track(
drv.In(numpy.array([distance], dtype=self.numpy_dtype)),
drv.In(numpy.array([footstep], dtype=self.numpy_dtype)),
drv.In(kls_all),
drv.In(p0s_all),
drv.In(numpy.array([current_element_number], dtype=numpy.int32)),
drv.In(qs_data),
drv.InOut(particle),
grid=(1, 1, 1), block=(self.block_dim_x, 1, 1)
)
p.populate(RunningParticle.from_numpy_array_data(particle))
def track_one_particle_with_multi_qs(self, bl: Beamline, p: RunningParticle, distance: float, footstep: float):
"""
Particle tracking: current elements + multiple QS magnets.
Test code:
# ๅๅปบ beamline 3ไธช qs
bl = HUST_SC_GANTRY().create_first_bending_part_beamline()
# ๅๅปบ็ฒๅญ๏ผ็ๆณ็ฒๅญ๏ผ
particle = ParticleFactory.create_proton_along(bl,kinetic_MeV=215)
# ๅคๅถไธไปฝ
particle_cpu = particle.copy()
particle_gpu32 = particle.copy()
particle_gpu64 = particle.copy()
# ่ฟ่ก
footstep=100*MM
ParticleRunner.run_only(particle_cpu,bl,bl.get_length(),footstep=footstep)
ga32.track_one_particle_with_multi_qs(bl,particle_gpu32,bl.get_length(),footstep=footstep)
ga64.track_one_particle_with_multi_qs(bl,particle_gpu64,bl.get_length(),footstep=footstep)
print("track_one_particle_with_multi_qs 2 ")
print("CPU่ฎก็ฎ็ปๆ: ",particle_cpu.detailed_info())
print("GPU32่ฎก็ฎ็ปๆ: ",particle_gpu32.detailed_info())
print("GPU64่ฎก็ฎ็ปๆ: ",particle_gpu64.detailed_info())
print("GPU32่ฎก็ฎๅCPUๅฏนๆฏ: ",(particle_cpu-particle_gpu32).detailed_info())
print("GPU64่ฎก็ฎๅCPUๅฏนๆฏ: ",(particle_cpu-particle_gpu64).detailed_info())
# track_one_particle_with_multi_qs 2
# CPU่ฎก็ฎ็ปๆ: Particle[p=(3.687315812380205, 1.548315945537494, -0.003352065021200123), v=(119474899.55705348, 126923892.97270872, -352485.58348381834)], rm=2.0558942080656965e-27, e=1.6021766208e-19, speed=174317774.94179922, distance=4.149802255227576]
# GPU32่ฎก็ฎ็ปๆ: Particle[p=(3.6873157024383545, 1.5483157634735107, -0.0033521109726279974), v=(119474888.0, 126923888.0, -352490.09375)], rm=2.0558942007434142e-27, e=1.602176597458587e-19, speed=174317776.0, distance=4.149802207946777]
# GPU64่ฎก็ฎ็ปๆ: Particle[p=(3.687315812380205, 1.5483159455374929, -0.0033520650212005175), v=(119474899.55705343, 126923892.97270869, -352485.58348386886)], rm=2.0558942080656965e-27, e=1.6021766208e-19, speed=174317774.94179922, distance=4.149802255227576]
# GPU32่ฎก็ฎๅCPUๅฏนๆฏ: Particle[p=(1.0994185029034043e-07, 1.8206398322284656e-07, 4.595142787458539e-08), v=(11.557053476572037, 4.9727087169885635, 4.51026618166361)], rm=7.322282306994799e-36, e=2.3341413164924317e-27, speed=-1.0582007765769958, distance=4.728079883165037e-08]
# GPU64่ฎก็ฎๅCPUๅฏนๆฏ: Particle[p=(0.0, 1.1102230246251565e-15, 3.946495907847236e-16), v=(4.470348358154297e-08, 2.9802322387695312e-08, 5.052424967288971e-08)], rm=0.0, e=0.0, speed=0.0, distance=0.0]
"""
if self.cpu_mode:
ParticleRunner.run_only(
p = p,
m = bl,
length = distance,
footstep = footstep
)
return
mod = SourceModule(self.cuda_code)
track = mod.get_function('track_for_magnet_with_multi_qs_g')
particle: numpy.ndarray = p.to_numpy_array_data(
numpy_dtype=self.numpy_dtype)
kls_list: List[numpy.ndarray] = [] # kls of every CCT coil
p0s_list: List[numpy.ndarray] = [] # p0s of every CCT coil
current_element_number = 0 # number of current elements
qs_number = 0 # number of QS magnets
qs_datas: List[numpy.ndarray] = [] # qs_data arrays
for m in bl.magnets:
if isinstance(m, CCT):
cct = CCT.as_cct(m)
kls, p0s = cct.global_current_elements_and_elementary_current_positions(
numpy_dtype=self.numpy_dtype)
current_element_number += cct.total_disperse_number
kls_list.append(kls) # note: kls and p0s are flat 1D arrays; every three entries form one 3D vector
p0s_list.append(p0s)
elif isinstance(m, QS):
qs = QS.as_qs(m)
qs_datas.append(qs.to_numpy_array(
numpy_dtype=self.numpy_dtype))
qs_number += 1
else:
raise ValueError(f"magnet {m} cannot be GPU-accelerated")
kls_all = numpy.concatenate(tuple(kls_list))
p0s_all = numpy.concatenate(tuple(p0s_list))
qs_datas_con = numpy.concatenate(tuple(qs_datas))
track(
drv.In(numpy.array([distance], dtype=self.numpy_dtype)),
drv.In(numpy.array([footstep], dtype=self.numpy_dtype)),
drv.In(kls_all),
drv.In(p0s_all),
drv.In(numpy.array([current_element_number], dtype=numpy.int32)),
drv.In(qs_datas_con),
drv.In(numpy.array([qs_number], dtype=numpy.int32)),
drv.InOut(particle),
grid=(1, 1, 1), block=(self.block_dim_x, 1, 1)
)
p.populate(RunningParticle.from_numpy_array_data(particle))
def track_multi_particle_for_magnet_with_single_qs(self, bl: Beamline, ps: List[RunningParticle], distance: float, footstep: float):
"""
Multi-particle tracking: current elements + a single QS.
Test code:
# track_multi_particle_for_magnet_with_single_qs_g  multi-particle tracking, current elements + a single QS
# ๅๅปบ beamline ๅชๆไธไธช qs
bl = HUST_SC_GANTRY().create_second_bending_part_beamline()
# ๅๅปบ่ตท็นๅ็ป็น็็ๆณ็ฒๅญ
ip_start = ParticleFactory.create_proton_along(bl,kinetic_MeV=215,s=0)
ip_end = ParticleFactory.create_proton_along(bl,kinetic_MeV=215,s=bl.get_length())
# ๅๅปบ็ธๆคญๅๅๅธ็ฒๅญ
pps = PhaseSpaceParticle.phase_space_particles_along_positive_ellipse_in_xxp_plane(
xMax=3.5*MM,xpMax=7*MRAD,delta=0.0,number=6
)
# convert the phase-ellipse particles into real particles
ps = ParticleFactory.create_from_phase_space_particles(
ideal_particle=ip_start,
coordinate_system=ip_start.get_natural_coordinate_system(),
phase_space_particles=pps
)
# ๅคๅถไธๅ
ps_cpu = [p.copy() for p in ps]
ps_gpu32 = [p.copy() for p in ps]
ps_gpu64 = [p.copy() for p in ps]
# ่ฟ่ก
footstep=100*MM
ParticleRunner.run_only(ps_cpu,bl,bl.get_length(),footstep)
ga32.track_multi_particle_for_magnet_with_single_qs(bl,ps_gpu32,bl.get_length(),footstep)
ga64.track_multi_particle_for_magnet_with_single_qs(bl,ps_gpu64,bl.get_length(),footstep)
# ่ฝฌๅ็ธ็ฉบ้ด
pps_end_cpu = PhaseSpaceParticle.create_from_running_particles(ip_end,ip_end.get_natural_coordinate_system(),ps_cpu)
pps_end_gpu32 = PhaseSpaceParticle.create_from_running_particles(ip_end,ip_end.get_natural_coordinate_system(),ps_gpu32)
pps_end_gpu64 = PhaseSpaceParticle.create_from_running_particles(ip_end,ip_end.get_natural_coordinate_system(),ps_gpu64)
# ็ปๅพ
Plot2.plot_p2s(PhaseSpaceParticle.phase_space_particles_project_to_xxp_plane(pps_end_cpu,True),describe='rx')
Plot2.plot_p2s(PhaseSpaceParticle.phase_space_particles_project_to_xxp_plane(pps_end_gpu32,True),describe='k|')
Plot2.plot_p2s(PhaseSpaceParticle.phase_space_particles_project_to_xxp_plane(pps_end_gpu64,True),describe='b_')
Plot2.legend("CPU","GPU32","GPU64",font_size=32)
Plot2.info(x_label='x/mm',y_label="xp/mr",title="xxp-plane",font_size=32)
Plot2.equal()
Plot2.show()
"""
if self.cpu_mode:
ParticleRunner.run_only(
p = ps,
m = bl,
length = distance,
footstep = footstep
)
return
mod = SourceModule(self.cuda_code)
track = mod.get_function(
'track_multi_particle_for_magnet_with_single_qs_g')
kls_list: List[numpy.ndarray] = [] # kls of every CCT coil
p0s_list: List[numpy.ndarray] = [] # p0s of every CCT coil
particle_list: List[numpy.ndarray] = [
p.to_numpy_array_data(numpy_dtype=self.numpy_dtype) for p in ps]
current_element_number = 0
qs_data = None
for m in bl.magnets:
if isinstance(m, CCT):
cct = CCT.as_cct(m)
kls, p0s = cct.global_current_elements_and_elementary_current_positions(
numpy_dtype=self.numpy_dtype)
current_element_number += cct.total_disperse_number
kls_list.append(kls) # note: kls and p0s are flat 1D arrays; every three entries form one 3D vector
p0s_list.append(p0s)
elif isinstance(m, QS):
qs = QS.as_qs(m)
qs_data = qs.to_numpy_array(numpy_dtype=self.numpy_dtype)
else:
raise ValueError(f"magnet {m} cannot be GPU-accelerated")
kls_all = numpy.concatenate(tuple(kls_list))
p0s_all = numpy.concatenate(tuple(p0s_list))
particles_all = numpy.concatenate(tuple(particle_list))
track(
drv.In(numpy.array([distance], dtype=self.numpy_dtype)),
drv.In(numpy.array([footstep], dtype=self.numpy_dtype)),
drv.In(kls_all),
drv.In(p0s_all),
drv.In(numpy.array([current_element_number], dtype=numpy.int32)),
drv.In(qs_data),
drv.InOut(particles_all),
drv.In(numpy.array([len(ps)], dtype=numpy.int32)),
grid=(1, 1, 1), block=(self.block_dim_x, 1, 1)
)
particles_all = particles_all.reshape((-1, 10))
for i in range(len(ps)):
ps[i].populate(
RunningParticle.from_numpy_array_data(particles_all[i]))
def track_multi_particle_for_magnet_with_multi_qs(
self, bl: Beamline, ps: List[RunningParticle], distance: float, footstep: float):
"""
Multi-particle tracking: current elements + multiple QS magnets.
Test code:
# ----- track_multi_particle_for_magnet_with_multi_qs -----
# ๅๅปบ beamline 3ไธช qs
bl = HUST_SC_GANTRY().create_first_bending_part_beamline()
# ๅๅปบ่ตท็นๅ็ป็น็็ๆณ็ฒๅญ
ip_start = ParticleFactory.create_proton_along(bl,kinetic_MeV=215,s=0)
ip_end = ParticleFactory.create_proton_along(bl,kinetic_MeV=215,s=bl.get_length())
# ๅๅปบ็ธๆคญๅๅๅธ็ฒๅญ
pps = PhaseSpaceParticle.phase_space_particles_along_positive_ellipse_in_xxp_plane(
xMax=3.5*MM,xpMax=7*MRAD,delta=0.0,number=6
)
# convert the phase-ellipse particles into real particles
ps = ParticleFactory.create_from_phase_space_particles(
ideal_particle=ip_start,
coordinate_system=ip_start.get_natural_coordinate_system(),
phase_space_particles=pps
)
# ๅคๅถไธๅ
ps_cpu = [p.copy() for p in ps]
ps_gpu32 = [p.copy() for p in ps]
ps_gpu64 = [p.copy() for p in ps]
# ่ฟ่ก
footstep=100*MM
ParticleRunner.run_only(ps_cpu,bl,bl.get_length(),footstep)
ga32.track_multi_particle_for_magnet_with_multi_qs(bl,ps_gpu32,bl.get_length(),footstep)
ga64.track_multi_particle_for_magnet_with_multi_qs(bl,ps_gpu64,bl.get_length(),footstep)
# ่ฝฌๅ็ธ็ฉบ้ด
pps_end_cpu = PhaseSpaceParticle.create_from_running_particles(ip_end,ip_end.get_natural_coordinate_system(),ps_cpu)
pps_end_gpu32 = PhaseSpaceParticle.create_from_running_particles(ip_end,ip_end.get_natural_coordinate_system(),ps_gpu32)
pps_end_gpu64 = PhaseSpaceParticle.create_from_running_particles(ip_end,ip_end.get_natural_coordinate_system(),ps_gpu64)
# ็ปๅพ
Plot2.plot_p2s(PhaseSpaceParticle.phase_space_particles_project_to_xxp_plane(pps_end_cpu,True),describe='rx')
Plot2.plot_p2s(PhaseSpaceParticle.phase_space_particles_project_to_xxp_plane(pps_end_gpu32,True),describe='k|')
Plot2.plot_p2s(PhaseSpaceParticle.phase_space_particles_project_to_xxp_plane(pps_end_gpu64,True),describe='b_')
Plot2.legend("CPU","GPU32","GPU64",font_size=32)
Plot2.info(x_label='x/mm',y_label="xp/mr",title="xxp-plane",font_size=32)
Plot2.equal()
Plot2.show()
"""
if self.cpu_mode:
ParticleRunner.run_only(
p = ps,
m = bl,
length = distance,
footstep = footstep
)
return
mod = SourceModule(self.cuda_code)
track = mod.get_function(
'track_multi_particle_for_magnet_with_multi_qs_g')
kls_list: List[numpy.ndarray] = [] # kls of every CCT coil
p0s_list: List[numpy.ndarray] = [] # p0s of every CCT coil
particle_list: List[numpy.ndarray] = [
p.to_numpy_array_data(numpy_dtype=self.numpy_dtype) for p in ps]
current_element_number = 0
qs_datas: List[numpy.ndarray] = []
qs_number = 0
for m in bl.magnets:
if isinstance(m, CCT):
cct = CCT.as_cct(m)
kls, p0s = cct.global_current_elements_and_elementary_current_positions(
numpy_dtype=self.numpy_dtype)
current_element_number += cct.total_disperse_number
kls_list.append(kls) # note: kls and p0s are flat 1D arrays; every three entries form one 3D vector
p0s_list.append(p0s)
elif isinstance(m, QS):
qs = QS.as_qs(m)
qs_datas.append(qs.to_numpy_array(
numpy_dtype=self.numpy_dtype))
qs_number += 1
else:
raise ValueError(f"magnet {m} cannot be GPU-accelerated")
kls_all = numpy.concatenate(tuple(kls_list))
p0s_all = numpy.concatenate(tuple(p0s_list))
particles_all = numpy.concatenate(tuple(particle_list))
qs_datas_con = numpy.concatenate(tuple(qs_datas))
track(
drv.In(numpy.array([distance], dtype=self.numpy_dtype)),
drv.In(numpy.array([footstep], dtype=self.numpy_dtype)),
drv.In(kls_all),
drv.In(p0s_all),
drv.In(numpy.array([current_element_number], dtype=numpy.int32)),
drv.In(qs_datas_con),
drv.In(numpy.array([qs_number], dtype=numpy.int32)),
drv.InOut(particles_all),
drv.In(numpy.array([len(ps)], dtype=numpy.int32)),
grid=(1, 1, 1), block=(self.block_dim_x, 1, 1)
)
particles_all = particles_all.reshape((-1, 10))
for i in range(len(ps)):
ps[i].populate(
RunningParticle.from_numpy_array_data(particles_all[i]))
def track_multi_particle_beamline_for_magnet_with_single_qs(
self, bls: List[Beamline], ps: List[RunningParticle],
distance: float, footstep: float) -> List[List[RunningParticle]]:
"""
Multi-particle, multi-beamline tracking: current elements + a single QS.
"""
if self.cpu_mode:
ret:List[List[RunningParticle]] = []
for bl in bls:
cps = [p.copy() for p in ps]
ParticleRunner.run_only(
p = cps,
m = bl,
length = distance,
footstep = footstep
)
ret.append(cps)
return ret
mod = SourceModule(self.cuda_code)
track = mod.get_function(
'track_multi_particle_beamline_for_magnet_with_single_qs')
# data of all beamlines
kls_all_all_beamline: List[numpy.ndarray] = []
p0s_all_all_beamline: List[numpy.ndarray] = []
qs_data_all_beamline: List[numpy.ndarray] = []
particles_all_all_beamline: List[numpy.ndarray] = []
current_element_numbers: List[int] = []
for bl in bls:
kls_list: List[numpy.ndarray] = []
p0s_list: List[numpy.ndarray] = []
particle_list: List[numpy.ndarray] = [
p.to_numpy_array_data(numpy_dtype=self.numpy_dtype) for p in ps]
current_element_number = 0
qs_data = None
for m in bl.magnets:
if isinstance(m, CCT):
cct = CCT.as_cct(m)
kls, p0s = cct.global_current_elements_and_elementary_current_positions(
numpy_dtype=self.numpy_dtype)
current_element_number += cct.total_disperse_number
kls_list.append(kls)
p0s_list.append(p0s)
elif isinstance(m, QS):
qs = QS.as_qs(m)
qs_data = qs.to_numpy_array(numpy_dtype=self.numpy_dtype)
else:
raise ValueError(f"{m} ๆ ๆณ็จ GOU ๅ ้")
kls_all = numpy.concatenate(tuple(kls_list)) # join all of them
p0s_all = numpy.concatenate(tuple(p0s_list))
# why pad here?
# answer: because the kls/p0s slice of one gantry is sized by max_current_element_number*3,
# not by len(kls_all)
kls_all_pad = numpy.zeros(
(self.max_current_element_number*3,), dtype=self.numpy_dtype)
p0s_all_pad = numpy.zeros(
(self.max_current_element_number*3,), dtype=self.numpy_dtype)
kls_all_pad[0:len(kls_all)] = kls_all
p0s_all_pad[0:len(p0s_all)] = p0s_all
particles_all = numpy.concatenate(tuple(particle_list))
kls_all_all_beamline.append(kls_all_pad)
p0s_all_all_beamline.append(p0s_all_pad)
qs_data_all_beamline.append(qs_data)
particles_all_all_beamline.append(particles_all)
current_element_numbers.append(current_element_number)
kls_all_all_beamline = numpy.concatenate(tuple(kls_all_all_beamline))
p0s_all_all_beamline = numpy.concatenate(tuple(p0s_all_all_beamline))
qs_data_all_beamline = numpy.concatenate(tuple(qs_data_all_beamline))
particles_all_all_beamline = numpy.concatenate(
tuple(particles_all_all_beamline))
track(
drv.In(numpy.array([distance], dtype=self.numpy_dtype)), # travel distance
drv.In(numpy.array([footstep], dtype=self.numpy_dtype)), # step length
drv.In(kls_all_all_beamline),
drv.In(p0s_all_all_beamline),
drv.In(numpy.array(current_element_numbers, dtype=numpy.int32)),
drv.In(qs_data_all_beamline),
drv.InOut(particles_all_all_beamline),
drv.In(numpy.array([len(ps)], dtype=numpy.int32)), # number of particles
grid=(len(bls), 1, 1),
block=(self.block_dim_x, 1, 1)
)
particles_all_all_beamline = particles_all_all_beamline.reshape(
(len(bls), len(ps), 10))
ret: List[List[RunningParticle]] = []
for bid in range(len(bls)):
ps_ran: List[RunningParticle] = []
for pid in range(len(ps)):
ps_ran.append(RunningParticle.from_numpy_array_data(
particles_all_all_beamline[bid][pid]))
ret.append(ps_ran)
return ret
def track_multi_particle_beamline_for_magnet_with_multi_qs(
self, bls: List[Beamline], ps: List[RunningParticle],
distance: float, footstep: float) -> List[List[RunningParticle]]:
"""
Multi-particle, multi-beamline tracking: current elements + multiple QS magnets.
"""
if self.cpu_mode:
ret:List[List[RunningParticle]] = []
for bl in bls:
cps = [p.copy() for p in ps]
ParticleRunner.run_only(
p = cps,
m = bl,
length = distance,
footstep = footstep,
concurrency_level=None
)
ret.append(cps)
return ret
mod = SourceModule(self.cuda_code)
track = mod.get_function(
'track_multi_particle_beamline_for_magnet_with_multi_qs')
# data of all beamlines
kls_all_all_beamline: List[numpy.ndarray] = []
p0s_all_all_beamline: List[numpy.ndarray] = []
qs_datas_all_beamline: List[numpy.ndarray] = []
particles_all_all_beamline: List[numpy.ndarray] = []
current_element_numbers: List[int] = []
qs_numbers: List[int] = []
for bl in bls:
kls_list: List[numpy.ndarray] = []
p0s_list: List[numpy.ndarray] = []
particle_list: List[numpy.ndarray] = [
p.to_numpy_array_data(numpy_dtype=self.numpy_dtype) for p in ps]
current_element_number = 0
qs_number = 0
qs_datas: List[numpy.ndarray] = []
for m in bl.magnets:
if isinstance(m, CCT):
cct = CCT.as_cct(m)
kls, p0s = cct.global_current_elements_and_elementary_current_positions(
numpy_dtype=self.numpy_dtype)
current_element_number += cct.total_disperse_number
kls_list.append(kls)
p0s_list.append(p0s)
elif isinstance(m, QS):
qs = QS.as_qs(m)
qs_datas.append(qs.to_numpy_array(
numpy_dtype=self.numpy_dtype))
qs_number += 1
else:
raise ValueError(f"{m} ๆ ๆณ็จ GPU ๅ ้")
kls_all = numpy.concatenate(tuple(kls_list)) # ๅคไธช่ฟ่ตทๆฅ
p0s_all = numpy.concatenate(tuple(p0s_list))
qs_datas_con = numpy.concatenate(tuple(qs_datas))
# ๅถไฝ kls_all_all_beamline p0s_all_all_beamline
# ่ฟ้ๅคๅถไธไธ็ๆไนๆฏไปไนๅข๏ผ
# ๅ็ญ๏ผๅ ไธบไธไธชๆบๆถ็ kls p0s ้ฟๅบฆๆฏ็ฑ max_current_element_number*3 ๅณๅฎ
# ่ไธๆฏ็ฑ len(kls_all) ๅณๅฎ
kls_all_pad = numpy.zeros(
(self.max_current_element_number*3,), dtype=self.numpy_dtype)
p0s_all_pad = numpy.zeros(
(self.max_current_element_number*3,), dtype=self.numpy_dtype)
kls_all_pad[0:len(kls_all)] = kls_all
p0s_all_pad[0:len(p0s_all)] = p0s_all
kls_all_all_beamline.append(kls_all_pad)
p0s_all_all_beamline.append(p0s_all_pad)
# build qs_datas_all_beamline
qs_datas_all_pad = numpy.zeros(
(self.max_qs_datas_length*GPU_ACCELERATOR.QS_DATA_LENGTH,),
dtype=self.numpy_dtype
)
qs_datas_all_pad[0:qs_number *
GPU_ACCELERATOR.QS_DATA_LENGTH] = qs_datas_con
qs_datas_all_beamline.append(qs_datas_all_pad)
particles_all = numpy.concatenate(tuple(particle_list))
particles_all_all_beamline.append(particles_all)
current_element_numbers.append(current_element_number)
qs_numbers.append(qs_number)
kls_all_all_beamline: numpy.ndarray = numpy.concatenate(
tuple(kls_all_all_beamline))
p0s_all_all_beamline: numpy.ndarray = numpy.concatenate(
tuple(p0s_all_all_beamline))
qs_datas_all_beamline: numpy.ndarray = numpy.concatenate(
tuple(qs_datas_all_beamline))
particles_all_all_beamline = numpy.concatenate(
tuple(particles_all_all_beamline))
track(
drv.In(numpy.array([distance], dtype=self.numpy_dtype)), # travel distance
drv.In(numpy.array([footstep], dtype=self.numpy_dtype)), # step length
drv.In(kls_all_all_beamline),
drv.In(p0s_all_all_beamline),
drv.In( | numpy.array(current_element_numbers, dtype=numpy.int32) | numpy.array |
import numpy
import scipy
import scipy.signal
import librosa
import typing
import peakutils
import matplotlib.pyplot as plt
from ..multipitch import Multipitch
from ..chromagram import Chromagram
from ..dsp.wfir import wfir
from ..dsp.frame import frame_cutter
from collections import OrderedDict
class MultipitchESACF(Multipitch):
def __init__(
self,
audio_path,
ham_ms=46.4,
k=0.67,
n_peaks_elim=6,
peak_thresh=0.1,
peak_min_dist=10,
):
super().__init__(audio_path)
self.ham_samples = int(self.fs * ham_ms / 1000.0)
self.k = k
self.n_peaks_elim = n_peaks_elim
self.peak_thresh = peak_thresh
self.peak_min_dist = peak_min_dist
@staticmethod
def display_name():
return "ESACF (Tolonen, Karjalainen)"
@staticmethod
def method_number():
return 1
def compute_pitches(self, display_plot_frame=-1):
overall_chromagram = Chromagram()
for frame, x_frame in enumerate(frame_cutter(self.x, self.ham_samples)):
x = wfir(x_frame, self.fs, 12)
x_hi = _highpass_filter(x, self.fs)
x_hi = numpy.clip(x_hi, 0, None) # half-wave rectification
x_hi = lowpass_filter(x_hi, self.fs, 1000) # paper wants it
x_lo = lowpass_filter(x, self.fs, 1000)
x_sacf = _sacf([x_lo, x_hi])
x_esacf, harmonic_elim_plots = _esacf(x_sacf, self.n_peaks_elim, True)
peak_indices = peakutils.indexes(
x_esacf, thres=self.peak_thresh, min_dist=self.peak_min_dist
)
peak_indices_interp = peakutils.interpolate(
numpy.arange(x_esacf.shape[0]), x_esacf, ind=peak_indices
)
chromagram = Chromagram()
for i, tau in enumerate(peak_indices_interp):
pitch = self.fs / tau
try:
note = librosa.hz_to_note(pitch, octave=False)
chromagram[note] += x_esacf[peak_indices[i]]
except ValueError:
continue
overall_chromagram += chromagram
if frame == display_plot_frame:
_display_plots(
self.clip_name,
self.fs,
self.ham_samples,
frame,
x,
x_lo,
x_hi,
x_sacf,
x_esacf,
harmonic_elim_plots,
peak_indices,
peak_indices_interp,
)
return overall_chromagram
def _sacf(x_channels: typing.List[numpy.ndarray], k=None) -> numpy.ndarray:
# k is same as p (power) in the Klapuri/Ansi paper, method 3
if not k:
k = 0.67
shape = x_channels[0].shape[0]
running_sum = numpy.zeros(shape)
for xc in x_channels:
running_sum += numpy.abs(numpy.fft.fft(xc)) ** k
return numpy.real(numpy.fft.ifft(running_sum))[:int((shape-1)/2)]
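# Hedged illustration (added, not part of the original module): _sacf sums |FFT|^k over the
# band-limited channels and takes the real inverse FFT, giving a lag-domain summary
# autocorrelation. The demo below is hypothetical and uses synthetic sine waves in place of
# the real lowpass/highpass channels; a peak at lag tau corresponds to a pitch of fs / tau.
def _sacf_demo(fs: float = 44100.0, f0: float = 220.0, n: int = 2048) -> numpy.ndarray:
    t = numpy.arange(n) / fs
    x_lo = numpy.sin(2 * numpy.pi * f0 * t)      # stand-in for the lowpass channel
    x_hi = numpy.sin(2 * numpy.pi * 2 * f0 * t)  # stand-in for the highpass channel
    return _sacf([x_lo, x_hi])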
def _esacf(
x2: numpy.ndarray, n_peaks: int, ret_plots: bool
) -> typing.Tuple[numpy.ndarray, typing.List[numpy.ndarray]]:
"""
enhance the SACF with the following procedure
clip to positive values, time stretch by n_peaks
subtract original
"""
x2tmp = x2.copy()
to_plot = []
for timescale in range(2, n_peaks + 1):
x2tmp = numpy.clip(x2tmp, 0, None)
x2stretched = librosa.effects.time_stretch(x2tmp, timescale).copy()
x2stretched.resize(x2tmp.shape)
if ret_plots:
to_plot.append(x2stretched)
x2tmp -= x2stretched
x2tmp = numpy.clip(x2tmp, 0, None)
return x2tmp, to_plot
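# Hedged illustration (added): the enhancement clips the SACF, time-stretches it by factors
# 2..n_peaks and subtracts each stretched copy, suppressing spurious peaks at multiples of the
# fundamental lag. This demo simply chains the two helpers above on the synthetic SACF.
def _esacf_demo() -> numpy.ndarray:
    sacf = _sacf_demo()
    esacf, _ = _esacf(sacf, n_peaks=3, ret_plots=False)
    return esacf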
def _highpass_filter(x: numpy.ndarray, fs: float) -> numpy.ndarray:
b, a = scipy.signal.butter(2, [1000 / (fs / 2)], btype="high")
return scipy.signal.lfilter(b, a, x)
"""
Paper says:
The lowpass block also includes a highpass rolloff with 12 dB/octave below 70 Hz.
Still TODO?
"""
def lowpass_filter(x: numpy.ndarray, fs: float, band: float) -> numpy.ndarray:
b, a = scipy.signal.butter(2, [band / (fs / 2)], btype="low")
return scipy.signal.lfilter(b, a, x)
def _display_plots(
clip_name,
fs,
frame_size,
frame,
x,
x_lo,
x_hi,
x_sacf,
x_esacf,
harmonic_elim_plots,
peak_indices,
peak_indices_interp,
):
samples = numpy.arange(frame_size)
fig1, (ax1, ax2) = plt.subplots(2, 1)
ax1.set_title("{0} - x[n], frame {1}".format(clip_name, frame))
ax1.set_xlabel("n (samples)")
ax1.set_ylabel("amplitude")
ax1.plot(samples, x, "b", alpha=0.5, label="x[n]")
ax1.plot(samples, x_lo, "g", alpha=0.5, linestyle="--", label="x[n] lo")
ax1.plot(samples, x_hi, "r", alpha=0.5, linestyle=":", label="x[n] hi")
ax1.grid()
ax1.legend(loc="upper right")
ax2.set_title("SACF, ESACF")
ax2.set_xlabel("n (samples)")
ax2.set_ylabel("normalized amplitude")
i = 0
for i, h in enumerate(harmonic_elim_plots):
h_norm = h / numpy.max(h)
ax2.plot(
samples,
numpy.concatenate((h_norm, numpy.zeros(samples.shape[0] - h.shape[0]))),
"C{0}".format(i),
alpha=0.1,
label="time stretch {0}".format(2 + i),
)
i += 1
sacf_norm = x_sacf / | numpy.max(x_sacf) | numpy.max |
# ---------- MODULES ----------
# standard modules
import numpy as np
import pandas as pd
import datetime
from dateutil.relativedelta import relativedelta
from sklearn.linear_model import LinearRegression
import matplotlib.pyplot as plt
# custom modules
from classes.Stock import Stock, StockIndex
from classes.FinnhubAPI import FinnhubClient
from utils.generic import npDateTime64_2_str
from utils.plot import createPlot
from classes.GlobalVariables import *
# ---------- CLASSES ----------
class StockAnalyzer():
# value shared across all class instances
# variables
NET_MARGIN = 'net margin'
RETURN_ON_EQUITY = 'return on equity'
#
useWeightedHistoricalData = False
weightingStep = 1
def __init__(self,stock,index=None):
if not isinstance(stock,Stock):
raise TypeError('Object ' + str(stock) + ' is no instance of class Stock')
if index is None:
self.stockIndex = None
elif isinstance(index,StockIndex):
self.stockIndex = index
elif isinstance(index,str):
self.stockIndex = StockIndex(index)
else:
raise TypeError('The index needs to be a \'str\' or an object of StockIndex, but it is \'' + str(type(index)) + '.')
self.stock = stock
# variables for analyzing the stock
# EPS
self.meanWeightedEps = None
self.calcWeightedEps()
self._GrahamNumber = None
self._Recommendations = None
self._LevermannScore = None
#
self._NetMargin = None
self._ReturnOnEquity = None
self._ReturnOnAssets = None
self._FreeCashFlowBySales = None
self._PriceToSales = None
self._PriceToEarnings = None
self._PresentShareValue = None
self._CurrentRatio = None
self._AssetTurnover = None
self.dividendYield = 0
a = self.stock.financialStatements
print(a)
# analyze the stock
self.analyzeStock()
def analyzeStock(self):
self.calcGrahamNumber()
self.calcDCF()
self.calcReturnOnEquity()
self.calcReturnOnAssets()
self.calcFreeCashFlowBySales()
self.calcPiotroskiFScore()
@property
def GrahamNumber(self):
if self._GrahamNumber is None:
self.calcGrahamNumber()
return self._GrahamNumber
@property
def ReturnOnEquity(self):
if self._ReturnOnEquity is None:
self.calcReturnOnEquity()
return self._ReturnOnEquity
@property
def ReturnOnAssets(self):
if self._ReturnOnAssets is None:
self.calcReturnOnAssets()
return self._ReturnOnAssets
@property
def FreeCashFlowBySales(self):
if self._FreeCashFlowBySales is None:
self.calcFreeCashFlowBySales()
return self._FreeCashFlowBySales
@property
def PriceToSales(self):
if self._PriceToSales is None:
self.calcPriceToSales()
return self._PriceToSales
@property
def NetMargin(self):
if self._NetMargin is None:
self.calcNetMargin()
return self._NetMargin
@property
def LevermannScore(self):
if self._LevermannScore is None:
self.calcLevermannScore()
return self._LevermannScore
@property
def Recommendations(self):
if self._Recommendations is None:
self._Recommendations = self.getLatestRecommendations()
return self._Recommendations
@property
def PresentShareValue(self):
if self._PresentShareValue is None:
self.calcDCF()
return self._PresentShareValue
@property
def currentRatio(self):
if self._CurrentRatio is None:
currentAssets = self.stock.financialStatements.loc['Total Current Assets'].copy()
currentLiabilities = self.stock.financialStatements.loc['Total Current Liabilities'].copy()
df = pd.Series()
for date in currentAssets.index.values:
df.loc[date] = currentAssets.loc[date]/currentLiabilities.loc[date]
self._CurrentRatio = df
return self._CurrentRatio
@property
def assetTurnover(self):
if self._AssetTurnover is None:
sales = self.stock.financialStatements.loc[REVENUES,:]
assets = self.stock.financialStatements.loc[ASSETS,:]
df = pd.Series()
for date in sales.index.values:
df.loc[date] = sales.loc[date]/assets.loc[date]
self._AssetTurnover = df
return self._AssetTurnover
"""
Calculation of the Graham number
"""
def calcGrahamNumber(self):
if (self.meanWeightedEps is not None) and (self.stock.isItemInBasicData(Stock.BOOK_VALUE_PER_SHARE)):
if (self.meanWeightedEps < 0):
print(' +++ avg. weighted EPS < 0! Stock: ' + self.stock.symbol + ' (' + self.stock.name + ') +++')
self.meanWeightedEps = 0
if (self.stock.getBasicDataItem(Stock.BOOK_VALUE_PER_SHARE) < 0):
print(' +++ book value per share < 0! Stock: ' + self.stock.symbol + ' (' + self.stock.name + ') +++')
self._GrahamNumber = np.sqrt(15 * self.meanWeightedEps * 1.5 * self.stock.getBasicDataItem(Stock.BOOK_VALUE_PER_SHARE))
else:
self._GrahamNumber = 0
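# Hedged illustration (added example, not original code): the Graham number computed above is
# sqrt(15 * EPS * 1.5 * book value per share). With an EPS of 5 and a book value of 20 per
# share this gives sqrt(15 * 5 * 1.5 * 20) = sqrt(2250), roughly 47.4.
def _grahamNumberExample(eps=5.0, bookValuePerShare=20.0):
    return np.sqrt(15 * eps * 1.5 * bookValuePerShare)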
"""
Check whether all assumptions required for the DCF are available
"""
def isAssumptionsCompleteForDCF(self):
return (self.stock.assumptions is not None) and \
('discountRate' in self.stock.assumptions.keys()) and \
('margin_of_safety' in self.stock.assumptions.keys()) and \
('growth_year_1_to_5' in self.stock.assumptions.keys()) and\
('growth_year_6_to_10' in self.stock.assumptions.keys()) and\
('growth_year_10ff' in self.stock.assumptions.keys())
"""
Discounted cash flow (DCF) method
"""
def calcDCF(self,detailed=True,generatePlot=False):
# check if all needed assumptions data is available
if self.isAssumptionsCompleteForDCF():
# free cash flow of the past years
CF = self.stock.financialStatements.loc['freeCashFlow',:].copy()
CF.fillna(CF.mean(), inplace=True) # TODO: NaN values are replaced by the mean
# sort in ascending order (old -> new)
CF_sorted = []
years = []
for date in sorted(CF.index.values.copy()):
CF_sorted.append(CF.loc[date])
years.append(int(date[0:4]))
# Berechnung
model = linearRegression(range(len(CF_sorted)),CF_sorted,plotResult=False)
todaysCashFlow_regression = model.predict(np.array([len(CF_sorted)-1]).reshape(1, -1))[0]
todaysCashFlow_thisYear = CF_sorted[-1]
FCFstartValue = (todaysCashFlow_regression+todaysCashFlow_thisYear)/2
if detailed:
print('DCF start value is the mean value of last years value and the regression value: {v:.2f} Mrd. {c}'.format(v=FCFstartValue/10**9,c=self.stock.currencySymbol))
print('-'*54)
print(' year | Free Cash Flow | discounted free cash flow')
print(' ' + '-'*52 + ' ')
# growth rates for the next 10 years
discountRate = self.stock.assumptions["discountRate"]/100
# free cash flow of the next 5 years
growthRate = self.stock.assumptions['growth_year_1_to_5']/100
discountedCashFlow = []
FCF, year = [], []
for i in range(1,6):
FCF.append((FCFstartValue*(1+growthRate)**i))
discountedCashFlow.append(FCF[-1] / ((1 + discountRate)**i))
year.append(years[-1]+i)
if detailed:
print(' {y:2.0f} | {fcf:6.2f} Mrd. | {dfcf:6.2f} Mrd.'.format(y=i,fcf=FCF[-1]/10**9,dfcf=discountedCashFlow[-1]/10**9))
# free cash flow for years 6-10
growthRate = self.stock.assumptions['growth_year_6_to_10']/100
for i in range(6,11):
FCF.append((FCF[4]*(1+growthRate)**(i-5)))
discountedCashFlow.append(FCF[-1] / ((1 + discountRate)**i))
year.append(years[-1]+i)
if detailed:
print(' {y:2.0f} | {fcf:6.2f} Mrd. | {dfcf:6.2f} Mrd.'.format(y=i,fcf=FCF[-1]/10**9,dfcf=discountedCashFlow[-1]/10**9))
# total free cash flow from year 11 onwards (perpetuity value), expressed in today's money (discounted perpetuity value)
# - FCF_10: free cash flow in 10 years
# - growthRate_10: growth of the free cash flow after year 10
# formula: FCF_10 * (1 + growthRate_10) / ((discountRate - growthRate_10) * (1 + discountRate))
growthRate = self.stock.assumptions['growth_year_10ff']/100
# perpetuity value
FCF.append((FCF[-1] * (1 + growthRate)) / (discountRate - growthRate))
# discounted perpetuity value
discountedCashFlow.append(FCF[-1] / ((1 + discountRate)**10))
if detailed:
print(' inf | {fcf:6.2f} Mrd. | {dfcf:6.2f} Mrd.'.format(y=i,fcf=FCF[-1]/10**9,dfcf=discountedCashFlow[-1]/10**9))
print(' ' + '-'*52 + ' ')
# sum of all future cash flows, discounted to today
totalEquityValue = sum(discountedCashFlow)
# present value of one share, based on all future free cash flows
# a margin of safety is taken into account
marginOfSafety = self.stock.assumptions["margin_of_safety"]/100
sharesOutstanding = self.stock.keyStatistics[Stock.SHARES_OUTSTANDING]
perShareValue = totalEquityValue/sharesOutstanding/(1 + marginOfSafety)
if detailed:
print(' {v:7.2f} Mrd.'.format(v=totalEquityValue/10**9))
print(' shares outstanding: {v:7.0f} Mio.'.format(v=sharesOutstanding/10**6))
print(' ' + '-'*52 + ' ')
print(' present value per share: {v:7.2f}'.format(v=perShareValue))
print('-'*54 + '\n')
self._PresentShareValue = perShareValue
if generatePlot:
createPlot([years,years[-1],year],[CF_sorted,FCFstartValue,FCF[:-1]],legend_list=['historical free cash flows','start value for DCF method','estimated free cash flows'])
return FCF
else:
print(' +++ Discounted Cash Flow Analysis failed due to missing data +++ ')
return []
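# Hedged sketch (added, illustrative numbers only): the core of calcDCF discounts each projected
# free cash flow FCF_i = FCF_0 * (1 + growth)**i by (1 + discountRate)**i and sums the results.
def _dcfDiscountExample(fcf0=1.0e9, growth=0.05, discountRate=0.10, years=5):
    return sum(fcf0 * (1 + growth) ** i / (1 + discountRate) ** i
               for i in range(1, years + 1))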
"""
Calculation of the Levermann score
"""
def calcLevermannScore(self):
if (self.stockIndex is not None):
self._LevermannScore = LevermannScore(self.stock,self.stockIndex)
else:
print('A stock index must be provided to compute the Levermann score')
def calcPiotroskiFScore(self):
score = 0
# TODO finish implementing calcPiotroskiFScore
# net income
# one point if positive
netIncome = self.stock.financialStatements.loc[NET_INCOME,:].copy()
dates = sorted(netIncome.index.values)
thisYear, previousYear = dates[-1], dates[-2]
if (netIncome.loc[thisYear]) > 0:
score += 1
# operating cash flow
# one point if positive
operatingCashFlow = self.stock.financialStatements.loc[CASH_FROM_OPERATING_ACTIVITIES,:]
if (operatingCashFlow.loc[thisYear] > 0):
score += 1
# return on assets
# one point if greater than in the previous year
ROA = self.ReturnOnAssets
if (ROA.loc[thisYear] > ROA.loc[previousYear]):
score += 1
# operating cash flow versus net income
# one point if the operating cash flow exceeds the net income
if (operatingCashFlow.loc[thisYear] > netIncome.loc[thisYear]):
score += 1
# leverage
# one point if long-term debt / assets is lower than in the previous year
longTermDebt = self.stock.financialStatements.loc['Long Term Debt']
assets = self.stock.financialStatements.loc[ASSETS].copy()
ltd2a = []
for date in dates[-2:]:
ltd2a.append(longTermDebt.loc[date]/assets.loc[date])
if (ltd2a[-1] < ltd2a[-2]):
score += 1
# current ratio (third-degree liquidity)
# one point if the current ratio is greater than in the previous year
currentRatio = self.currentRatio
if (currentRatio.loc[thisYear] > currentRatio.loc[previousYear]):
score += 1
# number of shares
# one point if the share count has not increased
numberOfShares = self.stock.financialStatements.loc[DILUTED_AVERAGE_SHARES,:]
if not (numberOfShares.loc[thisYear] > numberOfShares.loc[previousYear]):
score += 1
# gross margin
# one point if the gross margin grew compared with the previous year
grossProfit = self.stock.financialStatements.loc['Gross Profit',:]
revenues = self.stock.financialStatements.loc[REVENUES,:]
if ((grossProfit.loc[thisYear]/revenues.loc[thisYear]) > (grossProfit.loc[previousYear]/revenues.loc[previousYear])):
score += 1
# asset turnover
# one point if the asset turnover is greater than in the previous year
assetTurnover = self.assetTurnover
if (assetTurnover.loc[thisYear] > assetTurnover.loc[previousYear]):
score += 1
# overall rating
# high: 7-9 points, medium: 3-6 points, low: 0-2 points
if (score >= 7):
bewertung = 'Hoch'
elif (score >= 3):
bewertung = 'Mittel'
else:
bewertung = 'Niedrig'
comment = 'Piotroski F Score: {score:.0f} ({bew})'.format(score=score,bew=bewertung)
return score, comment
def calcTechnicalIndicator(self):
# TODO implement calcTechnicalIndicator
# 200-day moving average
# historical values
# MACD
# rating: 3 of 3 positive -> buy
# rating: 3 of 3 negative -> sell
# rating: 0-2 of 3 -> hold
pass
def calcWeightedEps(self):
epsKey = 'dilutedEPS'
if (self.stock.financialStatements is not None) and (epsKey in self.stock.financialStatements.index.values):
# get historical EPS data
epsHistory = self.stock.financialStatements.loc[epsKey,:].copy()
# remove NaN values
for row in epsHistory.index.values:
if np.isnan(epsHistory.loc[row]):
epsHistory = epsHistory.drop(row)
if (self.useWeightedHistoricalData):
# create weighting with the global defined stepsize
weighting = [1]
for i in range(len(epsHistory)-1):
weighting.append(weighting[-1]+StockAnalyzer.weightingStep)
weighting = list(reversed(weighting))
else:
# same factor for every year
weighting = [1 for i in range(len(epsHistory))]
# calculate the weighted eps
weightedEps = [factor*value for value,factor in zip(epsHistory,weighting)]
self.meanWeightedEps = sum(weightedEps)/sum(weighting)
else:
self.meanWeightedEps = self.stock.getBasicDataItem(Stock.EARNINGS_PER_SHARE)
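# Hedged illustration (added example): with useWeightedHistoricalData enabled and weightingStep = 1,
# four EPS values [1, 2, 3, 4] in column order receive the weights [4, 3, 2, 1] (the first column
# weighted most), so the weighted mean is (1*4 + 2*3 + 3*2 + 4*1) / 10 = 2.0.
def _weightedEpsExample(eps=(1.0, 2.0, 3.0, 4.0), step=1):
    weighting = [1]
    for _ in range(len(eps) - 1):
        weighting.append(weighting[-1] + step)
    weighting = list(reversed(weighting))
    return sum(f * v for v, f in zip(eps, weighting)) / sum(weighting)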
def loadRecommendations(self):
self._Recommendations = FinnhubClient(self.stock.symbol).getRecommendationsDataFrame()
def getLatestRecommendations(self):
latestRecommendations = self.Recommendations.iloc[0,:]
latest = latestRecommendations[['strongBuy','buy','hold','sell','strongSell']]
return latest
"""
Calculation of the net margin
"""
def calcNetMargin(self):
if self.stock.financialStatements is None:
raise Exception('The stock has no historical financial data. "Total Revenue" and "Net Income" needed!')
else:
# net income
netIncome = self.stock.financialStatements.loc[NET_INCOME,:].copy()
# revenue
revenues = self.stock.financialStatements.loc[REVENUES,:].copy()
dic = {}
for index in sorted(netIncome.index, reverse=True):
dic[index] = netIncome.loc[index]/revenues.loc[index]
df = pd.Series(dic, index=dic.keys())
df.reindex(sorted(df.index, reverse=True))
self._NetMargin = df
return df
def calcReturnOnEquity(self):
if self.stock.financialStatements is None:
raise Exception('The stock has no historical financial data. "Total Stockholder Equity" and "Net Income" needed!')
else:
# equity
equity = self.stock.financialStatements.loc[STOCKHOLDERS_EQUITY,:].copy()
# net income
income = self.stock.financialStatements.loc[NET_INCOME,:].copy()
# compute the return on equity for every year
dic = {}
for index in sorted(equity.index, reverse=True):
# ignore NAN-values
if (not np.isnan(income[index])) and (not np.isnan(equity[index])):
dic[index] = income[index]/equity[index]
df = pd.Series(dic, index=dic.keys())
self._ReturnOnEquity = df
return df
def calcReturnOnAssets(self):
if self.stock.financialStatements is None:
raise Exception('The stock has no historical financial data. "Total Assets" and "Net Income" needed!')
else:
# total assets
totalAssets = self.stock.financialStatements.loc[ASSETS,:].copy()
# net income
income = self.stock.financialStatements.loc[NET_INCOME,:].copy()
# compute the return on assets for every year
dic = {}
for index in sorted(totalAssets.index,reverse=True):
dic[index] = income[index]/totalAssets[index]
df = pd.Series(dic, index=dic.keys())
self._ReturnOnAssets = df
return df
def calcFreeCashFlowBySales(self):
if self.stock.financialStatements is None:
raise Exception('The stock has no historical financial data. "Total Revenue" and "freeCashFlow" needed!')
else:
# revenue
revenues = self.stock.financialStatements.loc[REVENUES,:].copy()
# free cash flow
freeCashFlow = self.stock.financialStatements.loc[FREE_CASH_FLOW,:].copy()
# compute the free cash flow relative to sales for every year
dic = {}
for index in sorted(revenues.index, reverse=True):
dic[index] = freeCashFlow[index]/revenues[index]
df = pd.Series(dic, index=dic.keys())
self._FreeCashFlowBySales = df
return df
def calcPriceToSales(self):
if self.stock.financialStatements is None:
raise Exception('The stock has no historical financial data. "Total Revenue" and "Total Stockholder Equity" needed!')
else:
# revenue
revenues = self.stock.financialStatements.loc[REVENUES,:].copy()
# stockholder equity (used here as the price proxy)
totalStockHolderEquity = self.stock.financialStatements.loc[STOCKHOLDERS_EQUITY,:].copy()
# price-to-sales for every year
P_S = pd.Series()
for date in list(revenues.index.values.copy()):
# Price/Sales
price = totalStockHolderEquity.loc[date]
sales = revenues.loc[date]
P_S.loc[date] = price/sales
self._PriceToSales = P_S
return P_S
def calcGrowth(self,valueList,percentage=False):
if percentage:
factor = 100
else:
factor = 1
return [(valueList[i]-valueList[i-1])/valueList[i-1]*factor for i in range(1,len(valueList))]
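# Added usage note: calcGrowth returns period-over-period growth rates,
# e.g. calcGrowth([100, 110, 121], percentage=True) -> [10.0, 10.0].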
def createPDF(self):
cs = self.stock.currencySymbol
xlabel = 'Jahr'
pdf = StockPDF(pdfFileName=self.stock.symbol + '.pdf')
pdf.newPage()
"""
Plot 1
"""
# Plot Operating Income
ylabel = 'Income in Mrd. ' + cs
operatingIncome = self.stock.financialStatements.loc[OPERATING_INCOME].copy()/10**9
operatingIncome = operatingIncome.reindex(sorted(operatingIncome.index,reverse=True), axis=0)
pdf.addPlot(1,operatingIncome, title='Income', xlabel=xlabel, ylabel=ylabel, line=False, label="Operating Income")
# linear regression of the operating income
if not np.any(np.isnan(operatingIncome)):
series = self.__interpolate(operatingIncome)
pdf.addPlot(1,series, title='Income', xlabel=xlabel, ylabel=ylabel, line=True, label="Operating Income (lin. Regression)")
# Net Income
netIncome = self.stock.financialStatements.loc[NET_INCOME].copy()/10**9
netIncome = netIncome.reindex(sorted(netIncome.index,reverse=True), axis=0)
pdf.addPlot(1,netIncome,title='Income',xlabel=xlabel,ylabel=ylabel, line=False, label="Net Income")
"""
Plot 2
"""
# Cash flow from operating activities
ylabel = 'Cash flow in Mrd. ' + cs
totalCashFlowFromOperations = self.stock.financialStatements.loc[CASH_FROM_OPERATING_ACTIVITIES].copy()/10**9
totalCashFlowFromOperations = totalCashFlowFromOperations.reindex(sorted(totalCashFlowFromOperations.index,reverse=True), axis=0)
pdf.addPlot(2,totalCashFlowFromOperations,title='Cash flow',xlabel=xlabel,ylabel=ylabel, line=False, label="Cash Flow from operating activities")
# linear regression
if not np.any(np.isnan(totalCashFlowFromOperations)):
series = self.__interpolate(totalCashFlowFromOperations)
pdf.addPlot(2,series,title='Cash flow',xlabel=xlabel,ylabel=ylabel, line=True, label="Cash Flow from operating activities (lin. Regression)")
# Free cash flow
freeCashFlow = self.stock.financialStatements.loc[FREE_CASH_FLOW].copy()/10**9
freeCashFlow = freeCashFlow.reindex(sorted(freeCashFlow.index,reverse=True), axis=0)
pdf.addPlot(2,freeCashFlow,title='Cash flow',xlabel=xlabel,ylabel=ylabel, line=False, label="Free Cash Flow")
"""
Plot 3
"""
# Return on Equity
ylabel = 'in %'
ROE = self.ReturnOnEquity*100
ROA = self.ReturnOnAssets*100
dates = sorted(list(set(list(ROE.index)+list(ROA.index))),reverse=True)
ROE = ROE.reindex(dates, axis=0)
pdf.addPlot(3,ROE,xlabel=xlabel,ylabel=ylabel, line=False, label='Return on Equity')
# Return on Assets
ROA = ROA.reindex(dates, axis=0)
pdf.addPlot(3,ROA,xlabel=xlabel,ylabel=ylabel, line=False, label='Return on Assets')
"""
Plot 4
"""
# Number of Shares
ylabel = 'Diluted number in Mio.'
averageShares = self.stock.financialStatements.loc[DILUTED_AVERAGE_SHARES].copy()/10**6
averageShares = averageShares.reindex(sorted(averageShares.index,reverse=True), axis=0)
pdf.addPlot(4,averageShares, xlabel=xlabel, ylabel=ylabel, title='Number of shares outstanding', line=False)
"""
New page
"""
pdf.newPage()
"""
Plot 1
"""
ylabel = ''
equity = self.stock.financialStatements.loc[STOCKHOLDERS_EQUITY].copy()
assets = self.stock.financialStatements.loc[ASSETS].copy()
series = pd.Series()
for date in sorted(equity.index.values, reverse=True):
series.loc[date] = assets.loc[date]/equity.loc[date]
pdf.addPlot(1,series, xlabel=xlabel, ylabel=ylabel, title='Financial Leverage', line=False)
pdf.closePDF()
def __interpolate(self,dataSeries):
# Regressionsrechnung Operating Income
years = [float(y[0:4]) for y in dataSeries.index.values]
lr = linearRegression(years,dataSeries.values)
series = pd.Series()
for y in dataSeries.index.values:
series.loc[y] = lr.predict(np.array([float(y[0:4])]).reshape(1, -1))
return series
def printBasicAnalysis(self):
# variables for formatting the console output
stringFormat = "35s"
dispLineLength = 55
sepString = '-'*dispLineLength + '\n'
# format margin around stock name
stockName = self.stock.company.longName
stockNameOutput = stockName
if (len(stockName) < dispLineLength):
margin = int((dispLineLength-len(stockName))/2)
stockNameOutput = ' '*margin + stockName + ' '*margin
# string to print the graham number
strGrahamNumber = '{str:{strFormat}}{gn:6.2f}'.format(str='Graham number:',gn=self.GrahamNumber,strFormat=stringFormat) + ' ' + self.stock.currencySymbol + '\n'
# string to print the stock's current value
strCurrentStockValue = ''
stockPrice = self.stock.getBasicDataItem(Stock.MARKET_PRICE)
if (stockPrice is not None):
strCurrentStockValue = '{str:{strFormat}}{val:6.2f}'.format(str="Current share price:",val=stockPrice,strFormat=stringFormat) + ' ' + self.stock.currencySymbol + '\n'
# Nettogewinn
limit = 15/100.0
limit2 = 5/100.0
isGood = sum([1 if nm > limit else 0 for nm in self.NetMargin]) == len(self.NetMargin)
isBad = sum([1 if nm < limit2 else 0 for nm in self.NetMargin]) == len(self.NetMargin)
avgNetMargin = sum(self.NetMargin)/len(self.NetMargin)
if isGood: # groesser als 15%
strNetMarginComment = 'good, always >= {limit:.0f}%'.format(limit=limit*100)
elif avgNetMargin > limit:
strNetMarginComment = 'ok, avg >= {limit:.0f}%'.format(limit=limit*100)
elif isBad: # kleiner als 5%
strNetMarginComment = 'Caution!, avg < {limit:.0f}%'.format(limit=limit2*100)
else:
strNetMarginComment = '-'
strNetMargin = '{str:{strFormat}}{val:6.2f}'.format(str="Net margin (" + str(len(self.NetMargin)) + "y avg.):",val=avgNetMargin*100,strFormat=stringFormat) + \
'% (' + strNetMarginComment + ')\n'
# Kapitalrendite
limit = 6/100.0
isGood = sum([1 if roa >= limit else 0 for roa in self._ReturnOnAssets]) == len(self.ReturnOnAssets)
avgRoA = sum(self.ReturnOnAssets)/len(self.ReturnOnAssets)
if isGood:
strRoAcomment = 'good, always >= {limit:.0f}%'.format(limit=limit*100)
elif avgRoA >= limit:
strRoAcomment = 'ok, avg >= {limit:.0f}%'.format(limit=limit*100)
else:
strRoAcomment = '?'
# String fuer die Ausgabe
strReturnOnAssets = '{str:{strFormat}}{val:6.2f}'.format(str="Return on assets (" + str(len(self.ReturnOnAssets)) + "y avg.):",val=avgRoA*100,strFormat=stringFormat) + \
'% (' + strRoAcomment + ')\n'
"""
Marktkapitalisierung
"""
strMarketCapComment = ''
# Wenn die Marktkapitalisierung geringer als 500Mio ist, dann wird eine Warnung ausgegeben
if self.stock.keyStatistics[Stock.MARKET_CAP] < 500*10**6:
strMarketCapComment = '!ACHTUNG: kleines Unternehmen!'
strSize = 'Mio.'
marketCap = self.stock.keyStatistics[Stock.MARKET_CAP]/10**6
if self.stock.keyStatistics[Stock.MARKET_CAP] > 10**9:
strSize = 'Mrd.'
marketCap = self.stock.keyStatistics[Stock.MARKET_CAP]/10**9
# String fuer die Ausgabe
strMarketCap = '{str:{strFormat}}{val:6.2f}'.format(str="Market capitalization: ",val=marketCap,strFormat=stringFormat) + \
' ' + strSize + ' ' + self.stock.currencySymbol + ' ' + strMarketCapComment +'\n'
"""
Operating Income (Pruefung, ob die Firma jemals Geld verdient hat)
"""
operatingIncome = self.stock.financialStatements.loc[OPERATING_INCOME].copy()
opIncList, opIncSize = [], 'Mrd.'
strYear, strValue = ' '*6 + '|', ' '*6 + '|'
for date in list(sorted(operatingIncome.index.values.copy())):
value = operatingIncome.loc[date]/10**9 # in Mrd
opIncList.append(value)
strYear = strYear + ' {year} |'.format(year=date[:4])
strValue = strValue + ' {v:6.2f} |'.format(v=value)
# Waehrung
strValue = strValue + ' ' + opIncSize + ' ' + self.stock.currencySymbol
# mittleres Wachstum
operatingIncomeGrowth = self.calcGrowth(opIncList,percentage=True)
avgOperatingIncomeGrowth = sum(operatingIncomeGrowth)/len(operatingIncomeGrowth)
# String fuer die Ausgabe
strOperatingIncome = '{str:{strFormat}}{val:6.2f}'.format(str="Operating Income Growth: ",val=avgOperatingIncomeGrowth,strFormat=stringFormat) + \
'%\n' + strYear + '\n' + strValue + '\n'
"""
Cash flow from operating activities
"""
totalCashFlowFromOperations = self.stock.financialStatements.loc[CASH_FROM_OPERATING_ACTIVITIES].copy()
cashFromOpActList = []
strYear, strValue = ' '*6 + '|', ' '*6 + '|'
for date in list(sorted(totalCashFlowFromOperations.index.values.copy())):
value = totalCashFlowFromOperations.loc[date]/10**9 # in Mrd
cashFromOpActList.append(value)
strYear = strYear + ' {year} |'.format(year=date[:4])
strValue = strValue + ' {v:6.2f} |'.format(v=value)
# Waehrung
strValue = strValue + ' ' + opIncSize + ' ' + self.stock.currencySymbol
# Mittleres Wachstum
cashFromOpActGrowth = self.calcGrowth(cashFromOpActList,percentage=True)
avgCashFromOpActGrowth = sum(cashFromOpActGrowth)/len(cashFromOpActGrowth)
# String fuer die Ausgabe
strCashFlowFromOperatingActivities = '{str:{strFormat}}{val:6.2f}'.format(str="Cash flow from operating activities growth: ",val=avgCashFromOpActGrowth,strFormat=stringFormat) + \
'% \n' + strYear + '\n' + strValue + '\n'
"""
ROE - Return on Equity - Eigenkapitalrenidte
ROA - Return on Assets
Financial Leverage
"""
equity = self.stock.financialStatements.loc[STOCKHOLDERS_EQUITY].copy()
assets = self.stock.financialStatements.loc[ASSETS].copy()
leverageList = []
strYear, strROE, strROA, strLeverage = ' '*6 + '|', '- ROE |', '- ROA |', '- LEV |'
for date in list(sorted(self.ReturnOnEquity.index.values.copy())):
strYear = strYear + ' {year} |'.format(year=date[:4])
strROE = strROE + ' {v:6.2f} |'.format(v=self.ReturnOnEquity.loc[date]*100) # in Prozent
strROA = strROA + ' {v:6.2f} |'.format(v=self.ReturnOnAssets.loc[date]*100) # in Prozent
leverageList.append(assets.loc[date]/equity.loc[date])
strLeverage = strLeverage + ' {v:6.2f} |'.format(v=leverageList[-1])
# Einheit
strROE = strROE + ' %'
strROA = strROA + ' %'
# Beurteilung ROE
avgROE = sum(self.ReturnOnEquity)/len(self.ReturnOnEquity)*100 # Mittelwert in Prozent
if avgROE >= 15:
strRoeComment = ' mittleres ROE > 15% --> sehr gut'
elif avgROE >= 10:
strRoeComment = ' mittleres ROE > 10% --> gut'
else:
strRoeComment = ' mittleres ROE < 10% --> ACHTUNG!'
# Beurteilung ROA
avgROA = sum(self.ReturnOnAssets)/len(self.ReturnOnAssets)*100 # Mittelwert in Prozent
if avgROA > 1.2:
strRoaComment = ' ROA > 1.2% --> gut'
elif avgROA > 1.0:
strRoaComment = ' ROA > 1.0% --> in Ordnung'
elif avgROA < 0.7:
strRoaComment = ' ROA < 0.7% --> Pruefen, warum so gering!'
else:
strRoaComment = ' ROA sollte noch etwas besser sein'
# Beurteilung der Leverage
model = linearRegression(range(len(leverageList)),leverageList)
# Mittelwert
avgLeverage = sum(leverageList)/len(leverageList)
if 'Banks' in self.stock.company.industry:
if avgLeverage > 13:
strLeverageComment = ' '*6 + 'hohe Leverage (> 13) --> ACHTUNG!'
elif (avgLeverage < 9):
strLeverageComment = ' '*6 + 'recht geringe Leverage fรผr Banken (< 9) --> PRUEFEN!'
else:
strLeverageComment = ' '*6 + 'Leverage ok'
else:
if (avgLeverage > 3.5):
strLeverageComment = ' '*6 + 'hohe Leverage (> 3.5) --> ACHTUNG!'
elif (avgLeverage > 2.5) and (model.coef_/avgLeverage > 0.2):
strLeverageComment = ' '*6 + 'Leverage steigt schnell an --> ACHTUNG!'
else:
strLeverageComment = ' '*6 + 'Leverage ok'
# String fuer die Ausgabe
strReturnOnEquity = '{str:{strFormat}}{val:6.2f}'.format(str="Return on Equity: ",val=avgROE,strFormat=stringFormat) + \
'% \n' + strYear + '\n' + strROE + '\n' + strROA + '\n' + strLeverage + '\n' + strRoeComment + '\n' + strRoaComment + '\n' + strLeverageComment + '\n'
"""
Earnings Growth - Gewinnwachstum
"""
earnings = self.stock.financialStatements.loc[NET_INCOME].copy()
earningsList = []
strYear, strEarnings = ' '*6 + '|', ' '*6 + '|'
for date in list(sorted(earnings.index.values.copy())):
earningsList.append(earnings.loc[date])
strYear = strYear + ' {year} |'.format(year=date[:4])
strEarnings = strEarnings + ' {v:6.2f} |'.format(v=earnings.loc[date]/10**9)
# Waehrung
strEarnings = strEarnings + ' Mrd. ' + self.stock.currencySymbol
earningsGrowth = self.calcGrowth(earningsList,percentage=True)
avgEarningsGrowth = sum(earningsGrowth)/len(earningsGrowth)
# String fuer die Ausgabe
        strEarningsGrowth = '{str:{strFormat}}{val:6.2f}'.format(str="Earnings Growth: ",val=avgEarningsGrowth,strFormat=stringFormat) + \
'% \n' + strYear + '\n' + strEarnings + '\n'
"""
Debt
"""
"""
Free Cash Flow / Sales
"""
freeCashFlow = self.stock.financialStatements.loc[FREE_CASH_FLOW].copy()
sales = self.stock.financialStatements.loc[REVENUES].copy()
freeCashFlowToSales = []
strYear, strValue = ' '*6 + '|', ' '*6 + '|'
for date in list(sorted(freeCashFlow.index.values.copy())):
freeCashFlowToSales.append(freeCashFlow.loc[date]/sales.loc[date]*100) # in Prozent
strYear = strYear + ' {year} |'.format(year=date[:4])
strValue = strValue + ' {v:6.2f} |'.format(v=freeCashFlowToSales[-1])
# Einheit
strValue = strValue + ' %'
# Mittelwert und Bewertung
avgFreeCashFlowPerSales = sum(freeCashFlowToSales)/len(freeCashFlowToSales)
if avgFreeCashFlowPerSales >= 10:
strFCF_Sales_comment = ' '*6 + '--> sehr gut'
        elif avgFreeCashFlowPerSales >= 5:
strFCF_Sales_comment = ' '*6 + '--> gut'
else:
strFCF_Sales_comment = ' '*6 + '-> Prรผfen, ob das Unternehmen stark wรคchst!'
# String fuer die Ausgabe
strFreeCashFlowPerSales = '{str:{strFormat}}'.format(str="Free Cash Flow / Sales: ",strFormat=stringFormat) + \
'\n' + strYear + '\n' + strValue + ' \n' + strFCF_Sales_comment + '\n'
"""
Number of shares
"""
averageShares = self.stock.financialStatements.loc[DILUTED_AVERAGE_SHARES].copy()
avgSharesList = []
strYear, strValue = ' '*6 + '|', ' '*6 + '|'
for date in list(sorted(averageShares.index.values.copy())):
# ignore NAN values
if not np.isnan(averageShares.loc[date]):
avgSharesList.append(averageShares.loc[date]/10**6) # in Millionen
strYear = strYear + ' {year} |'.format(year=date[:4])
strValue = strValue + ' {v:5.0f} |'.format(v=avgSharesList[-1])
# Mittelwert und Bewertung
averageSharesGrowth = self.calcGrowth(avgSharesList,percentage=True)
avgAverageSharesGrowth = sum(averageSharesGrowth)/len(averageSharesGrowth)
if avgAverageSharesGrowth < 0:
strAverageShares = ' '*6 + 'Aktienrรผckkรคufe --> kรถnnte gut sein'
elif avgAverageSharesGrowth == 0:
strAverageShares = ' '*6 + 'Anzahl der Aktien bleibt gleich'
elif (avgAverageSharesGrowth > 0) and (avgAverageSharesGrowth < 1.5):
strAverageShares = ' '*6 + 'Anzahl der Aktien steigt um jรคhrlich ca. {v:.1f}%'.format(v=avgAverageSharesGrowth)
elif avgAverageSharesGrowth > 2:
strAverageShares = ' '*6 + 'ACHTUNG: Die Anzahl der Aktien steigt sehr stark! Jรคhrlich ca. {v:.1f}%'.format(v=avgAverageSharesGrowth)
elif np.isnan(avgAverageSharesGrowth):
nonNanValues = [asg for asg in averageSharesGrowth if not np.isnan(asg)]
avgGrowth = sum(nonNanValues)/len(nonNanValues)
            strAverageShares = ' '*6 + '+++ Es fehlen Werte +++.\n' + ' '*6 + 'jรคhrlicher Anstieg: ca. {v:.1f}%'.format(v=avgGrowth)
else:
            strAverageShares = ' '*6 + 'Anzahl der Aktien steigt um jรคhrlich ca. {v:.1f}%'.format(v=avgAverageSharesGrowth)
strNumberOfShares = '{str:{strFormat}}'.format(str="Number of Shares (Mio.): ",strFormat=stringFormat) + \
'\n' + strYear + '\n' + strValue + ' \n' + strAverageShares + '\n'
"""
Free Cash Flow
"""
freeCashFlow = self.stock.financialStatements.loc[FREE_CASH_FLOW].copy()
strYear, strValue = ' '*6 + '|', ' '*6 + '|'
for date in list(sorted(freeCashFlow.index.values.copy())):
strYear = strYear + ' {year} |'.format(year=date[:4])
strValue = strValue + ' {v:6.2f} |'.format(v=freeCashFlow.loc[date]/10**9) # in Mrd
# Einheit
strValue = strValue + ' Mrd. ' + self.stock.currencySymbol
# String fuer die Ausgabe
strFreeCashFlow = '{str:{strFormat}}'.format(str="Free Cash Flow: ",strFormat=stringFormat) + \
'\n' + strYear + '\n' + strValue + ' \n'
"""
Discounted Cash Flow
"""
freeCashFlow = self.stock.financialStatements.loc[FREE_CASH_FLOW].copy()
freeCashFlowList = [freeCashFlow[date] for date in list(sorted(freeCashFlow.index.values.copy()))]
freeCashFlowGrowth = self.calcGrowth(freeCashFlowList,percentage=True)
avgFreeCashFlowGrowth = sum(freeCashFlowGrowth)/len(freeCashFlowGrowth)
if self.isAssumptionsCompleteForDCF():
strDiscountedCashFlow = 'Discounted Cash Flow (DCF)\n' + \
' - margin of safety: {v:.1f}%'.format(v=self.stock.assumptions["margin_of_safety"]) + '\n' + \
' - discount rate: {v:.1f}%'.format(v=self.stock.assumptions["discountRate"]) + '\n' + \
' - expected cash flow growth: \n' + \
' - year 1-5: {v:.1f}%'.format(v=self.stock.assumptions["growth_year_1_to_5"]) + '\n' + \
' - year 6-10: {v:.1f}%'.format(v=self.stock.assumptions["growth_year_6_to_10"]) + '\n' + \
' - afterwards: {v:.1f}%'.format(v=self.stock.assumptions["growth_year_10ff"]) + '\n' + \
' (previous average cash flow growth: {v:.1f}%)'.format(v=avgFreeCashFlowGrowth) + '\n' + \
'\n' + \
'{str:{strFormat}}{val:6.2f}'.format(str="Present Share Value: ",val=self.PresentShareValue,strFormat=stringFormat) + ' ' + self.stock.currencySymbol + '\n'
else:
strDiscountedCashFlow = 'Discounted Cash Flow (DCF)\n' + ' +++ Can\'t be calculated due to missing data +++\n' + \
' (previous average cash flow growth: {v:.1f}%)'.format(v=avgFreeCashFlowGrowth) + '\n'
"""
Piotroski F Score
"""
score, comment = self.calcPiotroskiFScore()
strPiotroskiFScore = comment + '\n'
# Combine all fragments to a string
string2Print = sepString + \
stockNameOutput + '\n' + \
sepString + \
strMarketCap + \
sepString + \
strOperatingIncome + \
sepString + \
strCashFlowFromOperatingActivities + \
sepString + \
strReturnOnEquity + \
sepString + \
            strEarningsGrowth + \
sepString + \
strFreeCashFlowPerSales + \
sepString + \
strNumberOfShares + \
sepString + \
strFreeCashFlow + \
sepString + \
strDiscountedCashFlow + \
sepString + \
strPiotroskiFScore + \
sepString + \
strCurrentStockValue + \
sepString
# print to the console
print(string2Print)
def printDetailedAnalysis(self):
pass
class LevermannScore():
MOMENTUM_CONST = '-'
MOMENTUM_RISING = 'steigend'
MOMENTUM_FALLING = 'fallend'
REVERSAL_DEFAULT = 'nicht eindeutig'
REVERSAL_POSITIVE = 'Aktie schlaegt Index'
REVERSAL_NEGATIVE = 'Aktie schlechter als Index'
# Kommentare zum berechneten Score
COMMENT_BUY = 'kaufen'
COMMENT_HOLD = 'halten'
COMMENT_SELL = 'verkaufen'
COMMENT_DEFAULT = ''
def __init__(self,stock,index):
if not isinstance(stock,Stock):
raise TypeError('Object ' + str(stock) + ' is no instance of class Stock')
if not isinstance(index,StockIndex):
raise TypeError('Object ' + str(index) + ' is no instance of class StockIndex')
# Speichern des Objekts
self.stock = stock
self.stockIndex = index
self.Score = None
self.Comment = self.COMMENT_DEFAULT
# Werte fuer die Berechnung des Levermann-Scores
# Return on Equity (RoE), Eigenkapitalrenite letztes Jahr
self._ReturnOnEquity = None
# EBIT-Marge letztes Jahr
self.EbitMarge = None
# Eigenkapitalquote letztes Jahr
self.EKratio = None
# KGV 5 Jahre (letzte 3 Jahre, aktuelles Jahr und nรคchstes Jahr)
self.KGV_5y = None
# KGV aktuell
self.KGV_now = None
# Analystenmeinungen
self.recommendations = None
# Reaktion auf Quartalszahlen
self.quarterlyReaction = None
# Gewinnrevision
self.profitRevision = None
# Kurs heute gg. Kurs vor 6 Monaten
self.sharePriceRelative_6m = None
# Kurs heute gg. Kurs vor 1 Jahr
self.sharePriceRelative_1y = None
# Kursmomentum steigend
self.sharePriceMomentum = None
# Dreimonatsreversal
self.reversal_3m = None
# Gewinnwachstum
self.profitGrowth = None
# Berechnung des LevermannScores fuer die Aktien
self.calcScore()
def getScore(self):
if self.Score is None:
self.calcScore()
return self.Score
def calcScore(self):
# TODO calcLevermannScore implementieren
LevermannScore = 0
#print(self.stock.financialStatements)
# RoE (Return on Equity) Eigenkapitalrendite (Gewinn / Eigenkapital)
# Eigenkapital
equity = list(self.stock.financialStatements.loc[STOCKHOLDERS_EQUITY,:].copy())
# TODO Klรคrung ob "Net Income" (abzgl. Steuern etc.) oder "Operating Income" (Steuern noch nicht abgezogen)
# laut dem Wert auf "https://aktien.guide/levermann-strategie/Microsoft-US5949181045" muss der Eintrag aus "Operating Income" genutzt werden
# da sonst ein zu niedriger Prozentwert entsteht
Gewinn = list(self.stock.financialStatements.loc[OPERATING_INCOME,:].copy())
# Eigenkapitalrendite fuer jedes Jahr
annualyRoE = [gewinn/ek*100 for gewinn,ek in zip(Gewinn,equity)]
# Eigenkapitalrendite: Mittelwert ueber die Jahre
RoE = sum(annualyRoE)/len(annualyRoE)
self._ReturnOnEquity = RoE
# RoE > 20% -> +1, 10% < RoE < 20% -> 0, RoE < 10% -> -1
if (RoE > 20):
LevermannScore += 1
elif (RoE < 10):
LevermannScore -= 1
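        # Worked example with made-up numbers: operating income 12 and equity 50 in a
        # given year yield an annual RoE of 12/50*100 = 24%; an average above 20%
        # scores +1 at this step.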
# EBIT-Marge (EBIT / Umsatz)
# EBIT
EBIT = list(self.stock.financialStatements.loc[EBIT,:].copy())
if 0 in EBIT:
print('\n +++ EBIT enthรคlt mindestens einen Eintrag mit 0 +++')
print(self.stock.financialStatements.loc[EBIT,:])
print(self.stock.financialStatements)
print('\n')
# Umsatz
totalSales = list(self.stock.financialStatements.loc[REVENUES,:].copy())
# EBIT-Marge der letzten Jahre und Mittelwert
annualyEbitMarge = [ebit/umsatz*100 for ebit,umsatz in zip(EBIT,totalSales)]
EbitMarge = sum(annualyEbitMarge)/len(annualyEbitMarge)
self.EbitMarge = EbitMarge
# EBIT-Marge > 12% -> +1, 6% < EBIT-Marge < 12% -> 0, EBIT-Marge < 6% -> -1
if (EbitMarge > 12):
LevermannScore += 1
elif (EbitMarge < 6):
LevermannScore -= 1
# EKQ Eigenkapitalquote
# Eigenkapital (schon vorhanden, da bei RoE verwendet)
# Gesamtverbindlichkeiten + Eigenkapital
GK = list(self.stock.financialStatements.loc[ASSETS,:].copy())
# Eigenkapitalquote
annualyEKratio = [ek/gk*100 for ek,gk in zip(equity,GK)]
EKratio = sum(annualyEKratio)/len(annualyEKratio)
self.EKratio = EKratio
# EKQ > 25% -> +1, 15% < EKQ < 25% -> 0, EKQ < 15% -> -1
if (EKratio > 25):
LevermannScore += 1
elif (EKratio < 15):
LevermannScore -= 1
# KGV aktuelles Jahr
#
currentYear = int(datetime.datetime.utcnow().strftime('%Y'))
nextYear = currentYear+1
# Geschaetztes EPS
EPS_estimates = self.stock.estimates.loc[self.stock.EARNINGS_PER_SHARE,:].copy()
# Geschaetztes EPS fuer das aktuelle Jahr
# Datum finden
dateKey_this_year = [d for d in EPS_estimates.index.values if str(currentYear) in d]
dateKey_next_year = [d for d in EPS_estimates.index.values if str(nextYear) in d]
EPS_est_this_year = EPS_estimates.loc[dateKey_this_year[0]]
EPS_est_next_year = EPS_estimates.loc[dateKey_next_year[0]]
KGV = self.stock.getBasicDataItem(self.stock.MARKET_PRICE)/EPS_est_this_year
self.KGV_now = KGV
# 0 < KGV < 12 -> +1, 12 < KGV < 16 -> 0, KGV < 0, KGV > 16 -> -1
        if (0 < KGV < 12):
            LevermannScore += 1
        elif (KGV < 0) or (KGV > 16):
            LevermannScore -= 1
# KGV 5 Jahre (letzten 3 Jahre, aktuelles Jahr, nรคchstes Jahr)
# EPS der letzten Jahre auslesen und von alt zu neu sortieren
EPSdf = self.stock.financialStatements.loc['dilutedEPS',:].copy()
# Die Eintraege absteigend nach dem Datum auslesen
EPS = [EPSdf.loc[date] for date in sorted(EPSdf.index,reverse=True)]
# alt zu neu
EPS.reverse()
# EPS-Schaetzung fuer aktuelles und naechstes Jahr anhaengen
EPS.append(EPS_est_this_year)
EPS.append(EPS_est_next_year)
# nur die letzten 5 Eintraege
if len(EPS) > 5:
EPS = EPS[-5:]
# NaN-Werte durch den Mittelwert ersetzen
if ('NaN' in str(EPS)) or ('nan' in str(EPS)):
            EPS_wo_nan = [eps for eps in EPS if not np.isnan(eps)]
import numpy as np
from pint import UnitRegistry
u = UnitRegistry()
Q_ = u.Quantity
frequenz = Q_(np.array([101, 210, 300, 400, 502, 600, 700, 800, 904, 1001]), 'kHz')
I_one_seven = Q_(np.array([0, 29, 31, 53, 93, 105, 128]), 'mA')
I_eight_ten_1 = Q_(np.array([126, 140, 148]), 'mA')
import matplotlib.pyplot as plt
from scipy.stats import logistic
import statsmodels.formula.api as smf
import pandas as pd
import numpy as np
def get_panel_estimates(estimator, df):
assert estimator in ["naive", "diff"]
subset = df.loc[(slice(None), 10), :]
if estimator == "naive":
rslt = smf.ols(formula="Y ~ D", data=subset).fit()
elif estimator == "diff":
subset.loc[(slice(None), slice(None)), "S"] = subset["Y"] - subset["Y_8"]
rslt = smf.ols(formula="S ~ D ", data=subset).fit()
return rslt
def get_propensity_score(selection, o, u, additional_effect, y0):
if selection == "baseline":
idx = -3.8 + o + u
elif selection == "self-selection on gains":
idx = -7.3 + o + u + 5 * additional_effect
elif selection == "self-selection on pretest":
idx = -3.8 + o + u + 0.05 * (y0[0] - 98)
else:
raise NotImplementedError
return np.exp(idx) / (1 + np.exp(idx))
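# Note (informal): the expression above is the standard logistic transform, so the same
# propensity could equivalently be computed as scipy.stats.logistic.cdf(idx) using the
# `logistic` import at the top of this script.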
def get_sample_panel_demonstration(num_agents, selection, trajectory):
assert trajectory in ["parallel", "divergent"]
columns = ["Y", "D", "O", "X", "E", "U", "Y_1", "Y_0", "Y_8"]
index = list()
for i in range(num_agents):
for j in [8, 9, 10]:
index.append((i, j))
index = pd.MultiIndex.from_tuples(index, names=("Identifier", "Grade"))
df = pd.DataFrame(columns=columns, index=index)
df.loc[(slice(None), 8), "D"] = 0
for i in range(num_agents):
o, u, x, e = get_covariates()
# We first sample the outcomes in the control state.
y0 = list()
for level in [98, 99, 100]:
rslt = level + o + u + x + e + np.random.normal(scale=np.sqrt(10))
y0.append(rslt)
# Sampling the effects of treatment
baseline_effect = np.random.normal(loc=9, scale=1)
additional_effect = np.random.normal(loc=0, scale=1)
# The propensity score governs the attributes of selection. This is where the selection
# on gains or the pretreatment variable is taking place.
p = get_propensity_score(selection, o, u, additional_effect, y0)
        d = np.random.choice([1, 0], p=[p, 1 - p])
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Mar 22 12:00:37 2020
@author: tianyu
"""
import numpy as np
import pandas as pd
import scipy.sparse as sp
import torch
from sklearn.preprocessing import Normalizer
import math
from torch.autograd import Variable
import torch.nn.functional as F
import torch.utils.data as Data
from sklearn.metrics.pairwise import euclidean_distances
import os
from sklearn import preprocessing
from sklearn import linear_model
def encode_onehot(labels):
classes = set(labels)
classes_dict = {c: np.identity(len(classes))[i, :] for i, c in
enumerate(classes)}
labels_onehot = np.array(list(map(classes_dict.get, labels)),
dtype=np.int32)
return labels_onehot
#path = '/Users/tianyu/Google Drive/fasttext/gcn/pygcn-master/data/cora/'
#dataset = 'cora'
def high_var_dfdata_gene(data, num, gene = None, ind=False):
dat = np.asarray(data)
datavar = np.var(dat, axis = 1)*(-1)
ind_maxvar = np.argsort(datavar) #small --> big
if gene is None and ind is False:
return data.iloc[ind_maxvar[:num]]
if ind:
return data.iloc[ind_maxvar[:num]], ind_maxvar[:num]
ind_gene = data.index.values[ind_maxvar[:num]]
return data.iloc[ind_maxvar[:num]],gene.loc[ind_gene]
def high_var_dfdata(data, num, gene = None, ind=False):
dat = np.asarray(data)
datavar = np.var(dat, axis = 1)*(-1)
ind_maxvar = np.argsort(datavar)
gene_ind = ind_maxvar[:num]
# np.random.shuffle(gene_ind)
if gene is None and ind is False:
return data.iloc[ind_maxvar[:num]]
if ind:
return data.iloc[gene_ind], gene_ind
return data.iloc[gene_ind],gene.iloc[gene_ind]
def high_var_npdata(data, num, gene = None, ind=False): #data: gene*cell
dat = np.asarray(data)
datavar = np.var(dat, axis = 1)*(-1)
ind_maxvar = np.argsort(datavar)
gene_ind = ind_maxvar[:num]
# geneind2 = np.random.choice(ind_maxvar[num//2:], size = num//2, replace = False)
# gene_ind = np.concatenate((gene_ind, geneind2))
#np.random.shuffle(gene_ind)
if gene is None and ind is False:
return data[gene_ind]
if ind:
return data[gene_ind],gene_ind
return data[gene_ind],gene.iloc[gene_ind]
def high_tfIdf_npdata(data,tfIdf, num, gene = None, ind=False):
dat = np.asarray(data)
datavar = np.var(tfIdf, axis = 1)*(-1)
ind_maxvar = np.argsort(datavar)
gene_ind = ind_maxvar[:num]
np.random.shuffle(gene_ind)
if gene is None and ind is False:
return data[gene_ind]
if ind:
return data[gene_ind],gene_ind
return data[gene_ind],gene.iloc[gene_ind]
def high_expr_dfdata(data, num, gene = None, ind=False):
dat = np.asarray(data)
datavar = np.sum(dat, axis = 1)*(-1)
ind_maxvar = np.argsort(datavar)
gene_ind = ind_maxvar[:num]
# np.random.shuffle(gene_ind)
if gene is None and ind is False:
return data.iloc[gene_ind]
if ind:
return data.iloc[gene_ind], gene_ind
return data.iloc[gene_ind],gene.iloc[gene_ind]
def high_expr_npdata(data, num, gene = None, ind=False):
dat = np.asarray(data)
datavar = np.sum(dat, axis = 1)*(-1)
ind_maxvar = np.argsort(datavar)
gene_ind = ind_maxvar[:num]
# np.random.shuffle(gene_ind)
if gene is None and ind is False:
return data[gene_ind]
if ind:
return data[gene_ind],gene_ind
return data[gene_ind],gene.iloc[gene_ind]
def get_rank_gene(OutputDir, dataset):
gene = pd.read_csv(OutputDir+dataset+'/rank_genes_dropouts_'+dataset+'.csv')
return gene
def rank_gene_dropouts(data, OutputDir, dataset):
# data: n_cell * n_gene
genes = np.zeros([np.shape(data)[1],1], dtype = '>U10')
train = pd.DataFrame(data)
train.columns = np.arange(len(train.columns))
# rank genes training set
dropout = (train == 0).sum(axis='rows') # n_gene * 1
dropout = (dropout / train.shape[0]) * 100
mean = train.mean(axis='rows') # n_gene * 1
notzero = np.where((np.array(mean) > 0) & (np.array(dropout) > 0))[0]
zero = np.where(~((np.array(mean) > 0) & (np.array(dropout) > 0)))[0]
train_notzero = train.iloc[:,notzero]
train_zero = train.iloc[:,zero]
zero_genes = train_zero.columns
dropout = dropout.iloc[notzero]
mean = mean.iloc[notzero]
dropout = np.log2(np.array(dropout)).reshape(-1,1)
mean = np.array(mean).reshape(-1,1)
reg = linear_model.LinearRegression()
reg.fit(mean,dropout)
residuals = dropout - reg.predict(mean)
residuals = pd.Series(np.array(residuals).ravel(),index=train_notzero.columns) # n_gene * 1
residuals = residuals.sort_values(ascending=False)
sorted_genes = residuals.index
sorted_genes = sorted_genes.append(zero_genes)
genes[:,0] = sorted_genes.values
genes = pd.DataFrame(genes)
genes.to_csv(OutputDir + dataset + "/rank_genes_dropouts_" + dataset + ".csv", index = False)
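# Hedged usage sketch (hypothetical shapes and paths): for a cell-by-gene matrix `counts`
# of shape (n_cells, n_genes), rank_gene_dropouts(counts, './results/', 'usoskin')
# regresses the log2 dropout rate on mean expression and writes
# './results/usoskin/rank_genes_dropouts_usoskin.csv' with genes ordered from the largest
# positive residual (high dropout for their mean expression) downwards.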
def data_noise(data): # data is samples*genes
for i in range(data.shape[0]):
#drop_index = np.random.choice(train_data.shape[1], 500, replace=False)
#train_data[i, drop_index] = 0
target_dims = data.shape[1]
noise = np.random.rand(target_dims)/10.0
data[i] = data[i] + noise
return data
def norm_max(data):
data = np.asarray(data)
max_data = np.max([np.absolute(np.min(data)), np.max(data)])
data = data/max_data
return data
def findDuplicated(df):
df = df.T
idx = df.index.str.upper()
filter1 = idx.duplicated(keep = 'first')
print('duplicated rows:',np.where(filter1 == True)[0])
indd = np.where(filter1 == False)[0]
df = df.iloc[indd]
return df.T
# In[]:
def load_labels(path, dataset):
labels = pd.read_csv(os.path.join(path + dataset) +'/Labels.csv',index_col = None)
labels.columns = ['V1']
class_mapping = {label: idx for idx, label in enumerate(np.unique(labels['V1']))}
labels['V1'] = labels['V1'].map(class_mapping)
del class_mapping
labels = np.asarray(labels).reshape(-1)
return labels
def load_usoskin(path = '/Users/tianyu/google drive/fasttext/imputation/', dataset='usoskin', net='String'):
# path = os.path.join('/Users',user,'google drive/fasttext/imputation')
data = pd.read_csv(os.path.join(path, dataset, 'data_13776.csv'), index_col = 0)
# adj = sp.load_npz(os.path.join(path, dataset, 'adj13776.npz'))
print(data.shape)
adj = sp.load_npz(os.path.join(path + dataset) + '/adj'+ net + dataset + '_'+str(13776)+'.npz')
print(adj.shape)
labels = pd.read_csv(path +'/' +dataset +'/data_labels.csv',index_col = 0)
class_mapping = {label: idx for idx, label in enumerate(np.unique(labels['V1']))}
labels['V1'] = labels['V1'].map(class_mapping)
del class_mapping
labels = np.asarray(labels).reshape(-1)
return adj, np.asarray(data), labels
def load_kolod(path = '/Users/tianyu/google drive/fasttext/imputation/', dataset='kolod', net='pcc'):
# path = os.path.join('/Users',user,'google drive/fasttext/imputation')
data = pd.read_csv(os.path.join(path, dataset, 'kolod.csv'), index_col = 0)
# adj = sp.load_npz(os.path.join(path, dataset, 'adj13776.npz'))
print(data.shape)
adj = np.corrcoef(np.asarray(data))
#adj[np.where(adj < 0.3)] = 0
labels = pd.read_csv(path +'/' +dataset +'/kolod_labels.csv',index_col = 0)
    class_mapping = {label: idx for idx, label in enumerate(np.unique(labels['V1']))}
# Copyright 2018 <NAME>. All rights reserved.
#
# Licensed under the MIT license
"""
Script for panels of Figure S1 (Zebrafish network evolution example)
"""
import os
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as pl
import seaborn as sns
from Figure3 import mpath
import h5py
from global_defs import GlobalDefs
import core as c
from mo_types import MoTypes
from multiprocessing import Pool
from pandas import DataFrame
from scipy.stats import wilcoxon
# file definitions
base_path_zf = "./model_data/Adam_1e-4/sepInput_mixTrain/"
paths_512_zf = [f + '/' for f in os.listdir(base_path_zf) if "_3m512_" in f]
def playback_response_helper(mo_type, model_path, stimulus, std, no_pad_size, nreps):
mdata = c.ModelData(model_path)
gpn_wn = mo_type.network_model()
gpn_wn.load(mdata.ModelDefinition, mdata.LastCheckpoint)
wna = mo_type.wn_sim(std, gpn_wn, t_preferred=GlobalDefs.tPreferred)
ev_path = model_path + '/evolve/generation_weights.npy'
ev_weights = np.load(ev_path)
w = np.mean(ev_weights[-1, :, :], 0)
wna.bf_weights = w
wna.eval_every = 2 # fast evaluation to keep up with stimulus fluctuations
traces = []
for r in range(nreps):
bt = wna.compute_openloop_behavior(stimulus)[0].astype(float)
traces.append(bt[-no_pad_size:])
traces = np.vstack(traces)
p_move = np.mean(traces > 0, 0) # probability of selecting a movement bout (p_bout weights plus pred. control)
p_bout = np.mean(traces > -1, 0) # any behavior selected - purely under control of p_bout weights
# compute magnitude by using expected values for straight and turn bouts
traces[traces < 1] = np.nan
traces[traces < 2] = 0
traces[traces > 1] = 30
mag = np.nanmean(traces, 0)
return p_move, p_bout, mag
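# Informal summary: every repetition draws one stochastic behaviour trace from the trained
# network; p_move and p_bout are per-frame probabilities of selecting a movement bout and
# of selecting any behaviour across the nreps runs, and mag recodes straight vs. turn
# bouts with the assumed magnitudes 0 and 30 before averaging, ignoring frames in which
# no bout was selected.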
if __name__ == "__main__":
save_folder = "./DataFigures/FigureS1/"
if not os.path.exists(save_folder):
os.makedirs(save_folder)
sns.reset_orig()
mpl.rcParams['pdf.fonttype'] = 42
# Example evolution on one network
p = mpath(base_path_zf, paths_512_zf[0])
evol_p = p + "/evolve/"
errors = np.load(evol_p + "generation_errors.npy")
weights = np.load(evol_p + "generation_weights.npy")
# Panel: Error progression
fig, ax = pl.subplots()
ax.errorbar(np.arange(50), np.mean(errors, 1), np.std(errors, 1), linestyle='None', marker='o', color="C1")
ax.errorbar(49, np.mean(errors, 1)[49], np.std(errors, 1)[49], linestyle='None', marker='o', color="C0")
ax.errorbar(7, np.mean(errors, 1)[7], np.std(errors, 1)[7], linestyle='None', marker='o', color=(.5, .5, .5))
ax.set_xlabel("Generation")
ax.set_ylabel("Navigation error [C]")
sns.despine(fig, ax)
fig.savefig(save_folder + "network_0_evolveError.pdf", type="pdf")
# Panel: Pairwise weight correlations
corr_0 = []
corr_7 = []
corr_49 = []
for i in range(512):
for j in range(512):
if i < j:
corr_0.append(np.corrcoef(weights[0, i, :], weights[0, j, :])[0, 1])
corr_7.append(np.corrcoef(weights[7, i, :], weights[7, j, :])[0, 1])
corr_49.append(np.corrcoef(weights[49, i, :], weights[49, j, :])[0, 1])
fig, ax = pl.subplots()
sns.kdeplot(corr_0, ax=ax, color="C1")
sns.kdeplot(corr_7, ax=ax, color=(.5, .5, .5))
sns.kdeplot(corr_49, ax=ax, color="C0")
ax.set_xlabel("Pairwise weight vector correlations")
ax.set_ylabel("Density")
sns.despine(fig, ax)
fig.savefig(save_folder + "network_0_evolveWeightCorrs.pdf", type="pdf")
# Panel: Example weight matrices
fig, axes = pl.subplots(ncols=4)
sns.heatmap(weights[0, :, :], vmin=-3, vmax=3, center=0, cbar=False, cmap="RdBu_r", ax=axes[0], xticklabels=False,
yticklabels=False, rasterized=True)
sns.heatmap(weights[7, :, :], vmin=-3, vmax=3, center=0, cbar=False, cmap="RdBu_r", ax=axes[1], xticklabels=False,
yticklabels=False, rasterized=True)
sns.heatmap(weights[49, :, :], vmin=-3, vmax=3, center=0, cbar=True, cmap="RdBu_r", ax=axes[2], xticklabels=False,
yticklabels=False, cbar_ax=axes[3], rasterized=True)
axes[0].set_ylabel("Generation weight vectors")
for a in axes[:-1]:
a.set_xlabel("Weights")
fig.savefig(save_folder + "network_0_evolveWeights.pdf", type="pdf")
# Panel: WN Playback stimulus response comparison
# load zebrafish data
dfile = h5py.File("fish_wn_playback_data.hdf5", 'r')
padded_temp_input = np.array(dfile["padded_temp_input"]) # in C - needs to be standardized for network!
model_framerate = np.array(dfile["model_framerate"])[0]
fish_bout_freq = np.array(dfile["fish_bout_freq"])
fish_bout_freq_se = np.array(dfile["fish_bout_freq_se"])
fish_mags = np.array(dfile["mags_by_fish"])
dfile.close()
pd_stim_seconds = padded_temp_input.size // model_framerate
stim_seconds = fish_bout_freq.size // model_framerate
# create time vectors for padded and non-padded data
model_time = np.linspace(0, stim_seconds, fish_bout_freq.size)
net_time = np.linspace(0, stim_seconds, stim_seconds*GlobalDefs.frame_rate)
pd_model_time = np.linspace(0, pd_stim_seconds, padded_temp_input.size)
pd_net_time = np.linspace(0, pd_stim_seconds, pd_stim_seconds*GlobalDefs.frame_rate)
pti_network = np.interp(pd_net_time, pd_model_time, padded_temp_input) # simulation input with initial padding
std_zf = c.GradientData.load_standards("gd_training_data.hdf5")
net_in = (pti_network - std_zf.temp_mean) / std_zf.temp_std
mo = MoTypes(False)
n_reps = 5000 # run 5000 simulations per network
net_bout_freqs = []
net_p_bout_freqs = []
net_mags = []
process_pool = Pool(processes=4)
process_ar = []
for p in paths_512_zf:
m_path = mpath(base_path_zf, p)
process_ar.append(process_pool.apply_async(playback_response_helper,
[mo, m_path, net_in, std_zf, net_time.size, n_reps]))
for i, ar in enumerate(process_ar):
bf, pb, m = ar.get()
net_bout_freqs.append(bf * GlobalDefs.frame_rate)
net_p_bout_freqs.append(pb * GlobalDefs.frame_rate)
net_mags.append(m)
print("Process {0} of {1} completed".format(i+1, len(paths_512_zf)))
process_pool.close()
net_bout_freqs = np.vstack(net_bout_freqs)
net_p_bout_freqs = np.vstack(net_p_bout_freqs)
net_mags = np.vstack(net_mags)
# interpolate fish-data to net timebase
fish_bout_freq = np.interp(net_time, model_time, fish_bout_freq)
fish_bout_freq_se = np.interp(net_time, model_time, fish_bout_freq_se)
# interpolate magnitudes for each fish
fish_mags_interp = np.zeros((fish_mags.shape[0], fish_bout_freq.size))
for i, fm in enumerate(fish_mags):
fish_mags_interp[i, :] = np.interp(net_time, model_time, fm)
# plot stimulus
fig, ax = pl.subplots()
ax.plot(net_time[:-4], net_in[-net_time.size:-4]*std_zf.temp_std + std_zf.temp_mean, 'k')
ax.set_xticks([0, 5, 10, 15])
ax.set_xlabel("Time [s]")
ax.set_ylabel("Temperature [C]")
sns.despine(fig, ax)
fig.savefig(save_folder + "playback_stimulus.pdf", type="pdf")
# plot fish and model bout frequency across time
corr_bout = np.corrcoef(fish_bout_freq[:-4], np.nanmean(net_bout_freqs, 0)[:-4])[0, 1]
corr_p_bout = np.corrcoef(fish_bout_freq[:-4], np.nanmean(net_p_bout_freqs, 0)[:-4])[0, 1]
fig, ax = pl.subplots()
    sns.tsplot(net_bout_freqs[:, :-4], net_time[:-4], condition="r = {0}".format(np.round(corr_bout, 2)))
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Copyright 2020. Triad National Security, LLC. All rights reserved.
This program was produced under U.S. Government contract 89233218CNA000001 for Los Alamos
National Laboratory (LANL), which is operated by Triad National Security, LLC for the U.S.
Department of Energy/National Nuclear Security Administration. All rights in the program are
reserved by Triad National Security, LLC, and the U.S. Department of Energy/National Nuclear
Security Administration. The Government is granted for itself and others acting on its behalf a
nonexclusive, paid-up, irrevocable worldwide license in this material to reproduce, prepare
derivative works, distribute copies to the public, perform publicly and display publicly, and to permit
others to do so.
LANL software release C19112
Author: <NAME>
"""
import numpy as np
import scipy as sp
from scipy import stats
import matplotlib.pyplot as plt
from itertools import combinations, chain
from scipy.special import comb
from collections import namedtuple
from pathos.multiprocessing import ProcessingPool as Pool
import time
def abline(slope, intercept):
"""Plot a line from slope and intercept"""
axes = plt.gca()
x_vals = np.array(axes.get_xlim())
y_vals = intercept + slope * x_vals
plt.plot(x_vals, y_vals, '--', color='red')
pos = lambda a: (abs(a) + a) / 2 # same as max(0,a)
def const(signs, knots):
"""Get max value of BASS basis function, assuming 0-1 range of inputs"""
cc = np.prod(((signs + 1) / 2 - signs * knots))
if cc == 0:
return 1
return cc
def makeBasis(signs, vs, knots, xdata):
"""Make basis function using continuous variables"""
cc = const(signs, knots)
temp1 = pos(signs * (xdata[:, vs] - knots))
if len(signs) == 1:
return temp1 / cc
temp2 = np.prod(temp1, axis=1) / cc
return temp2
def normalize(x, bounds):
"""Normalize to 0-1 scale"""
return (x - bounds[:, 0]) / (bounds[:, 1] - bounds[:, 0])
def unnormalize(z, bounds):
"""Inverse of normalize"""
return z * (bounds[:, 1] - bounds[:, 0]) + bounds[:, 0]
def comb_index(n, k):
"""Get all combinations of indices from 0:n of length k"""
# https://stackoverflow.com/questions/16003217/n-d-version-of-itertools-combinations-in-numpy
count = comb(n, k, exact=True)
index = np.fromiter(chain.from_iterable(combinations(range(n), k)),
int, count=count * k)
return index.reshape(-1, k)
def dmwnchBass(z_vec, vars_use):
"""Multivariate Walenius' noncentral hypergeometric density function with some variables fixed"""
alpha = z_vec[vars_use - 1] / sum(np.delete(z_vec, vars_use))
j = len(alpha)
ss = 1 + (-1) ** j * 1 / (sum(alpha) + 1)
for i in range(j - 1):
idx = comb_index(j, i + 1)
temp = alpha[idx]
ss = ss + (-1) ** (i + 1) * sum(1 / (temp.sum(axis=1) + 1))
return ss
Qf = namedtuple('Qf', 'R bhat qf')
def getQf(XtX, Xty):
"""Get the quadratic form y'X solve(X'X) X'y, as well as least squares beta and cholesky of X'X"""
try:
R = sp.linalg.cholesky(XtX, lower=False) # might be a better way to do this with sp.linalg.cho_factor
except np.linalg.LinAlgError as e:
return None
dr = np.diag(R)
if len(dr) > 1:
if max(dr[1:]) / min(dr) > 1e3:
return None
bhat = sp.linalg.solve_triangular(R, sp.linalg.solve_triangular(R, Xty, trans=1))
qf = np.dot(bhat, Xty)
return Qf(R, bhat, qf)
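# Note (informal): with R the upper Cholesky factor of X'X, the two triangular solves
# above give the least-squares coefficients, and qf = bhat'X'y equals y'X(X'X)^{-1}X'y,
# the quadratic form used in the RJMCMC steps of BassState further down.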
def logProbChangeMod(n_int, vars_use, I_vec, z_vec, p, maxInt):
"""Get reversibility factor for RJMCMC acceptance ratio, and also prior"""
if n_int == 1:
out = (np.log(I_vec[n_int - 1]) - np.log(2 * p) # proposal
+ np.log(2 * p) + np.log(maxInt))
else:
x = np.zeros(p)
x[vars_use] = 1
lprob_vars_noReplace = np.log(dmwnchBass(z_vec, vars_use))
out = (np.log(I_vec[n_int - 1]) + lprob_vars_noReplace - n_int * np.log(2) # proposal
+ n_int * np.log(2) + np.log(comb(p, n_int)) + np.log(maxInt)) # prior
return out
CandidateBasis = namedtuple('CandidateBasis', 'basis n_int signs vs knots lbmcmp')
def genCandBasis(maxInt, I_vec, z_vec, p, xdata):
"""Generate a candidate basis for birth step, as well as the RJMCMC reversibility factor and prior"""
n_int = int(np.random.choice(range(maxInt), p=I_vec) + 1)
signs = np.random.choice([-1, 1], size=n_int, replace=True)
# knots = np.random.rand(n_int)
knots = np.zeros(n_int)
if n_int == 1:
vs = np.random.choice(p)
knots = np.random.choice(xdata[:, vs], size=1)
else:
vs = np.sort(np.random.choice(p, size=n_int, p=z_vec, replace=False))
for i in range(n_int):
knots[i] = np.random.choice(xdata[:, vs[i]], size=1)
basis = makeBasis(signs, vs, knots, xdata)
lbmcmp = logProbChangeMod(n_int, vs, I_vec, z_vec, p, maxInt)
return CandidateBasis(basis, n_int, signs, vs, knots, lbmcmp)
BasisChange = namedtuple('BasisChange', 'basis signs vs knots')
def genBasisChange(knots, signs, vs, tochange_int, xdata):
"""Generate a condidate basis for change step"""
knots_cand = knots.copy()
signs_cand = signs.copy()
signs_cand[tochange_int] = np.random.choice([-1, 1], size=1)
knots_cand[tochange_int] = np.random.choice(xdata[:, vs[tochange_int]], size=1) # np.random.rand(1)
basis = makeBasis(signs_cand, vs, knots_cand, xdata)
return BasisChange(basis, signs_cand, vs, knots_cand)
class BassPrior:
"""Structure to store prior"""
def __init__(self, maxInt, maxBasis, npart, g1, g2, s2_lower, h1, h2, a_tau, b_tau, w1, w2):
self.maxInt = maxInt
self.maxBasis = maxBasis
self.npart = npart
self.g1 = g1
self.g2 = g2
self.s2_lower = s2_lower
self.h1 = h1
self.h2 = h2
self.a_tau = a_tau
self.b_tau = b_tau
self.w1 = w1
self.w2 = w2
return
class BassData:
"""Structure to store data"""
def __init__(self, xx, y):
self.xx_orig = xx
self.y = y
self.ssy = sum(y * y)
self.n = len(xx)
self.p = len(xx[0])
self.bounds = np.zeros([self.p, 2])
for i in range(self.p):
self.bounds[i, 0] = np.min(xx[:, i])
self.bounds[i, 1] = np.max(xx[:, i])
self.xx = normalize(self.xx_orig, self.bounds)
return
Samples = namedtuple('Samples', 's2 lam tau nbasis nbasis_models n_int signs vs knots beta')
Sample = namedtuple('Sample', 's2 lam tau nbasis nbasis_models n_int signs vs knots beta')
class BassState:
"""The current state of the RJMCMC chain, with methods for getting the log posterior and for updating the state"""
def __init__(self, data, prior):
self.data = data
self.prior = prior
self.s2 = 1.
self.nbasis = 0
self.tau = 1.
self.s2_rate = 1.
self.R = 1
self.lam = 1
self.I_star = np.ones(prior.maxInt) * prior.w1
self.I_vec = self.I_star / np.sum(self.I_star)
self.z_star = np.ones(data.p) * prior.w2
self.z_vec = self.z_star / np.sum(self.z_star)
self.basis = np.ones([data.n, 1])
self.nc = 1
self.knots = np.zeros([prior.maxBasis, prior.maxInt])
self.signs = np.zeros([prior.maxBasis, prior.maxInt],
dtype=int) # could do "bool_", but would have to transform 0 to -1
self.vs = np.zeros([prior.maxBasis, prior.maxInt], dtype=int)
self.n_int = np.zeros([prior.maxBasis], dtype=int)
self.Xty = np.zeros(prior.maxBasis + 2)
self.Xty[0] = np.sum(data.y)
self.XtX = np.zeros([prior.maxBasis + 2, prior.maxBasis + 2])
self.XtX[0, 0] = data.n
self.R = np.array([[np.sqrt(data.n)]]) # np.linalg.cholesky(self.XtX[0, 0])
self.R_inv_t = np.array([[1 / np.sqrt(data.n)]])
self.bhat = np.mean(data.y)
self.qf = pow(np.sqrt(data.n) * np.mean(data.y), 2)
self.count = np.zeros(3)
self.cmod = False # has the state changed since the last write (i.e., has a birth, death, or change been accepted)?
return
def log_post(self): # needs updating
"""get current log posterior"""
lp = (
- (self.s2_rate + self.prior.g2) / self.s2
- (self.data.n / 2 + 1 + (self.nbasis + 1) / 2 + self.prior.g1) * np.log(self.s2)
+ np.sum(np.log(abs(np.diag(self.R)))) # .5*determinant of XtX
+ (self.prior.a_tau + (self.nbasis + 1) / 2 - 1) * np.log(self.tau) - self.prior.a_tau * self.tau
- (self.nbasis + 1) / 2 * np.log(2 * np.pi)
+ (self.prior.h1 + self.nbasis - 1) * np.log(self.lam) - self.lam * (self.prior.h2 + 1)
) # curr$nbasis-1 because poisson prior is excluding intercept (for curr$nbasis instead of curr$nbasis+1)
# -lfactorial(curr$nbasis) # added, but maybe cancels with prior
self.lp = lp
return
def update(self):
"""Update the current state using a RJMCMC step (and Gibbs steps at the end of this function)"""
move_type = np.random.choice([1, 2, 3])
if self.nbasis == 0:
move_type = 1
if self.nbasis == self.prior.maxBasis:
            move_type = np.random.choice(np.array([2, 3]))
"""
Mix between a Feedforward Neural Network and Restricted Boltzmann Machine.
Inputs and Outputs are all consolidated and training is a 1-step Gibbs
sample where the error is the difference between the Input/Output feed
and their reconstruction after they bounced back (Gibbs' sample)
"""
# TODO: Profile and optimize performance
import time
import copy
import numpy as np
import sklearn.metrics as mt
from sklearn.preprocessing import MinMaxScaler
__version__ = '1.0'
UNCLAMPED_VALUE = 0.0 # DONE: Tested 0 and 0.5
def relu(input_value, minimum=0, maximum=1):
"""
Apply RELU activation function with option to clip values
:param input_value: Numpy array with input values
:param minimum: Minimum value to clip (default 0)
:param maximum: Maximum value to clip (default 1)
:return: Numpy array with RELU function applied
"""
return np.clip(input_value, minimum, maximum)
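# Quick example (assumed inputs):
#     >>> relu(np.array([-0.5, 0.3, 1.7]))
#     array([0. , 0.3, 1. ])
# i.e. values are clipped into the [0, 1] range used throughout the network.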
class MirNet(object):
"""
Mirror Network that consolidates input and output together
Training is done similarly to Boltzmann machine with
a 1-step Gibbs' sampling (deterministic network)
"""
def __init__(self, hidden_layers=(100,), type='classifier', seed=None,
verbose=False):
"""
Build MirNet basic structure. Loosely structured like Sklean MLP
:param hidden_layers: Tuple describing the architecture
and number of neurons present in each layer
:param type: Network type: 'classifier' (default), 'regressor'
:param seed: Random seed to initialize the network
:param verbose: Verbose mode
"""
if type == "classifier":
self.loss = mt.log_loss
self.activation = relu
elif type == "regressor":
self.loss = mt.mean_squared_error
self.activation = relu
else:
raise Exception("Type %s not recognized" % type)
self.type = type
np.random.seed(seed)
self.epochs = 0
self.hidden_layers = hidden_layers
self.weights = []
self.scaler = MinMaxScaler() # TESTED: self.scaler = StandardScaler()
self.verbose = verbose
def sample(self, input_value, weights):
"""
Calculate 1-step Gibbs sample of the input data vector
:param input_value: Numpy array with values for all first level neurons (including output)
:param weights: List of Numpy arrays with network weights
:return: Two Numpy arrays with neurons value calculated for the positive and negative phase
"""
# Positive phase, from input to last layer
pos_phase = [input_value]
for w in weights:
neurons_input = np.dot(pos_phase[-1], w)
neurons_output = self.activation(neurons_input)
pos_phase = pos_phase + [neurons_output]
# Negative phase, from last to input layer
neg_phase = [pos_phase[-1]]
for w in weights[::-1]:
neurons_input = np.dot(neg_phase[0], np.transpose(w))
neurons_output = self.activation(neurons_input)
neg_phase = [neurons_output] + neg_phase
return pos_phase, neg_phase
def predict(self, input_array, weights=None):
"""
Predict output given a certain input to the network.
If not all columns are passed (values "unclamped") only missing fields are returned
:param input_array: Numpy array with values for first level neurons
:param weights: Network weights to be used (by default network weights are used)
:return: Numpy array with the values of the neurons (input/output) calculated
"""
if weights is None:
weights = self.weights
input_neurons = input_array.shape[1]
total_neurons = weights[0].shape[0]
samples = len(input_array)
padding = np.full((samples, total_neurons - input_neurons),
UNCLAMPED_VALUE)
X = self.scaler.transform(np.hstack((input_array, padding)))
fneurons, bneurons = self.sample(X, weights)
if input_neurons == total_neurons:
return self.scaler.inverse_transform(bneurons[0])
else:
return self.scaler.inverse_transform(bneurons[0])[:, input_neurons:] # Return only the fields not passed
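    # Hedged usage sketch (hypothetical data): after net.fit(X, Y), calling
    # net.predict(X_new) clamps only the input columns, pads the remaining neurons with
    # UNCLAMPED_VALUE, and returns the reconstructed output columns from the reflected
    # (negative-phase) pass.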
def early_stop(self, epoch, patience, tolerance, start_time, max_time, max_epochs):
"""
Checks on different training condition to determine whether the
training should stop
:param epoch: Current training epoch
:param patience: Epochs by which is required an improvement of <tolerance> to avoid early stopping
:param tolerance: Improvement required during <patience> epochs to avoid early stopping
:param start_time: Time when training started
:param max_time: Maximum time (in seconds) for training
:param max_epochs: Maximum number of epochs for training
:return: Boolean on whether the training should stop
"""
if epoch > patience:
best_old_loss = min(self.losses_test[:-patience])
best_new_loss = min(self.losses_test[-patience:])
if best_new_loss > best_old_loss * (1 - tolerance):
print("Early Stop! No %f improvement over last %i epochs"
% (tolerance, patience))
return True
if max_time > 0 and (time.time() - start_time) >= max_time:
print("Early Stop! Time limit of %i seconds reached"
% max_time)
return True
if max_epochs > 0 and epoch >= max_epochs:
print("Early Stop! Limit of %i epochs reached"
% max_epochs)
return True
return False
def fit(self, X, Y=None, sgd_init=100, rate=0.001, m=0.9,
X_test=None, Y_test=None, test_fraction=0.1, sgd_annealing=0.5,
tolerance=0.01, patience=10, max_epochs=100, max_time=0):
"""
Uses a standard SKLearn "fit" interface with Input and Output values and feeds it
into the train_data method where input and outputs are undifferentiated
:param X: input values
:param Y: output or target values (not required)
:param sgd_init: starting value for mini batch_size size
:param rate: starting value for learning rate
:param m: momentum
:param X_test: Input values for test_data
:param Y_test: Output values for test_data (not required)
:param test_fraction: Fraction of X to be used for test_data (if X_test is None)
:param sgd_annealing: Batch size reduction at each epoch where test_data loss does not improve by tolerance
:param tolerance: Minimum improvement during <patience> epochs to avoid early stopping
:param patience: Number of epochs for which is required an improvement of <tolerance> to avoid early stopping
:param max_epochs: Maximum number of epochs for training
:param max_time: Maximum time (in seconds) for training
"""
start_time = time.time()
        data = self.scaler.fit_transform(np.hstack((X, Y)))
import unittest
import numpy as np
from sklearn.datasets import (
load_breast_cancer,
load_iris
)
from msitrees._core import (
gini_impurity,
gini_information_gain,
entropy,
get_class_and_proba,
classif_best_split
)
class TestGiniImpurity(unittest.TestCase):
def test_input_type_list(self):
try:
gini_impurity([0, 0])
except TypeError:
self.fail('Exception on allowed input type - list')
def test_input_type_tuple(self):
try:
gini_impurity((0, 0))
except TypeError:
self.fail('Exception on allowed input type - tuple')
def test_input_type_numpy(self):
try:
gini_impurity(np.array([0, 0]))
except TypeError:
self.fail('Exception on allowed input type - np.ndarray')
def test_input_int(self):
with self.assertRaises(ValueError):
gini_impurity(0)
def test_input_other(self):
with self.assertRaises(TypeError):
gini_impurity('foo')
with self.assertRaises(TypeError):
gini_impurity({'foo': 1})
def test_input_wrong_shape(self):
with self.assertRaises(ValueError):
gini_impurity(np.array([[1, 0], [1, 0]]))
def test_input_empty_list(self):
with self.assertRaises(ValueError):
gini_impurity([])
def test_input_empty_array(self):
with self.assertRaises(ValueError):
gini_impurity(np.array([]))
def test_binary_max_impurity(self):
        arr = np.array([1, 0, 1, 0])
"""
Tests for tools
Author: <NAME>
License: Simplified-BSD
"""
from __future__ import division, absolute_import, print_function
import numpy as np
import pandas as pd
from scipy.linalg import solve_discrete_lyapunov
from statsmodels.tsa.statespace import tools
from statsmodels.tsa.api import acovf
# from .results import results_sarimax
from numpy.testing import (
assert_allclose, assert_equal, assert_array_equal, assert_almost_equal,
assert_raises
)
class TestCompanionMatrix(object):
cases = [
(2, np.array([[0,1],[0,0]])),
([1,-1,-2], np.array([[1,1],
[2,0]])),
([1,-1,-2,-3], np.array([[1,1,0],
[2,0,1],
[3,0,0]])),
([1,-np.array([[1,2],[3,4]]),-np.array([[5,6],[7,8]])],
np.array([[1,2,5,6],
[3,4,7,8],
[1,0,0,0],
[0,1,0,0]]).T)
]
def test_cases(self):
for polynomial, result in self.cases:
assert_equal(tools.companion_matrix(polynomial), result)
class TestDiff(object):
x = np.arange(10)
cases = [
# diff = 1
([1,2,3], 1, None, 1, [1, 1]),
# diff = 2
(x, 2, None, 1, [0]*8),
# diff = 1, seasonal_diff=1, k_seasons=4
(x, 1, 1, 4, [0]*5),
(x**2, 1, 1, 4, [8]*5),
(x**3, 1, 1, 4, [60, 84, 108, 132, 156]),
# diff = 1, seasonal_diff=2, k_seasons=2
(x, 1, 2, 2, [0]*5),
(x**2, 1, 2, 2, [0]*5),
(x**3, 1, 2, 2, [24]*5),
(x**4, 1, 2, 2, [240, 336, 432, 528, 624]),
]
def test_cases(self):
# Basic cases
for series, diff, seasonal_diff, k_seasons, result in self.cases:
# Test numpy array
x = tools.diff(series, diff, seasonal_diff, k_seasons)
assert_almost_equal(x, result)
# Test as Pandas Series
series = pd.Series(series)
# Rewrite to test as n-dimensional array
series = np.c_[series, series]
result = np.c_[result, result]
# Test Numpy array
x = tools.diff(series, diff, seasonal_diff, k_seasons)
assert_almost_equal(x, result)
# Test as Pandas Dataframe
series = pd.DataFrame(series)
x = tools.diff(series, diff, seasonal_diff, k_seasons)
assert_almost_equal(x, result)
class TestSolveDiscreteLyapunov(object):
def solve_dicrete_lyapunov_direct(self, a, q, complex_step=False):
# This is the discrete Lyapunov solver as "real function of real
# variables": the difference between this and the usual, complex,
# version is that in the Kronecker product the second argument is
# *not* conjugated here.
if not complex_step:
lhs = np.kron(a, a.conj())
lhs = np.eye(lhs.shape[0]) - lhs
x = np.linalg.solve(lhs, q.flatten())
else:
lhs = np.kron(a, a)
lhs = np.eye(lhs.shape[0]) - lhs
x = np.linalg.solve(lhs, q.flatten())
return np.reshape(x, q.shape)
def test_univariate(self):
# Real case
a = np.array([[0.5]])
q = np.array([[10.]])
actual = tools.solve_discrete_lyapunov(a, q)
desired = solve_discrete_lyapunov(a, q)
assert_allclose(actual, desired)
# Complex case (where the Lyapunov equation is taken as a complex
# function)
a = np.array([[0.5+1j]])
q = np.array([[10.]])
actual = tools.solve_discrete_lyapunov(a, q)
desired = solve_discrete_lyapunov(a, q)
assert_allclose(actual, desired)
# Complex case (where the Lyapunov equation is taken as a real
# function)
a = np.array([[0.5+1j]])
q = np.array([[10.]])
actual = tools.solve_discrete_lyapunov(a, q, complex_step=True)
desired = self.solve_dicrete_lyapunov_direct(a, q, complex_step=True)
assert_allclose(actual, desired)
def test_multivariate(self):
# Real case
a = tools.companion_matrix([1, -0.4, 0.5])
q = np.diag([10., 5.])
actual = tools.solve_discrete_lyapunov(a, q)
desired = solve_discrete_lyapunov(a, q)
assert_allclose(actual, desired)
# Complex case (where the Lyapunov equation is taken as a complex
# function)
a = tools.companion_matrix([1, -0.4+0.1j, 0.5])
q = np.diag([10., 5.])
actual = tools.solve_discrete_lyapunov(a, q, complex_step=False)
desired = self.solve_dicrete_lyapunov_direct(a, q, complex_step=False)
assert_allclose(actual, desired)
# Complex case (where the Lyapunov equation is taken as a real
# function)
a = tools.companion_matrix([1, -0.4+0.1j, 0.5])
q = np.diag([10., 5.])
actual = tools.solve_discrete_lyapunov(a, q, complex_step=True)
desired = self.solve_dicrete_lyapunov_direct(a, q, complex_step=True)
assert_allclose(actual, desired)
class TestConcat(object):
x = np.arange(10)
valid = [
(((1,2,3),(4,)), (1,2,3,4)),
(((1,2,3),[4]), (1,2,3,4)),
(([1,2,3],np.r_[4]), (1,2,3,4)),
((np.r_[1,2,3],pd.Series([4])), 0, True, (1,2,3,4)),
((pd.Series([1,2,3]),pd.Series([4])), 0, True, (1,2,3,4)),
((np.c_[x[:2],x[:2]], np.c_[x[2:3],x[2:3]]), np.c_[x[:3],x[:3]]),
((np.c_[x[:2],x[:2]].T, np.c_[x[2:3],x[2:3]].T), 1, np.c_[x[:3],x[:3]].T),
((pd.DataFrame(np.c_[x[:2],x[:2]]), np.c_[x[2:3],x[2:3]]), 0, True, np.c_[x[:3],x[:3]]),
]
invalid = [
(((1,2,3), pd.Series([4])), ValueError),
(((1,2,3), np.array([[1,2]])), ValueError)
]
def test_valid(self):
for args in self.valid:
assert_array_equal(tools.concat(*args[:-1]), args[-1])
def test_invalid(self):
for args in self.invalid:
assert_raises(args[-1], tools.concat, *args[:-1])
class TestIsInvertible(object):
cases = [
([1, -0.5], True),
([1, 1-1e-9], True),
([1, 1], False),
([1, 0.9,0.1], True),
(np.array([1,0.9,0.1]), True),
(pd.Series([1,0.9,0.1]), True)
]
def test_cases(self):
for polynomial, invertible in self.cases:
assert_equal(tools.is_invertible(polynomial), invertible)
class TestConstrainStationaryUnivariate(object):
cases = [
(np.array([2.]), -2./((1+2.**2)**0.5))
]
def test_cases(self):
for unconstrained, constrained in self.cases:
result = tools.constrain_stationary_univariate(unconstrained)
assert_equal(result, constrained)
class TestUnconstrainStationaryUnivariate(object):
cases = [
(np.array([-2./((1+2.**2)**0.5)]), np.array([2.]))
]
def test_cases(self):
for constrained, unconstrained in self.cases:
result = tools.unconstrain_stationary_univariate(constrained)
assert_allclose(result, unconstrained)
class TestStationaryUnivariate(object):
# Test that the constraint and unconstraint functions are inverses
constrained_cases = [
np.array([0]), np.array([0.1]), np.array([-0.5]), np.array([0.999])]
unconstrained_cases = [
np.array([10.]), np.array([-40.42]), np.array([0.123])]
def test_cases(self):
for constrained in self.constrained_cases:
unconstrained = tools.unconstrain_stationary_univariate(constrained)
reconstrained = tools.constrain_stationary_univariate(unconstrained)
assert_allclose(reconstrained, constrained)
for unconstrained in self.unconstrained_cases:
constrained = tools.constrain_stationary_univariate(unconstrained)
reunconstrained = tools.unconstrain_stationary_univariate(constrained)
assert_allclose(reunconstrained, unconstrained)
class TestValidateMatrixShape(object):
# name, shape, nrows, ncols, nobs
valid = [
('TEST', (5,2), 5, 2, None),
('TEST', (5,2), 5, 2, 10),
('TEST', (5,2,10), 5, 2, 10),
]
invalid = [
('TEST', (5,), 5, None, None),
('TEST', (5,1,1,1), 5, 1, None),
('TEST', (5,2), 10, 2, None),
('TEST', (5,2), 5, 1, None),
('TEST', (5,2,10), 5, 2, None),
('TEST', (5,2,10), 5, 2, 5),
]
def test_valid_cases(self):
for args in self.valid:
# Just testing that no exception is raised
tools.validate_matrix_shape(*args)
def test_invalid_cases(self):
for args in self.invalid:
assert_raises(
ValueError, tools.validate_matrix_shape, *args
)
class TestValidateVectorShape(object):
# name, shape, nrows, ncols, nobs
valid = [
('TEST', (5,), 5, None),
('TEST', (5,), 5, 10),
('TEST', (5,10), 5, 10),
]
invalid = [
('TEST', (5,2,10), 5, 10),
('TEST', (5,), 10, None),
('TEST', (5,10), 5, None),
('TEST', (5,10), 5, 5),
]
def test_valid_cases(self):
for args in self.valid:
# Just testing that no exception is raised
tools.validate_vector_shape(*args)
def test_invalid_cases(self):
for args in self.invalid:
assert_raises(
ValueError, tools.validate_vector_shape, *args
)
def test_multivariate_acovf():
_acovf = tools._compute_multivariate_acovf_from_coefficients
# Test for a VAR(1) process. From Lutkepohl (2007), pages 27-28.
# See (2.1.14) for Phi_1, (2.1.33) for Sigma_u, and (2.1.34) for Gamma_0
Sigma_u = np.array([[2.25, 0, 0],
[0, 1.0, 0.5],
[0, 0.5, 0.74]])
Phi_1 = np.array([[0.5, 0, 0],
[0.1, 0.1, 0.3],
[0, 0.2, 0.3]])
Gamma_0 = np.array([[3.0, 0.161, 0.019],
[0.161, 1.172, 0.674],
[0.019, 0.674, 0.954]])
assert_allclose(_acovf([Phi_1], Sigma_u)[0], Gamma_0, atol=1e-3)
# Test for a VAR(2) process. From Lutkepohl (2007), pages 28-29
# See (2.1.40) for Phi_1, Phi_2, (2.1.14) for Sigma_u, and (2.1.42) for
# Gamma_0, Gamma_1
Sigma_u = np.diag([0.09, 0.04])
Phi_1 = np.array([[0.5, 0.1],
[0.4, 0.5]])
Phi_2 = np.array([[0, 0],
[0.25, 0]])
Gamma_0 = np.array([[0.131, 0.066],
[0.066, 0.181]])
Gamma_1 = np.array([[0.072, 0.051],
[0.104, 0.143]])
Gamma_2 = np.array([[0.046, 0.040],
[0.113, 0.108]])
Gamma_3 = np.array([[0.035, 0.031],
[0.093, 0.083]])
assert_allclose(
_acovf([Phi_1, Phi_2], Sigma_u, maxlag=0),
[Gamma_0], atol=1e-3)
assert_allclose(
_acovf([Phi_1, Phi_2], Sigma_u, maxlag=1),
[Gamma_0, Gamma_1], atol=1e-3)
assert_allclose(
_acovf([Phi_1, Phi_2], Sigma_u),
[Gamma_0, Gamma_1], atol=1e-3)
assert_allclose(
_acovf([Phi_1, Phi_2], Sigma_u, maxlag=2),
[Gamma_0, Gamma_1, Gamma_2], atol=1e-3)
assert_allclose(
_acovf([Phi_1, Phi_2], Sigma_u, maxlag=3),
[Gamma_0, Gamma_1, Gamma_2, Gamma_3], atol=1e-3)
# Test sample acovf in the univariate case against sm.tsa.acovf
x = np.arange(20)*1.0
assert_allclose(
np.squeeze(tools._compute_multivariate_sample_acovf(x, maxlag=4)),
acovf(x)[:5])
def test_multivariate_pacf():
# Test sample acovf in the univariate case against sm.tsa.acovf
np.random.seed(1234)
x = np.arange(10000)
y = np.random.normal(size=10000)
# Note: could make this test more precise with higher nobs, but no need to
assert_allclose(
tools._compute_multivariate_sample_pacf(np.c_[x, y], maxlag=1)[0],
np.diag([1, 0]), atol=1e-2)
class TestConstrainStationaryMultivariate(object):
cases = [
# This is the same test as the univariate case above, except notice
# the sign difference; this is an array input / output
(np.array([[2.]]), np.eye(1), np.array([[2./((1+2.**2)**0.5)]])),
# Same as above, but now a list input / output
([np.array([[2.]])], np.eye(1), [np.array([[2./((1+2.**2)**0.5)]])])
]
eigval_cases = [
[np.array([[0]])],
[np.array([[100]]), np.array([[50]])],
[np.array([[30, 1], [-23, 15]]), np.array([[10, .3], [.5, -30]])],
]
def test_cases(self):
# Test against known results
for unconstrained, error_variance, constrained in self.cases:
result = tools.constrain_stationary_multivariate(
unconstrained, error_variance)
assert_allclose(result[0], constrained)
# Test that the constrained results correspond to companion matrices
# with eigenvalues less than 1 in modulus
for unconstrained in self.eigval_cases:
if type(unconstrained) == list:
cov = np.eye(unconstrained[0].shape[0])
else:
cov = np.eye(unconstrained.shape[0])
constrained, _ = tools.constrain_stationary_multivariate(unconstrained, cov)
companion = tools.companion_matrix(
[1] + [-constrained[i] for i in range(len(constrained))]
).T
assert_equal(np.max(np.abs(np.linalg.eigvals(companion))) < 1, True)
class TestUnconstrainStationaryMultivariate(object):
cases = [
# This is the same test as the univariate case above, except notice
# the sign difference; this is an array input / output
(np.array([[2./((1+2.**2)**0.5)]]), np.eye(1), np.array([[2.]])),
# Same as above, but now a list input / output
([np.array([[2./((1+2.**2)**0.5)]])], np.eye(1), [np.array([[2.]])])
]
def test_cases(self):
for constrained, error_variance, unconstrained in self.cases:
result = tools.unconstrain_stationary_multivariate(
constrained, error_variance)
assert_allclose(result[0], unconstrained)
class TestStationaryMultivariate(object):
# Test that the constraint and unconstraint functions are inverses
constrained_cases = [
np.array([[0]]), np.array([[0.1]]), np.array([[-0.5]]), np.array([[0.999]]),
[np.array([[0]])],
np.array([[0.8, -0.2]]),
[np.array([[0.8]]), np.array([[-0.2]])],
[np.array([[0.3, 0.01], [-0.23, 0.15]]), np.array([[0.1, 0.03], [0.05, -0.3]])],
np.array([[0.3, 0.01, 0.1, 0.03], [-0.23, 0.15, 0.05, -0.3]])
]
unconstrained_cases = [
np.array([[0]]), np.array([[-40.42]]), np.array([[0.123]]),
[np.array([[0]])],
np.array([[100, 50]]),
[np.array([[100]]), np.array([[50]])],
[np.array([[30, 1], [-23, 15]]), np.array([[10, .3], [.5, -30]])],
np.array([[30, 1, 10, .3], [-23, 15, .5, -30]])
]
def test_cases(self):
for constrained in self.constrained_cases:
if type(constrained) == list:
cov = np.eye(constrained[0].shape[0])
else:
cov = np.eye(constrained.shape[0])
unconstrained, _ = tools.unconstrain_stationary_multivariate(constrained, cov)
reconstrained, _ = tools.constrain_stationary_multivariate(unconstrained, cov)
assert_allclose(reconstrained, constrained)
for unconstrained in self.unconstrained_cases:
if type(unconstrained) == list:
cov = np.eye(unconstrained[0].shape[0])
else:
cov = np.eye(unconstrained.shape[0])
constrained, _ = tools.constrain_stationary_multivariate(unconstrained, cov)
reunconstrained, _ = tools.unconstrain_stationary_multivariate(constrained, cov)
# Note: low tolerance comes from last example in unconstrained_cases,
# but is not a real problem
assert_allclose(reunconstrained, unconstrained, atol=1e-4)
def test_reorder_matrix_rows():
nobs = 5
k_endog = 3
k_states = 3
missing = np.zeros((k_endog, nobs))
given = np.zeros((k_endog, k_states, nobs))
given[:, :, :] = np.array([[11, 12, 13],
[21, 22, 23],
[31, 32, 33]])[:, :, np.newaxis]
desired = given.copy()
missing[0, 0] = 1
given[:, :, 0] = np.array([[21, 22, 23],
[31, 32, 33],
[0, 0, 0]])
desired[0, :, 0] = 0
missing[:2, 1] = 1
given[:, :, 1] = np.array([[31, 32, 33],
[0, 0, 0],
[0, 0, 0]])
desired[:2, :, 1] = 0
missing[0, 2] = 1
missing[2, 2] = 1
given[:, :, 2] = np.array([[21, 22, 23],
[0, 0, 0],
[0, 0, 0]])
desired[0, :, 2] = 0
desired[2, :, 2] = 0
missing[1, 3] = 1
given[:, :, 3] = np.array([[11, 12, 13],
[31, 32, 33],
[0, 0, 0]])
desired[1, :, 3] = 0
missing[2, 4] = 1
given[:, :, 4] = np.array([[11, 12, 13],
[21, 22, 23],
[0, 0, 0]])
desired[2, :, 4] = 0
actual = np.asfortranarray(given)
missing = np.asfortranarray(missing.astype(np.int32))
tools.reorder_missing_matrix(actual, missing, True, False, False, inplace=True)
assert_equal(actual, desired)
def test_reorder_matrix_cols():
nobs = 5
k_endog = 3
k_states = 3
missing = np.zeros((k_endog, nobs))
given = np.zeros((k_endog, k_states, nobs))
given[:, :, :] = np.array([[11, 12, 13],
[21, 22, 23],
[31, 32, 33]])[:, :, np.newaxis]
desired = given.copy()
missing[0, 0] = 1
given[:, :, :] = np.array([[12, 13, 0],
[22, 23, 0],
[32, 33, 0]])[:, :, np.newaxis]
desired[:, 0, 0] = 0
missing[:2, 1] = 1
given[:, :, 1] = np.array([[13, 0, 0],
[23, 0, 0],
[33, 0, 0]])
desired[:, :2, 1] = 0
missing[0, 2] = 1
missing[2, 2] = 1
given[:, :, 2] = np.array([[12, 0, 0],
[22, 0, 0],
[32, 0, 0]])
desired[:, 0, 2] = 0
desired[:, 2, 2] = 0
missing[1, 3] = 1
given[:, :, 3] = np.array([[11, 13, 0],
[21, 23, 0],
[31, 33, 0]])
desired[:, 1, 3] = 0
missing[2, 4] = 1
given[:, :, 4] = np.array([[11, 12, 0],
[21, 22, 0],
[31, 32, 0]])
desired[:, 2, 4] = 0
actual = np.asfortranarray(given)
missing = np.asfortranarray(missing.astype(np.int32))
tools.reorder_missing_matrix(actual, missing, False, True, False, inplace=True)
assert_equal(actual[:, :, 4], desired[:, :, 4])
def test_reorder_submatrix():
nobs = 5
k_endog = 3
missing = np.zeros((k_endog, nobs))
missing[0, 0] = 1
missing[:2, 1] = 1
missing[0, 2] = 1
missing[2, 2] = 1
missing[1, 3] = 1
missing[2, 4] = 1
given = np.zeros((k_endog, k_endog, nobs))
given[:, :, :] = np.array([[11, 12, 13],
[21, 22, 23],
[31, 32, 33]])[:, :, np.newaxis]
desired = given.copy()
given[:, :, 0] = np.array([[22, 23, 0],
[32, 33, 0],
[0, 0, 0]])
desired[0, :, 0] = 0
desired[:, 0, 0] = 0
given[:, :, 1] = np.array([[33, 0, 0],
[0, 0, 0],
[0, 0, 0]])
desired[:2, :, 1] = 0
desired[:, :2, 1] = 0
given[:, :, 2] = np.array([[22, 0, 0],
[0, 0, 0],
[0, 0, 0]])
desired[0, :, 2] = 0
desired[:, 0, 2] = 0
desired[2, :, 2] = 0
desired[:, 2, 2] = 0
given[:, :, 3] = np.array([[11, 13, 0],
[31, 33, 0],
[0, 0, 0]])
desired[1, :, 3] = 0
desired[:, 1, 3] = 0
given[:, :, 4] = np.array([[11, 12, 0],
[21, 22, 0],
[0, 0, 0]])
desired[2, :, 4] = 0
desired[:, 2, 4] = 0
actual = np.asfortranarray(given)
missing = np.asfortranarray(missing.astype(np.int32))
tools.reorder_missing_matrix(actual, missing, True, True, False, inplace=True)
assert_equal(actual, desired)
def test_reorder_diagonal_submatrix():
nobs = 5
k_endog = 3
missing = np.zeros((k_endog, nobs))
missing[0, 0] = 1
missing[:2, 1] = 1
missing[0, 2] = 1
missing[2, 2] = 1
missing[1, 3] = 1
missing[2, 4] = 1
given = np.zeros((k_endog, k_endog, nobs))
given[:, :, :] = np.array([[11, 00, 00],
[00, 22, 00],
[00, 00, 33]])[:, :, np.newaxis]
desired = given.copy()
given[:, :, 0] = np.array([[22, 00, 0],
[00, 33, 0],
[0, 0, 0]])
desired[0, :, 0] = 0
desired[:, 0, 0] = 0
given[:, :, 1] = np.array([[33, 0, 0],
[0, 0, 0],
[0, 0, 0]])
desired[:2, :, 1] = 0
desired[:, :2, 1] = 0
given[:, :, 2] = np.array([[22, 0, 0],
[0, 0, 0],
[0, 0, 0]])
desired[0, :, 2] = 0
desired[:, 0, 2] = 0
desired[2, :, 2] = 0
desired[:, 2, 2] = 0
given[:, :, 3] = np.array([[11, 00, 0],
[00, 33, 0],
[0, 0, 0]])
desired[1, :, 3] = 0
desired[:, 1, 3] = 0
given[:, :, 4] = np.array([[11, 00, 0],
[00, 22, 0],
[0, 0, 0]])
desired[2, :, 4] = 0
desired[:, 2, 4] = 0
actual = np.asfortranarray(given.copy())
missing = np.asfortranarray(missing.astype(np.int32))
tools.reorder_missing_matrix(actual, missing, True, True, False, inplace=True)
assert_equal(actual, desired)
actual = np.asfortranarray(given.copy())
tools.reorder_missing_matrix(actual, missing, True, True, True, inplace=True)
assert_equal(actual, desired)
def test_reorder_vector():
nobs = 5
k_endog = 3
missing = np.zeros((k_endog, nobs))
missing[0, 0] = 1
missing[:2, 1] = 1
missing[0, 2] = 1
missing[2, 2] = 1
missing[1, 3] = 1
missing[2, 4] = 1
given = np.zeros((k_endog, nobs))
given[:, :] = np.array([1, 2, 3])[:, np.newaxis]
desired = given.copy()
given[:, 0] = [2, 3, 0]
desired[:, 0] = [0, 2, 3]
given[:, 1] = [3, 0, 0]
desired[:, 1] = [0, 0, 3]
given[:, 2] = [2, 0, 0]
desired[:, 2] = [0, 2, 0]
given[:, 3] = [1, 3, 0]
desired[:, 3] = [1, 0, 3]
given[:, 4] = [1, 2, 0]
desired[:, 4] = [1, 2, 0]
actual = np.asfortranarray(given.copy())
missing = np.asfortranarray(missing.astype(np.int32))
tools.reorder_missing_vector(actual, missing, inplace=True)
assert_equal(actual, desired)
def test_copy_missing_matrix_rows():
nobs = 5
k_endog = 3
k_states = 2
missing = np.zeros((k_endog, nobs))
missing[0, 0] = 1
missing[:2, 1] = 1
missing[0, 2] = 1
missing[2, 2] = 1
missing[1, 3] = 1
missing[2, 4] = 1
A = np.zeros((k_endog, k_states, nobs))
for t in range(nobs):
n = int(k_endog - np.sum(missing[:, t]))
A[:n, :, t] = 1.
B = np.zeros((k_endog, k_states, nobs), order='F')
missing = np.asfortranarray(missing.astype(np.int32))
tools.copy_missing_matrix(A, B, missing, True, False, False, inplace=True)
assert_equal(B, A)
def test_copy_missing_matrix_cols():
nobs = 5
k_endog = 3
k_states = 2
missing = np.zeros((k_endog, nobs))
missing[0, 0] = 1
missing[:2, 1] = 1
missing[0, 2] = 1
missing[2, 2] = 1
missing[1, 3] = 1
missing[2, 4] = 1
A = np.zeros((k_states, k_endog, nobs))
for t in range(nobs):
n = int(k_endog - np.sum(missing[:, t]))
A[:, :n, t] = 1.
B = np.zeros((k_states, k_endog, nobs), order='F')
missing = np.asfortranarray(missing.astype(np.int32))
tools.copy_missing_matrix(A, B, missing, False, True, False, inplace=True)
assert_equal(B, A)
def test_copy_missing_submatrix():
nobs = 5
k_endog = 3
missing = np.zeros((k_endog, nobs))
missing[0, 0] = 1
missing[:2, 1] = 1
missing[0, 2] = 1
missing[2, 2] = 1
missing[1, 3] = 1
missing[2, 4] = 1
A = np.zeros((k_endog, k_endog, nobs))
for t in range(nobs):
n = int(k_endog - np.sum(missing[:, t]))
A[:n, :n, t] = 1.
B = np.zeros((k_endog, k_endog, nobs), order='F')
missing = np.asfortranarray(missing.astype(np.int32))
tools.copy_missing_matrix(A, B, missing, True, True, False, inplace=True)
assert_equal(B, A)
def test_copy_missing_diagonal_submatrix():
nobs = 5
k_endog = 3
missing = np.zeros((k_endog, nobs))
missing[0, 0] = 1
missing[:2, 1] = 1
missing[0, 2] = 1
missing[2, 2] = 1
missing[1, 3] = 1
missing[2, 4] = 1
    A = np.zeros((k_endog, k_endog, nobs))
# TODO: Unfinished algorithm
# Double DQN with Experience Replay using Rank method (heapq).
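# Double DQN: the online q_network selects the greedy next action while the
# periodically-synced target_q_network evaluates it, decoupling action
# selection from value estimation to reduce the overestimation bias of
# vanilla DQN. The heapq-based memory below is meant to approximate
# rank-based prioritized experience replay (still marked TODO above).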
import gym
import matplotlib as mpl
mpl.use('TkAgg')
import matplotlib.pyplot as plt
import numpy as np
import random
import tensorflow as tf
tf.compat.v1.disable_eager_execution()
from tensorflow.keras.layers import Dense, Input, Activation
from tensorflow.keras import Model
from tensorflow.keras.optimizers import Adam
import heapq
from itertools import count
DISPLAY_REWARD_THRESHOLD = 400  # renders environment if total episode reward is greater than this threshold
RENDER = True # rendering wastes time
class DDQN_PER():
def __init__(self):
self.gamma = 0.95 # reward discount
self.learning_rate = 0.001
self.memory_size = 10000
self.epsilon = 0.8 # Exploration rate
self.epsilon_min = 0.05
self.epsilon_decay = 0.995
self.memory = []
self.batch_size = 32
self.rewards = []
self.update_frequency = 50
self.counter = count() # In case multiple memories with same priority
self.replay_period = 50
self.edge = 1e-10 # In case priority is 0
def build_network(self):
input = Input(shape=(self.num_states,))
layer = Dense(24, activation='relu')(input)
layer = Dense(self.num_actions)(layer)
output = Activation('linear')(layer)
model = Model(input, output)
adam = Adam(lr=self.learning_rate)
model.compile(loss='mse', optimizer=adam)
return model
# Save <s, a ,r, s'> of each step
def store_memory(self, priority, state, action, reward, next_state, done):
if len(self.memory) > self.memory_size:
heapq.heappop(self.memory)
heapq.heappush(self.memory, [priority, next(self.counter), state, action, reward, next_state, done])
def train(self):
# a = [[1, 2], [30]]
# b = [[2], [20]]
# c = [[1, 1], [40]]
# h = []
# heapq.heapify(h)
# heapq.heappush(h, c)
# heapq.heappush(h, a)
# heapq.heappush(h, b)
# print(h)
# print(heapq.heappop(h))
# print(h)
# Setup environment first
env = gym.make('CartPole-v1')
env.seed(1)
env = env.unwrapped
self.num_actions = env.action_space.n
self.num_states = env.observation_space.shape[0]
# Initialize q_network and target_q_network
q_network = self.build_network()
target_q_network = self.build_network()
target_q_network.set_weights(q_network.get_weights())
max_episode = 100000
max_step = 10000
reward_history = []
episode_history = []
# Populate memory first
state = env.reset()
print("Warming up...")
while len(self.memory) < self.batch_size:
action = env.action_space.sample()
next_state, reward, done, info = env.step(action)
self.store_memory(1, state, action, reward, next_state, done)
if done:
state = env.reset()
print("Warm up complete.")
for episode_count in range(max_episode):
state = env.reset()
current_step = 0
while True:
#env.render()
# Network predict
q_values = q_network.predict(np.reshape(state, (1, self.num_states))).ravel()
# Decide if exploring or not
if np.random.rand() >= self.epsilon:
action = np.argmax(q_values)
else:
action = random.randrange(self.num_actions)
# Perform action
next_state, reward, done, info = env.step(action)
# Calculate priority
next_q_values = target_q_network.predict(np.reshape(next_state, (1, self.num_states))).ravel()
                next_action = np.argmax(q_network.predict(np.reshape(next_state, (1, self.num_states))).ravel())
                td_error = reward + self.gamma * (1 - done) * next_q_values[next_action] - q_values[action]
priority = abs(td_error) + self.edge
# Store transition
self.store_memory(priority, state, action, reward, next_state, done)
self.rewards.append(reward)
# Decrease exploration
if self.epsilon > self.epsilon_min:
self.epsilon *= self.epsilon_decay
# Sample minibatch from memory based on their priority
priorities = self.memory
minibatch = random.sample(self.memory, self.batch_size)
# Transform the minibatch for processing
minibatch = list(zip(*minibatch))
# Calculate all td_targets for current minibatch
priorities, _, states, actions, rewards, next_states, dones = minibatch
batch_q_values = q_network.predict_on_batch(np.array(states))
batch_next_q_values = target_q_network.predict_on_batch(np.array(next_states))
next_actions = np.argmax(q_network.predict_on_batch(np.array(next_states)), axis=1)
                td_targets = batch_q_values.copy()
for i in range(self.batch_size):
td_targets[i][actions[i]] = rewards[i] + self.gamma * (1 - dones[i]) * batch_next_q_values[i][next_actions[i]]
# Train network
                q_network.train_on_batch(np.array(states), td_targets)
import numpy as np
import cv2
import torch
import torch.nn as nn
from torch.autograd import Variable
from torch.nn import functional as F
class VanillaGrad(object):
def __init__(self, pretrained_model, cuda=False):
self.pretrained_model = pretrained_model
        self.features = list(pretrained_model.children())
self.cuda = cuda
#self.pretrained_model.eval()
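    # Returns the saliency map: the gradient of the selected (or top-scoring)
    # class score with respect to the input image, as in Simonyan et al. (2014).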
def __call__(self, x, index=None):
output = self.pretrained_model(x)
if index is None:
index = np.argmax(output.data.cpu().numpy())
one_hot = np.zeros((1, output.size()[-1]), dtype=np.float32)
one_hot[0][index] = 1
if self.cuda:
one_hot = Variable(torch.from_numpy(one_hot).cuda(), requires_grad=True)
else:
one_hot = Variable(torch.from_numpy(one_hot), requires_grad=True)
one_hot = torch.sum(one_hot * output)
one_hot.backward(retain_graph=True)
grad = x.grad.data.cpu().numpy()
grad = grad[0, :, :, :]
return grad
class SmoothGrad(VanillaGrad):
def __init__(self, pretrained_model, cuda=False, stdev_spread=0.15,
n_samples=25, magnitude=True):
super(SmoothGrad, self).__init__(pretrained_model, cuda)
"""
self.pretrained_model = pretrained_model
self.features = pretrained_model.features
self.cuda = cuda
self.pretrained_model.eval()
"""
self.stdev_spread = stdev_spread
self.n_samples = n_samples
        self.magnitude = magnitude
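    # SmoothGrad: average the (optionally squared) vanilla gradients over
    # n_samples noisy copies of the input; stdev_spread scales the Gaussian
    # noise relative to the input's dynamic range.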
def __call__(self, x, index=None):
x = x.data.cpu().numpy()
stdev = self.stdev_spread * (np.max(x) - np.min(x))
total_gradients = np.zeros_like(x)
for i in range(self.n_samples):
noise = np.random.normal(0, stdev, x.shape).astype(np.float32)
x_plus_noise = x + noise
if self.cuda:
x_plus_noise = Variable(torch.from_numpy(x_plus_noise).cuda(), requires_grad=True)
else:
x_plus_noise = Variable(torch.from_numpy(x_plus_noise), requires_grad=True)
output = self.pretrained_model(x_plus_noise)
if index is None:
index = np.argmax(output.data.cpu().numpy())
one_hot = np.zeros((1, output.size()[-1]), dtype=np.float32)
one_hot[0][index] = 1
if self.cuda:
one_hot = Variable(torch.from_numpy(one_hot).cuda(), requires_grad=True)
else:
one_hot = Variable(torch.from_numpy(one_hot), requires_grad=True)
one_hot = torch.sum(one_hot * output)
if x_plus_noise.grad is not None:
x_plus_noise.grad.data.zero_()
one_hot.backward(retain_graph=True)
grad = x_plus_noise.grad.data.cpu().numpy()
            if self.magnitude:
total_gradients += (grad * grad)
else:
total_gradients += grad
#if self.visdom:
avg_gradients = total_gradients[0, :, :, :] / self.n_samples
return avg_gradients
class GuidedBackpropReLU(torch.autograd.Function):
def __init__(self, inplace=False):
super(GuidedBackpropReLU, self).__init__()
self.inplace = inplace
def forward(self, input):
pos_mask = (input > 0).type_as(input)
output = torch.addcmul(
torch.zeros(input.size()).type_as(input),
input,
pos_mask)
self.save_for_backward(input, output)
return output
def backward(self, grad_output):
input, output = self.saved_tensors
pos_mask_1 = (input > 0).type_as(grad_output)
pos_mask_2 = (grad_output > 0).type_as(grad_output)
grad_input = torch.addcmul(
torch.zeros(input.size()).type_as(input),
torch.addcmul(
torch.zeros(input.size()).type_as(input), grad_output, pos_mask_1),
pos_mask_2)
return grad_input
def __repr__(self):
inplace_str = ', inplace' if self.inplace else ''
return self.__class__.__name__ + ' (' \
+ inplace_str + ')'
class GuidedBackpropGrad(VanillaGrad):
def __init__(self, pretrained_model, cuda=False):
super(GuidedBackpropGrad, self).__init__(pretrained_model, cuda)
        for idx, module in enumerate(self.features):
            if module.__class__.__name__.lower() == 'relu':
                self.features[idx] = GuidedBackpropReLU()
class GuidedBackpropSmoothGrad(SmoothGrad):
def __init__(self, pretrained_model, cuda=False, stdev_spread=.15, n_samples=25, magnitude=True):
super(GuidedBackpropSmoothGrad, self).__init__(
pretrained_model, cuda, stdev_spread, n_samples, magnitude)
for idx, module in enumerate(self.features):
            if module.__class__.__name__.lower() == 'relu':
self.features[idx] = GuidedBackpropReLU()
class FeatureExtractor(object):
def __init__(self, model, target_layers):
self.model = model
self.features = model.named_children()
self.target_layers = target_layers
self.gradients = []
self.outputs = []
def __call__(self, x):
target_activations, output = self.extract_features(x)
output = output.view(output.size(0), -1)
output = self.model.classifier(output)
return target_activations, output
def get_gradients(self):
return self.gradients
def save_gradient(self, grad):
self.gradients.append(grad)
def extract_features(self, x):
for name, module in self.features:
if name in self.target_layers:
module.register_hook(self.save_gradient)
module.register_forward_hook(lambda layer, _, output: self.outputs.append(output))
x = self.model(x)
return self.outputs, x
class GradCam(object):
def __init__(self, pretrained_model, target_layer_names, img_size, cuda):
self.pretrained_model = pretrained_model
self.cuda = cuda
if self.cuda:
self.pretrained_model.cuda()
self.pretrained_model.eval()
self.extractor = FeatureExtractor(self.pretrained_model, target_layer_names)
self.img_size = img_size
def __call__(self, x, index=None):
features, output = self.extractor(x)
if index is None:
index = np.argmax(output.data.cpu().numpy())
one_hot = np.zeros((1, output.size()[-1]), dtype=np.float32)
one_hot[0][index] = 1
one_hot = Variable(torch.from_numpy(one_hot), requires_grad=True)
if self.cuda:
one_hot = one_hot.cuda()
one_hot = torch.sum(one_hot * output)
self.pretrained_model.zero_grad()
        one_hot.backward(retain_graph=True)
grads = self.extractor.get_gradients()[-1].data.cpu().numpy()
target = features[-1].data.cpu().numpy()[0, :]
weights = np.mean(grads, axis=(2, 3))[0, :]
cam = np.ones(target.shape[1:], dtype=np.float32)
for i, w in enumerate(weights):
cam += w * target[i, :, :]
cam = np.maximum(cam, 0)
cam = cv2.resize(cam, (self.img_size, self.img_size))
        cam = cam - np.min(cam)
#=============================================================================
# Author: <NAME>
#-----------------------------------------------------------------------------
# OHT Analysis Geometry
#-----------------------------------------------------------------------------
# Creating OHT geometry using gmsh api. The first layer is created using gmsh
# api. The subsequent plies and cohesive layers are created using python loops.
# The cohesive elements are zero thickness elements with 8 nodes. 4-nodes from
# bottom and top are shared with plies respectively
#=============================================================================
# Geometry and Mesh controls|
#----------------------------
# <numMainbodyXElem> <numMainbodyXElem>
# <numClampXElem> โ---------โ โ---------โ
# ----------------------------------------------------------------------
# โ | | |\ /| | |
# | | | | \ / | | |
# | | | | \____/--|-โ <numCSElem> |
# | | | | / \ | | |
# Ly | | | \____/ | | |
# | | | | / | \ | | |
# | | | | / โ \ | | |
# | | | |<numCurveElem> | |
# โ ----------------------------------------------------------------------
# โ-------clp-------โ โ-------clp-------โ
# โ----------------------------- Lx ----------------------------------โ
#
#=============================================================================
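# A minimal usage sketch (illustrative values; the output directory and file
# names below are placeholders, not part of the original script):
#   create_geo(Rx=0.0024, file_dst="output/", filename="oht_sample",
#              numCurveElem=25, numClampXElem=4, numMainbodyXElem=10,
#              numCSElem=15)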
def create_geo(Rx=0.0024,file_dst="D:\\",filename='sample_temp',
onlypreview=0, save_mesh=0,meshname='mymesh',
preview_geom_mesh=0,log_flag=0, numCurveElem=25,
numClampXElem =4, numMainbodyXElem= 10, numCSElem = 15):
"""
Args:
Rx (float, optional): Longitudinal radius of elliptical hole.
Defaults to 0.0024.
file_dst (str, optional): Destination path of generated geometry file.
Defaults to "D:\".
filename (str, optional): Filename of generated geometry file.
Defaults to 'sample_temp'.
onlypreview (int, optional): Only previews the geometry in GMSH
application no saving. Defaults to 0.
save_mesh (int, optional): Flag to save the mesh (different from the
LS-Dyna geometry and mesh). Defaults to 0.
meshname (str, optional): Mesh name. Defaults to 'mymesh'.
preview_geom_mesh (int, optional): Previews the gemetry (and mesh) then
saves it. User needs to close preview window manually to proceed.
Defaults to 0.
log_flag (int, optional): Whethre to generate GMSH log or not.
Defaults to 0.
numCurveElem (int, optional): Mesh control- Number of elements on the
hole circumference (in each of the 4 sectors). Defaults to 25.
numClampXElem (int, optional): Mesh control- Number of element on
clamping area in x-direction. Defaults to 4.
numMainbodyXElem (int, optional): Mesh control- Number of element on
mainbody area in x-direction. Defaults to 10.
numCSElem (int, optional): Mesh control- Number of elements in cross
sectional part of notch mesh. Defaults to 15.
"""
# Importing utilities
import gmsh # install the api using => "pip install gmsh"
import numpy as np
np.set_printoptions(precision=20)
from time import process_time
try:from mt_x_mainKeywordString import mainFile
except:from utilities.mt_x_mainKeywordString import mainFile
import subprocess
import os
import time
start = process_time()
# Model geometry details
Lx = 0.240 # Length of geometry in x-direction
Ly = 0.0288 # Width of geometry in y-direction
Lz = 0.0019 # Thickness of geometry in z-direction
clp = 0.05 # Claming distance
numPly=12 # Number of composite material layers(plies)
LzPly = Lz/numPly # Thickness of each ply
numCohElem = numPly-1 # Number of cohesive element layers (numPly-1)
Rx=Rx # Radius of elliptical notch in x-direction
Ry=0.0024 # Radius of elliptical notch in y-direction
MeshSizeFactor = 1 # Resulatant = MeshSize * meshSizeFactor
os.makedirs(file_dst, exist_ok=True)
# lsprepost application location (if not in path)
lspp_loc = "C:\Program Files\LSTC\LS-PrePost 4.8\lsprepost4.8_x64.exe"
# starting gmsh
gmsh.initialize()
# logging gmsh geometry and mesh creation process
if log_flag:
gmsh.logger.start()
gmsh.model.add("mymodel")
gmsh.option.setNumber('Geometry.CopyMeshingMethod',1);
#=====================================================================
# Geometry creation
#=====================================================================
# Finding intersection point of a line passing through ellipse center and
# ellipse
a = Rx
b = Ry
    if Rx>Ry:
        m = Ry/Rx*(0.8) # slope of the line
    elif Rx==Ry:
        m = 1
    else:
        # m = 1
        m = Ry/Rx*2 # slope of the line
c = Ly/2 - (m * Lx/2)
h = Lx/2
k = Ly/2
phi = c - k
e1x1 = (((b*b*h)-a*a*m*phi+
a*b*np.sqrt((b*b)+(a*a*m*m)-(2*m*phi*h)-(phi*phi)-(m*m*h*h)))/
(a*a*m*m + b*b))
e1y1 = m*e1x1 + c
e1x2 = (((b*b*h) - a*a*m*phi -
a*b*np.sqrt(b*b + a*a*m*m - 2*m*phi*h - phi*phi - m*m*h*h))/
(a*a*m*m + b*b))
e1y2 = m*e1x2 + c
e2x1 = 0 + (Lx/2-Ly/2)
e2y1 = 0
e2x2 = Ly+(Lx/2-Ly/2)
e2y2 = Ly
# Creating points in the space
gmsh.model.occ.addPoint(0, 0, 0, 1.0) #1
gmsh.model.occ.addPoint(clp, 0, 0, 1.0) #2
gmsh.model.occ.addPoint(Lx-clp, 0, 0, 1.0) #3
gmsh.model.occ.addPoint(Lx, 0, 0, 1.0) #4
gmsh.model.occ.addPoint(0, Ly, 0, 1.0) #5
gmsh.model.occ.addPoint(clp, Ly, 0, 1.0) #6
gmsh.model.occ.addPoint(Lx-clp, Ly, 0, 1.0) #7
gmsh.model.occ.addPoint(Lx, Ly, 0, 1.0) #8
gmsh.model.occ.addPoint(e1x2, e1y2, 0, 1.0) #9
gmsh.model.occ.addPoint(e1x1, e1y2, 0, 1.0) #10
gmsh.model.occ.addPoint(e1x1, e1y1, 0, 1.0) #11
gmsh.model.occ.addPoint(e1x2, e1y1, 0, 1.0) #12
gmsh.model.occ.addPoint(e2x2, e2y2, 0, 1.0) #13
gmsh.model.occ.addPoint(e2x1, e2y2, 0, 1.0) #14
gmsh.model.occ.addPoint(e2x1, e2y1, 0, 1.0) #15
gmsh.model.occ.addPoint(e2x2, e2y1, 0, 1.0) #16
if Rx>=Ry:
gmsh.model.occ.addPoint(Lx/2, Ly, 0, 1.0) #17
else:
gmsh.model.occ.addPoint(0, Ly/2, 0, 1.0) #18
gmsh.model.occ.addPoint(clp, Ly/2, 0, 1.0) #19
gmsh.model.occ.addPoint(e2x1, Ly/2, 0, 1.0) #20
ClampXElem = []
MainbodyXElem = []
CSElem = []
CurveElem = []
Curve2 = []
# Creating lines by joining points
if Rx>=Ry:
CurveElem.append(gmsh.model.occ.addLine(1,5))
CurveElem.append(gmsh.model.occ.addLine(2,6))
CurveElem.append(gmsh.model.occ.addLine(3,7))
CurveElem.append(gmsh.model.occ.addLine(4,8))
CurveElem.append(gmsh.model.occ.addLine(14,15))
CurveElem.append(gmsh.model.occ.addLine(13,16))
CurveElem.append(gmsh.model.occ.addLine(15,16))
ClampXElem.append(gmsh.model.occ.addLine(1,2))
ClampXElem.append(gmsh.model.occ.addLine(3,4))
ClampXElem.append(gmsh.model.occ.addLine(5,6))
ClampXElem.append(gmsh.model.occ.addLine(7,8))
MainbodyXElem.append(gmsh.model.occ.addLine(2,15))
MainbodyXElem.append(gmsh.model.occ.addLine(16,3))
MainbodyXElem.append(gmsh.model.occ.addLine(6,14))
MainbodyXElem.append(gmsh.model.occ.addLine(13,7))
CSElem.append(gmsh.model.occ.addLine(12,14))
CSElem.append(gmsh.model.occ.addLine(9,15))
CSElem.append(gmsh.model.occ.addLine(10,16))
CSElem.append(gmsh.model.occ.addLine(11,13))
Curve2.append(gmsh.model.occ.addLine(14,17))
Curve2.append(gmsh.model.occ.addLine(17,13))
if Rx<Ry:
ClampXElem.append(gmsh.model.occ.addLine(1,2))
ClampXElem.append(gmsh.model.occ.addLine(3,4))
ClampXElem.append(gmsh.model.occ.addLine(5,6))
ClampXElem.append(gmsh.model.occ.addLine(7,8))
MainbodyXElem.append(gmsh.model.occ.addLine(2,15))
MainbodyXElem.append(gmsh.model.occ.addLine(16,3))
MainbodyXElem.append(gmsh.model.occ.addLine(6,14))
MainbodyXElem.append(gmsh.model.occ.addLine(13,7))
CSElem.append(gmsh.model.occ.addLine(12,14))
CSElem.append(gmsh.model.occ.addLine(9,15))
CSElem.append(gmsh.model.occ.addLine(10,16))
CSElem.append(gmsh.model.occ.addLine(11,13))
CurveElem.append(gmsh.model.occ.addLine(3,7))
CurveElem.append(gmsh.model.occ.addLine(4,8))
CurveElem.append(gmsh.model.occ.addLine(13,16))
CurveElem.append(gmsh.model.occ.addLine(13,14))
CurveElem.append(gmsh.model.occ.addLine(15,16))
Curve2.append(gmsh.model.occ.addLine(1,17))
Curve2.append(gmsh.model.occ.addLine(17,5))
Curve2.append(gmsh.model.occ.addLine(2,18))
Curve2.append(gmsh.model.occ.addLine(18,6))
Curve2.append(gmsh.model.occ.addLine(15,19))
Curve2.append(gmsh.model.occ.addLine(19,14))
gmsh.model.occ.synchronize()
# Creating ellipse
mellipse = np.pi/2
if Rx>=Ry:
ellipse = gmsh.model.occ.addEllipse(Lx/2,Ly/2,0,Rx,Ry,angle1=mellipse,
angle2=2*np.pi+mellipse)
else:
ellipse = gmsh.model.occ.addEllipse(Lx/2,Ly/2,0,Ry,Rx,angle1=mellipse,
angle2=2*np.pi+mellipse)
gmsh.model.occ.rotate([(1,ellipse)], Lx/2, Ly/2, 0, 0, 0, 1, np.pi/2)
gmsh.model.occ.synchronize()
# Splitting ellipse using lines across ellipse
cutOut = gmsh.model.occ.cut([(1,ellipse)],
[(1,CSElem[0]),(1,CSElem[1]),(1,CSElem[2]),
(1,CSElem[3])],removeTool=(False))
for i in range(1,len(cutOut[0])-1):
CurveElem.append(cutOut[0][i][1])
Curve2.append(cutOut[0][0][1])
Curve2.append(cutOut[0][-1][1])
gmsh.model.occ.synchronize()
# Surface groups : Grouping different lines to form closed surface
# boundaries.
sTag_list = []
if Rx>=Ry:
linegroup = [
[1,8,2,10],[14,2,12,5],[6,13,3,15],[3,9,4,11],
[17,7,18,24],[25,18,6,19],[16,5,17,23]
]
else:
linegroup = [
[6,13,8,15],[13,2,14,4],
[17,11,25,10],[11,15,12,26],[12,16,9,27]
]
for surface in linegroup:
lTag = gmsh.model.occ.add_wire(surface)
sTag = gmsh.model.occ.add_plane_surface([lTag])
sTag_list.append(sTag)
gmsh.model.occ.synchronize()
# Setting transfinite curves for structured mesh
for i in ClampXElem:
gmsh.model.mesh.setTransfiniteCurve(i,numClampXElem)
gmsh.model.occ.synchronize()
for i in MainbodyXElem:
gmsh.model.mesh.setTransfiniteCurve(i,numMainbodyXElem)
gmsh.model.occ.synchronize()
for i in CSElem:
gmsh.model.mesh.setTransfiniteCurve(i,numCSElem)
gmsh.model.occ.synchronize()
for i in CurveElem:
gmsh.model.mesh.setTransfiniteCurve(i,numCurveElem)
gmsh.model.occ.synchronize()
for i in Curve2:
gmsh.model.mesh.setTransfiniteCurve(i,int(numCurveElem/2))
gmsh.model.occ.synchronize()
# Setting tranfinite surfaces for structured mesh
for i in sTag_list:
gmsh.model.mesh.setTransfiniteSurface(i)
gmsh.model.occ.synchronize()
# surface groups : Grouping different lines to form closed surface
# boundaries (with more than 4 points/lines)
if Rx>=Ry:
lTag = gmsh.model.occ.add_wire([19,21,20,16,22,26])
gmsh.model.occ.synchronize()
sTag = gmsh.model.occ.add_plane_surface([lTag])
gmsh.model.occ.synchronize()
gmsh.model.mesh.setTransfiniteSurface(tag=sTag,cornerTags=[13,14,12,11])
gmsh.model.occ.synchronize()
elif Rx<Ry:
lTag = gmsh.model.occ.add_wire([1,20,21,3,19,18])
sTag = gmsh.model.occ.add_plane_surface([lTag])
gmsh.model.occ.synchronize()
gmsh.model.mesh.setTransfiniteSurface(tag=sTag,cornerTags=[1,2,6,5])
gmsh.model.occ.synchronize()
lTag = gmsh.model.occ.add_wire([5,22,23,7,21,20])
sTag = gmsh.model.occ.add_plane_surface([lTag])
gmsh.model.occ.synchronize()
gmsh.model.mesh.setTransfiniteSurface(tag=sTag,cornerTags=[2,15,14,6])
gmsh.model.occ.synchronize()
lTag = gmsh.model.occ.add_wire([9,23,22,10,24,28])
sTag = gmsh.model.occ.add_plane_surface([lTag])
gmsh.model.occ.synchronize()
gmsh.model.mesh.setTransfiniteSurface(tag=sTag,cornerTags=[15,9,12,14])
gmsh.model.occ.synchronize()
gmsh.model.mesh.recombine()
# Extrude: Adding thickness to create a singly ply.
# Number of elements in thickness direction
numElemThickness = 3
model_ = gmsh.model.getEntities(2)
gmsh.model.occ.synchronize()
gmsh.model.occ.extrude(model_,0,0,LzPly,numElements=[numElemThickness],
heights=[1],recombine=True)
gmsh.model.occ.synchronize()
#=====================================================================
# Meshing
#=====================================================================
# Mesh options
gmsh.option.setNumber("Mesh.Smoothing", 100)
# 2D mesh algorithm (1: MeshAdapt, 2: Automatic, 3: Initial mesh only,
# 5: Delaunay, 6: Frontal-Delaunay, 7: BAMG, 8: Frontal-Delaunay for Quads,
# 9: Packing of Parallelograms)
meshalgo2d = 8
gmsh.option.setNumber("Mesh.Algorithm",meshalgo2d)
# Recombine all triangular meshes? (yes:1, no:0)
RecombineTriMesh = 1
gmsh.option.setNumber("Mesh.RecombineAll",RecombineTriMesh)
gmsh.option.setNumber("Mesh.MeshSizeFactor",MeshSizeFactor)
# Generating mesh
gmsh.model.mesh.clear()
# Meshing 2D
gmsh.model.mesh.generate(2)
# Meshing 3D
gmsh.model.mesh.generate(3)
# gmsh mesh name without extension # save mesh (yes-1,no-0)
if save_mesh:
gmsh.write(f"{meshname}.msh")
if onlypreview==0:
# Get nodes and their coordinates
nodeTags, nodeCoords, _ = gmsh.model.mesh.getNodes()
# Type,number of elements,
elementTypes, elementTags, elementNodeTags = gmsh.model.mesh.getElements(3)
elementTypes = elementTypes[0]
elementTags = elementTags[0]
elementNodeTags = elementNodeTags[0]
# Launch the GUI to to preview geometry and mesh using gmsh applications.
# Script pauses untill gmsh application is closed manually
if preview_geom_mesh:
gmsh.fltk.run()
if log_flag:
log = gmsh.logger.get()
gmsh.logger.stop()
# close gmsh
gmsh.finalize()
if onlypreview == 0:
#=====================================================================
# Data Extraction: processing data collected from gmsh
#=====================================================================
numElem = len(elementTags)
if meshalgo2d==8 or RecombineTriMesh==1: # processing based on shape of solid elements in mesh
elemtonodes = np.reshape(elementNodeTags,(-1,8)) # quad/hex elements -> 8 unique coordinates
else:
elemtonodes = np.reshape(elementNodeTags,(-1,4)) # tetra hedral elements -> 4 unique coordinates
last4nodes = np.transpose(np.asarray([elemtonodes[:,3]])) # when represented in terms of hexahedral coordinates, the last node is repeated 4 times -> resulting in 8 nodes
last4nodes = np.repeat(last4nodes,4,axis=1)
elemtonodes = np.concatenate([elemtonodes,last4nodes],axis=1)
node_coord = np.round(np.transpose(np.vstack([nodeCoords[0::3],nodeCoords[1::3],nodeCoords[2::3]])),16)
numNode = len(node_coord)
assert(len(node_coord) == len(nodeTags))
assert(len(elemtonodes) == numElem)
print('Total number of elements in 1st ply:\t',numElem)
print('Total number of nodes in 1st ply:\t',numNode)
print('Mesh generation of 1st ply complete...')
#=====================================================================
# Building full model
#=====================================================================
# Add 1st layer mesh data to the database dictionary 'mesh_data'
#---------------------------------------------------------------------
mesh_data = {}
mesh_data[1]={}
mesh_data[1]['nodeCoord'] = np.copy(node_coord)
mesh_data[1]['elemNode'] = np.copy(elemtonodes)
mesh_data[1]['elemidx'] = np.array(list(range(1,numElem+1))).astype('int64')
mesh_data[1]['nodeidx'] = np.copy(nodeTags.astype('int64'))
# To assert that there no duplicate elements sharing same nodes
assert(len(mesh_data[1]['elemNode']) ==
len(np.unique(mesh_data[1]['elemNode'],axis=0)))
# Adding other layer mesh data to the database dictionary 'mesh_data'
#---------------------------------------------------------------------
for ply in range(2,numPly+1):
mesh_data[ply]={}
# Adding thickness to all z coordinates
new_z = mesh_data[ply-1]['nodeCoord'][:,2] + LzPly
mesh_data[ply]['nodeCoord'] = np.transpose(np.vstack([mesh_data[ply-1]['nodeCoord'][:,0],mesh_data[ply-1]['nodeCoord'][:,1],new_z]))
mesh_data[ply]['nodeCoord'] = np.round(mesh_data[ply]['nodeCoord'],16)
mesh_data[ply]['elemidx'] = np.arange( max(mesh_data[ply-1]['elemidx'])+1, max(mesh_data[ply-1]['elemidx'])+1+numElem )
mesh_data[ply]['nodeidx'] = np.arange( max(mesh_data[ply-1]['nodeidx'])+1, max(mesh_data[ply-1]['nodeidx'])+1+numNode )
mesh_data[ply]['elemNode'] = np.copy(mesh_data[ply-1]['elemNode'] + numNode)
# To assert that there no duplicate elements sharing same nodes
assert(len(mesh_data[ply]['elemNode']) == len(np.unique(mesh_data[ply]['elemNode'],axis=0)))
print('Mesh generation ply complete...')
# Adding cohesive layers mesh data to the database dictionary 'mesh_data'
#---------------------------------------------------------------------
mesh_data[numPly+1] = {}
elemnode_map = {}
for layer in range(1,numCohElem+1):
lower_ply = layer
upper_ply = layer+1
# nodes on upper surface of lower ply
lower_nodes_lidx = np.where(mesh_data[lower_ply]['nodeCoord'][:,2] == max(mesh_data[lower_ply]['nodeCoord'][:,2]))[0]
# Elemetonode uses global node index. Hence converting them back
lower_node_gidx = mesh_data[lower_ply]['nodeidx'][lower_nodes_lidx]
LowerPlyElements_lidx = []
temp1 = np.in1d(mesh_data[lower_ply]['elemNode'][:,4],lower_node_gidx)
temp2 = np.in1d(mesh_data[lower_ply]['elemNode'][:,5],lower_node_gidx)
temp3 = np.in1d(mesh_data[lower_ply]['elemNode'][:,6],lower_node_gidx)
temp4 = np.in1d(mesh_data[lower_ply]['elemNode'][:,7],lower_node_gidx)
temp = temp1*temp2*temp3*temp4
LowerPlyElements_lidx = np.where(temp)
lowerply_node_gidx = mesh_data[lower_ply]['elemNode'][LowerPlyElements_lidx,4:][0]
vround = 10
if layer==1:
upperply_elemnode_list = []
for nodecoord_i in lowerply_node_gidx:
for nodei in nodecoord_i:
nodei_lidx = np.where(mesh_data[lower_ply]['nodeidx']==nodei)[0][0] # in local idx
# getting node with similar coordinate as "nodei" in local index
node_x = np.where(np.round(mesh_data[upper_ply]['nodeCoord'][:,0],vround)== np.round(mesh_data[lower_ply]['nodeCoord'][nodei_lidx][0],vround))[0]
node_y = node_x[np.where(np.round(mesh_data[upper_ply]['nodeCoord'][node_x,1],vround)== np.round(mesh_data[lower_ply]['nodeCoord'][nodei_lidx][1],vround))[0]]
node_z = node_y[np.where(np.round(mesh_data[upper_ply]['nodeCoord'][node_y,2],vround)== np.round(mesh_data[lower_ply]['nodeCoord'][nodei_lidx,2],vround))[0]]
# getting global indices
element_gidx = mesh_data[upper_ply]['nodeidx'][node_z][0]
upperply_elemnode_list.append(element_gidx)
# 4 nodes of upper layer element for hex elements
upperply_node_gidx = np.reshape(upperply_elemnode_list,(-1,4))
# Creating map to be used for other layers
node_map = np.vstack((upperply_node_gidx[:,0] - lowerply_node_gidx[:,0],
upperply_node_gidx[:,1] - lowerply_node_gidx[:,1],
upperply_node_gidx[:,2] - lowerply_node_gidx[:,2],
upperply_node_gidx[:,3] - lowerply_node_gidx[:,3],
)).transpose()
else:
# Using map to get nodes of eleemnts of other layers
upperply_node_gidx = lowerply_node_gidx + node_map
new_layer_elemnode = np.hstack((lowerply_node_gidx,upperply_node_gidx))
new_layer_elemnode = np.unique(new_layer_elemnode, axis=0)
elemnode_map[layer] = new_layer_elemnode
if layer == 1:
new_layer_elemidx = np.arange( max(mesh_data[numPly]['elemidx'])+1, max(mesh_data[numPly]['elemidx'])+1+len(new_layer_elemnode) )
mesh_data[numPly+1]['elemidx'] = new_layer_elemidx
mesh_data[numPly+1]['elemNode'] = new_layer_elemnode
else:
new_layer_elemidx = np.arange( max(mesh_data[numPly+1]['elemidx'])+1, max(mesh_data[numPly+1]['elemidx'])+1+len(new_layer_elemnode) )
mesh_data[numPly+1]['elemidx'] = np.hstack((mesh_data[numPly+1]['elemidx'],new_layer_elemidx))
mesh_data[numPly+1]['elemNode'] = np.vstack((mesh_data[numPly+1]['elemNode'],new_layer_elemnode))
# To ensure that there are no duplicate elements sharing same nodes
assert(len(mesh_data[numPly+1]['elemNode']) == len(np.unique(mesh_data[numPly+1]['elemNode'],axis=0)))
print('Mesh generation cohesive layer complete...')
#=====================================================================
# Node Set: Creating node set for boundary conditions
#=====================================================================
node_list = {}
# all nodes with x-coord <= 0.05
kx1 = np.where(np.round(mesh_data[numPly]['nodeCoord'][:,0],15) <= clp)[0]
# all nodes with z-coord == Lz
kz1 = np.where(np.round(mesh_data[numPly]['nodeCoord'][:,2],15) >= round(Lz,16))[0]
clamp_set_1_local_idx = np.intersect1d(kx1,kz1)
node_list[1] = mesh_data[numPly]['nodeidx'][clamp_set_1_local_idx]
# all nodes with x-coord <= 0.05
        kx2 = np.where(np.round(mesh_data[1]['nodeCoord'][:,0],15) <= clp)[0]
"""
save_annotations.py
Code for saving image annotations from crowdflower
@author: <NAME>
"""
"""
Import python packages
"""
from subprocess import call
import skimage.io
import skimage.measure
import scipy.misc
import numpy as np
import pandas as pd
import warnings
import pathlib
import os
import urllib.request, urllib.parse, urllib.error
import pdb
"""
Load csv file
"""
def download_csv():
csv_names = [
'1257789'
]
for csv_name in csv_names:
csv_direc = os.path.join( '/data/annotation_csv', csv_name)
csv_filename = 'f' + csv_name + '.csv'
csv_file = os.path.join(csv_direc, csv_filename)
if not os.path.isfile(csv_file):
call([ "unzip", csv_file+".zip", "-d", csv_direc])
        df = pd.read_csv(csv_file, index_col=0)
urls = df.loc[:,['annotation', 'image_url', 'broken_link']]
for index, row in df.iterrows():
print(row)
broken_link = row['broken_link']
# Get image_name
if broken_link is False:
annotation_url = row['annotation'][8:-2]
image_url = row['image_url']
image_folder = os.path.join(csv_direc, "nuclear", row['cell_type'], row['set_number'])
if not os.path.exists(image_folder):
os.makedirs(image_folder)
# generate image id
image_url_split = image_url.split("/")
image_id = image_url_split[-1][8:-4].zfill(4)
# annotated image location
annotated_image_folder = os.path.join(image_folder, "Annotation")
if not os.path.exists(annotated_image_folder):
os.makedirs(annotated_image_folder)
#pdb.set_trace()
annotated_image_name = "annotation_" + image_id + ".tif"
annotated_image_path = os.path.join( annotated_image_folder, annotated_image_name)
# raw image location
raw_image_folder = os.path.join(image_folder, "RawImages")
if not os.path.exists(raw_image_folder):
os.makedirs(raw_image_folder)
raw_image_name = "img_00000" + image_id + "_DAPI_000.jpg"
raw_image_path = os.path.join( raw_image_folder, raw_image_name)
#pdb.set_trace()
print(image_url_split)
# Download annotated image
annotated_image = urllib.request.URLopener()
annotated_image.retrieve(annotation_url, annotated_image_path)
# Download raw image
raw_image = urllib.request.URLopener()
raw_image.retrieve(image_url, raw_image_path)
def reshape_montage(montage_file, output_folder, x_size = 256, y_size = 256, x_images = 3, y_images = 10):
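    # Chop an (x_images x y_images) montage of (x_size x y_size) tiles into
    # individual images, keeping only the red channel, and renumber the
    # annotation labels so they run from 1 to N.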
debug = False
# open composite image
img = scipy.misc.imread(montage_file)
# create output directory
pathlib.Path(output_folder).mkdir(exist_ok=True)
# extract red channel
img = img[:,:,0]
# convert data to integers for convenience
img = img.astype(np.int16)
# chop up the montage
x_end = x_size - 1
y_end = y_size - 1
images = np.ndarray( shape=(x_size, y_size, x_images*y_images), dtype=np.int16)
image_number = 0
while x_end < (x_size*x_images):
# moving along columns until we get to the end of the column
while y_end < (y_size*y_images):
if debug:
print("x_end: " + str(x_end))
print("y_end: " + str(y_end))
images[:,:,image_number] = img[
(x_end-(x_size-1)):(x_end+1),
(y_end-(y_size-1)):(y_end+1) ]
image_number += 1
y_end += y_size
# once we reach the end of a column, move to the beginning of the
# next row, and continue
y_end = y_size - 1
x_end += x_size
# renumber the images so that the numbers are 1 to N
    labels = np.unique(images)
import cv2
import numpy as np
colors = {
"black": (0,0,0),
"red" : (230, 0, 0),
"green": (0, 204, 0),
"blue": (0, 0, 204),
"azure": (164, 255, 255)
}
class Canvas:
def __init__(self,width = 1280,height = 720,title = 'window') -> None:
self.width = width
self.height = height
self.title = title
        self.img = 255 * np.ones([self.height, self.width, 3], dtype=np.uint8)
def show(self):
cv2.imshow(self.title, self.img)
cv2.waitKey(0)
cv2.destroyAllWindows()
def save(self, filename = 'graph.png'):
cv2.imwrite(filename,self.img)
def RGB2BGR(color):
return (color[2],color[1],color[0])
class VisualVertex:
def __init__(self,center,radius = 40,color = "blue",text = "A") -> None:
self.center = center
self.radius = radius
self.color = self.RGB2BGR(colors[color])
self.text = text
def draw(self,canvas):
cv2.circle(canvas.img,self.center,self.radius,self.color,2)
TEXT_FACE = cv2.FONT_HERSHEY_DUPLEX
TEXT_SCALE = 1
TEXT_THICKNESS = 2
text_size, _ = cv2.getTextSize(self.text, TEXT_FACE, TEXT_SCALE, TEXT_THICKNESS)
text_origin = (self.center[0] - text_size[0] // 2, self.center[1] + text_size[1] // 2)
cv2.putText(canvas.img,self.text,text_origin,TEXT_FACE,TEXT_SCALE,self.color,TEXT_THICKNESS)
def RGB2BGR(self,color):
return (color[2],color[1],color[0])
def getCenter(self):
return np.array(self.center)
class VisualEdge:
def __init__(self,vertex1,vertex2,color = "blue",text = "1") -> None:
self.vertex1 = vertex1
self.vertex2 = vertex2
self.color = self.RGB2BGR(colors[color])
self.text = text
def RGB2BGR(self,color):
return (color[2],color[1],color[0])
def draw(self,canvas):
C1 = self.vertex1.getCenter()
C2 = self.vertex2.getCenter()
V = C2 - C1
d = np.linalg.norm(V)
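        # Clip the segment to the circle boundaries: move each endpoint from
        # its vertex center toward the other vertex by that vertex's radius.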
M1 = (C1 + (self.vertex1.radius/d) * (C2 - C1)).astype(np.uint32)
M2 = (C2 + (self.vertex2.radius/d) * (C1 - C2)).astype(np.uint32)
cv2.line(canvas.img,tuple(M1),tuple(M2),self.color,2)
TEXT_FACE = cv2.FONT_HERSHEY_DUPLEX
TEXT_SCALE = 1
TEXT_THICKNESS = 2
text_origin = ((M1 + M2)/2).astype(np.uint32)
        # nudge the label off the edge along the perpendicular direction
        text_origin = (text_origin - 2 * (np.array([-V[1], V[0]]) / d)).astype(np.int32)
import itertools
from collections import namedtuple
import numpy as np
from scipy.stats import ks_2samp
from sklearn.model_selection import train_test_split
import sklearn.utils
import matplotlib as mpl
import matplotlib.colors
import seaborn as sns
from collections import defaultdict
import xgboost as xgb
import inspect
from scedar.eda.sdm import SampleDistanceMatrix
from scedar.eda.plot import swarm
from scedar.eda.plot import heatmap
from scedar.eda import mdl
from scedar.eda import mtype
from scedar import utils
class SingleLabelClassifiedSamples(SampleDistanceMatrix):
"""Data structure of single label classified samples
Attributes:
_x (2D number array): (n_samples, n_features) data matrix.
_d (2D number array): (n_samples, n_samples) distance matrix.
_labs (list of labels): list of labels in the same type, int or str.
_fids (list of feature IDs): list of feature IDs in the same type,
int or str.
_sids (list of sample IDs): list of sample IDs in the same type,
int or str.
_metric (str): Distance metric.
Note:
If sort by labels, the samples will be reordered, so that samples from
left to right are from one label to another.
"""
# sid, lab, fid, x
def __init__(self, x, labs, sids=None, fids=None, d=None,
metric="cosine", use_pdist=True, nprocs=None):
# sids: sample IDs. String or int.
# labs: sample classified labels. String or int.
# x: (n_samples, n_features)
super(SingleLabelClassifiedSamples, self).__init__(
x=x, d=d, metric=metric, use_pdist=use_pdist,
sids=sids, fids=fids, nprocs=nprocs)
mtype.check_is_valid_labs(labs)
labs = np.array(labs)
if self._sids.shape[0] != labs.shape[0]:
raise ValueError("sids must have the same length as labs")
self._labs = labs
self._set_up_lab_rel_attrs()
# keep a copy of original labels
self._orig_labs = labs
self._xgb_lut = {}
return
def _set_up_lab_rel_attrs(self):
"""Set up labels related attrs
"""
self._uniq_labs, self._uniq_lab_cnts = np.unique(
self._labs, return_counts=True)
# {lab: array([sid0, ...]), ...}
sid_lut = {}
for ulab in self._uniq_labs:
sid_lut[ulab] = self._sids[self._labs == ulab]
self._sid_lut = sid_lut
# {sid1: lab1, ...}
lab_lut = {}
# sids only contain unique values
for i in range(self._sids.shape[0]):
lab_lut[self._sids[i]] = self._labs[i]
self._lab_lut = lab_lut
def sort_by_labels(self):
"""
Return a copy with sorted sample indices by labels and distances.
"""
labels = np.array(self.labs)
# slcs is empty
if len(labels) == 0 or self._x.size == 0:
return self.ind_x()
uniq_labs = np.unique(labels)
s_ind_lut = dict([(ulab, np.where(labels == ulab)[0])
for ulab in uniq_labs])
# sort within each label
for ulab in uniq_labs:
# get sample indices of that class
s_inds = s_ind_lut[ulab]
# sort that class by distance to the first sample
# get a list of distances to the frist sample
s_dist_to_s0_list = [self._d[s_inds[0], s_inds[i]]
for i in range(len(s_inds))]
# sort indices by distances
sorted_s_inds = s_inds[np.argsort(s_dist_to_s0_list,
kind="mergesort")]
# update lut
s_ind_lut[ulab] = sorted_s_inds
# sort classes by distances of first samples
# frist sample indices
lab_fs_inds = [s_ind_lut[ulab][0] for ulab in uniq_labs]
# distance of first samples to the first class first sample
lab_fs_dist_to_fc_list = [self._d[lab_fs_inds[0], lab_fs_inds[i]]
for i in range(len(lab_fs_inds))]
sorted_ulabs = uniq_labs[np.argsort(lab_fs_dist_to_fc_list,
kind="mergesort")]
sorted_s_inds = np.concatenate([s_ind_lut[ulab]
for ulab in sorted_ulabs])
return self.ind_x(sorted_s_inds)
def filter_min_class_n(self, min_class_n):
uniq_lab_cnts = np.unique(self._labs, return_counts=True)
nf_sid_ind = np.in1d(
self._labs, (uniq_lab_cnts[0])[uniq_lab_cnts[1] >= min_class_n])
return self.ind_x(nf_sid_ind)
def labs_to_sids(self, labs):
return tuple(tuple(self._sid_lut[y].tolist()) for y in labs)
def sids_to_labs(self, sids):
        return np.array([self._lab_lut[x] for x in sids])
import numpy as np
import pandas as pd
from scipy.interpolate import interp1d
def vshale_gr(gr_curve,gr_sand,gr_shale,type='linear'):
"""vshale_gr [summary]
Parameters
----------
gr_curve : [type]
[description]
gr_sand : [type]
[description]
gr_shale : [type]
[description]
type : str, optional
[description], by default 'linear'
Returns
-------
[type]
[description]
Raises
------
ValueError
[description]
"""
gr_curve=np.atleast_1d(gr_curve)
gr_sand=np.atleast_1d(gr_sand)
gr_shale=np.atleast_1d(gr_shale)
igr=(gr_curve-gr_sand)/(gr_shale-gr_sand)
igr[igr < 0.0] = 0.0
igr[igr > 1.0] = 1.0
#https://www.geoloil.com/VshModels.php
if type == 'linear':
vsh = igr
elif type == 'clavier':
vsh = 1.7 - np.sqrt(3.38 - np.power(igr+0.7,2))
elif type == 'stieber':
vsh = igr/(3-2*igr)
elif type == 'larionov_old':
vsh = 0.33 * (np.power(2,2*igr)-1)
elif type == 'larionov_tertiary':
vsh = 0.083 * (np.power(2,3.7*igr)-1)
else:
raise ValueError(f'method especified [ {type} ] does not exist')
return vsh
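# Illustrative call (values are made up for demonstration):
#   vsh = vshale_gr(gr_curve=np.array([20., 60., 110.]), gr_sand=15,
#                   gr_shale=120, type='larionov_tertiary')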
def vshale_dn(rho_curve, ntr_curve, rho_ma=2.65, rho_f=1.0, hi_shl=0.46,rho_shl=2.43):
"""vshale_dn [summary]
Parameters
----------
rho_curve : [type]
[description]
ntr_curve : [type]
[description]
rho_ma : float, optional
[description], by default 2.65
rho_f : float, optional
[description], by default 1.0
hi_shl : float, optional
[description], by default 0.46
rho_shl : float, optional
[description], by default 2.43
Returns
-------
[type]
[description]
"""
rho_curve= np.atleast_1d(rho_curve)
ntr_curve= np.atleast_1d(ntr_curve)
rho_ma = np.atleast_1d(rho_ma)
rho_f = np.atleast_1d(rho_f)
hi_shl = np.atleast_1d(hi_shl)
rho_shl = np.atleast_1d(rho_shl)
vsh = (rho_curve - rho_ma + ntr_curve*(rho_ma-rho_f))/(rho_shl - rho_ma + hi_shl*(rho_ma-rho_f))
vsh[vsh < 0.0] = 0.0
vsh[vsh > 1.0] = 1.0
return vsh
def phi_rho(rho_curve,rho_ma=2.65,rho_f=1.0):
"""phi_rho [summary]
Parameters
----------
rho_curve : [type]
[description]
rho_ma : float, optional
[description], by default 2.65
rho_f : float, optional
[description], by default 1.0
Returns
-------
[type]
[description]
"""
rho_curve=np.atleast_1d(rho_curve)
rho_ma=np.atleast_1d(rho_ma)
rho_f=np.atleast_1d(rho_f)
phi_rho_curve=(rho_ma-rho_curve)/(rho_ma-rho_f)
phi_rho_curve[phi_rho_curve < 0.0] = 0.0
phi_rho_curve[phi_rho_curve > 1.0] = 1.0
return phi_rho_curve
def phie(phi_curve,vsh_curve):
"""phie [summary]
Parameters
----------
phi_curve : [type]
[description]
vsh_curve : [type]
[description]
Returns
-------
[type]
[description]
"""
phi_curve=np.atleast_1d(phi_curve)
vsh_curve=np.atleast_1d(vsh_curve)
phie_curve=phi_curve*(1 -vsh_curve)
phie_curve[phie_curve < 0.0] = 0.0
phie_curve[phie_curve > 0.3] = 0.3
return phie_curve
def phia(phi_rho_curve, ntr_curve, method='geometric'):
"""phia [summary]
Parameters
----------
phi_rho_curve : [type]
[description]
ntr_curve : [type]
[description]
method : str, optional
[description], by default 'geometric'
Returns
-------
[type]
[description]
"""
phi_rho_curve = np.atleast_1d(phi_rho_curve)
ntr_curve = np.atleast_1d(ntr_curve)
c = np.transpose(np.vstack((phi_rho_curve,ntr_curve)))
if method == 'mean':
phia_curve = np.mean(c,axis=1)
elif method== 'geometric':
phia_curve = np.power(((np.power(phi_rho_curve,2)+np.power(ntr_curve,2))/2),0.5)
return phia_curve
def facies_dnp(rho_curve, ntr_curve,pef_curve,**kw):
"""facies_dnp [summary]
Parameters
----------
rho_curve : [type]
[description]
ntr_curve : [type]
[description]
pef_curve : [type]
[description]
Returns
-------
[type]
[description]
"""
rho_curve = np.atleast_1d(rho_curve)
ntr_curve = np.atleast_1d(ntr_curve)
pef_curve = np.atleast_1d(pef_curve)
phi_rho_curve = phi_rho(rho_curve,**kw)
phia_curve = phia(phi_rho_curve,ntr_curve)
u = pef_curve*((rho_curve + 0.1833)/1.07)
uma = (u - 0.398 * phia_curve)/(1-phia_curve)
dga = (rho_curve - phia_curve)/(1-phia_curve)
return uma, dga
def sw(rt_curve,phi_curve,rw,vsh_curve=None,a=0.62,m=2.15,n=2,rsh=4.0,alpha=0.3,method="archie"):
"""sw [summary]
Parameters
----------
rt_curve : [type]
[description]
phi_curve : [type]
[description]
rw : [type]
[description]
vsh_curve : [type], optional
[description], by default None
a : float, optional
[description], by default 0.62
m : float, optional
[description], by default 2.15
n : int, optional
[description], by default 2
rsh : float, optional
[description], by default 4.0
alpha : float, optional
[description], by default 0.3
method : str, optional
[description], by default "archie"
Returns
-------
[type]
[description]
"""
a=np.atleast_1d(a)
m=np.atleast_1d(m)
n=np.atleast_1d(n)
vsh = np.atleast_1d(vsh_curve) if vsh_curve is not None else None
rsh=np.atleast_1d(rsh)
alpha=np.atleast_1d(alpha)
rt=np.atleast_1d(rt_curve)
phi = np.atleast_1d(phi_curve)
    rw = np.atleast_1d(rw)