| prompt (stringlengths 19 to 879k) | completion (stringlengths 3 to 53.8k) | api (stringlengths 8 to 59) |
|---|---|---|

Each row below appears to concatenate the row's `prompt` (code context), the masked-out `completion`, and the `api` name of the completed call, separated by `|` markers.
from __future__ import print_function, division, absolute_import
import functools
import sys
import warnings
# unittest.TestCase.subTest() was only added in python 3.4
if sys.version_info[0] < 3 or sys.version_info[1] < 4:
import unittest2 as unittest
else:
import unittest
# unittest.mock is not available in 2.7 (though unittest2 might contain it?)
try:
import unittest.mock as mock
except ImportError:
import mock
import matplotlib
matplotlib.use('Agg') # fix execution of tests involving matplotlib on travis
import numpy as np
import six.moves as sm
import imgaug as ia
from imgaug import augmenters as iaa
from imgaug import parameters as iap
from imgaug import dtypes as iadt
from imgaug.testutils import (array_equal_lists, keypoints_equal, reseed,
runtest_pickleable_uint8_img)
import imgaug.augmenters.arithmetic as arithmetic_lib
import imgaug.augmenters.contrast as contrast_lib
class TestAdd(unittest.TestCase):
def setUp(self):
reseed()
def test___init___bad_datatypes(self):
# test exceptions for wrong parameter types
got_exception = False
try:
_ = iaa.Add(value="test")
except Exception:
got_exception = True
assert got_exception
got_exception = False
try:
_ = iaa.Add(value=1, per_channel="test")
except Exception:
got_exception = True
assert got_exception
def test_add_zero(self):
# no add, shouldn't change anything
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
images_list = [base_img]
aug = iaa.Add(value=0)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
expected = images
assert np.array_equal(observed, expected)
assert observed.shape == (1, 3, 3, 1)
observed = aug.augment_images(images_list)
expected = images_list
assert array_equal_lists(observed, expected)
observed = aug_det.augment_images(images)
expected = images
assert np.array_equal(observed, expected)
observed = aug_det.augment_images(images_list)
expected = images_list
assert array_equal_lists(observed, expected)
def test_add_one(self):
# add > 0
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
images_list = [base_img]
aug = iaa.Add(value=1)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
expected = images + 1
assert np.array_equal(observed, expected)
assert observed.shape == (1, 3, 3, 1)
observed = aug.augment_images(images_list)
expected = [images_list[0] + 1]
assert array_equal_lists(observed, expected)
observed = aug_det.augment_images(images)
expected = images + 1
assert np.array_equal(observed, expected)
observed = aug_det.augment_images(images_list)
expected = [images_list[0] + 1]
assert array_equal_lists(observed, expected)
def test_minus_one(self):
# add < 0
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
images_list = [base_img]
aug = iaa.Add(value=-1)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
expected = images - 1
assert np.array_equal(observed, expected)
observed = aug.augment_images(images_list)
expected = [images_list[0] - 1]
assert array_equal_lists(observed, expected)
observed = aug_det.augment_images(images)
expected = images - 1
assert np.array_equal(observed, expected)
observed = aug_det.augment_images(images_list)
expected = [images_list[0] - 1]
assert array_equal_lists(observed, expected)
def test_uint8_every_possible_value(self):
# uint8, every possible addition for base value 127
for value_type in [float, int]:
for per_channel in [False, True]:
for value in np.arange(-255, 255+1):
aug = iaa.Add(value=value_type(value), per_channel=per_channel)
expected = np.clip(127 + value_type(value), 0, 255)
img = np.full((1, 1), 127, dtype=np.uint8)
img_aug = aug.augment_image(img)
assert img_aug.item(0) == expected
img = np.full((1, 1, 3), 127, dtype=np.uint8)
img_aug = aug.augment_image(img)
assert np.all(img_aug == expected)
def test_add_floats(self):
# specific tests with floats
aug = iaa.Add(value=0.75)
img = np.full((1, 1), 1, dtype=np.uint8)
img_aug = aug.augment_image(img)
assert img_aug.item(0) == 2
img = np.full((1, 1), 1, dtype=np.uint16)
img_aug = aug.augment_image(img)
assert img_aug.item(0) == 2
aug = iaa.Add(value=0.45)
img = np.full((1, 1), 1, dtype=np.uint8)
img_aug = aug.augment_image(img)
assert img_aug.item(0) == 1
img = np.full((1, 1), 1, dtype=np.uint16)
img_aug = aug.augment_image(img)
assert img_aug.item(0) == 1
def test_stochastic_parameters_as_value(self):
# test other parameters
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
aug = iaa.Add(value=iap.DiscreteUniform(1, 10))
observed = aug.augment_images(images)
assert 100 + 1 <= np.average(observed) <= 100 + 10
aug = iaa.Add(value=iap.Uniform(1, 10))
observed = aug.augment_images(images)
assert 100 + 1 <= np.average(observed) <= 100 + 10
aug = iaa.Add(value=iap.Clip(iap.Normal(1, 1), -3, 3))
observed = aug.augment_images(images)
assert 100 - 3 <= np.average(observed) <= 100 + 3
aug = iaa.Add(value=iap.Discretize(iap.Clip(iap.Normal(1, 1), -3, 3)))
observed = aug.augment_images(images)
assert 100 - 3 <= np.average(observed) <= 100 + 3
def test_keypoints_dont_change(self):
# keypoints shouldn't be changed
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
keypoints = [ia.KeypointsOnImage([ia.Keypoint(x=0, y=0), ia.Keypoint(x=1, y=1),
ia.Keypoint(x=2, y=2)], shape=base_img.shape)]
aug = iaa.Add(value=1)
aug_det = iaa.Add(value=1).to_deterministic()
observed = aug.augment_keypoints(keypoints)
expected = keypoints
assert keypoints_equal(observed, expected)
observed = aug_det.augment_keypoints(keypoints)
expected = keypoints
assert keypoints_equal(observed, expected)
def test_tuple_as_value(self):
# varying values
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
aug = iaa.Add(value=(0, 10))
aug_det = aug.to_deterministic()
last_aug = None
last_aug_det = None
nb_changed_aug = 0
nb_changed_aug_det = 0
nb_iterations = 1000
for i in sm.xrange(nb_iterations):
observed_aug = aug.augment_images(images)
observed_aug_det = aug_det.augment_images(images)
if i == 0:
last_aug = observed_aug
last_aug_det = observed_aug_det
else:
if not np.array_equal(observed_aug, last_aug):
nb_changed_aug += 1
if not np.array_equal(observed_aug_det, last_aug_det):
nb_changed_aug_det += 1
last_aug = observed_aug
last_aug_det = observed_aug_det
assert nb_changed_aug >= int(nb_iterations * 0.7)
assert nb_changed_aug_det == 0
def test_per_channel(self):
# test channelwise
aug = iaa.Add(value=iap.Choice([0, 1]), per_channel=True)
observed = aug.augment_image(np.zeros((1, 1, 100), dtype=np.uint8))
uq = np.unique(observed)
assert observed.shape == (1, 1, 100)
assert 0 in uq
assert 1 in uq
assert len(uq) == 2
def test_per_channel_with_probability(self):
# test channelwise with probability
aug = iaa.Add(value=iap.Choice([0, 1]), per_channel=0.5)
seen = [0, 0]
for _ in sm.xrange(400):
observed = aug.augment_image(np.zeros((1, 1, 20), dtype=np.uint8))
assert observed.shape == (1, 1, 20)
uq = np.unique(observed)
per_channel = (len(uq) == 2)
if per_channel:
seen[0] += 1
else:
seen[1] += 1
assert 150 < seen[0] < 250
assert 150 < seen[1] < 250
def test_zero_sized_axes(self):
shapes = [
(0, 0),
(0, 1),
(1, 0),
(0, 1, 0),
(1, 0, 0),
(0, 1, 1),
(1, 0, 1)
]
for shape in shapes:
with self.subTest(shape=shape):
image = np.zeros(shape, dtype=np.uint8)
aug = iaa.Add(1)
image_aug = aug(image=image)
assert np.all(image_aug == 1)
assert image_aug.dtype.name == "uint8"
assert image_aug.shape == image.shape
def test_unusual_channel_numbers(self):
shapes = [
(1, 1, 4),
(1, 1, 5),
(1, 1, 512),
(1, 1, 513)
]
for shape in shapes:
with self.subTest(shape=shape):
image = np.zeros(shape, dtype=np.uint8)
aug = iaa.Add(1)
image_aug = aug(image=image)
assert np.all(image_aug == 1)
assert image_aug.dtype.name == "uint8"
assert image_aug.shape == image.shape
def test_get_parameters(self):
# test get_parameters()
aug = iaa.Add(value=1, per_channel=False)
params = aug.get_parameters()
assert isinstance(params[0], iap.Deterministic)
assert isinstance(params[1], iap.Deterministic)
assert params[0].value == 1
assert params[1].value == 0
def test_heatmaps(self):
# test heatmaps (not affected by augmenter)
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
aug = iaa.Add(value=10)
hm = ia.quokka_heatmap()
hm_aug = aug.augment_heatmaps([hm])[0]
assert np.allclose(hm.arr_0to1, hm_aug.arr_0to1)
def test_other_dtypes_bool(self):
image = np.zeros((3, 3), dtype=bool)
aug = iaa.Add(value=1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 1)
image = np.full((3, 3), True, dtype=bool)
aug = iaa.Add(value=1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 1)
image = np.full((3, 3), True, dtype=bool)
aug = iaa.Add(value=-1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 0)
image = np.full((3, 3), True, dtype=bool)
aug = iaa.Add(value=-2)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 0)
def test_other_dtypes_uint_int(self):
for dtype in [np.uint8, np.uint16, np.int8, np.int16]:
min_value, center_value, max_value = iadt.get_value_range_of_dtype(dtype)
image = np.full((3, 3), min_value, dtype=dtype)
aug = iaa.Add(1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == min_value + 1)
image = np.full((3, 3), min_value + 10, dtype=dtype)
aug = iaa.Add(11)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == min_value + 21)
image = np.full((3, 3), max_value - 2, dtype=dtype)
aug = iaa.Add(1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == max_value - 1)
image = np.full((3, 3), max_value - 1, dtype=dtype)
aug = iaa.Add(1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == max_value)
image = np.full((3, 3), max_value - 1, dtype=dtype)
aug = iaa.Add(2)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == max_value)
image = np.full((3, 3), min_value + 10, dtype=dtype)
aug = iaa.Add(-9)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == min_value + 1)
image = np.full((3, 3), min_value + 10, dtype=dtype)
aug = iaa.Add(-10)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == min_value)
image = np.full((3, 3), min_value + 10, dtype=dtype)
aug = iaa.Add(-11)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == min_value)
for _ in sm.xrange(10):
image = np.full((1, 1, 3), 20, dtype=dtype)
aug = iaa.Add(iap.Uniform(-10, 10))
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(10 <= image_aug, image_aug <= 30))
assert len(np.unique(image_aug)) == 1
image = np.full((1, 1, 100), 20, dtype=dtype)
aug = iaa.Add(iap.Uniform(-10, 10), per_channel=True)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(10 <= image_aug, image_aug <= 30))
assert len(np.unique(image_aug)) > 1
image = np.full((1, 1, 3), 20, dtype=dtype)
aug = iaa.Add(iap.DiscreteUniform(-10, 10))
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(10 <= image_aug, image_aug <= 30))
assert len(np.unique(image_aug)) == 1
image = np.full((1, 1, 100), 20, dtype=dtype)
aug = iaa.Add(iap.DiscreteUniform(-10, 10), per_channel=True)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(10 <= image_aug, image_aug <= 30))
assert len(np.unique(image_aug)) > 1
def test_other_dtypes_float(self):
# float
for dtype in [np.float16, np.float32]:
min_value, center_value, max_value = iadt.get_value_range_of_dtype(dtype)
if dtype == np.float16:
atol = 1e-3 * max_value
else:
atol = 1e-9 * max_value
_allclose = functools.partial(np.allclose, atol=atol, rtol=0)
image = np.full((3, 3), min_value, dtype=dtype)
aug = iaa.Add(1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, min_value + 1)
image = np.full((3, 3), min_value + 10, dtype=dtype)
aug = iaa.Add(11)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, min_value + 21)
image = np.full((3, 3), max_value - 2, dtype=dtype)
aug = iaa.Add(1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, max_value - 1)
image = np.full((3, 3), max_value - 1, dtype=dtype)
aug = iaa.Add(1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, max_value)
image = np.full((3, 3), max_value - 1, dtype=dtype)
aug = iaa.Add(2)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, max_value)
image = np.full((3, 3), min_value + 10, dtype=dtype)
aug = iaa.Add(-9)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, min_value + 1)
image = np.full((3, 3), min_value + 10, dtype=dtype)
aug = iaa.Add(-10)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, min_value)
image = np.full((3, 3), min_value + 10, dtype=dtype)
aug = iaa.Add(-11)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, min_value)
for _ in sm.xrange(10):
image = np.full((50, 1, 3), 0, dtype=dtype)
aug = iaa.Add(iap.Uniform(-10, 10))
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(-10 - 1e-2 < image_aug, image_aug < 10 + 1e-2))
assert np.allclose(image_aug[1:, :, 0], image_aug[:-1, :, 0])
assert np.allclose(image_aug[..., 0], image_aug[..., 1])
image = np.full((1, 1, 100), 0, dtype=dtype)
aug = iaa.Add(iap.Uniform(-10, 10), per_channel=True)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(-10 - 1e-2 < image_aug, image_aug < 10 + 1e-2))
assert not np.allclose(image_aug[:, :, 1:], image_aug[:, :, :-1])
image = np.full((50, 1, 3), 0, dtype=dtype)
aug = iaa.Add(iap.DiscreteUniform(-10, 10))
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(-10 - 1e-2 < image_aug, image_aug < 10 + 1e-2))
assert np.allclose(image_aug[1:, :, 0], image_aug[:-1, :, 0])
assert np.allclose(image_aug[..., 0], image_aug[..., 1])
image = np.full((1, 1, 100), 0, dtype=dtype)
aug = iaa.Add(iap.DiscreteUniform(-10, 10), per_channel=True)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(-10 - 1e-2 < image_aug, image_aug < 10 + 1e-2))
assert not np.allclose(image_aug[:, :, 1:], image_aug[:, :, :-1])
def test_pickleable(self):
aug = iaa.Add((0, 50), per_channel=True, random_state=1)
runtest_pickleable_uint8_img(aug, iterations=10)
class TestAddElementwise(unittest.TestCase):
def setUp(self):
reseed()
def test___init___bad_datatypes(self):
# test exceptions for wrong parameter types
got_exception = False
try:
_aug = iaa.AddElementwise(value="test")
except Exception:
got_exception = True
assert got_exception
got_exception = False
try:
_aug = iaa.AddElementwise(value=1, per_channel="test")
except Exception:
got_exception = True
assert got_exception
def test_add_zero(self):
# no add, shouldn't change anything
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
images_list = [base_img]
aug = iaa.AddElementwise(value=0)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
expected = images
assert np.array_equal(observed, expected)
assert observed.shape == (1, 3, 3, 1)
observed = aug.augment_images(images_list)
expected = images_list
assert array_equal_lists(observed, expected)
observed = aug_det.augment_images(images)
expected = images
assert np.array_equal(observed, expected)
observed = aug_det.augment_images(images_list)
expected = images_list
assert array_equal_lists(observed, expected)
def test_add_one(self):
# add > 0
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
images_list = [base_img]
aug = iaa.AddElementwise(value=1)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
expected = images + 1
assert np.array_equal(observed, expected)
assert observed.shape == (1, 3, 3, 1)
observed = aug.augment_images(images_list)
expected = [images_list[0] + 1]
assert array_equal_lists(observed, expected)
observed = aug_det.augment_images(images)
expected = images + 1
assert np.array_equal(observed, expected)
observed = aug_det.augment_images(images_list)
expected = [images_list[0] + 1]
assert array_equal_lists(observed, expected)
def test_add_minus_one(self):
# add < 0
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
images_list = [base_img]
aug = iaa.AddElementwise(value=-1)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
expected = images - 1
assert np.array_equal(observed, expected)
observed = aug.augment_images(images_list)
expected = [images_list[0] - 1]
assert array_equal_lists(observed, expected)
observed = aug_det.augment_images(images)
expected = images - 1
assert np.array_equal(observed, expected)
observed = aug_det.augment_images(images_list)
expected = [images_list[0] - 1]
assert array_equal_lists(observed, expected)
def test_uint8_every_possible_value(self):
# uint8, every possible addition for base value 127
for value_type in [int]:
for per_channel in [False, True]:
for value in np.arange(-255, 255+1):
aug = iaa.AddElementwise(value=value_type(value), per_channel=per_channel)
expected = np.clip(127 + value_type(value), 0, 255)
img = np.full((1, 1), 127, dtype=np.uint8)
img_aug = aug.augment_image(img)
assert img_aug.item(0) == expected
img = np.full((1, 1, 3), 127, dtype=np.uint8)
img_aug = aug.augment_image(img)
assert np.all(img_aug == expected)
def test_stochastic_parameters_as_value(self):
# test other parameters
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
aug = iaa.AddElementwise(value=iap.DiscreteUniform(1, 10))
observed = aug.augment_images(images)
assert np.min(observed) >= 100 + 1
assert np.max(observed) <= 100 + 10
aug = iaa.AddElementwise(value=iap.Uniform(1, 10))
observed = aug.augment_images(images)
assert np.min(observed) >= 100 + 1
assert np.max(observed) <= 100 + 10
aug = iaa.AddElementwise(value=iap.Clip(iap.Normal(1, 1), -3, 3))
observed = aug.augment_images(images)
assert np.min(observed) >= 100 - 3
assert np.max(observed) <= 100 + 3
aug = iaa.AddElementwise(value=iap.Discretize(iap.Clip(iap.Normal(1, 1), -3, 3)))
observed = aug.augment_images(images)
assert np.min(observed) >= 100 - 3
assert np.max(observed) <= 100 + 3
def test_keypoints_dont_change(self):
# keypoints shouldn't be changed
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
keypoints = [ia.KeypointsOnImage([ia.Keypoint(x=0, y=0), ia.Keypoint(x=1, y=1),
ia.Keypoint(x=2, y=2)], shape=base_img.shape)]
aug = iaa.AddElementwise(value=1)
aug_det = iaa.AddElementwise(value=1).to_deterministic()
observed = aug.augment_keypoints(keypoints)
expected = keypoints
assert keypoints_equal(observed, expected)
observed = aug_det.augment_keypoints(keypoints)
expected = keypoints
assert keypoints_equal(observed, expected)
def test_tuple_as_value(self):
# varying values
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
aug = iaa.AddElementwise(value=(0, 10))
aug_det = aug.to_deterministic()
last_aug = None
last_aug_det = None
nb_changed_aug = 0
nb_changed_aug_det = 0
nb_iterations = 1000
for i in sm.xrange(nb_iterations):
observed_aug = aug.augment_images(images)
observed_aug_det = aug_det.augment_images(images)
if i == 0:
last_aug = observed_aug
last_aug_det = observed_aug_det
else:
if not np.array_equal(observed_aug, last_aug):
nb_changed_aug += 1
if not np.array_equal(observed_aug_det, last_aug_det):
nb_changed_aug_det += 1
last_aug = observed_aug
last_aug_det = observed_aug_det
assert nb_changed_aug >= int(nb_iterations * 0.7)
assert nb_changed_aug_det == 0
def test_samples_change_by_spatial_location(self):
# values should change between pixels
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
aug = iaa.AddElementwise(value=(-50, 50))
nb_same = 0
nb_different = 0
nb_iterations = 1000
for i in sm.xrange(nb_iterations):
observed_aug = aug.augment_images(images)
observed_aug_flat = observed_aug.flatten()
last = None
for j in sm.xrange(observed_aug_flat.size):
if last is not None:
v = observed_aug_flat[j]
if v - 0.0001 <= last <= v + 0.0001:
nb_same += 1
else:
nb_different += 1
last = observed_aug_flat[j]
assert nb_different > 0.9 * (nb_different + nb_same)
def test_per_channel(self):
# test channelwise
aug = iaa.AddElementwise(value=iap.Choice([0, 1]), per_channel=True)
observed = aug.augment_image(np.zeros((100, 100, 3), dtype=np.uint8))
sums = np.sum(observed, axis=2)
values = np.unique(sums)
assert all([(value in values) for value in [0, 1, 2, 3]])
def test_per_channel_with_probability(self):
# test channelwise with probability
aug = iaa.AddElementwise(value=iap.Choice([0, 1]), per_channel=0.5)
seen = [0, 0]
for _ in sm.xrange(400):
observed = aug.augment_image(np.zeros((20, 20, 3), dtype=np.uint8))
sums = np.sum(observed, axis=2)
values = np.unique(sums)
all_values_found = all([(value in values) for value in [0, 1, 2, 3]])
if all_values_found:
seen[0] += 1
else:
seen[1] += 1
assert 150 < seen[0] < 250
assert 150 < seen[1] < 250
def test_zero_sized_axes(self):
shapes = [
(0, 0),
(0, 1),
(1, 0),
(0, 1, 0),
(1, 0, 0),
(0, 1, 1),
(1, 0, 1)
]
for shape in shapes:
with self.subTest(shape=shape):
image = np.zeros(shape, dtype=np.uint8)
aug = iaa.AddElementwise(1)
image_aug = aug(image=image)
assert np.all(image_aug == 1)
assert image_aug.dtype.name == "uint8"
assert image_aug.shape == image.shape
def test_unusual_channel_numbers(self):
shapes = [
(1, 1, 4),
(1, 1, 5),
(1, 1, 512),
(1, 1, 513)
]
for shape in shapes:
with self.subTest(shape=shape):
image = np.zeros(shape, dtype=np.uint8)
aug = iaa.AddElementwise(1)
image_aug = aug(image=image)
assert np.all(image_aug == 1)
assert image_aug.dtype.name == "uint8"
assert image_aug.shape == image.shape
def test_get_parameters(self):
# test get_parameters()
aug = iaa.AddElementwise(value=1, per_channel=False)
params = aug.get_parameters()
assert isinstance(params[0], iap.Deterministic)
assert isinstance(params[1], iap.Deterministic)
assert params[0].value == 1
assert params[1].value == 0
def test_heatmaps_dont_change(self):
# test heatmaps (not affected by augmenter)
aug = iaa.AddElementwise(value=10)
hm = ia.quokka_heatmap()
hm_aug = aug.augment_heatmaps([hm])[0]
assert np.allclose(hm.arr_0to1, hm_aug.arr_0to1)
def test_other_dtypes_bool(self):
# bool
image = np.zeros((3, 3), dtype=bool)
aug = iaa.AddElementwise(value=1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 1)
image = np.full((3, 3), True, dtype=bool)
aug = iaa.AddElementwise(value=1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 1)
image = np.full((3, 3), True, dtype=bool)
aug = iaa.AddElementwise(value=-1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 0)
image = np.full((3, 3), True, dtype=bool)
aug = iaa.AddElementwise(value=-2)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 0)
def test_other_dtypes_uint_int(self):
# uint, int
for dtype in [np.uint8, np.uint16, np.int8, np.int16]:
min_value, center_value, max_value = iadt.get_value_range_of_dtype(dtype)
image = np.full((3, 3), min_value, dtype=dtype)
aug = iaa.AddElementwise(1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == min_value + 1)
image = np.full((3, 3), min_value + 10, dtype=dtype)
aug = iaa.AddElementwise(11)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == min_value + 21)
image = np.full((3, 3), max_value - 2, dtype=dtype)
aug = iaa.AddElementwise(1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == max_value - 1)
image = np.full((3, 3), max_value - 1, dtype=dtype)
aug = iaa.AddElementwise(1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == max_value)
image = np.full((3, 3), max_value - 1, dtype=dtype)
aug = iaa.AddElementwise(2)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == max_value)
image = np.full((3, 3), min_value + 10, dtype=dtype)
aug = iaa.AddElementwise(-9)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == min_value + 1)
image = np.full((3, 3), min_value + 10, dtype=dtype)
aug = iaa.AddElementwise(-10)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == min_value)
image = np.full((3, 3), min_value + 10, dtype=dtype)
aug = iaa.AddElementwise(-11)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == min_value)
for _ in sm.xrange(10):
image = np.full((5, 5, 3), 20, dtype=dtype)
aug = iaa.AddElementwise(iap.Uniform(-10, 10))
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(10 <= image_aug, image_aug <= 30))
assert len(np.unique(image_aug)) > 1
assert np.all(image_aug[..., 0] == image_aug[..., 1])
image = np.full((1, 1, 100), 20, dtype=dtype)
aug = iaa.AddElementwise(iap.Uniform(-10, 10), per_channel=True)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(10 <= image_aug, image_aug <= 30))
assert len(np.unique(image_aug)) > 1
image = np.full((5, 5, 3), 20, dtype=dtype)
aug = iaa.AddElementwise(iap.DiscreteUniform(-10, 10))
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(10 <= image_aug, image_aug <= 30))
assert len(np.unique(image_aug)) > 1
assert np.all(image_aug[..., 0] == image_aug[..., 1])
image = np.full((1, 1, 100), 20, dtype=dtype)
aug = iaa.AddElementwise(iap.DiscreteUniform(-10, 10), per_channel=True)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(10 <= image_aug, image_aug <= 30))
assert len(np.unique(image_aug)) > 1
def test_other_dtypes_float(self):
# float
for dtype in [np.float16, np.float32]:
min_value, center_value, max_value = iadt.get_value_range_of_dtype(dtype)
if dtype == np.float16:
atol = 1e-3 * max_value
else:
atol = 1e-9 * max_value
_allclose = functools.partial(np.allclose, atol=atol, rtol=0)
image = np.full((3, 3), min_value, dtype=dtype)
aug = iaa.AddElementwise(1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, min_value + 1)
image = np.full((3, 3), min_value + 10, dtype=dtype)
aug = iaa.AddElementwise(11)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, min_value + 21)
image = np.full((3, 3), max_value - 2, dtype=dtype)
aug = iaa.AddElementwise(1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, max_value - 1)
image = np.full((3, 3), max_value - 1, dtype=dtype)
aug = iaa.AddElementwise(1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, max_value)
image = np.full((3, 3), max_value - 1, dtype=dtype)
aug = iaa.AddElementwise(2)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, max_value)
image = np.full((3, 3), min_value + 10, dtype=dtype)
aug = iaa.AddElementwise(-9)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, min_value + 1)
image = np.full((3, 3), min_value + 10, dtype=dtype)
aug = iaa.AddElementwise(-10)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, min_value)
image = np.full((3, 3), min_value + 10, dtype=dtype)
aug = iaa.AddElementwise(-11)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, min_value)
for _ in sm.xrange(10):
image = np.full((50, 1, 3), 0, dtype=dtype)
aug = iaa.AddElementwise(iap.Uniform(-10, 10))
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(-10 - 1e-2 < image_aug, image_aug < 10 + 1e-2))
assert not np.allclose(image_aug[1:, :, 0], image_aug[:-1, :, 0])
assert np.allclose(image_aug[..., 0], image_aug[..., 1])
image = np.full((1, 1, 100), 0, dtype=dtype)
aug = iaa.AddElementwise(iap.Uniform(-10, 10), per_channel=True)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(-10 - 1e-2 < image_aug, image_aug < 10 + 1e-2))
assert not np.allclose(image_aug[:, :, 1:], image_aug[:, :, :-1])
image = np.full((50, 1, 3), 0, dtype=dtype)
aug = iaa.AddElementwise(iap.DiscreteUniform(-10, 10))
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(-10 - 1e-2 < image_aug, image_aug < 10 + 1e-2))
assert not np.allclose(image_aug[1:, :, 0], image_aug[:-1, :, 0])
assert np.allclose(image_aug[..., 0], image_aug[..., 1])
image = np.full((1, 1, 100), 0, dtype=dtype)
aug = iaa.AddElementwise(iap.DiscreteUniform(-10, 10), per_channel=True)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(-10 - 1e-2 < image_aug, image_aug < 10 + 1e-2))
assert not np.allclose(image_aug[:, :, 1:], image_aug[:, :, :-1])
def test_pickleable(self):
aug = iaa.AddElementwise((0, 50), per_channel=True, random_state=1)
runtest_pickleable_uint8_img(aug, iterations=2)
class TestAdditiveGaussianNoise(unittest.TestCase):
def setUp(self):
reseed()
def test_loc_zero_scale_zero(self):
# no noise, shouldn't change anything
base_img = np.ones((16, 16, 1), dtype=np.uint8) * 128
images = np.array([base_img])
aug = iaa.AdditiveGaussianNoise(loc=0, scale=0)
observed = aug.augment_images(images)
expected = images
assert np.array_equal(observed, expected)
def test_loc_zero_scale_nonzero(self):
# zero-centered noise
base_img = np.ones((16, 16, 1), dtype=np.uint8) * 128
images = np.array([base_img])
images_list = [base_img]
keypoints = [ia.KeypointsOnImage([ia.Keypoint(x=0, y=0), ia.Keypoint(x=1, y=1),
ia.Keypoint(x=2, y=2)], shape=base_img.shape)]
aug = iaa.AdditiveGaussianNoise(loc=0, scale=0.2 * 255)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
assert not np.array_equal(observed, images)
observed = aug_det.augment_images(images)
assert not np.array_equal(observed, images)
observed = aug.augment_images(images_list)
assert not array_equal_lists(observed, images_list)
observed = aug_det.augment_images(images_list)
assert not array_equal_lists(observed, images_list)
observed = aug.augment_keypoints(keypoints)
assert keypoints_equal(observed, keypoints)
observed = aug_det.augment_keypoints(keypoints)
assert keypoints_equal(observed, keypoints)
def test_std_dev_of_added_noise_matches_scale(self):
# std correct?
base_img = np.ones((16, 16, 1), dtype=np.uint8) * 128
aug = iaa.AdditiveGaussianNoise(loc=0, scale=0.2 * 255)
images = np.ones((1, 1, 1, 1), dtype=np.uint8) * 128
nb_iterations = 1000
values = []
for i in sm.xrange(nb_iterations):
images_aug = aug.augment_images(images)
values.append(images_aug[0, 0, 0, 0])
values = np.array(values)
assert np.min(values) == 0
assert 0.1 < np.std(values) / 255.0 < 0.4
def test_nonzero_loc(self):
# non-zero loc
base_img = np.ones((16, 16, 1), dtype=np.uint8) * 128
aug = iaa.AdditiveGaussianNoise(loc=0.25 * 255, scale=0.01 * 255)
images = np.ones((1, 1, 1, 1), dtype=np.uint8) * 128
nb_iterations = 1000
values = []
for i in sm.xrange(nb_iterations):
images_aug = aug.augment_images(images)
values.append(images_aug[0, 0, 0, 0] - 128)
values = np.array(values)
assert 54 < np.average(values) < 74 # loc=0.25 should be around 255*0.25=64 average
def test_tuple_as_loc(self):
# varying locs
base_img = np.ones((16, 16, 1), dtype=np.uint8) * 128
aug = iaa.AdditiveGaussianNoise(loc=(0, 0.5 * 255), scale=0.0001 * 255)
aug_det = aug.to_deterministic()
images = np.ones((1, 1, 1, 1), dtype=np.uint8) * 128
last_aug = None
last_aug_det = None
nb_changed_aug = 0
nb_changed_aug_det = 0
nb_iterations = 1000
for i in sm.xrange(nb_iterations):
observed_aug = aug.augment_images(images)
observed_aug_det = aug_det.augment_images(images)
if i == 0:
last_aug = observed_aug
last_aug_det = observed_aug_det
else:
if not np.array_equal(observed_aug, last_aug):
nb_changed_aug += 1
if not np.array_equal(observed_aug_det, last_aug_det):
nb_changed_aug_det += 1
last_aug = observed_aug
last_aug_det = observed_aug_det
assert nb_changed_aug >= int(nb_iterations * 0.95)
assert nb_changed_aug_det == 0
def test_stochastic_parameter_as_loc(self):
# varying locs by stochastic param
base_img = np.ones((16, 16, 1), dtype=np.uint8) * 128
aug = iaa.AdditiveGaussianNoise(loc=iap.Choice([-20, 20]), scale=0.0001 * 255)
images = np.ones((1, 1, 1, 1), dtype=np.uint8) * 128
seen = [0, 0]
for i in sm.xrange(200):
observed = aug.augment_images(images)
mean = np.mean(observed)
diff_m20 = abs(mean - (128-20))
diff_p20 = abs(mean - (128+20))
if diff_m20 <= 1:
seen[0] += 1
elif diff_p20 <= 1:
seen[1] += 1
else:
assert False
assert 75 < seen[0] < 125
assert 75 < seen[1] < 125
def test_tuple_as_scale(self):
# varying stds
base_img = np.ones((16, 16, 1), dtype=np.uint8) * 128
aug = iaa.AdditiveGaussianNoise(loc=0, scale=(0.01 * 255, 0.2 * 255))
aug_det = aug.to_deterministic()
images = np.ones((1, 1, 1, 1), dtype=np.uint8) * 128
last_aug = None
last_aug_det = None
nb_changed_aug = 0
nb_changed_aug_det = 0
nb_iterations = 1000
for i in sm.xrange(nb_iterations):
observed_aug = aug.augment_images(images)
observed_aug_det = aug_det.augment_images(images)
if i == 0:
last_aug = observed_aug
last_aug_det = observed_aug_det
else:
if not np.array_equal(observed_aug, last_aug):
nb_changed_aug += 1
if not np.array_equal(observed_aug_det, last_aug_det):
nb_changed_aug_det += 1
last_aug = observed_aug
last_aug_det = observed_aug_det
assert nb_changed_aug >= int(nb_iterations * 0.95)
assert nb_changed_aug_det == 0
def test_stochastic_parameter_as_scale(self):
# varying stds by stochastic param
base_img = np.ones((16, 16, 1), dtype=np.uint8) * 128
aug = iaa.AdditiveGaussianNoise(loc=0, scale=iap.Choice([1, 20]))
images = np.ones((1, 20, 20, 1), dtype=np.uint8) * 128
seen = [0, 0, 0]
for i in sm.xrange(200):
observed = aug.augment_images(images)
std = np.std(observed.astype(np.int32) - 128)
diff_1 = abs(std - 1)
diff_20 = abs(std - 20)
if diff_1 <= 2:
seen[0] += 1
elif diff_20 <= 5:
seen[1] += 1
else:
seen[2] += 1
assert seen[2] <= 5
assert 75 < seen[0] < 125
assert 75 < seen[1] < 125
def test___init___bad_datatypes(self):
# test exceptions for wrong parameter types
got_exception = False
try:
_ = iaa.AdditiveGaussianNoise(loc="test")
except Exception:
got_exception = True
assert got_exception
got_exception = False
try:
_ = iaa.AdditiveGaussianNoise(scale="test")
except Exception:
got_exception = True
assert got_exception
def test_heatmaps_dont_change(self):
# test heatmaps (not affected by augmenter)
base_img = np.ones((16, 16, 1), dtype=np.uint8) * 128
aug = iaa.AdditiveGaussianNoise(loc=0.5, scale=10)
hm = ia.quokka_heatmap()
hm_aug = aug.augment_heatmaps([hm])[0]
assert np.allclose(hm.arr_0to1, hm_aug.arr_0to1)
def test_pickleable(self):
aug = iaa.AdditiveGaussianNoise(scale=(0.1, 10), per_channel=True,
random_state=1)
runtest_pickleable_uint8_img(aug, iterations=2)
class TestDropout(unittest.TestCase):
def setUp(self):
reseed()
def test_p_is_zero(self):
# no dropout, shouldn't change anything
base_img = np.ones((512, 512, 1), dtype=np.uint8) * 255
images = np.array([base_img])
images_list = [base_img]
aug = iaa.Dropout(p=0)
observed = aug.augment_images(images)
expected = images
assert np.array_equal(observed, expected)
observed = aug.augment_images(images_list)
expected = images_list
assert array_equal_lists(observed, expected)
# 100% dropout, should drop everything
aug = iaa.Dropout(p=1.0)
observed = aug.augment_images(images)
expected = np.zeros((1, 512, 512, 1), dtype=np.uint8)
assert np.array_equal(observed, expected)
observed = aug.augment_images(images_list)
expected = [np.zeros((512, 512, 1), dtype=np.uint8)]
assert array_equal_lists(observed, expected)
def test_p_is_50_percent(self):
# 50% dropout
base_img = np.ones((512, 512, 1), dtype=np.uint8) * 255
images = np.array([base_img])
images_list = [base_img]
keypoints = [ia.KeypointsOnImage([ia.Keypoint(x=0, y=0), ia.Keypoint(x=1, y=1),
ia.Keypoint(x=2, y=2)], shape=base_img.shape)]
aug = iaa.Dropout(p=0.5)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
assert not np.array_equal(observed, images)
percent_nonzero = len(observed.flatten().nonzero()[0]) \
/ (base_img.shape[0] * base_img.shape[1] * base_img.shape[2])
assert 0.35 <= (1 - percent_nonzero) <= 0.65
observed = aug_det.augment_images(images)
assert not np.array_equal(observed, images)
percent_nonzero = len(observed.flatten().nonzero()[0]) \
/ (base_img.shape[0] * base_img.shape[1] * base_img.shape[2])
assert 0.35 <= (1 - percent_nonzero) <= 0.65
observed = aug.augment_images(images_list)
assert not array_equal_lists(observed, images_list)
percent_nonzero = len(observed[0].flatten().nonzero()[0]) \
/ (base_img.shape[0] * base_img.shape[1] * base_img.shape[2])
assert 0.35 <= (1 - percent_nonzero) <= 0.65
observed = aug_det.augment_images(images_list)
assert not array_equal_lists(observed, images_list)
percent_nonzero = len(observed[0].flatten().nonzero()[0]) \
/ (base_img.shape[0] * base_img.shape[1] * base_img.shape[2])
assert 0.35 <= (1 - percent_nonzero) <= 0.65
observed = aug.augment_keypoints(keypoints)
assert keypoints_equal(observed, keypoints)
observed = aug_det.augment_keypoints(keypoints)
assert keypoints_equal(observed, keypoints)
def test_tuple_as_p(self):
# varying p
aug = iaa.Dropout(p=(0.0, 1.0))
aug_det = aug.to_deterministic()
images = np.ones((1, 8, 8, 1), dtype=np.uint8) * 255
last_aug = None
last_aug_det = None
nb_changed_aug = 0
nb_changed_aug_det = 0
nb_iterations = 1000
for i in sm.xrange(nb_iterations):
observed_aug = aug.augment_images(images)
observed_aug_det = aug_det.augment_images(images)
if i == 0:
last_aug = observed_aug
last_aug_det = observed_aug_det
else:
if not np.array_equal(observed_aug, last_aug):
nb_changed_aug += 1
if not np.array_equal(observed_aug_det, last_aug_det):
nb_changed_aug_det += 1
last_aug = observed_aug
last_aug_det = observed_aug_det
assert nb_changed_aug >= int(nb_iterations * 0.95)
assert nb_changed_aug_det == 0
def test_list_as_p(self):
aug = iaa.Dropout(p=[0.0, 0.5, 1.0])
images = np.ones((1, 20, 20, 1), dtype=np.uint8) * 255
nb_seen = [0, 0, 0, 0]
nb_iterations = 1000
for i in sm.xrange(nb_iterations):
observed_aug = aug.augment_images(images)
n_dropped = np.sum(observed_aug == 0)
p_observed = n_dropped / observed_aug.size
if 0 <= p_observed <= 0.01:
nb_seen[0] += 1
elif 0.5 - 0.05 <= p_observed <= 0.5 + 0.05:
nb_seen[1] += 1
elif 1.0-0.01 <= p_observed <= 1.0:
nb_seen[2] += 1
else:
nb_seen[3] += 1
assert np.allclose(nb_seen[0:3], nb_iterations*0.33, rtol=0, atol=75)
assert nb_seen[3] < 30
def test_stochastic_parameter_as_p(self):
# varying p by stochastic parameter
aug = iaa.Dropout(p=iap.Binomial(1-iap.Choice([0.0, 0.5])))
images = np.ones((1, 20, 20, 1), dtype=np.uint8) * 255
seen = [0, 0, 0]
for i in sm.xrange(400):
observed = aug.augment_images(images)
p = np.mean(observed == 0)
if 0.4 < p < 0.6:
seen[0] += 1
elif p < 0.1:
seen[1] += 1
else:
seen[2] += 1
assert seen[2] <= 10
assert 150 < seen[0] < 250
assert 150 < seen[1] < 250
def test___init___bad_datatypes(self):
# test exception for wrong parameter datatype
got_exception = False
try:
_aug = iaa.Dropout(p="test")
except Exception:
got_exception = True
assert got_exception
def test_heatmaps_dont_change(self):
# test heatmaps (not affected by augmenter)
aug = iaa.Dropout(p=1.0)
hm = ia.quokka_heatmap()
hm_aug = aug.augment_heatmaps([hm])[0]
assert np.allclose(hm.arr_0to1, hm_aug.arr_0to1)
def test_pickleable(self):
aug = iaa.Dropout(p=0.5, per_channel=True, random_state=1)
runtest_pickleable_uint8_img(aug, iterations=3)
class TestCoarseDropout(unittest.TestCase):
def setUp(self):
reseed()
def test_p_is_zero(self):
base_img = np.ones((16, 16, 1), dtype=np.uint8) * 100
aug = iaa.CoarseDropout(p=0, size_px=4, size_percent=None, per_channel=False, min_size=4)
observed = aug.augment_image(base_img)
expected = base_img
assert np.array_equal(observed, expected)
def test_p_is_one(self):
base_img = np.ones((16, 16, 1), dtype=np.uint8) * 100
aug = iaa.CoarseDropout(p=1.0, size_px=4, size_percent=None, per_channel=False, min_size=4)
observed = aug.augment_image(base_img)
expected = np.zeros_like(base_img)
assert np.array_equal(observed, expected)
def test_p_is_50_percent(self):
base_img = np.ones((16, 16, 1), dtype=np.uint8) * 100
aug = iaa.CoarseDropout(p=0.5, size_px=1, size_percent=None, per_channel=False, min_size=1)
averages = []
for _ in sm.xrange(50):
observed = aug.augment_image(base_img)
averages.append(np.average(observed))
assert all([v in [0, 100] for v in averages])
assert 50 - 20 < np.average(averages) < 50 + 20
def test_size_percent(self):
base_img = np.ones((16, 16, 1), dtype=np.uint8) * 100
aug = iaa.CoarseDropout(p=0.5, size_px=None, size_percent=0.001, per_channel=False, min_size=1)
averages = []
for _ in sm.xrange(50):
observed = aug.augment_image(base_img)
averages.append(np.average(observed))
assert all([v in [0, 100] for v in averages])
assert 50 - 20 < np.average(averages) < 50 + 20
def test_per_channel(self):
aug = iaa.CoarseDropout(p=0.5, size_px=1, size_percent=None, per_channel=True, min_size=1)
base_img = np.ones((4, 4, 3), dtype=np.uint8) * 100
found = False
for _ in sm.xrange(100):
observed = aug.augment_image(base_img)
avgs = np.average(observed, axis=(0, 1))
if len(set(avgs)) >= 2:
found = True
break
assert found
def test_stochastic_parameter_as_p(self):
# varying p by stochastic parameter
aug = iaa.CoarseDropout(p=iap.Binomial(1-iap.Choice([0.0, 0.5])), size_px=50)
images = np.ones((1, 100, 100, 1), dtype=np.uint8) * 255
seen = [0, 0, 0]
for i in sm.xrange(400):
observed = aug.augment_images(images)
p = np.mean(observed == 0)
if 0.4 < p < 0.6:
seen[0] += 1
elif p < 0.1:
seen[1] += 1
else:
seen[2] += 1
assert seen[2] <= 10
assert 150 < seen[0] < 250
assert 150 < seen[1] < 250
def test___init___bad_datatypes(self):
# test exception for bad parameters
got_exception = False
try:
_ = iaa.CoarseDropout(p="test")
except Exception:
got_exception = True
assert got_exception
def test___init___size_px_and_size_percent_both_none(self):
got_exception = False
try:
_ = iaa.CoarseDropout(p=0.5, size_px=None, size_percent=None)
except Exception:
got_exception = True
assert got_exception
def test_heatmaps_dont_change(self):
# test heatmaps (not affected by augmenter)
aug = iaa.CoarseDropout(p=1.0, size_px=2)
hm = ia.quokka_heatmap()
hm_aug = aug.augment_heatmaps([hm])[0]
assert np.allclose(hm.arr_0to1, hm_aug.arr_0to1)
def test_pickleable(self):
aug = iaa.CoarseDropout(p=0.5, size_px=10, per_channel=True,
random_state=1)
runtest_pickleable_uint8_img(aug, iterations=10, shape=(40, 40, 3))
class TestDropout2d(unittest.TestCase):
def setUp(self):
reseed()
def test___init___defaults(self):
aug = iaa.Dropout2d(p=0)
assert isinstance(aug.p, iap.Binomial)
assert np.isclose(aug.p.p.value, 1.0)
assert aug.nb_keep_channels == 1
def test___init___p_is_float(self):
aug = iaa.Dropout2d(p=0.7)
assert isinstance(aug.p, iap.Binomial)
assert np.isclose(aug.p.p.value, 0.3)
assert aug.nb_keep_channels == 1
def test___init___nb_keep_channels_is_int(self):
aug = iaa.Dropout2d(p=0, nb_keep_channels=2)
assert isinstance(aug.p, iap.Binomial)
assert np.isclose(aug.p.p.value, 1.0)
assert aug.nb_keep_channels == 2
def test_no_images_in_batch(self):
aug = iaa.Dropout2d(p=0.0, nb_keep_channels=0)
heatmaps = np.float32([
[0.0, 1.0],
[0.0, 1.0]
])
heatmaps = ia.HeatmapsOnImage(heatmaps, shape=(2, 2, 3))
heatmaps_aug = aug(heatmaps=heatmaps)
assert np.allclose(heatmaps_aug.arr_0to1, heatmaps.arr_0to1)
def test_p_is_1(self):
image = np.full((1, 2, 3), 255, dtype=np.uint8)
aug = iaa.Dropout2d(p=1.0, nb_keep_channels=0)
image_aug = aug(image=image)
assert image_aug.shape == image.shape
assert image_aug.dtype.name == image.dtype.name
assert np.sum(image_aug) == 0
def test_p_is_1_heatmaps(self):
aug = iaa.Dropout2d(p=1.0, nb_keep_channels=0)
arr = np.float32([
[0.0, 1.0],
[0.0, 1.0]
])
hm = ia.HeatmapsOnImage(arr, shape=(2, 2, 3))
heatmaps_aug = aug(heatmaps=hm)
assert np.allclose(heatmaps_aug.arr_0to1, 0.0)
def test_p_is_1_segmentation_maps(self):
aug = iaa.Dropout2d(p=1.0, nb_keep_channels=0)
arr = np.int32([
[0, 1],
[0, 1]
])
segmaps = ia.SegmentationMapsOnImage(arr, shape=(2, 2, 3))
segmaps_aug = aug(segmentation_maps=segmaps)
assert np.allclose(segmaps_aug.arr, 0.0)
def test_p_is_1_cbaois(self):
cbaois = [
ia.KeypointsOnImage([ia.Keypoint(x=0, y=1)], shape=(2, 2, 3)),
ia.BoundingBoxesOnImage([ia.BoundingBox(x1=0, y1=1, x2=2, y2=3)],
shape=(2, 2, 3)),
ia.PolygonsOnImage([ia.Polygon([(0, 0), (1, 0), (1, 1)])],
shape=(2, 2, 3)),
ia.LineStringsOnImage([ia.LineString([(0, 0), (1, 0)])],
shape=(2, 2, 3))
]
cbaoi_names = ["keypoints", "bounding_boxes", "polygons",
"line_strings"]
aug = iaa.Dropout2d(p=1.0, nb_keep_channels=0)
for name, cbaoi in zip(cbaoi_names, cbaois):
with self.subTest(datatype=name):
cbaoi_aug = aug(**{name: cbaoi})
assert cbaoi_aug.shape == (2, 2, 3)
assert cbaoi_aug.items == []
def test_p_is_1_heatmaps__keep_one_channel(self):
aug = iaa.Dropout2d(p=1.0, nb_keep_channels=1)
arr = np.float32([
[0.0, 1.0],
[0.0, 1.0]
])
hm = ia.HeatmapsOnImage(arr, shape=(2, 2, 3))
heatmaps_aug = aug(heatmaps=hm)
assert np.allclose(heatmaps_aug.arr_0to1, hm.arr_0to1)
def test_p_is_1_segmentation_maps__keep_one_channel(self):
aug = iaa.Dropout2d(p=1.0, nb_keep_channels=1)
arr = np.int32([
[0, 1],
[0, 1]
])
segmaps = ia.SegmentationMapsOnImage(arr, shape=(2, 2, 3))
segmaps_aug = aug(segmentation_maps=segmaps)
assert np.allclose(segmaps_aug.arr, segmaps.arr)
def test_p_is_1_cbaois__keep_one_channel(self):
cbaois = [
ia.KeypointsOnImage([ia.Keypoint(x=0, y=1)], shape=(2, 2, 3)),
ia.BoundingBoxesOnImage([ia.BoundingBox(x1=0, y1=1, x2=2, y2=3)],
shape=(2, 2, 3)),
ia.PolygonsOnImage([ia.Polygon([(0, 0), (1, 0), (1, 1)])],
shape=(2, 2, 3)),
ia.LineStringsOnImage([ia.LineString([(0, 0), (1, 0)])],
shape=(2, 2, 3))
]
cbaoi_names = ["keypoints", "bounding_boxes", "polygons",
"line_strings"]
aug = iaa.Dropout2d(p=1.0, nb_keep_channels=1)
for name, cbaoi in zip(cbaoi_names, cbaois):
with self.subTest(datatype=name):
cbaoi_aug = aug(**{name: cbaoi})
assert cbaoi_aug.shape == (2, 2, 3)
assert np.allclose(
cbaoi_aug.items[0].coords,
cbaoi.items[0].coords
)
def test_p_is_0(self):
image = np.full((1, 2, 3), 255, dtype=np.uint8)
aug = iaa.Dropout2d(p=0.0, nb_keep_channels=0)
image_aug = aug(image=image)
assert image_aug.shape == image.shape
assert image_aug.dtype.name == image.dtype.name
assert np.array_equal(image_aug, image)
def test_p_is_0_heatmaps(self):
aug = iaa.Dropout2d(p=0.0, nb_keep_channels=0)
arr = np.float32([
[0.0, 1.0],
[0.0, 1.0]
])
hm = ia.HeatmapsOnImage(arr, shape=(2, 2, 3))
heatmaps_aug = aug(heatmaps=hm)
assert np.allclose(heatmaps_aug.arr_0to1, hm.arr_0to1)
def test_p_is_0_segmentation_maps(self):
aug = iaa.Dropout2d(p=0.0, nb_keep_channels=0)
arr = np.int32([
[0, 1],
[0, 1]
])
segmaps = ia.SegmentationMapsOnImage(arr, shape=(2, 2, 3))
segmaps_aug = aug(segmentation_maps=segmaps)
assert np.allclose(segmaps_aug.arr, segmaps.arr)
def test_p_is_0_cbaois(self):
cbaois = [
ia.KeypointsOnImage([ia.Keypoint(x=0, y=1)], shape=(2, 2, 3)),
ia.BoundingBoxesOnImage([ia.BoundingBox(x1=0, y1=1, x2=2, y2=3)],
shape=(2, 2, 3)),
ia.PolygonsOnImage([ia.Polygon([(0, 0), (1, 0), (1, 1)])],
shape=(2, 2, 3)),
ia.LineStringsOnImage([ia.LineString([(0, 0), (1, 0)])],
shape=(2, 2, 3))
]
cbaoi_names = ["keypoints", "bounding_boxes", "polygons",
"line_strings"]
aug = iaa.Dropout2d(p=0.0, nb_keep_channels=0)
for name, cbaoi in zip(cbaoi_names, cbaois):
with self.subTest(datatype=name):
cbaoi_aug = aug(**{name: cbaoi})
assert cbaoi_aug.shape == (2, 2, 3)
assert np.allclose(
cbaoi_aug.items[0].coords,
cbaoi.items[0].coords
)
def test_p_is_075(self):
image = np.full((1, 1, 3000), 255, dtype=np.uint8)
aug = iaa.Dropout2d(p=0.75, nb_keep_channels=0)
image_aug = aug(image=image)
nb_kept = | np.sum(image_aug == 255) | numpy.sum |
import argparse
import os
import pydicom
import numpy as np
from tqdm import tqdm
import pandas as pd
import hashlib
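# This script anonymizes the DICOM files under dataset/{test,train}: it keeps only the tags
# whitelisted in tags.csv, replaces MediaStorageSOPInstanceUID with an MD5 hash, saves the
# masked files under dataset_masked/, and collects a UID-to-hash mapping in a DataFrame.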
if __name__ == "__main__":
os.makedirs('dataset_masked', exist_ok = True)
tag_df = pd.read_csv('tags.csv')
SOPInstanceUIDs = []
StudyInstanceUIDs = []
encoded_ids = []
for mset in ['test', 'train']:
os.makedirs('dataset_masked/' + mset, exist_ok = True)
for rdir,_,files in os.walk('dataset/' + mset):
for file in files:
dicom_path = os.path.join(rdir, file)
print(dicom_path)
dcm = pydicom.read_file(dicom_path)
dcm.file_meta.ImplementationClassUID = "1.2.3.4"
MediaStorageSOPInstanceUID = dcm.file_meta.MediaStorageSOPInstanceUID
hash_object = hashlib.md5(MediaStorageSOPInstanceUID.encode())
encoded = str(hash_object.hexdigest())
dcm.file_meta.MediaStorageSOPInstanceUID = encoded
SOPInstanceUIDs.append(dcm.SOPInstanceUID)
StudyInstanceUIDs.append(dcm.StudyInstanceUID)
encoded_ids.append(encoded)
dcm_keys = list(dcm.keys())
for k in dcm_keys:
if k not in tag_df.keyword.values:
dcm.pop(k, None)
print(dcm)
new_dicom_path = dicom_path.replace('dataset/', 'dataset_masked/').replace(file, encoded+'.dicom')
dcm.save_as(new_dicom_path)
image_uid_encode_df = pd.DataFrame()
image_uid_encode_df['encode_id'] = np.array(encoded_ids)
image_uid_encode_df['SOPInstanceUID'] = np.array(SOPInstanceUIDs)
image_uid_encode_df['StudyInstanceUID'] = | np.array(StudyInstanceUIDs) | numpy.array |
from unittest import TestCase
import numpy as np
import quick_pandas.ext.argsort
RANGE = 100000000
SIZE = 300000
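# The tests below exercise quick_pandas' argsort comparator (compare_py) on int, float
# (including NaN/inf) and string arrays; RANGE and SIZE are presumably used by larger
# randomized sorting tests outside this excerpt.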
class TestSort(TestCase):
def test_compare_int(self):
self.assertEqual(-1, quick_pandas.ext.argsort.compare_py([np.array([1, 2])], 0, 1))
self.assertEqual(1, quick_pandas.ext.argsort.compare_py([np.array([2, 1])], 0, 1))
self.assertEqual(1, quick_pandas.ext.argsort.compare_py([np.array([1, 2])], 1, 0))
def test_compare_float(self):
self.assertEqual(1, quick_pandas.ext.argsort.compare_py([np.array([2, 1], dtype=np.float64)], 0, 1))
self.assertEqual(-1, quick_pandas.ext.argsort.compare_py([np.array([1, 2], dtype=np.float64)], 0, 1))
self.assertEqual(-1, quick_pandas.ext.argsort.compare_py([np.array([1, np.nan], dtype=np.float64)], 0, 1))
self.assertEqual(0, quick_pandas.ext.argsort.compare_py([np.array([np.nan, np.nan], dtype=np.float64)], 0, 1))
self.assertEqual(-1, quick_pandas.ext.argsort.compare_py([np.array([1, np.inf], dtype=np.float64)], 0, 1))
self.assertEqual(-1, quick_pandas.ext.argsort.compare_py([np.array([-np.inf, 2], dtype=np.float64)], 0, 1))
self.assertEqual(-1, quick_pandas.ext.argsort.compare_py([np.array([-np.inf, np.inf], dtype=np.float64)], 0, 1))
self.assertEqual(1, quick_pandas.ext.argsort.compare_py([np.array([np.nan, np.inf], dtype=np.float64)], 0, 1))
self.assertEqual(1, quick_pandas.ext.argsort.compare_py([np.array([-np.nan, np.inf], dtype=np.float64)], 0, 1))
def test_compare_string(self):
self.assertEqual(1, quick_pandas.ext.argsort.compare_py([np.array([2, 1], dtype=str)], 0, 1))
self.assertEqual(-1, quick_pandas.ext.argsort.compare_py([np.array([1, 2], dtype=str)], 0, 1))
self.assertEqual(1, quick_pandas.ext.argsort.compare_py([np.array(['1', ''], dtype=str)], 0, 1))
self.assertEqual(-1, quick_pandas.ext.argsort.compare_py([ | np.array(['112', '113'], dtype=str) | numpy.array |
"""Unit tests specific to fourier transform tools."""
import pytest
import numpy as np
from prysm import fttools
ARRAY_SIZES = (8, 16, 32, 64, 128, 256, 512, 1024)
@pytest.mark.parametrize('samples', ARRAY_SIZES)
def test_mtp_equivalent_to_fft(samples):
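# "mtp" presumably refers to prysm's matrix triple product DFT; the test checks that it
# reproduces numpy's (shifted) 2D FFT on random square inputs.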
inp = np.random.rand(samples, samples)
fft = np.fft.fftshift(np.fft.fft2( | np.fft.ifftshift(inp) | numpy.fft.ifftshift |
import os
from mmdet.apis import init_detector, inference_detector
import mmcv
from glob import glob
import numpy as np
from tqdm import tqdm
import argparse
def parse_args():
parser = argparse.ArgumentParser(description='MMDet test detector')
parser.add_argument('config', help='test config file path')
parser.add_argument('checkpoint', help='checkpoint file')
parser.add_argument('--out', help='output result file')
args = parser.parse_args()
return args
def main():
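# Runs single-image inference over the siweituxin test images with a pretrained MMDetection
# model; the truncated tail of this loop presumably formats the stacked boxes and per-class
# labels and writes them to args.out.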
args = parse_args()
assert args.out, \
('Please specify out path '
'with the argument "--out"')
model = init_detector(args.config, args.checkpoint, device='cuda:0')
with open(args.out, 'w') as f:
for img in tqdm(glob('data/siweituxin/test_images/*.jpg')):
result = inference_detector(model, img)
bbox_result = result
bboxes = np.vstack(bbox_result)
labels = [
| np.full(bbox.shape[0], i, dtype=np.int32) | numpy.full |
import numpy as np
import os
import cv2
import keras.backend as K
from libra.preprocessing.image_preprocessor import (setwise_preprocessing,
csv_preprocessing,
classwise_preprocessing,
set_distinguisher,
already_processed,
single_class_preprocessing)
from libra.query.supplementaries import generate_id
from libra.query.feedforward_nn import logger, clearLog
from keras import Model
from keras.models import Sequential
from keras.layers import (Input, Conv2D, Flatten, Dense, Dropout, LeakyReLU, BatchNormalization, ZeroPadding2D, Reshape, UpSampling2D)
from keras.optimizers import Adam
### Source: https://github.com/mitchelljy/DCGAN-Keras/blob/master/DCGAN.py ###
def build_discriminator(img_shape):
model = Sequential()
model.add(Conv2D(32, (3, 3), strides=2, input_shape=img_shape, padding='same'))
model.add(LeakyReLU(alpha=0.2))
model.add(Dropout(0.25))
model.add(Conv2D(64, (3, 3), strides=2, padding='same'))
model.add(ZeroPadding2D(padding=((0, 1), (0, 1))))
model.add(LeakyReLU(alpha=0.2))
model.add(Dropout(0.25))
model.add(BatchNormalization(momentum=0.8))
model.add(Conv2D(128, (3, 3), strides=2, padding='same'))
model.add(LeakyReLU(alpha=0.2))
model.add(Dropout(0.25))
model.add(BatchNormalization(momentum=0.8))
model.add(Conv2D(256, (3, 3), strides=1, padding='same'))
model.add(LeakyReLU(alpha=0.2))
model.add(Dropout(0.25))
model.add(Conv2D(512, (3, 3), strides=1, padding='same'))
model.add(LeakyReLU(alpha=0.2))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(1, activation='sigmoid'))
#img = Input(shape=img_shape)
#validity = model(img)
#return Model(img, validity)
return model
### Source: https://github.com/mitchelljy/DCGAN-Keras/blob/master/DCGAN.py ###
def build_generator(img_shape, starting_filters = 64, upsample_layers = 5, noise_shape=(100,)):
model = Sequential()
model.add(Dense((img_shape[0] // (2 ** upsample_layers)) *
(img_shape[1] // (2 ** upsample_layers)) *
starting_filters,
activation='relu',
input_shape=noise_shape))
model.add(Reshape((img_shape[0] // (2 ** upsample_layers),
img_shape[1] // (2 ** upsample_layers),
starting_filters)))
model.add(BatchNormalization(momentum=0.8))
model.add(UpSampling2D())
model.add(Conv2D(1024, (3, 3), padding='same', activation='relu'))
model.add(BatchNormalization(momentum=0.8))
model.add(UpSampling2D())
model.add(Conv2D(512, (3, 3), padding='same', activation='relu'))
model.add(BatchNormalization(momentum=0.8))
model.add(UpSampling2D())
model.add(Conv2D(256, (3, 3), padding='same', activation='relu'))
model.add(BatchNormalization(momentum=0.8))
model.add(UpSampling2D())
model.add(Conv2D(128, (3, 3), padding='same', activation='relu'))
model.add(BatchNormalization(momentum=0.8))
model.add(UpSampling2D())
model.add(Conv2D(64, (3, 3), padding='same', activation='relu'))
model.add(BatchNormalization(momentum=0.8))
model.add(Conv2D(32, (3, 3), padding='same', activation='relu'))
model.add(BatchNormalization(momentum=0.8))
model.add(Conv2D(img_shape[2], (3, 3), padding='same', activation='tanh', name='ll'))
#noise = Input(shape=noise_shape)
#img = model(noise)
#return Model(noise, img)
return model
### train the GAN model ###
def train(combined_model, generator, discriminator, x_train=None, epochs=10, batch_size=32, verbose=1):
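# Assumed setup (not shown here): combined_model chains generator -> discriminator, with the
# discriminator typically frozen inside it, so the train_on_batch call below updates only the
# generator. Each "epoch" then runs one generator step plus one discriminator step that is
# split between a generated half-batch and a real half-batch; per-step histories are returned.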
# Normalize input to the range [-1, 1]
x_train = (x_train.astype(np.float32) - 127.5) / 127.5
loss_discriminator_history = []
acc_discriminator_history = []
loss_generator_history = []
for epoch in range(epochs):
#Train generator model to generate real-looking images that the discriminator classifies as real
noise = np.random.normal(0, 1, (batch_size, 100))
loss_generator = combined_model.train_on_batch(noise, np.ones(batch_size))
#First half of the batch: Generate fake images and train discriminator on them
noise = np.random.normal(0, 1, (batch_size//2, 100))
fake_images = generator.predict(noise)
loss_discriminator_fake, acc_discriminator_fake = discriminator.train_on_batch(fake_images, np.zeros(batch_size//2))
#Second half of the batch: Select real images from the training set uniformly at random and train discriminator on them
idx = np.random.randint(0, len(x_train), batch_size//2)
real_images = x_train[idx]
loss_discriminator_real, acc_discriminator_real = discriminator.train_on_batch(real_images, np.ones(batch_size//2))
loss_discriminator = 0.5 * np.add(loss_discriminator_fake, loss_discriminator_real)
acc_discriminator = 0.5 * np.add(acc_discriminator_fake, acc_discriminator_real)
loss_discriminator_history.append(loss_discriminator)
acc_discriminator_history.append(acc_discriminator)
loss_generator_history.append(loss_generator)
if verbose == 1:
            logger(f"Epoch {(epoch+1)}: [Discriminator loss: {loss_discriminator} | Discriminator accuracy: {100 * acc_discriminator}] [Generator loss: {loss_generator}]")
return loss_discriminator_history, acc_discriminator_history, loss_generator_history
### Use generator model to generate images (after training) ###
def generate_images(generator, num_images=3, output_path=None):
noise = np.random.normal(0, 1, (num_images, 100))
gen_images = generator.predict(noise)
for i in range(num_images):
cv2.imwrite(output_path + f"/generated_images/generated_image_{i}.jpg", gen_images[i])
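# --- Usage sketch (not part of the library): one plausible way to wire the
# builders above into the `combined_model` expected by train().  The image
# shape, optimizer settings and the 100-dim noise size are assumptions taken
# from the defaults in this file; img_shape must be divisible by
# 2**upsample_layers (32 with the defaults) for the generator to reproduce it
# exactly.  Note that the generator ends in tanh, so images written by
# generate_images() would in practice need rescaling from [-1, 1] to [0, 255].
def build_gan(img_shape=(64, 64, 3)):
    optimizer = Adam(0.0002, 0.5)
    discriminator = build_discriminator(img_shape)
    # train() unpacks (loss, accuracy), so the accuracy metric is required
    discriminator.compile(loss='binary_crossentropy', optimizer=optimizer,
                          metrics=['accuracy'])
    generator = build_generator(img_shape)
    # freeze the discriminator inside the combined model: only the generator
    # weights are updated when combined_model.train_on_batch() is called
    discriminator.trainable = False
    noise = Input(shape=(100,))
    validity = discriminator(generator(noise))
    combined = Model(noise, validity)
    combined.compile(loss='binary_crossentropy', optimizer=optimizer)
    return combined, generator, discriminator
# e.g.:  combined, gen, disc = build_gan(x_train.shape[1:])
#        train(combined, gen, disc, x_train=x_train, epochs=10, batch_size=32)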
### Deep Convolutional Generative Adversarial Network ###
def dcgan(instruction=None,
num_images=None,
preprocess=True,
data_path=None,
verbose=None,
epochs=None,
height=None,
width=None,
output_path=None):
#K.clear_session()
training_path = ""
logger("Preprocessing images")
num_channels = 3
if preprocess:
processInfo = single_class_preprocessing(data_path=data_path, height=height, width=width)
training_path = "proc_training_set"
num_channels = 1 if processInfo["gray_scale"] else 3
train_images = []
for file in os.listdir(data_path + "/" + training_path):
abs_path = os.path.join(data_path, training_path, file)
if os.path.isfile(abs_path):
train_images.append(cv2.imread(abs_path))
    train_images = np.array(train_images)
"""
Auxiliary routines for the modeler
-----------------------------------
"""
import numpy as np
import random
import math
try:
from sim.consts import N, A, B, C
except ModuleNotFoundError:
try:
from consts import N, A, B, C
except:
from .consts import N, A, B, C
def kink_move(coords, kink, kink_minus, kink_plus):
"""
performs a kink move
:param coords: 3D coordinates
:type coords: numpy array
:param kink: the bead subjected to kink move
:type kink: int
:param kink_minus: preceding bead
:type kink_minus: int
:param kink_plus: succeeding bead
:type kink_plus: int
:return: (3,N) coordinates after the kink move
:rtype: Numpy array
"""
if coords[0, kink_minus] == coords[0, kink] == coords[0, kink_plus]:
coords[1, kink] = coords[1, kink_plus]
coords[2, kink] = coords[2, kink_minus]
# print('x')
elif coords[1, kink_minus] == coords[1, kink] == coords[1, kink_plus]:
coords[0, kink] = coords[0, kink_plus]
coords[2, kink] = coords[2, kink_minus]
# print('y')
else:
coords[0, kink] = coords[0, kink_plus]
coords[1, kink] = coords[1, kink_minus]
# print('z')
return coords
def prepare_kink(coords_):
"""
prepares input data for the kink move
:param coords_: 3D coordinates
:type coords_: (3,N) Numpy array, int
:return: 3-tuple as (preceding, actual, succeeding)
:rtype: tuple of int
"""
orthogonal = False
while not orthogonal:
kink = np.random.randint(N)
kink_minus = kink - 1
kink_plus = kink + 1
if kink == 0:
kink_minus = N - 1
elif kink == N - 1:
kink_plus = 0
# print(kink, length, coords[:, kink])
v1 = coords_[:, kink] - coords_[:, kink_minus]
v2 = coords_[:, kink_plus] - coords_[:, kink]
orthogonal = np.dot(v1, v2) == 0
return kink, kink_minus, kink_plus
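# Usage sketch (illustrative only, not part of the module): draw a kink
# candidate from an existing conformation and apply the move.  `coords` is
# assumed to be a valid (3, N) lattice conformation, e.g. one produced by
# make_circular_chain() defined below.
def _example_kink_step(coords):
    kink, kink_minus, kink_plus = prepare_kink(coords)
    return kink_move(coords, kink, kink_minus, kink_plus)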
def prepare_crank(coords_):
"""
generates input data for crankshaft move -- crank points and the index in the rotation list
:param coords_: (3,N) coordinates
:type coords_: numpy array
:return: tuple of 3 integers, ``(crank, crank1, rotation)``, where ``crank`` is the starting and ``crank1`` the ending \
indexes of the coordinates array for the crankshaft move: ``[crank+1:crank1-1]`` are subject to crankshaft; ``rotation`` -- \
is the index of the list with rotation matrices
:rtype: tuple
"""
    potentials = []  # if this list is empty, it means that the chosen bead `crank` has no buddy
# to form the rotation axis
while not potentials:
# selecting random bead
crank = np.random.randint(N)
# print(crank)
# generating potential positions for the second bead
potentials_x = np.where((coords_[1] == coords_[1, crank]) & (coords_[2] == coords_[2, crank]))[0]
potentials_y = np.where((coords_[0] == coords_[0, crank]) & (coords_[2] == coords_[2, crank]))[0]
potentials_z = np.where((coords_[0] == coords_[0, crank]) & (coords_[1] == coords_[1, crank]))[0]
potentials = [el for el in [(potentials_x, [0,1]),
(potentials_y, [2,3]),
(potentials_z, [4,5])] if len(el[0])>0
]
    # selecting the axis with rotations
axis_rotation = random.choice(potentials)
crank1 = random.choice(axis_rotation[0])
rotation = random.choice(axis_rotation[1]) # index in the list of rotation matrices
return crank, crank1, rotation
def crankshaft_move(coords_, A, B, rotation):
"""
performs a crankshaft move for given part of the chain around given axis on given angle.
:param coords_: (3,N) coordinates
:type coords_: numpy array
:param A: starting position to crank about
:type A: int
:param B: ending position to crank about
:type B: int
:param rotation: (3,3) rotation matrix
    :type rotation: Numpy array
:return: (3,N) coordinates after crankshaft
:rtype: Numpy array
"""
tail = coords_[:, A+1:B]
tail0 = coords_[:,A:A+1]
# print(tail, tail0)
tail = tail- tail0
# print(tail)
# print(rotation.shape)
for i in range(tail.shape[1]):
coords_[:, A+i+1] = np.matmul(rotation, tail[:,i]) + tail0[:, 0]
return coords_
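# Usage sketch (illustrative only): prepare_crank() returns an index into a
# list of rotation matrices that is not part of this excerpt; it is assumed
# here to hold six 90-degree rotations about the +/-x, +/-y and +/-z axes.
# The slice rotated by crankshaft_move() is [A+1:B], so the two crank points
# are ordered before the call.
def _example_crankshaft_step(coords, rotation_matrices):
    crank, crank1, rotation = prepare_crank(coords)
    a, b = min(crank, crank1), max(crank, crank1)
    return crankshaft_move(coords, a, b, rotation_matrices[rotation])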
def make_circular_indexes(N):
"""
    :param N: number of beads
:type N: int
:return: tuple of 4 lists, each list keeps the indexes of beads belonging to a particular group
:rtype: tuple
"""
tmp = [i for i in range(N)]
evens = tmp[::2]
odds = tmp[1::2]
tmp = len(evens)
idx = int(np.ceil(N / 4))
# print(idx)
i0 = odds[-idx:]
i1 = evens[:idx]
i2 = evens[-tmp + idx:]
i3 = odds[:tmp - idx]
return i0, i1, i2, i3
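# Worked example for the grouping above: make_circular_indexes(8) returns
# ([5, 7], [0, 2], [4, 6], [1, 3]), i.e. the even and odd bead indexes split
# into the four groups i0..i3.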
def settle_init_point():
"""
    returns the initial point for the chain as the center of the parallelepiped specified by ``A``, ``B`` and ``C``.
    :return: initial position of the polymer, (3,) numpy array of ints
    :rtype: numpy array
"""
return np.array([A//2, B//2, C//2])
def make_circular_chain(N):
"""
making circular chain of N beads
:param N: number of beads
:type N: int
:return: *squashed* 3D-coordinates of the circular polymer
:rtype: numpy array (3,N)
"""
    c = np.zeros((3, N))
## written by <NAME>, 2022-05-05
import os
import pandas as pd
import xarray as xr
import numpy as np
def creat_Q(basin_id,Q_file,Q_file1,Qmon):
rivers = np.loadtxt(basin_id,delimiter=",", usecols=(0,),skiprows=0,ndmin=1, dtype=np.int32)
dates = pd.date_range('1/1/1961', '31/12/2018')
shape = (len(dates),len(rivers))
dims = ('time','rivers', )
coords = {'time': dates, 'rivers': rivers}
state = xr.Dataset(coords=coords)
state.attrs['title'] = 'River reaches discharge'
state.attrs['history'] = 'created by jiaojiaogou, 2021-01-18'
state.attrs['user_comment'] = 'RAPID output river discharge (driving by CNRD v1.0)'
state.attrs['source'] = 'generated from a well-trained VIC model coupled with RAPID model'
for varname in ['qout']:
state[varname] = xr.DataArray(data=np.full(shape, np.nan),
coords=coords, dims=dims,
name=varname)
Qout = xr.open_dataset(Q_file).load()['Qout']
river_id =xr.open_dataset(Q_file).load()['rivid']
for i in range(len(rivers)):
state['qout'].values[:,i] = Qout[366:21550,int(np.where(river_id==rivers[i])[0])] #7305 61-79
        if np.any(state['qout'][:,i].values < 0):
# -*- coding: utf-8 -*-
"""
Expressions for calculations structure factors
For details see documentation.
"""
import numpy
from .matrix_operations import calc_det_m, calc_m1_m2, calc_m1_m2_inv_m1, calc_m_v, calc_vector_product_v1_v2_v1, calc_m_q_inv_m
from .unit_cell import calc_eq_ccs_by_unit_cell_parameters, calc_m_m_by_unit_cell_parameters, calc_m_m_norm_by_unit_cell_parameters, calc_sthovl_by_unit_cell_parameters
from .debye_waller_factor import calc_dwf
from .symmetry_elements import calc_multiplicity_by_atom_symm_elems, calc_full_symm_elems_by_reduced, calc_equivalent_reflections
from .magnetic_form_factor import calc_form_factor
from .local_susceptibility import calc_m_r_inv_m
na = numpy.newaxis
def calc_f_m_perp_by_sft(
sft_ccs, magnetic_field, eq_ccs,
flag_sft_ccs: bool = False,
flag_magnetic_field: bool = False,
flag_eq_ccs: bool = False):
"""Calculate perpendicular component of magnetic structure factor by susceptibility factor tensor.
All parameters are defined in Cartesian coordinate system (x||a*, z||c).
"""
f_m, dder_f_m = calc_m_v(
sft_ccs, magnetic_field, flag_m=flag_sft_ccs, flag_v=flag_magnetic_field)
if flag_sft_ccs:
dder_f_m["sft_ccs_real"] = dder_f_m.pop("m_real")
dder_f_m["sft_ccs_imag"] = dder_f_m.pop("m_imag")
if flag_magnetic_field:
dder_f_m["magnetic_field"] = dder_f_m.pop("v")
flag_f_m = flag_sft_ccs or flag_magnetic_field
f_m_perp, dder_f_m_perp = calc_vector_product_v1_v2_v1(
eq_ccs, f_m, flag_v1=flag_eq_ccs, flag_v2=flag_f_m)
if flag_eq_ccs:
dder_f_m_perp["eq_ccs"] = dder_f_m_perp.pop("v1")
if flag_f_m:
dder_f_m_perp["f_m_real"] = dder_f_m_perp.pop("v2_real")
dder_f_m_perp["f_m_imag"] = dder_f_m_perp.pop("v2_imag")
dder = {}
if flag_sft_ccs:
dder["sft_ccs_real"] = (
numpy.expand_dims(dder_f_m_perp["f_m_real"], axis=2)*numpy.expand_dims(dder_f_m["sft_ccs_real"].real, axis=0) +
numpy.expand_dims(dder_f_m_perp["f_m_imag"], axis=2)*numpy.expand_dims(dder_f_m["sft_ccs_real"].imag, axis=0)).sum(axis=1)
dder["sft_ccs_imag"] = (
numpy.expand_dims(dder_f_m_perp["f_m_real"], axis=2)*numpy.expand_dims(dder_f_m["sft_ccs_imag"].real, axis=0) +
numpy.expand_dims(dder_f_m_perp["f_m_imag"], axis=2)*numpy.expand_dims(dder_f_m["sft_ccs_imag"].imag, axis=0)).sum(axis=1)
if flag_magnetic_field:
dder["magnetic_field"] = (
numpy.expand_dims(dder_f_m_perp["f_m_real"], axis=2)*numpy.expand_dims(dder_f_m["magnetic_field"].real, axis=0) +
numpy.expand_dims(dder_f_m_perp["f_m_imag"], axis=2)*numpy.expand_dims(dder_f_m["magnetic_field"].imag, axis=0)).sum(axis=1)
if flag_eq_ccs:
        dder["eq_ccs"] = dder_f_m_perp["eq_ccs"]
return f_m_perp, dder
def calc_pr1(index_hkl, reduced_symm_elems, fract_xyz, flag_fract_xyz: bool = False):
"""Calculate PR1, dimensions [hkl, rs, atoms].
For more details see documentation module "Structure factor".
"""
index_hkl_exp = numpy.expand_dims(numpy.expand_dims(index_hkl, axis=2), axis=3)
h, k, l = index_hkl_exp[0], index_hkl_exp[1], index_hkl_exp[2]
reduced_symm_elems_exp = numpy.expand_dims(numpy.expand_dims(reduced_symm_elems, axis=1), axis=3)
r_11, r_12, r_13 = reduced_symm_elems_exp[4], reduced_symm_elems_exp[5], reduced_symm_elems_exp[6]
r_21, r_22, r_23 = reduced_symm_elems_exp[7], reduced_symm_elems_exp[8], reduced_symm_elems_exp[9]
r_31, r_32, r_33 = reduced_symm_elems_exp[10], reduced_symm_elems_exp[11], reduced_symm_elems_exp[12]
fract_xyz_exp = numpy.expand_dims(numpy.expand_dims(fract_xyz, axis=1), axis=2)
x, y, z = fract_xyz_exp[0], fract_xyz_exp[1], fract_xyz_exp[2]
hh = h*(r_11*x + r_12*y + r_13*z) + k*(r_21*x + r_22*y + r_23*z) + l*(r_31*x + r_32*y + r_33*z)
res = numpy.exp(-2.*numpy.pi*1j*hh)
dder = {}
if flag_fract_xyz:
        # derivative of PR1 with respect to the fractional coordinates x, y, z
        dder["fract_xyz"] = numpy.stack([
            -2.*numpy.pi*1j*(h*r_11 + k*r_21 + l*r_31)*res,
            -2.*numpy.pi*1j*(h*r_12 + k*r_22 + l*r_32)*res,
            -2.*numpy.pi*1j*(h*r_13 + k*r_23 + l*r_33)*res], axis=0)
return res, dder
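# Minimal sanity check (toy input, not taken from the library's tests): for a
# single reflection (1 0 0), the identity operation and one atom at x = 1/4,
# PR1 reduces to exp(-2*pi*i*h*x) = exp(-i*pi/2) = -i.
def _example_pr1():
    index_hkl_toy = numpy.array([[1], [0], [0]])
    identity_elem = numpy.array(
        [[0], [0], [0], [1],   # b_1, b_2, b_3, b_d: no translation
         [1], [0], [0],        # r_11, r_12, r_13
         [0], [1], [0],        # r_21, r_22, r_23
         [0], [0], [1]])       # r_31, r_32, r_33
    fract_toy = numpy.array([[0.25], [0.0], [0.0]])
    pr_1, _ = calc_pr1(index_hkl_toy, identity_elem, fract_toy)
    return pr_1  # shape (1, 1, 1), value close to -1j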
def calc_pr2(index_hkl, reduced_symm_elems):
    """Calculate PR2, dimensions [hkl, rs].
For more details see documentation module "Structure factor".
"""
index_hkl_exp = numpy.expand_dims(index_hkl, axis=2)
h, k, l = index_hkl_exp[0], index_hkl_exp[1], index_hkl_exp[2]
reduced_symm_elems_exp = numpy.expand_dims(reduced_symm_elems, axis=1)
b_1, b_2, b_3, b_d = reduced_symm_elems_exp[0], reduced_symm_elems_exp[1], reduced_symm_elems_exp[2], reduced_symm_elems_exp[3]
hh = h*(b_1.astype(float)/b_d) + k*(b_2.astype(float)/b_d) + l*(b_3.astype(float)/b_d)
res = numpy.exp(-2.*numpy.pi*1j*hh)
return res
def calc_pr3(index_hkl, translation_elems):
"""Calculate PR3, dimensions [hkl,].
For more details see documentation module "Structure factor".
"""
index_hkl_exp = numpy.expand_dims(index_hkl, axis=2)
h, k, l = index_hkl_exp[0], index_hkl_exp[1], index_hkl_exp[2]
translation_elems_exp = numpy.expand_dims(translation_elems, axis=1)
t_1, t_2, t_3, t_d = translation_elems_exp[0], translation_elems_exp[1], translation_elems_exp[2], translation_elems_exp[3]
hh = (h*t_1+k*t_2+l*t_3).astype(float)
res =(numpy.exp(-2.*numpy.pi*1j*hh/t_d)).sum(axis=1)/translation_elems.shape[-1]
return res
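# Illustration (toy input, not taken from the library's tests): for a
# body-centred lattice the translation set {(0,0,0), (1/2,1/2,1/2)} gives
# PR3 = (1 + exp(-i*pi*(h+k+l)))/2, i.e. 0 for h+k+l odd (systematic
# absences) and 1 for h+k+l even.
def _example_pr3_centring():
    index_hkl_toy = numpy.array([[1, 1], [0, 1], [0, 0]])          # (100), (110)
    translation_i = numpy.array([[0, 1], [0, 1], [0, 1], [1, 2]])  # (0,0,0), (1/2,1/2,1/2)
    return calc_pr3(index_hkl_toy, translation_i)  # approximately [0.+0.j, 1.+0.j]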
def calc_pr4(index_hkl, centrosymmetry_position=None):
"""Calculate PR4.
For more details see documentation module "Structure factor".
"""
h, k, l = index_hkl[0], index_hkl[1], index_hkl[2]
if centrosymmetry_position is None:
res = numpy.zeros_like(h)
else:
p_1, p_2, p_3 = centrosymmetry_position[0]/centrosymmetry_position[3], centrosymmetry_position[1]/centrosymmetry_position[3], centrosymmetry_position[2]/centrosymmetry_position[3]
res = numpy.exp(-4.*numpy.pi * 1j * (h*p_1 + k*p_2 + l*p_3))
return res
def calc_pr5(reduced_symm_elems, unit_cell_parameters, flag_unit_cell_parameters: bool=False):
"""Calculate PR5, dimensions [rs,].
For more details see documentation module "Structure factor".
"""
res, dder = calc_m_r_inv_m(unit_cell_parameters, reduced_symm_elems, flag_unit_cell_parameters=flag_unit_cell_parameters)
return res, dder
def calc_f_asym_a_by_pr(
atom_multiplicity, debye_waller, atom_occupancy, pr_1, pr_2,
flag_debye_waller: bool = False, flag_atom_occupancy: bool = False, flag_pr_1: bool = False):
"""Calculate preliminary asymmetric structure factor by preliminary defined parameters.
For more details see documentation module "Structure factor".
"""
# dimension of pr_1 is [hkl, symmetry, a]
res = (pr_2[:, :, na]*(pr_1*atom_multiplicity[na, na, :]*atom_occupancy[na, na, :]*debye_waller)).sum(axis=1)/pr_2.shape[-1]
dder = {}
# if flag_scat_length_neutron:
# dder["scat_length_neutron_real"] = (numpy.expand_dims(pr_2, axis=2)*\
# (pr_1*atom_multiplicity*atom_occupancy*debye_waller)).sum(axis=1)/pr_2.shape[-1] # FIXME: only for neutron diffraction
# dder["scat_length_neutron_imag"] = 1j*(numpy.expand_dims(pr_2, axis=2)*\
# (pr_1*atom_multiplicity*atom_occupancy*debye_waller)).sum(axis=1)/pr_2.shape[-1] # FIXME: only for neutron diffraction
if flag_debye_waller:
dder["debye_waller"] = (numpy.expand_dims(pr_2, axis=2)*\
(pr_1*atom_multiplicity*atom_occupancy))/pr_2.shape[-1]
if flag_atom_occupancy:
dder["atom_occupancy"] = (numpy.expand_dims(pr_2, axis=2)*\
(pr_1*atom_multiplicity*debye_waller))/pr_2.shape[-1]
if flag_pr_1:
dder["pr_1_real"] = (pr_2[:,:,na]*atom_multiplicity[na, na, :]*atom_occupancy[na, na, :]*debye_waller)/pr_2.shape[-1]
dder["pr_1_imag"] = 1j*(pr_2[:,:,na]*atom_multiplicity[na, na, :]*atom_occupancy[na, na, :]*debye_waller)/pr_2.shape[-1]
return res, dder
# Delete IT
# def calc_f_asym_by_pr(
# atom_multiplicity, scat_length_neutron, debye_waller, atom_occupancy, pr_1, pr_2,
# flag_scat_length_neutron: bool = False, flag_debye_waller: bool = False,
# flag_atom_occupancy: bool = False, flag_pr_1: bool = False):
# """Calculate preliminary asymmetric structure factor by preliminary defined parameters.
# For more details see documentation module "Structure factor".
# """
# # dimension of pr_1 is [hkl, symmetry, a]
# if len(scat_length_neutron.shape) == 1:
# scat_length = scat_length_neutron[na, na, :] # neutron diffraction [atoms]
# elif len(scat_length_neutron.shape) == 2:
# scat_length = scat_length_neutron[:, na, :] # X-ray diffraction [hkl, atoms]
#
# res = (pr_2*(pr_1*atom_multiplicity[na, na, :]*scat_length*atom_occupancy[na, na, :]*debye_waller).sum(axis=2)).sum(axis=1)/pr_2.shape[-1]
# dder = {}
# if flag_scat_length_neutron:
# dder["scat_length_neutron_real"] = (numpy.expand_dims(pr_2, axis=2)*\
# (pr_1*atom_multiplicity*atom_occupancy*debye_waller)).sum(axis=1)/pr_2.shape[-1] # FIXME: only for neutron diffraction
# dder["scat_length_neutron_imag"] = 1j*(numpy.expand_dims(pr_2, axis=2)*\
# (pr_1*atom_multiplicity*atom_occupancy*debye_waller)).sum(axis=1)/pr_2.shape[-1] # FIXME: only for neutron diffraction
# if flag_debye_waller:
# dder["debye_waller"] = (numpy.expand_dims(pr_2, axis=2)*\
# (pr_1*atom_multiplicity*atom_occupancy)*scat_length).sum(axis=1)/pr_2.shape[-1]
# if flag_atom_occupancy:
# dder["atom_occupancy"] = (numpy.expand_dims(pr_2, axis=2)*\
# (pr_1*atom_multiplicity*debye_waller)*scat_length).sum(axis=1)/pr_2.shape[-1]
# if flag_pr_1:
# dder["pr_1_real"] = (pr_2[:,:,na]*atom_multiplicity[na, na, :]*atom_occupancy[na, na, :]*scat_length*debye_waller)/pr_2.shape[-1]
# dder["pr_1_imag"] = 1j*(pr_2[:,:,na]*atom_multiplicity[na, na, :]*atom_occupancy[na, na, :]*scat_length*debye_waller)/pr_2.shape[-1]
# return res, dder
def calc_f_by_f_asym_a_pr(f_asym_a, scattering_length, pr_3, centrosymmetry, pr_4, flag_f_asym_a: bool = False, flag_scattering_length: bool = False):
"""Calculate structure factor by preliminary defined parameters.
For more details see documentation module "Structure factor".
Dimensions:
f_asym_a = [9, hkl, a] or [hkl, a]
scattering length = [hkl, a] or [a]
pr_3 = [hkl]
"""
if len(scattering_length.shape) == 1:
scat_length_2d = scattering_length[na, :] # neutron diffraction [atoms]
elif len(scattering_length.shape) == 2:
scat_length_2d = scattering_length[:, :] # X-ray diffraction [hkl, atoms]
if len(f_asym_a.shape) == 2: # for structure factor [hkl, a]
sum_axis = 1
hh = scat_length_2d
pr_3_ext = pr_3
elif len(f_asym_a.shape) == 3: # for tensor structure factor [9, hkl, a]
sum_axis = 2
hh = scat_length_2d[na, :, :]
pr_3_ext = numpy.expand_dims(pr_3, axis=0) # [9, hkl]
f_asym = (hh * f_asym_a).sum(axis=sum_axis)
f_asym_conj = (hh * f_asym_a.conjugate()).sum(axis=sum_axis)
f_h = pr_3_ext * f_asym
f_h_conj = pr_3_ext.conjugate() * f_asym_conj
if centrosymmetry:
res= 0.5*(f_h+pr_4*f_h_conj)
else:
res= f_h
dder = {}
if flag_f_asym_a:
ofh = numpy.ones(f_h.shape, dtype=float)
if centrosymmetry:
hhh_real = pr_3 + pr_4*pr_3.conjugate()
hhh_imag = pr_3 - pr_4*pr_3.conjugate()
dder["f_asym_a_real"] = 0.5*(numpy.expand_dims(hhh_real, axis=-1))*hh
dder["f_asym_a_imag"] = 0.5*1j*(numpy.expand_dims(hhh_imag, axis=-1))*hh
else:
dder["f_asym_a_real"] = numpy.expand_dims(pr_3_ext, axis=-1)*hh
dder["f_asym_a_imag"] = numpy.expand_dims(pr_3_ext, axis=-1)*hh*1j
# ofh = numpy.ones(f_h.shape, dtype=float)
# if centrosymmetry:
# dder["f_asym_real"] = 0.5*(pr_3+pr_4*pr_3.conjugate())*ofh
# dder["f_asym_imag"] = 0.5*(pr_3-1j*pr_4*pr_3.conjugate())*ofh
# else:
# dder["f_asym_real"] = pr_3*ofh
# dder["f_asym_imag"] = pr_3*1j*ofh
if flag_scattering_length:
pass
return res, dder
# DELETE iT
# def calc_f_by_f_asym_pr(f_asym, pr_3, centrosymmetry, pr_4, flag_f_asym: bool = False):
# """Calculate structure factor by preliminary defined parameters.
# For more details see documentation module "Structure factor".
# """
# f_h = pr_3 * f_asym
# if centrosymmetry:
# res= 0.5*(f_h+pr_4*f_h.conjugate())
# else:
# res= f_h
# dder = {}
# if flag_f_asym:
# ofh = numpy.ones(f_h.shape, dtype=float)
# if centrosymmetry:
# dder["f_asym_real"] = 0.5*(pr_3+pr_4*pr_3.conjugate())*ofh
# dder["f_asym_imag"] = 0.5*(pr_3-1j*pr_4*pr_3.conjugate())*ofh
# else:
# dder["f_asym_real"] = pr_3*ofh
# dder["f_asym_imag"] = pr_3*1j*ofh
# return res, dder
def calc_sft_ccs_asym_a_by_pr(
atom_para_multiplicity, debye_waller_factor, atom_para_occupancy,
atom_para_susceptibility, atom_para_sc_chi,
pr_1, pr_2, pr_5,
flag_debye_waller: bool = False,
flag_atom_para_occupancy: bool = False, flag_atom_para_susceptibility: bool = False,
flag_pr_1: bool = False, flag_pr_5: bool = False):
"""Calculate preliminary asymmetric structure factor tensor by preliminary defined parameters in 10**-12 cm.
For more details see documentation module "Structure factor".
    The susceptibility parameters are given in mu_B.
"""
mas_constr = (0.2695*atom_para_sc_chi * atom_para_susceptibility[na, :, :]).sum(axis=1)
hh, dder_hh = calc_m_q_inv_m(pr_5[:, :, na], mas_constr[:, na, :], flag_m=False, flag_q=flag_atom_para_susceptibility)
hh_1 = atom_para_multiplicity * atom_para_occupancy
hh_3 = pr_1*debye_waller_factor*hh_1[na, na, :]
res = (pr_2[na, :, :, na] * hh_3[na, :, :, :] * hh[:, na, :, :]).sum(axis=2)/pr_2.shape[-1]
dder = {}
if flag_atom_para_susceptibility:
dder_hh_2 = 0.2695*(dder_hh["q"][:,:, na,:, :]* atom_para_sc_chi[na, :, :, na,:]).sum(axis=1)
dder["atom_para_susceptibility"] = (pr_2[na, na, :, :, na] * hh_3[na, na, :, :, :] * dder_hh_2[:, :, na, :, :]).sum(axis=3)/pr_2.shape[-1]
return res, dder
# DELETE IT
# def calc_sft_ccs_asym_by_pr(
# atom_para_multiplicity, atom_para_form_factor, debye_waller_factor, atom_para_occupancy,
# atom_para_susceptibility, atom_para_sc_chi,
# pr_1, pr_2, pr_5,
# flag_atom_para_form_factor: bool = False, flag_debye_waller: bool = False,
# flag_atom_para_occupancy: bool = False, flag_atom_para_susceptibility: bool = False,
# flag_pr_1: bool = False, flag_pr_5: bool = False):
# """Calculate preliminary asymmetric structure factor tensor by preliminary defined parameters in 10**-12 cm.
# For more details see documentation module "Structure factor".
#
# The susceptibility parameters are give in mu_B
# """
# mas_constr = (0.2695*atom_para_sc_chi * atom_para_susceptibility[na, :, :]).sum(axis=1)
#
# hh, dder_hh = calc_m_q_inv_m(pr_5[:, :, na], mas_constr[:, na, :], flag_m=False, flag_q=flag_atom_para_susceptibility)
# hh_1 = atom_para_multiplicity * atom_para_occupancy
# hh_2 = atom_para_form_factor * hh_1[na, :]
# hh_3 = pr_1*debye_waller_factor*hh_2[:, na, :]
# res = (pr_2[na, :, :] * (hh_3[na, :, :, :] * hh[:, na, :, :]).sum(axis=3)).sum(axis=2)/pr_2.shape[-1]
# dder = {}
# if flag_atom_para_susceptibility:
# dder_hh_2 = 0.2695*(dder_hh["q"][:,:, na,:, :]* atom_para_sc_chi[na, :, :, na,:]).sum(axis=1)
# dder["atom_para_susceptibility"] = (pr_2[na, na, :, :, na] * hh_3[na, na, :, :, :] * dder_hh_2[:, :, na, :, :]).sum(axis=3)/pr_2.shape[-1]
# return res, dder
def calc_f_nucl_by_dictionary(dict_crystal, dict_in_out, flag_use_precalculated_data: bool = False):
"""Calculate nuclear structure factor based on the information given in dictionary.
Output information is written in the same dictionary. The following keys have to be defined.
"""
dict_crystal_keys = dict_crystal.keys()
dict_in_out_keys = dict_in_out.keys()
necessary_crystal_keys = set(["atom_fract_xyz", "atom_occupancy",
"atom_scat_length_neutron", "atom_b_iso", "atom_beta", "unit_cell_parameters"])
diff_set_crystal = necessary_crystal_keys.difference(set(dict_crystal_keys))
if len(diff_set_crystal) != 0:
raise AttributeError(f"The following attributes have to be defined {diff_set_crystal:}")
flag_reduced_symm_elems = len(set(["reduced_symm_elems", "centrosymmetry", "translation_elems"]).difference(set(dict_crystal_keys))) == 0
flag_full_symm_elems = len(set(["full_symm_elems", ]).difference(set(dict_crystal_keys))) == 0
flag_full_mcif_elems = len(set(["full_mcif_elems", ]).difference(set(dict_crystal_keys))) == 0
if not(flag_reduced_symm_elems or flag_full_symm_elems or flag_full_mcif_elems):
raise AttributeError("The symmetry elements have to be defined.")
necessary_in_out_keys = set(["index_hkl", ])
diff_set_in_out = necessary_in_out_keys.difference(set(dict_in_out_keys))
if len(diff_set_in_out) != 0:
raise AttributeError(f"The following attributes have to be defined {diff_set_in_out:}")
index_hkl = dict_in_out["index_hkl"]
if flag_reduced_symm_elems:
reduced_symm_elems = dict_crystal["reduced_symm_elems"]
centrosymmetry = dict_crystal["centrosymmetry"]
if centrosymmetry:
centrosymmetry_position = dict_crystal["centrosymmetry_position"]
else:
centrosymmetry_position = None
translation_elems = dict_crystal["translation_elems"]
elif flag_full_symm_elems:
full_symm_elems = dict_crystal["full_symm_elems"]
reduced_symm_elems = full_symm_elems
centrosymmetry = False
centrosymmetry_position = None
translation_elems = numpy.array([[0], [0], [0], [1]], dtype=int)
elif flag_full_mcif_elems:
full_mcif_elems = dict_crystal["full_mcif_elems"]
reduced_symm_elems = full_mcif_elems[:13]
centrosymmetry = False
centrosymmetry_position = None
translation_elems = numpy.array([[0], [0], [0], [1]], dtype=int)
unit_cell_parameters = dict_crystal["unit_cell_parameters"]
atom_fract_xyz = dict_crystal["atom_fract_xyz"]
atom_site_sc_fract = dict_crystal["atom_site_sc_fract"]
atom_site_sc_b = dict_crystal["atom_site_sc_b"]
atom_fract_xyz = calc_m_v(atom_site_sc_fract, numpy.mod(atom_fract_xyz, 1), flag_m=False, flag_v=False)[0] + atom_site_sc_b
atom_occupancy = dict_crystal["atom_occupancy"]
scat_length_neutron = dict_crystal["atom_scat_length_neutron"]
atom_b_iso = dict_crystal["atom_b_iso"]
atom_beta = dict_crystal["atom_beta"]
if "atom_site_aniso_sc_beta" in dict_crystal_keys:
atom_site_aniso_sc_beta = dict_crystal["atom_site_aniso_sc_beta"]
atom_site_aniso_index = dict_crystal["atom_site_aniso_index"]
atom_sc_beta = numpy.zeros((6,)+atom_beta.shape, dtype=float)
atom_sc_beta[:, :, atom_site_aniso_index] = atom_site_aniso_sc_beta
atom_beta = (atom_sc_beta*numpy.expand_dims(atom_beta, axis=0)).sum(axis=1)
flag_unit_cell_parameters = numpy.any(dict_crystal["flags_unit_cell_parameters"])
flag_atom_fract_xyz = numpy.any(dict_crystal["flags_atom_fract_xyz"])
flag_atom_occupancy = numpy.any(dict_crystal["flags_atom_occupancy"])
flag_atom_b_iso = numpy.any(dict_crystal["flags_atom_b_iso"])
flag_atom_beta = numpy.any(dict_crystal["flags_atom_beta"])
f_nucl, dder = calc_f_nucl(index_hkl,
reduced_symm_elems, centrosymmetry, centrosymmetry_position, translation_elems,
unit_cell_parameters, atom_fract_xyz, atom_occupancy, scat_length_neutron, atom_b_iso, atom_beta,
dict_in_out,
flag_unit_cell_parameters=flag_unit_cell_parameters, flag_atom_fract_xyz=flag_atom_fract_xyz,
flag_atom_occupancy=flag_atom_occupancy, flag_atom_b_iso=flag_atom_b_iso, flag_atom_beta=flag_atom_beta,
flag_use_precalculated_data=flag_use_precalculated_data)
return f_nucl, dder
def calc_f_nucl(index_hkl,
reduced_symm_elems, centrosymmetry, centrosymmetry_position, translation_elems,
unit_cell_parameters, atom_fract_xyz, atom_occupancy, scat_length_neutron, atom_b_iso, atom_beta,
dict_in_out: dict = None,
flag_unit_cell_parameters: bool = False, flag_atom_fract_xyz: bool = False,
flag_atom_occupancy: bool = False, flag_atom_b_iso: bool = False, flag_atom_beta: bool = False,
flag_use_precalculated_data: bool = False):
"""Calculate nuclear structure factor based on the information given in dictionary.
Output information is written in the same dictionary. The following keys have to be defined.
"""
if dict_in_out is None:
flag_dict = False
dict_in_out_keys = []
else:
flag_dict = True
dict_in_out_keys = dict_in_out.keys()
if (flag_use_precalculated_data and ('index_hkl' in dict_in_out_keys)):
if numpy.any(dict_in_out["index_hkl"] != index_hkl):
dict_in_out.clear()
dict_in_out["index_hkl"] = index_hkl
if (flag_use_precalculated_data and ("atom_multiplicity" in dict_in_out_keys)):
atom_multiplicity = dict_in_out["atom_multiplicity"]
else:
ones = numpy.ones_like(atom_fract_xyz[0]).astype(int)
atom_symm_elems = numpy.stack([
(numpy.round(atom_fract_xyz[0]*10**6, decimals=0)).astype(int),
(numpy.round(atom_fract_xyz[1]*10**6, decimals=0)).astype(int),
(numpy.round(atom_fract_xyz[2]*10**6, decimals=0)).astype(int),
ones*10**6], axis=0)
if "full_symm_elems" in dict_in_out_keys:
full_symm_elems = dict_in_out["full_symm_elems"]
else:
full_symm_elems = calc_full_symm_elems_by_reduced(
reduced_symm_elems, centrosymmetry, centrosymmetry_position, translation_elems)
if flag_dict:
dict_in_out["full_symm_elems"] = full_symm_elems
atom_multiplicity = calc_multiplicity_by_atom_symm_elems(full_symm_elems, atom_symm_elems)
if flag_dict:
dict_in_out["atom_multiplicity"] = atom_multiplicity
flag_pr_1 = flag_atom_fract_xyz
if (flag_use_precalculated_data and ("pr_1" in dict_in_out_keys) and not(flag_atom_fract_xyz)):
pr_1 = dict_in_out["pr_1"]
else:
pr_1, dder_pr_1 = calc_pr1(index_hkl, reduced_symm_elems, atom_fract_xyz, flag_fract_xyz=flag_atom_fract_xyz)
if flag_dict:
dict_in_out["pr_1"] = pr_1
if (flag_use_precalculated_data and ("pr_2" in dict_in_out_keys)):
pr_2 = dict_in_out["pr_2"]
else:
pr_2 = calc_pr2(index_hkl, reduced_symm_elems)
if flag_dict:
dict_in_out["pr_2"] = pr_2
if (flag_use_precalculated_data and ("pr_3" in dict_in_out_keys)):
pr_3 = dict_in_out["pr_3"]
else:
pr_3 = calc_pr3(index_hkl, translation_elems)
if flag_dict:
dict_in_out["pr_3"] = pr_3
if (flag_use_precalculated_data and ("pr_4" in dict_in_out_keys)):
pr_4 = dict_in_out["pr_4"]
else:
pr_4 = calc_pr4(index_hkl, centrosymmetry_position)
if flag_dict:
dict_in_out["pr_4"] = pr_4
flag_sthovl = flag_unit_cell_parameters
if (flag_use_precalculated_data and ("sthovl" in dict_in_out_keys) and not(flag_sthovl)):
sthovl = dict_in_out["sthovl"]
else:
sthovl, dder_sthovl = calc_sthovl_by_unit_cell_parameters(
index_hkl, unit_cell_parameters, flag_unit_cell_parameters=flag_unit_cell_parameters)
if flag_dict:
dict_in_out["sthovl"] = sthovl
# dimensions ["hkl", "reduced symmetry", "atom"]
flag_debye_waller_factor = flag_sthovl or flag_atom_b_iso or flag_atom_beta
if (flag_use_precalculated_data and ("debye_waller_factor" in dict_in_out_keys) and not(flag_debye_waller_factor)):
debye_waller_factor = dict_in_out["debye_waller_factor"]
else:
debye_waller_factor, dder_dw = calc_dwf(
index_hkl[:, :, na, na], sthovl[:, na, na], atom_b_iso[na, na, :],
atom_beta[:, na, na, :], reduced_symm_elems[:, na, :, na],
flag_sthovl=flag_sthovl, flag_b_iso=flag_atom_b_iso, flag_beta=flag_atom_beta)
if flag_dict:
dict_in_out["debye_waller_factor"] = debye_waller_factor
flag_scat_length_neutron = False
flag_debye_waller = flag_atom_b_iso or flag_atom_beta
flag_f_asym = flag_scat_length_neutron or flag_debye_waller or flag_pr_1
if (flag_use_precalculated_data and ("f_asym" in dict_in_out_keys) and
not(flag_f_asym)):
f_asym = dict_in_out["f_asym"]
else:
f_asym, dder_f_asym = calc_f_asym_a_by_pr(
atom_multiplicity, debye_waller_factor, atom_occupancy,
pr_1, pr_2,
flag_debye_waller=flag_debye_waller, flag_atom_occupancy=flag_atom_occupancy,
flag_pr_1=flag_pr_1)
if flag_dict:
dict_in_out["f_asym"] = f_asym
flag_f_nucl = flag_f_asym
if (flag_use_precalculated_data and ("f_nucl" in dict_in_out_keys) and
not(flag_f_nucl)):
f_nucl = dict_in_out["f_nucl"]
else:
f_nucl, dder_f_nucl = calc_f_by_f_asym_a_pr(f_asym, scat_length_neutron, pr_3, centrosymmetry, pr_4, flag_f_asym_a=flag_f_asym, flag_scattering_length=flag_scat_length_neutron)
if flag_dict:
dict_in_out["f_nucl"] = f_nucl
dder = {}
if flag_unit_cell_parameters:
dder["unit_cell_parameters"] = None
if flag_atom_fract_xyz:
dder["atom_fract_xyz"] = None
if flag_atom_occupancy:
dder["atom_occupancy"] = None
if flag_atom_b_iso:
dder["atom_b_iso"] = None
if flag_atom_beta:
dder["atom_beta"] = None
return f_nucl, dder
def calc_f_charge_by_dictionary(dict_crystal, wavelength:float, dict_in_out, flag_use_precalculated_data: bool = False):
    """Calculate charge (X-ray) structure factor based on the information given in dictionary.
Output information is written in the same dictionary. The following keys have to be defined.
"""
dict_crystal_keys = dict_crystal.keys()
dict_in_out_keys = dict_in_out.keys()
necessary_crystal_keys = set(["atom_fract_xyz", "atom_occupancy",
"atom_scat_length_neutron", "atom_b_iso", "atom_beta", "unit_cell_parameters"])
diff_set_crystal = necessary_crystal_keys.difference(set(dict_crystal_keys))
if len(diff_set_crystal) != 0:
raise AttributeError(f"The following attributes have to be defined {diff_set_crystal:}")
flag_reduced_symm_elems = len(set(["reduced_symm_elems", "centrosymmetry", "translation_elems"]).difference(set(dict_crystal_keys))) == 0
flag_full_symm_elems = len(set(["full_symm_elems", ]).difference(set(dict_crystal_keys))) == 0
flag_full_mcif_elems = len(set(["full_mcif_elems", ]).difference(set(dict_crystal_keys))) == 0
if not(flag_reduced_symm_elems or flag_full_symm_elems or flag_full_mcif_elems):
raise AttributeError("The symmetry elements have to be defined.")
necessary_in_out_keys = set(["index_hkl", ])
diff_set_in_out = necessary_in_out_keys.difference(set(dict_in_out_keys))
if len(diff_set_in_out) != 0:
raise AttributeError(f"The following attributes have to be defined {diff_set_in_out:}")
index_hkl = dict_in_out["index_hkl"]
if flag_reduced_symm_elems:
reduced_symm_elems = dict_crystal["reduced_symm_elems"]
centrosymmetry = dict_crystal["centrosymmetry"]
if centrosymmetry:
centrosymmetry_position = dict_crystal["centrosymmetry_position"]
else:
centrosymmetry_position = None
translation_elems = dict_crystal["translation_elems"]
elif flag_full_symm_elems:
full_symm_elems = dict_crystal["full_symm_elems"]
reduced_symm_elems = full_symm_elems
centrosymmetry = False
centrosymmetry_position = None
translation_elems = numpy.array([[0], [0], [0], [1]], dtype=int)
elif flag_full_mcif_elems:
full_mcif_elems = dict_crystal["full_mcif_elems"]
reduced_symm_elems = full_mcif_elems[:13]
centrosymmetry = False
centrosymmetry_position = None
translation_elems = numpy.array([[0], [0], [0], [1]], dtype=int)
unit_cell_parameters = dict_crystal["unit_cell_parameters"]
atom_fract_xyz = dict_crystal["atom_fract_xyz"]
atom_site_sc_fract = dict_crystal["atom_site_sc_fract"]
atom_site_sc_b = dict_crystal["atom_site_sc_b"]
atom_fract_xyz = calc_m_v(atom_site_sc_fract, numpy.mod(atom_fract_xyz, 1), flag_m=False, flag_v=False)[0] + atom_site_sc_b
atom_occupancy = dict_crystal["atom_occupancy"]
table_sthovl = dict_crystal["table_sthovl"]
table_atom_scattering_amplitude = dict_crystal["table_atom_scattering_amplitude"]
table_wavelength = dict_crystal["table_wavelength"]
table_atom_dispersion = dict_crystal["table_atom_dispersion"]
atom_dispersion = numpy.array([numpy.interp(float(wavelength), table_wavelength, hh) for hh in table_atom_dispersion], dtype=complex)
dict_in_out["atom_dispersion"] = atom_dispersion
atom_b_iso = dict_crystal["atom_b_iso"]
atom_beta = dict_crystal["atom_beta"]
if "atom_site_aniso_sc_beta" in dict_crystal_keys:
atom_site_aniso_sc_beta = dict_crystal["atom_site_aniso_sc_beta"]
atom_site_aniso_index = dict_crystal["atom_site_aniso_index"]
atom_sc_beta = numpy.zeros((6,)+atom_beta.shape, dtype=float)
atom_sc_beta[:, :, atom_site_aniso_index] = atom_site_aniso_sc_beta
atom_beta = (atom_sc_beta*numpy.expand_dims(atom_beta, axis=0)).sum(axis=1)
flag_unit_cell_parameters = numpy.any(dict_crystal["flags_unit_cell_parameters"])
flag_atom_fract_xyz = numpy.any(dict_crystal["flags_atom_fract_xyz"])
flag_atom_occupancy = numpy.any(dict_crystal["flags_atom_occupancy"])
flag_atom_b_iso = numpy.any(dict_crystal["flags_atom_b_iso"])
flag_atom_beta = numpy.any(dict_crystal["flags_atom_beta"])
f_charge, dder = calc_f_charge(index_hkl,
reduced_symm_elems, centrosymmetry, centrosymmetry_position, translation_elems,
unit_cell_parameters, atom_fract_xyz, atom_occupancy, table_sthovl, table_atom_scattering_amplitude, atom_dispersion, atom_b_iso, atom_beta,
dict_in_out,
flag_unit_cell_parameters=flag_unit_cell_parameters, flag_atom_fract_xyz=flag_atom_fract_xyz,
flag_atom_occupancy=flag_atom_occupancy, flag_atom_b_iso=flag_atom_b_iso, flag_atom_beta=flag_atom_beta,
flag_use_precalculated_data=flag_use_precalculated_data)
return f_charge, dder
def calc_f_charge(index_hkl,
reduced_symm_elems, centrosymmetry, centrosymmetry_position, translation_elems,
unit_cell_parameters, atom_fract_xyz, atom_occupancy, table_sthovl, table_atom_scattering_amplitude, atom_dispersion, atom_b_iso, atom_beta,
dict_in_out: dict = None,
flag_unit_cell_parameters: bool = False, flag_atom_fract_xyz: bool = False,
flag_atom_occupancy: bool = False, flag_atom_b_iso: bool = False, flag_atom_beta: bool = False,
    flag_use_precalculated_data: bool = False):
    """Calculate charge (X-ray) structure factor based on the information given in dictionary.
Output information is written in the same dictionary. The following keys have to be defined.
"""
if dict_in_out is None:
flag_dict = False
dict_in_out_keys = []
else:
flag_dict = True
dict_in_out_keys = dict_in_out.keys()
if (flag_use_precalculated_data and ('index_hkl' in dict_in_out_keys)):
if numpy.any(dict_in_out["index_hkl"] != index_hkl):
dict_in_out.clear()
dict_in_out["index_hkl"] = index_hkl
if (flag_use_precalculated_data and ("atom_multiplicity" in dict_in_out_keys)):
atom_multiplicity = dict_in_out["atom_multiplicity"]
else:
ones = numpy.ones_like(atom_fract_xyz[0]).astype(int)
atom_symm_elems = numpy.stack([
(numpy.round(atom_fract_xyz[0]*10**6, decimals=0)).astype(int),
(numpy.round(atom_fract_xyz[1]*10**6, decimals=0)).astype(int),
(numpy.round(atom_fract_xyz[2]*10**6, decimals=0)).astype(int),
ones*10**6], axis=0)
if "full_symm_elems" in dict_in_out_keys:
full_symm_elems = dict_in_out["full_symm_elems"]
else:
full_symm_elems = calc_full_symm_elems_by_reduced(
reduced_symm_elems, centrosymmetry, centrosymmetry_position, translation_elems)
if flag_dict:
dict_in_out["full_symm_elems"] = full_symm_elems
atom_multiplicity = calc_multiplicity_by_atom_symm_elems(full_symm_elems, atom_symm_elems)
if flag_dict:
dict_in_out["atom_multiplicity"] = atom_multiplicity
flag_pr_1 = flag_atom_fract_xyz
if (flag_use_precalculated_data and ("pr_1" in dict_in_out_keys) and not(flag_atom_fract_xyz)):
pr_1 = dict_in_out["pr_1"]
else:
pr_1, dder_pr_1 = calc_pr1(index_hkl, reduced_symm_elems, atom_fract_xyz, flag_fract_xyz=flag_atom_fract_xyz)
if flag_dict:
dict_in_out["pr_1"] = pr_1
if (flag_use_precalculated_data and ("pr_2" in dict_in_out_keys)):
pr_2 = dict_in_out["pr_2"]
else:
pr_2 = calc_pr2(index_hkl, reduced_symm_elems)
if flag_dict:
dict_in_out["pr_2"] = pr_2
if (flag_use_precalculated_data and ("pr_3" in dict_in_out_keys)):
pr_3 = dict_in_out["pr_3"]
else:
pr_3 = calc_pr3(index_hkl, translation_elems)
if flag_dict:
dict_in_out["pr_3"] = pr_3
if (flag_use_precalculated_data and ("pr_4" in dict_in_out_keys)):
pr_4 = dict_in_out["pr_4"]
else:
pr_4 = calc_pr4(index_hkl, centrosymmetry_position)
if flag_dict:
dict_in_out["pr_4"] = pr_4
flag_sthovl = flag_unit_cell_parameters
if (flag_use_precalculated_data and ("sthovl" in dict_in_out_keys) and not(flag_sthovl)):
sthovl = dict_in_out["sthovl"]
else:
sthovl, dder_sthovl = calc_sthovl_by_unit_cell_parameters(
index_hkl, unit_cell_parameters, flag_unit_cell_parameters=flag_unit_cell_parameters)
if flag_dict:
dict_in_out["sthovl"] = sthovl
# dimensions ["hkl", "reduced symmetry", "atom"]
flag_debye_waller_factor = flag_sthovl or flag_atom_b_iso or flag_atom_beta
if (flag_use_precalculated_data and ("debye_waller_factor" in dict_in_out_keys) and not(flag_debye_waller_factor)):
debye_waller_factor = dict_in_out["debye_waller_factor"]
else:
debye_waller_factor, dder_dw = calc_dwf(
index_hkl[:, :, na, na], sthovl[:, na, na], atom_b_iso[na, na, :],
atom_beta[:, na, na, :], reduced_symm_elems[:, na, :, na],
flag_sthovl=flag_sthovl, flag_b_iso=flag_atom_b_iso, flag_beta=flag_atom_beta)
if flag_dict:
dict_in_out["debye_waller_factor"] = debye_waller_factor
flag_scat_length_neutron = False
flag_debye_waller = flag_atom_b_iso or flag_atom_beta
flag_f_asym = flag_scat_length_neutron or flag_debye_waller or flag_pr_1
if (flag_use_precalculated_data and ("f_asym" in dict_in_out_keys) and
not(flag_f_asym)):
f_asym = dict_in_out["f_asym"]
else:
l_scat_length_xray = [
numpy.interp(sthovl, table_sthovl, table_sc_ampl) for table_sc_ampl in table_atom_scattering_amplitude]
        scat_length_xray = (
            numpy.stack(l_scat_length_xray, axis=1) +
            numpy.expand_dims(atom_dispersion, axis=0)
        )
f_asym, dder_f_asym = calc_f_asym_a_by_pr(
atom_multiplicity, debye_waller_factor, atom_occupancy,
pr_1, pr_2,
flag_debye_waller=flag_debye_waller, flag_atom_occupancy=flag_atom_occupancy,
flag_pr_1=flag_pr_1)
if flag_dict:
dict_in_out["f_asym"] = f_asym
flag_f_charge = flag_f_asym
if (flag_use_precalculated_data and ("f_charge" in dict_in_out_keys) and
not(flag_f_charge)):
f_charge = dict_in_out["f_charge"]
else:
f_charge, dder_f_charge = calc_f_by_f_asym_a_pr(f_asym, scat_length_xray, pr_3, centrosymmetry, pr_4, flag_f_asym_a=flag_f_asym, flag_scattering_length=flag_scat_length_neutron)
if flag_dict:
dict_in_out["f_charge"] = f_charge
dder = {}
if flag_unit_cell_parameters:
dder["unit_cell_parameters"] = None
if flag_atom_fract_xyz:
dder["atom_fract_xyz"] = None
if flag_atom_occupancy:
dder["atom_occupancy"] = None
if flag_atom_b_iso:
dder["atom_b_iso"] = None
if flag_atom_beta:
dder["atom_beta"] = None
return f_charge, dder
def calc_sft_ccs_by_dictionary(dict_crystal, dict_in_out, flag_use_precalculated_data: bool = False):
"""Calculate structure factor tensor in CCS (X||a*, Z||c) based on the information given in dictionary.
Output information is written in the same dictionary.
"""
dict_crystal_keys = dict_crystal.keys()
dict_in_out_keys = dict_in_out.keys()
necessary_crystal_keys = set(["unit_cell_parameters", ])
diff_set_crystal = necessary_crystal_keys.difference(set(dict_crystal_keys))
if len(diff_set_crystal) != 0:
raise AttributeError(f"The following attributes have to be defined {diff_set_crystal:}")
flag_reduced_symm_elems = len(set(["reduced_symm_elems", "centrosymmetry", "translation_elems"]).difference(set(dict_crystal_keys))) == 0
flag_full_symm_elems = len(set(["full_symm_elems", ]).difference(set(dict_crystal_keys))) == 0
flag_full_mcif_elems = len(set(["full_mcif_elems", ]).difference(set(dict_crystal_keys))) == 0
if not(flag_reduced_symm_elems or flag_full_symm_elems or flag_full_mcif_elems):
raise AttributeError("The symmetry elements have to be defined.")
necessary_in_out_keys = set(["index_hkl", ])
diff_set_in_out = necessary_in_out_keys.difference(set(dict_in_out_keys))
if len(diff_set_in_out) != 0:
raise AttributeError(f"The following attributes have to be defined {diff_set_in_out:}")
index_hkl = dict_in_out["index_hkl"]
non_zero_keys = set(["mag_atom_lande_factor", "mag_atom_kappa",
"mag_atom_j0_parameters", "mag_atom_j2_parameters"])
diff_set_crystal = non_zero_keys.difference(set(dict_crystal_keys))
if len(diff_set_crystal) != 0:
sft_ccs = numpy.zeros((9, index_hkl.shape[-1]), dtype=complex)
dder = {}
return sft_ccs, dder
if "flag_only_orbital" in dict_in_out_keys:
flag_only_orbital = dict_in_out["flag_only_orbital"]
else:
flag_only_orbital = False
if flag_reduced_symm_elems:
reduced_symm_elems = dict_crystal["reduced_symm_elems"]
centrosymmetry = dict_crystal["centrosymmetry"]
if centrosymmetry:
centrosymmetry_position = dict_crystal["centrosymmetry_position"]
else:
centrosymmetry_position = None
translation_elems = dict_crystal["translation_elems"]
elif flag_full_symm_elems:
full_symm_elems = dict_crystal["full_symm_elems"]
reduced_symm_elems = full_symm_elems
centrosymmetry = False
centrosymmetry_position = None
translation_elems = numpy.array([[0], [0], [0], [1]], dtype=int)
elif flag_full_mcif_elems:
full_mcif_elems = dict_crystal["full_mcif_elems"]
reduced_symm_elems = full_mcif_elems[:13]
centrosymmetry = False
centrosymmetry_position = None
translation_elems = numpy.array([[0], [0], [0], [1]], dtype=int)
unit_cell_parameters = dict_crystal["unit_cell_parameters"]
atom_para_index = dict_crystal["atom_para_index"]
atom_para_fract_xyz = dict_crystal["atom_fract_xyz"][:, atom_para_index]
atom_para_sc_fract = dict_crystal["atom_site_sc_fract"][:, atom_para_index]
atom_para_sc_b = dict_crystal["atom_site_sc_b"][:, atom_para_index]
atom_para_fract_xyz = calc_m_v(
atom_para_sc_fract, numpy.mod(atom_para_fract_xyz, 1), flag_m=False, flag_v=False)[0] + atom_para_sc_b
atom_para_occupancy = dict_crystal["atom_occupancy"][atom_para_index]
atom_para_b_iso = dict_crystal["atom_b_iso"][atom_para_index]
atom_beta = dict_crystal["atom_beta"]
if "atom_site_aniso_sc_beta" in dict_crystal_keys:
atom_site_aniso_sc_beta = dict_crystal["atom_site_aniso_sc_beta"]
atom_site_aniso_index = dict_crystal["atom_site_aniso_index"]
atom_sc_beta = numpy.zeros((6,)+atom_beta.shape, dtype=float)
atom_sc_beta[:, :, atom_site_aniso_index] = atom_site_aniso_sc_beta
atom_beta = (atom_sc_beta*numpy.expand_dims(atom_beta, axis=0)).sum(axis=1)
atom_para_beta = atom_beta[:, atom_para_index]
mag_atom_para_index = dict_crystal["mag_atom_para_index"]
atom_para_lande_factor = dict_crystal["mag_atom_lande_factor"][mag_atom_para_index]
atom_para_kappa = dict_crystal["mag_atom_kappa"][mag_atom_para_index]
atom_para_j0_parameters = dict_crystal["mag_atom_j0_parameters"][:, mag_atom_para_index]
atom_para_j2_parameters = dict_crystal["mag_atom_j2_parameters"][:, mag_atom_para_index]
atom_para_susceptibility = dict_crystal["atom_para_susceptibility"]
atom_para_sc_chi = dict_crystal["atom_para_sc_chi"]
flag_unit_cell_parameters = numpy.any(dict_crystal["flags_unit_cell_parameters"])
flag_atom_para_fract_xyz = numpy.any(dict_crystal["flags_atom_fract_xyz"][:, atom_para_index])
flag_atom_para_occupancy = numpy.any(dict_crystal["flags_atom_occupancy"][atom_para_index])
flag_atom_para_b_iso = numpy.any(dict_crystal["flags_atom_b_iso"][atom_para_index])
flag_atom_para_beta = numpy.any(dict_crystal["flags_atom_beta"][:, atom_para_index])
flag_atom_para_susceptibility = numpy.any(dict_crystal["flags_atom_para_susceptibility"])
flag_atom_para_lande_factor = numpy.any(dict_crystal["flags_mag_atom_lande_factor"][mag_atom_para_index])
flag_atom_para_kappa = numpy.any(dict_crystal["flags_mag_atom_kappa"][mag_atom_para_index])
sft_ccs, dder = calc_sft_ccs(index_hkl,
reduced_symm_elems, centrosymmetry, centrosymmetry_position, translation_elems,
unit_cell_parameters, atom_para_fract_xyz, atom_para_occupancy, atom_para_susceptibility, atom_para_b_iso, atom_para_beta,
atom_para_lande_factor, atom_para_kappa, atom_para_j0_parameters, atom_para_j2_parameters, atom_para_sc_chi,
dict_in_out=dict_in_out, flag_only_orbital=flag_only_orbital,
flag_unit_cell_parameters=flag_unit_cell_parameters, flag_atom_para_fract_xyz=flag_atom_para_fract_xyz,
flag_atom_para_occupancy=flag_atom_para_occupancy, flag_atom_para_susceptibility=flag_atom_para_susceptibility,
flag_atom_para_b_iso=flag_atom_para_b_iso, flag_atom_para_beta=flag_atom_para_beta,
flag_atom_para_lande_factor=flag_atom_para_lande_factor, flag_atom_para_kappa=flag_atom_para_kappa,
flag_use_precalculated_data=flag_use_precalculated_data)
return sft_ccs, dder
def calc_sft_ccs(index_hkl,
reduced_symm_elems, centrosymmetry, centrosymmetry_position, translation_elems,
unit_cell_parameters, atom_para_fract_xyz, atom_para_occupancy, atom_para_susceptibility, atom_para_b_iso, atom_para_beta,
atom_para_lande_factor, atom_para_kappa, atom_para_j0_parameters, atom_para_j2_parameters, atom_para_sc_chi,
dict_in_out: dict = None, flag_only_orbital: bool = False,
flag_unit_cell_parameters: bool = False, flag_atom_para_fract_xyz: bool = False,
flag_atom_para_occupancy: bool = False, flag_atom_para_susceptibility: bool = False,
flag_atom_para_b_iso: bool = False, flag_atom_para_beta: bool = False,
flag_atom_para_lande_factor: bool = False, flag_atom_para_kappa: bool = False,
flag_use_precalculated_data: bool = False):
"""Calculate structure factor tensor in Cartesian coordinate system with X||a*, Z||c in 10**-12 cm.
    Note that the susceptibility parameters are given in mu_B.
"""
if dict_in_out is None:
flag_dict = False
dict_in_out_keys = []
else:
flag_dict = True
dict_in_out_keys = dict_in_out.keys()
if 'index_hkl' in dict_in_out_keys:
if numpy.any(dict_in_out["index_hkl"] != index_hkl):
dict_in_out.clear()
dict_in_out["index_hkl"] = index_hkl
if (flag_use_precalculated_data and ("atom_para_multiplicity" in dict_in_out_keys)):
mag_atom_multiplicity = dict_in_out["atom_para_multiplicity"]
else:
ones = numpy.ones_like(atom_para_fract_xyz[0]).astype(int)
atom_symm_elems = numpy.stack([
(numpy.round(atom_para_fract_xyz[0]*10**6, decimals=0)).astype(int),
(numpy.round(atom_para_fract_xyz[1]*10**6, decimals=0)).astype(int),
(numpy.round(atom_para_fract_xyz[2]*10**6, decimals=0)).astype(int),
ones*10**6], axis=0)
if "full_symm_elems" in dict_in_out_keys:
full_symm_elems = dict_in_out["full_symm_elems"]
else:
full_symm_elems = calc_full_symm_elems_by_reduced(
reduced_symm_elems, centrosymmetry, centrosymmetry_position, translation_elems)
if flag_dict:
dict_in_out["full_symm_elems"] = full_symm_elems
mag_atom_multiplicity = calc_multiplicity_by_atom_symm_elems(full_symm_elems, atom_symm_elems)
if flag_dict:
dict_in_out["atom_para_multiplicity"] = mag_atom_multiplicity
flag_pr_1 = flag_atom_para_fract_xyz
if (flag_use_precalculated_data and ("pr_1_atom_para" in dict_in_out_keys) and not(flag_atom_para_fract_xyz)):
pr_1 = dict_in_out["pr_1_atom_para"]
else:
pr_1, dder_pr_1 = calc_pr1(index_hkl, reduced_symm_elems, atom_para_fract_xyz, flag_fract_xyz=flag_atom_para_fract_xyz)
if flag_dict:
dict_in_out["pr_1_atom_para"] = pr_1
if (flag_use_precalculated_data and ("pr_2" in dict_in_out_keys)):
pr_2 = dict_in_out["pr_2"]
else:
pr_2 = calc_pr2(index_hkl, reduced_symm_elems)
if flag_dict:
dict_in_out["pr_2"] = pr_2
if (flag_use_precalculated_data and ("pr_3" in dict_in_out_keys)):
pr_3 = dict_in_out["pr_3"]
else:
pr_3 = calc_pr3(index_hkl, translation_elems)
if flag_dict:
dict_in_out["pr_3"] = pr_3
if (flag_use_precalculated_data and ("pr_4" in dict_in_out_keys)):
pr_4 = dict_in_out["pr_4"]
else:
pr_4 = calc_pr4(index_hkl, centrosymmetry_position)
if flag_dict:
dict_in_out["pr_4"] = pr_4
flag_sthovl = flag_unit_cell_parameters
if (flag_use_precalculated_data and ("sthovl" in dict_in_out_keys) and not(flag_sthovl)):
sthovl = dict_in_out["sthovl"]
else:
sthovl, dder_sthovl = calc_sthovl_by_unit_cell_parameters(
index_hkl, unit_cell_parameters, flag_unit_cell_parameters=flag_unit_cell_parameters)
if flag_dict:
dict_in_out["sthovl"] = sthovl
flag_pr_5 = flag_unit_cell_parameters
if (flag_use_precalculated_data and ("pr_5" in dict_in_out_keys) and not(flag_pr_5)):
pr_5 = dict_in_out["pr_5"]
else:
pr_5, dder_pr_5 = calc_pr5(reduced_symm_elems, unit_cell_parameters, flag_unit_cell_parameters=flag_unit_cell_parameters)
if flag_dict:
dict_in_out["pr_5"] = pr_5
flag_atom_para_form_factor = (flag_sthovl or flag_atom_para_lande_factor or flag_atom_para_kappa)
flag_hh = True
if "flag_only_orbital" in dict_in_out_keys:
flag_hh = flag_only_orbital == dict_in_out["flag_only_orbital"]
dict_in_out["flag_only_orbital"] = flag_only_orbital
if (flag_use_precalculated_data and ("atom_para_form_factor" in dict_in_out_keys) and not(flag_atom_para_form_factor) and flag_hh):
atom_para_form_factor = dict_in_out["atom_para_form_factor"]
else:
atom_para_form_factor, dder_ff = calc_form_factor(
sthovl[:, na], atom_para_lande_factor[na, :], atom_para_kappa[na, :], atom_para_j0_parameters[:, na, :], atom_para_j2_parameters[:, na, :],
flag_lande_factor=flag_atom_para_lande_factor,
flag_only_orbital=flag_only_orbital,
flag_sthovl=flag_sthovl,
flag_kappa=flag_atom_para_kappa)
if flag_dict:
dict_in_out["atom_para_form_factor"] = atom_para_form_factor
# dimensions ["hkl", "reduced symmetry", "atom"]
flag_debye_waller_factor = flag_sthovl or flag_atom_para_b_iso or flag_atom_para_beta
if (flag_use_precalculated_data and ("atom_para_debye_waller_factor" in dict_in_out_keys) and not(flag_debye_waller_factor)):
debye_waller_factor = dict_in_out["atom_para_debye_waller_factor"]
else:
debye_waller_factor, dder_dw = calc_dwf(
index_hkl[:, :, na, na], sthovl[:, na, na], atom_para_b_iso[na, na, :],
atom_para_beta[:, na, na, :], reduced_symm_elems[:, na, :, na],
flag_sthovl=flag_sthovl, flag_b_iso=flag_atom_para_b_iso, flag_beta=flag_atom_para_beta)
if flag_dict:
dict_in_out["atom_para_debye_waller_factor"] = debye_waller_factor
flag_scat_length_neutron = False
flag_debye_waller = flag_atom_para_b_iso or flag_atom_para_beta
flag_sft_ccs_asym = flag_atom_para_form_factor or flag_debye_waller or flag_atom_para_occupancy or flag_atom_para_susceptibility or flag_pr_1 or flag_pr_5
if (flag_use_precalculated_data and ("sft_ccs_asym" in dict_in_out_keys) and
not(flag_sft_ccs_asym)):
sft_ccs_asym = dict_in_out["sft_ccs_asym"]
else:
sft_ccs_asym, dder_sft_ccs_asym = calc_sft_ccs_asym_a_by_pr(
mag_atom_multiplicity, debye_waller_factor, atom_para_occupancy, atom_para_susceptibility, atom_para_sc_chi,
pr_1, pr_2, pr_5,
flag_debye_waller=flag_debye_waller, flag_atom_para_occupancy=flag_atom_para_occupancy,
flag_atom_para_susceptibility = flag_atom_para_susceptibility,
flag_pr_1=flag_pr_1, flag_pr_5=flag_pr_5)
if flag_dict:
dict_in_out["sft_ccs_asym"] = sft_ccs_asym
flag_sft_ccs = flag_sft_ccs_asym
if (flag_use_precalculated_data and ("sft_ccs" in dict_in_out_keys) and
not(flag_sft_ccs)):
sft_ccs = dict_in_out["sft_ccs"]
else:
sft_ccs, dder_sft_ccs = calc_f_by_f_asym_a_pr(sft_ccs_asym, atom_para_form_factor, pr_3, centrosymmetry, pr_4, flag_f_asym_a=flag_sft_ccs_asym, flag_scattering_length=flag_atom_para_form_factor)
if flag_dict:
dict_in_out["sft_ccs"] = sft_ccs
dder = {}
if flag_unit_cell_parameters:
dder["unit_cell_parameters"] = None
if flag_atom_para_fract_xyz:
dder["atom_para_fract_xyz"] = None
if flag_atom_para_occupancy:
dder["atom_para_occupancy"] = None
if flag_atom_para_b_iso:
dder["atom_para_b_iso"] = None
if flag_atom_para_beta:
dder["atom_para_beta"] = None
if flag_atom_para_susceptibility:
dder["atom_para_susceptibility"] = (
dder_sft_ccs["f_asym_a_real"][:, na, :, :]*dder_sft_ccs_asym["atom_para_susceptibility"]+
dder_sft_ccs["f_asym_a_imag"][:, na, :, :]*dder_sft_ccs_asym["atom_para_susceptibility"])
return sft_ccs, dder
def calc_index_hkl_multiplicity_in_range(sthovl_min, sthovl_max, unit_cell_parameters, reduced_symm_elems, translation_elems, centrosymmetry: bool):
a, b, c = unit_cell_parameters[0], unit_cell_parameters[1], unit_cell_parameters[2]
h_max = int(2.*a*sthovl_max)
k_max = int(2.*b*sthovl_max)
l_max = int(2.*c*sthovl_max)
index_h = numpy.arange(-h_max, h_max+1, 1, dtype=int)
index_k = numpy.arange(-k_max, k_max+1, 1, dtype=int)
index_l = numpy.arange(-l_max, l_max+1, 1, dtype=int)
index_h, index_k, index_l = numpy.meshgrid(index_h, index_k, index_l, indexing="ij")
index_h, index_k, index_l = index_h.flatten(), index_k.flatten(), index_l.flatten()
index_hkl_full = numpy.stack([index_h, index_k, index_l], axis=0)
index_hkl_equivalent = calc_equivalent_reflections(index_hkl_full, reduced_symm_elems, centrosymmetry=centrosymmetry)
label_hkl_equivalent = 1000000*index_hkl_equivalent[0] + 1000*index_hkl_equivalent[1] + index_hkl_equivalent[2]
index_max = numpy.argsort(label_hkl_equivalent, axis=1)[:,-1]
index_hkl_sort = index_hkl_equivalent[:, numpy.arange(index_max.size),index_max]
index_hkl_unique, counts_unique = numpy.unique(index_hkl_sort, axis=1, return_counts=True)
pr_3 = calc_pr3(index_hkl_unique, translation_elems)
flag = numpy.logical_not(numpy.isclose(pr_3, 0.))
index_hkl = index_hkl_unique[:, flag]
counts = counts_unique[flag]
sthovl, dder_sthovl = calc_sthovl_by_unit_cell_parameters(index_hkl, unit_cell_parameters)
arg_sort_sthovl = numpy.argsort(sthovl)
index_hkl_sort = index_hkl[:, arg_sort_sthovl]
counts_sort = counts[arg_sort_sthovl]
sthovl_sort = sthovl[arg_sort_sthovl]
    flag = numpy.logical_and(sthovl_sort >= sthovl_min, sthovl_sort <= sthovl_max)
#
# nd2cat (n-dimensional 2 categorical)
# Author: <NAME>
#
import numpy as np
import pandas as pd
import scipy.ndimage
import skimage
import skimage.color
import skimage.io as io
import skimage.transform as transform
from scipy.ndimage.filters import maximum_filter
from sklearn.cluster import KMeans
from sklearn.cluster import MiniBatchKMeans
from sklearn.cluster import MeanShift
from sklearn.cluster import AgglomerativeClustering
from sklearn.decomposition import PCA
def relabel_clusters(clusters):
k = clusters.shape[0]
ch = clusters.shape[1]
def dist(a, b):
return np.sqrt(np.sum(np.square(a-b)))
# used will be a list of tuples of (centroid_sum, n)
used = []
unused = list(range(k))
    prev_cluster = np.zeros((ch,))
# by TR
from obspy.core import UTCDateTime
try:
from sito.util import dist2gps
except:
pass
import logging
import numpy as np
import matplotlib
from matplotlib.colors import Normalize
from matplotlib import cbook
from numpy import ma
log = logging.getLogger(__name__)
def equi(m, lat, lon, radius, indeg=True):
if indeg:
radius = radius / 360. * 6371. * 2 * np.pi
X = []
Y = []
for azimuth in range(0, 360):
glat2, glon2 = dist2gps(radius, azimuth, lat, lon)
X.append(glon2)
Y.append(glat2)
X.append(X[0])
Y.append(Y[0])
#~ m.plot(X,Y,**kwargs) #Should work, but doesn't...
X, Y = m(X, Y)
return X, Y
def line(m, lat, lon, azi, start, end, indeg=True):
if indeg:
start = start / 360. * 6371. * 2 * np.pi
end = end / 360. * 6371. * 2 * np.pi
X = []
Y = []
for distance in np.linspace(start, end, 100):
glat2, glon2 = dist2gps(distance, azi, lat, lon)
X.append(glon2)
Y.append(glat2)
X, Y = m(X, Y)
return X, Y
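# Illustrative sketch only (not used elsewhere in this module): draw a 100 km circle
# around a point on a Basemap instance using equi() above. The function name,
# projection and coordinates are arbitrary examples, and it assumes that
# mpl_toolkits.basemap and sito.util.dist2gps are importable.
def _example_equi_circle():
    from mpl_toolkits.basemap import Basemap
    m = Basemap(projection='merc', llcrnrlat=-35, urcrnrlat=-15,
                llcrnrlon=-75, urcrnrlon=-65, resolution='l')
    m.drawcoastlines()
    # radius is given in km here, hence indeg=False
    X, Y = equi(m, lat=-25.0, lon=-70.0, radius=100.0, indeg=False)
    m.plot(X, Y, color='r')
    return X, Y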
def _getUTCListFromSth(stream, time, raisenumber=False):
N = len(stream)
if isinstance(time, basestring):
if 'onset' in time or 'time' in time: #isinstance(relative, basestring):
ret = stream.getHI(time)
elif time == 'middle':
starttime = stream.getHI('starttime')
endtime = stream.getHI('endtime')
ret = [starttime[i] + (endtime[i] - starttime[i]) / 2
for i in range(N)]
else:
raise ValueError('time is string but not expected one.')
elif isinstance(time, UTCDateTime):
ret = [time] * N
elif cbook.iterable(time):
if np.any([not isinstance(entry, (UTCDateTime, float, int, long)) for entry in time]):
raise ValueError('time is list, but not of UTCDateTime or float objects.')
if len(time) != N:
raise ValueError('time is list, but has not the length of stream.')
ret = None
elif isinstance(time, (float, int, long)) and not raisenumber:
ret = None
else:
raise ValueError('time has wrong type.')
return ret
def getTimeIntervall(stream, start=None, end=None, relative='starttime', ret_rel='utc'):
"""
Create two lists of UTCDateTimes - start list and end list
'time' can stand for UTCDateTime, list of UTCDateTimes, header entry out of
    ('ponset', 'sonset', 'starttime', 'endtime') or 'middle'
:param start, end: - None (means start- resp. endtime)
- time object
- or seconds relative to param relative
:param relative: times (if given as seconds=numbers) are taken relative to
        this parameter, is also needed for param ret_rel='relative'
-time object
:param ret_rel: - 'utc' output in absolute UTCDateTime
- 'relative': output in seconds relative to param relative
- time object: output in seconds relative to time
:return: start and end list of UTCDateTime or None if stream has length 0
"""
N = len(stream)
if N == 0:
return
# get list of UTCDateTimes for start_out and end_out
if start == None:
start = 'starttime'
if end == None:
end = 'endtime'
start_out = _getUTCListFromSth(stream, start)
end_out = _getUTCListFromSth(stream, end)
# get list of UTCDateTimes for relative if needed
if start_out == None or end_out == None or ret_rel == 'relative':
relative = _getUTCListFromSth(stream, relative, raisenumber=True)
# get list of UTCDateTimes for start_out and end_out
if start_out == None:
if cbook.iterable(start):
start_out = [utc + start[i] for i, utc in enumerate(relative)]
else:
start_out = [i + start for i in relative]
if end_out == None:
if cbook.iterable(start):
end_out = [utc + end[i] for i, utc in enumerate(relative)]
else:
end_out = [i + end for i in relative]
# convert UTCDateTimes to seconds if ret_rel demands it
if ret_rel == 'utc':
return start_out, end_out
elif ret_rel != 'relative':
relative = _getUTCListFromSth(stream, ret_rel)
start_out = [start_out[i] - relative[i] for i in range(N)]
end_out = [end_out[i] - relative[i] for i in range(N)]
return start_out, end_out
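# Illustrative call (sito Stream API assumed, arguments as described in the docstring
# above): a window from 5 s before to 30 s after the P onset, returned as two lists
# of UTCDateTime:
#     start, end = getTimeIntervall(stream, start=-5, end=30, relative='ponset')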
def getDataWindow(stream, start=None, end=None, relative='starttime'):
"""
Return array with data in time window (start, end) around relative.
'time' can stand for UTCDateTime, list of UTCDateTimes, header entry out of
    ('ponset', 'sonset', 'starttime', 'endtime') or 'middle'
:param stream: Stream object with data
:param start, end: time or float (seconds) relative to param=relative
:param relative: time, is needed if start or end in seconds (float)
:return: np.array of shape (N_stream, N_data)
"""
stream = stream.slice2(start, end, relative=relative)
N_stream = len(stream)
if N_stream == 0:
raise ValueError('Stream has length 0')
samp = stream.getHI('sampling_rate')
if min(samp) != max(samp):
stream.downsample2(min(samp))
log.warning('Downsampling stream because of differing sampling rate.')
npts = stream.getHI('npts')
if min(npts) != max(npts):
log.warning('Traces in stream have different NPTS. '
'Difference: %d samples' % (max(npts) - min(npts)))
data = np.zeros((N_stream, max(npts)))
for i, trace in enumerate(stream):
data[i, :len(trace.data)] = trace.data
return data
# create colormap Blue -> White -> Red for xcorr plots
cdict = {'red': ((0.0, 0.0, 0.0),
# (0.3, 0.5, 0.5),
(0.5, 1.0, 1.0),
# (0.7, 1.0, 1.0),
(1.0, 1.0, 1.0)),
'green': ((0.0, 0.0, 0.0),
# (0.3, 1.0, 1.0),
(0.5, 1.0, 1.0),
# (0.7, 1.0, 1.0),
(1.0, 0.0, 0.0)),
'blue': ((0.0, 1.0, 1.0),
# (0.3, 1.0, 1.0),
(0.5, 1.0, 1.0),
# (0.7, 0.0, 0.0),
(1.0, 0.0, 0.0))}
xcorr_cmap = matplotlib.colors.LinearSegmentedColormap('xcorr_cmap', cdict, 256)
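# Minimal usage sketch (illustrative only): display a random correlation-like matrix
# with the blue-white-red colormap defined above; with a symmetric vmin/vmax, zero
# maps to white. The data below is synthetic, and DLogNorm (defined next) could be
# passed as norm= instead of the plain linear scaling used here.
def _example_xcorr_image():
    import matplotlib.pyplot as plt
    data = 2 * np.random.rand(50, 50) - 1  # values in [-1, 1]
    plt.imshow(data, cmap=xcorr_cmap, vmin=-1, vmax=1, aspect='auto')
    plt.colorbar()
    plt.show()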
class DLogNorm(Normalize):
"""
Normalize a given positive or negative value to the 0-1 range on a log scale
negative values are mapped to 0-0.5
positive values are mapped to 0.5-1
Derived from:
matplotlib.colors.LogNorm
"""
def __init__(self, vmin=None, vmax=None, cmin=1e-5, cmax=1e-5, clip=False):
"""
If *vmin* or *vmax* is not given, they are taken from the input's
minimum and maximum value respectively. If *clip* is *True* and
the given value falls outside the range, the returned value
will be 0 or 1, whichever is closer. Returns 0 if::
vmin==vmax
Works with scalars or arrays, including masked arrays. If
*clip* is *True*, masked values are set to 1; otherwise they
remain masked. Clipping silently defeats the purpose of setting
the over, under, and masked colors in the colormap, so it is
likely to lead to surprises; therefore the default is
*clip* = *False*.
cmin, cmax gives the range of logarithmic plot for positive (cmax)
and negative (cmin) values. All values with smaller absolute value
are mapped to 0.5.
"""
self.vmin = vmin
self.vmax = vmax
self.cmin = cmin
self.cmax = cmax
self.clip = clip
def __call__(self, value, clip=None):
if clip is None:
clip = self.clip
if cbook.iterable(value):
vtype = 'array'
val = ma.asarray(value).astype(np.float)
else:
vtype = 'scalar'
val = ma.array([value]).astype(np.float)
self.autoscale_None(val)
vmin, vmax = self.vmin, self.vmax
cmin, cmax = self.cmin * vmin, self.cmax * vmax
if vmin > vmax:
raise ValueError("minvalue must be less than or equal to maxvalue")
elif vmin == vmax:
result = 0.0 * val
else:
if clip:
mask = ma.getmask(val)
val = ma.array(np.clip(val.filled(vmax), vmin, vmax),
mask=mask)
result = 0. * val + 0.5
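            # values with |val| below the cutoffs keep 0.5 (the centre of the colour
            # scale); larger positive/negative values are log-mapped into the upper/
            # lower half of [0, 1] below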
result[val > cmax] = (ma.log10(val[val > cmax]) - ma.log10(cmax)) / (np.log10(vmax) - np.log10(cmax)) / 2. + 0.5
result[val < cmin] = -(ma.log10(-val[val < cmin]) - ma.log10(-cmin)) / (np.log10(-vmin) - np.log10(-cmin)) / 2. + 0.5
if vtype == 'scalar':
result = result[0]
return result
def inverse(self, value):
if not self.scaled():
raise ValueError("Not invertible until scaled")
vmin, vmax = self.vmin, self.vmax
cmin, cmax = self.cmin * vmin, self.cmax * vmax
if cbook.iterable(value):
val = np.asarray(value)
result = 0.0 * val
result[val > 0.5] = cmax * (vmax / cmax) ** (2. * val[val > 0.5] - 1)
result[val < 0.5] = cmin * (vmin / cmin) ** (-2. * val[val < 0.5] + 1)
return result
else:
if value == 0.5:
return 0
elif value > 0.5:
return cmax * (vmax / cmax) ** (2. * value - 1)
elif value < 0.5:
return cmin * (vmin / cmin) ** (-2. * value + 1)
def ticks(self):
vmin, vmax = self.vmin, self.vmax
cmin, cmax = self.cmin, self.cmax
a1 = np.logspace(np.log10(cmax * vmax) + 1, np.log10(vmax), int(-np.log10(cmax)))
        a2 = -np.logspace(np.log10(-cmin * vmin) + 1, np.log10(-vmin), int(-np.log10(cmin)))
import pickle
import logging
import numpy as np
import pandas as pd
from collections import Counter
from typing import Union, Sequence, Optional, List, Any
from sklearn.base import BaseEstimator
from indra.statements import Evidence, Statement, get_all_descendants
from indra.belief import BeliefScorer, check_extra_evidence, \
get_stmt_evidence, SimpleScorer
logger = logging.getLogger(__name__)
class SklearnScorer(BeliefScorer):
"""Use a pre-trained Sklearn classifier to predict belief scores.
An implementing instance of this base class has two personalities: as a
subclass of BeliefScorer, it implements the functions required by the
BeliefEngine, `score_statements` and `check_prior_probs`. It also behaves
like an sklearn model by composition, implementing methods `fit`,
`predict`, `predict_proba`, and `predict_log_proba`, which are passed
through to an internal sklearn model.
A key role of this wrapper class is to implement the preprocessing of
statement properties into a feature matrix in a standard way, so that
a classifier trained on one corpus of statement data will still work when
used on another corpus.
Implementing subclasses must implement at least one of the methods for
building the feature matrix, `stmts_to_matrix` or `df_to_matrix`.
Parameters
----------
model :
Any instance of a classifier object supporting the methods `fit`,
`predict_proba`, `predict`, and `predict_log_proba`.
"""
def __init__(
self,
model: BaseEstimator,
):
self.model = model
def check_prior_probs(
self,
statements: Sequence[Statement],
) -> None:
"""Empty implementation for now."""
pass
def score_statements(
self,
statements: Sequence[Statement],
extra_evidence: Optional[List[List[Evidence]]] = None,
) -> Sequence[float]:
return self.predict_proba(statements, extra_evidence)[:, 1]
def stmts_to_matrix(
self,
stmts: Sequence[Statement],
extra_evidence: Optional[List[List[Evidence]]] = None,
) -> np.ndarray:
"""Convert a list of Statements to a feature matrix."""
raise NotImplementedError('Need to implement the stmts_to_matrix '
'method')
def df_to_matrix(
self,
df: pd.DataFrame,
) -> np.ndarray:
"""Convert a statement DataFrame to a feature matrix."""
raise NotImplementedError('Need to implement the df_to_matrix '
'method')
def to_matrix(self,
stmt_data: Union[np.ndarray, Sequence[Statement], pd.DataFrame],
extra_evidence: Optional[List[List[Evidence]]] = None,
) -> np.ndarray:
"""Get stmt feature matrix by calling appropriate method.
If `stmt_data` is already a matrix (e.g., obtained after performing a
train/test split on a matrix generated for a full statement corpus), it
is returned directly; if a DataFrame of Statement metadata,
`self.df_to_matrix` is called; if a list of Statements,
`self.stmts_to_matrix` is called.
Parameters
----------
stmt_data :
Statement content to be used to generate a feature matrix.
extra_evidence :
A list corresponding to the given list of statements, where
each entry is a list of Evidence objects providing additional
support for the corresponding statement (i.e., Evidences that
aren't already included in the Statement's own evidence list).
Returns
-------
:
Feature matrix for the statement data.
"""
# If we got a Numpy array, just use it!
if isinstance(stmt_data, np.ndarray):
stmt_arr = stmt_data
# Otherwise check if we have a dataframe or a list of statements
# and call the appropriate *_to_matrix method
elif isinstance(stmt_data, pd.DataFrame):
if extra_evidence is not None:
raise NotImplementedError(
'extra_evidence cannot be used with a statement DataFrame.')
stmt_arr = self.df_to_matrix(stmt_data)
# Check if stmt_data is a list/tuple (i.e., of Statements):
elif isinstance(stmt_data, (list, tuple)):
# Check that the first entry is a Statement
if not isinstance(stmt_data[0], Statement):
raise ValueError('stmt_data must contain Statements.')
stmt_arr = self.stmts_to_matrix(stmt_data, extra_evidence)
# If it's something else, error
else:
raise TypeError(f'stmt_data is type {type(stmt_data)}: '
'must be a numpy array, DataFrame, or '
'list/tuple of Statements')
return stmt_arr
def fit(self,
stmt_data: Union[np.ndarray, Sequence[Statement], pd.DataFrame],
y_arr: Sequence[float],
extra_evidence: Optional[List[List[Evidence]]] = None,
*args,
**kwargs,
):
"""Preprocess stmt data and run sklearn model `fit` method.
Additional `args` and `kwargs` are passed to the `fit` method of the
wrapped sklearn model.
Parameters
----------
stmt_data :
Statement content to be used to generate a feature matrix.
y_arr :
Class values for the statements (e.g., a vector of 0s and 1s
indicating correct or incorrect).
extra_evidence :
A list corresponding to the given list of statements, where
each entry is a list of Evidence objects providing additional
support for the corresponding statement (i.e., Evidences that
aren't already included in the Statement's own evidence list).
"""
# Check dimensions of stmts (x) and y_arr
if len(stmt_data) != len(y_arr):
raise ValueError("Number of stmts/rows must match length of y_arr.")
# Get the data matrix based on the stmt list or stmt DataFrame
stmt_arr = self.to_matrix(stmt_data, extra_evidence)
# Call the fit method of the internal sklearn model
self.model.fit(stmt_arr, y_arr, *args, **kwargs)
def predict_proba(
self,
stmt_data: Union[np.ndarray, Sequence[Statement], pd.DataFrame],
extra_evidence: Optional[List[List[Evidence]]] = None,
*args,
**kwargs,
) -> np.ndarray:
"""Preprocess stmt data and run sklearn model `predict_proba` method.
Additional `args` and `kwargs` are passed to the `predict_proba` method
of the wrapped sklearn model.
Parameters
----------
stmt_data :
Statement content to be used to generate a feature matrix.
extra_evidence :
A list corresponding to the given list of statements, where
each entry is a list of Evidence objects providing additional
support for the corresponding statement (i.e., Evidences that
aren't already included in the Statement's own evidence list).
"""
# Call the prediction method of the internal sklearn model
stmt_arr = self.to_matrix(stmt_data, extra_evidence)
return self.model.predict_proba(stmt_arr, *args, **kwargs)
def predict(
self,
stmt_data: Union[np.ndarray, Sequence[Statement], pd.DataFrame],
extra_evidence: Optional[List[List[Evidence]]] = None,
*args,
**kwargs,
) -> np.ndarray:
"""Preprocess stmt data and run sklearn model `predict` method.
Additional `args` and `kwargs` are passed to the `predict` method of
the wrapped sklearn model.
Parameters
----------
stmt_data :
Statement content to be used to generate a feature matrix.
extra_evidence :
A list corresponding to the given list of statements, where
each entry is a list of Evidence objects providing additional
support for the corresponding statement (i.e., Evidences that
aren't already included in the Statement's own evidence list).
"""
stmt_arr = self.to_matrix(stmt_data, extra_evidence)
return self.model.predict(stmt_arr, *args, **kwargs)
def predict_log_proba(
self,
stmt_data: Union[np.ndarray, Sequence[Statement], pd.DataFrame],
extra_evidence: Optional[List[List[Evidence]]] = None,
*args,
**kwargs,
) -> np.ndarray:
"""Preprocess stmt data and run sklearn model `predict_log_proba`.
Additional `args` and `kwargs` are passed to the `predict` method of
the wrapped sklearn model.
Parameters
----------
stmt_data :
Statement content to be used to generate a feature matrix.
extra_evidence :
A list corresponding to the given list of statements, where
each entry is a list of Evidence objects providing additional
support for the corresponding statement (i.e., Evidences that
aren't already included in the Statement's own evidence list).
"""
stmt_arr = self.to_matrix(stmt_data, extra_evidence)
return self.model.predict_log_proba(stmt_arr, *args, **kwargs)
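# A minimal illustrative subclass (not part of the INDRA API): it satisfies the
# stmts_to_matrix requirement described above by featurizing each statement with its
# direct evidence count only; the class name is hypothetical.
class _EvidenceCountScorer(SklearnScorer):
    def stmts_to_matrix(
        self,
        stmts: Sequence[Statement],
        extra_evidence: Optional[List[List[Evidence]]] = None,
    ) -> np.ndarray:
        # One column per statement: the number of directly attached evidences.
        return np.array([[len(stmt.evidence)] for stmt in stmts], dtype=float)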
class CountsScorer(SklearnScorer):
"""Belief model learned from evidence counts and other stmt properties.
If using a DataFrame for Statement data, it should have the following
columns:
* `stmt_type`
* `source_counts`
Alternatively, if the DataFrame doesn't have a `source_counts` column, it
should have columns with names matching the sources in `self.source_list`.
Parameters
----------
model :
Any instance of a classifier object supporting the methods `fit`,
`predict_proba`, `predict`, and `predict_log_proba`.
source_list :
List of strings denoting the evidence sources (evidence.source_api
values) to be used for prediction.
include_more_specific :
If True, will add extra columns to the statement data matrix for the
source counts drawn from more specific evidences; if use_num_pmids is
True, will also add an additional column for the number of PMIDs from
more specific evidences. If False, these columns will not be included
even if the `extra_evidence` argument is passed to the
`stmts_to_matrix` method. This is to ensure that the featurization of
statements is consistent between training and prediction.
use_stmt_type :
Whether to include statement type as a feature.
use_num_members :
Whether to include a feature denoting the number of members of the
statement. Primarily for stratifying belief predictions about Complex
statements with more than two members. Cannot be used for statement
data passed in as a DataFrame.
use_num_pmids :
Whether to include a feature for the total number of unique PMIDs
supporting each statement. Cannot be used for statement passed in as a
DataFrame.
use_promoter :
Whether to include a feature giving the fraction of evidence (0 to 1)
        containing the (case-insensitive) word "promoter". Tends to reduce
misclassification of Complex statements that actually refer to
protein-DNA binding.
use_avg_evidence_len :
Whether to include a feature giving the average evidence sentence
length (in space-separated tokens).
Example
-------
.. code-block:: python
from sklearn.linear_model import LogisticRegression
clf = LogisticRegression()
all_stmt_sources = CountsScorer.get_all_sources(stmts)
scorer = CountsScorer(clf, all_stmt_sources, use_stmt_type=True,
use_num_pmids=True)
scorer.fit(stmts, y_arr)
be = BeliefEngine(scorer)
be.set_hierarchy_probs(stmts)
"""
def __init__(
self,
model: BaseEstimator,
source_list: List[str],
include_more_specific: bool = False,
use_stmt_type: bool = False,
use_num_members: bool = False,
use_num_pmids: bool = False,
use_promoter: bool = False,
use_avg_evidence_len: bool = False,
):
# Call superclass constructor to store the model
super(CountsScorer, self).__init__(model)
self.source_list = source_list
self.include_more_specific = include_more_specific
self.use_stmt_type = use_stmt_type
self.use_num_members = use_num_members
self.use_num_pmids = use_num_pmids
self.use_promoter = use_promoter
self.use_avg_evidence_len = use_avg_evidence_len
# Build dictionary mapping INDRA Statement types to integers
if use_stmt_type:
all_stmt_types = get_all_descendants(Statement)
self.stmt_type_map = {t.__name__: ix
for ix, t in enumerate(all_stmt_types)}
@staticmethod
def get_all_sources(
stmts: Sequence[Statement],
include_more_specific: bool = True,
include_less_specific: bool = True,
) -> List[str]:
"""Get a list of all the source_apis supporting the given statements.
Useful for determining the set of sources to be used for fitting
and prediction.
Parameters
----------
stmts :
A list of INDRA Statements to collect source APIs for.
include_more_specific :
If True (default), then includes the source APIs for the more
specific statements in the `supports` attribute of each statement.
include_less_specific :
If True (default), then includes the source APIs for the less
specific statements in the `supported_by` attribute of each
statement.
Returns
-------
:
A list of (unique) source_apis found in the set of statements.
"""
stmt_sources = set([ev.source_api for s in stmts for ev in s.evidence])
if include_more_specific:
stmt_sources.update([ev.source_api
for stmt in stmts
for supp_stmt in stmt.supports
for ev in supp_stmt.evidence])
if include_less_specific:
stmt_sources.update([ev.source_api
for stmt in stmts
for supp_by_stmt in stmt.supported_by
for ev in supp_by_stmt.evidence])
return list(stmt_sources)
def stmts_to_matrix(
self,
stmts: Sequence[Statement],
extra_evidence: Optional[List[List[Evidence]]] = None,
) -> np.ndarray:
"""Convert a list of Statements to a feature matrix.
Features are encoded as follows:
* One column for every source listed in `self.source_list`, containing
the number of statement evidences from that source. If
`self.include_more_specific` is True and `extra_evidence` is
provided, these are used in combination with the Statement's own
evidence in determining source counts.
* If `self.use_stmt_type` is set, statement type is included via
one-hot encoding, with one column for each statement type.
* If `self.use_num_members` is set, a column is added for the number
of agents in the Statement.
* If `self.use_num_pmids` is set, a column is added with the total
          number of unique PMIDs supporting the Statement. If
`extra_evidence` is provided, these are used in combination with the
Statement's own evidence in determining the number of PMIDs.
Parameters
----------
stmts :
A list or tuple of INDRA Statements to be used to generate a
feature matrix.
extra_evidence :
A list corresponding to the given list of statements, where
each entry is a list of Evidence objects providing additional
support for the corresponding statement (i.e., Evidences that
aren't already included in the Statement's own evidence list).
Returns
-------
:
Feature matrix for the statement data.
"""
# Check arguments for including more specific evidences
if self.include_more_specific and extra_evidence is None:
logger.info("CountScorer is set to include_more_specific "
"evidences but no extra_evidence was included.")
extra_evidence = [[] for stmt in stmts]
elif not self.include_more_specific and extra_evidence is not None:
logger.warning("extra_evidence was included but CountScorer "
"instance is not set to include_more_specific "
"evidences so extra_evidence will be ignored.")
# Check our list of extra evidences
check_extra_evidence(extra_evidence, len(stmts))
# Add categorical features and collect source_apis
cat_features = []
stmt_sources = set()
for ix, stmt in enumerate(stmts):
# Collect all source_apis from stmt evidences
dir_pmids = set()
promoter_ct = 0
evidence_lens = []
for ev in stmt.evidence:
stmt_sources.add(ev.source_api)
dir_pmids.add(ev.pmid)
if ev.text is not None:
evidence_lens.append(len(ev.text.split()))
if 'promoter' in ev.text.lower():
promoter_ct += 1
indir_pmids = set()
if self.include_more_specific and extra_evidence:
for ev in extra_evidence[ix]:
stmt_sources.add(ev.source_api)
indir_pmids.add(ev.pmid)
# Collect non-source count features (e.g. type) from stmts
feature_row: List[Any] = [] # Appease the Type Hint Gods
# One-hot encoding of stmt type
if self.use_stmt_type:
stmt_type_ix = self.stmt_type_map[type(stmt).__name__]
type_features = [1 if ix == stmt_type_ix else 0
for ix in range(len(self.stmt_type_map))]
feature_row.extend(type_features)
# Add field for number of members
if self.use_num_members:
feature_row.append(len(stmt.agent_list()))
# Add field with number of unique PMIDs
if self.use_num_pmids:
feature_row.append(len(dir_pmids))
if self.include_more_specific and extra_evidence:
feature_row.append(len(indir_pmids))
# Add a field specifying the percentage of evidences containing
# the word "promoter":
if self.use_promoter:
promoter_pct = promoter_ct / len(stmt.evidence) \
if len(stmt.evidence) > 0 else 0
feature_row.append(promoter_pct)
# Add a field giving length of the sentence in words
if self.use_avg_evidence_len:
                avg_evidence_len = np.mean(evidence_lens)
#!/usr/bin/env python3
import random
import numpy as np
import matplotlib.pyplot as plt
from copy import deepcopy
from IPython import embed
class Robot:
def __init__(self, length=20.0):
"""
Creates robot and initializes location/orientation to 0, 0, 0.
"""
self.x = 0.0
self.y = 0.0
self.orientation = 0.0
self.length = length
self.steering_noise = 0.0
self.distance_noise = 0.0
self.steering_drift = 0.0
def set(self, x, y, orientation):
"""
Sets a robot coordinate.
"""
self.x = x
self.y = y
self.orientation = orientation % (2.0 * np.pi)
def set_noise(self, steering_noise, distance_noise):
"""
Sets the noise parameters.
"""
# makes it possible to change the noise parameters
# this is often useful in particle filters
self.steering_noise = steering_noise
self.distance_noise = distance_noise
def set_steering_drift(self, drift):
"""
Sets the systematical steering drift parameter
"""
self.steering_drift = drift
def move(self, steering, distance, tolerance=0.001, max_steering_angle=np.pi / 4.0):
"""
steering = front wheel steering angle, limited by max_steering_angle
        distance = total distance driven, must be non-negative
"""
if steering > max_steering_angle:
steering = max_steering_angle
if steering < -max_steering_angle:
steering = -max_steering_angle
if distance < 0.0:
distance = 0.0
# apply noise
steering2 = random.gauss(steering, self.steering_noise)
distance2 = random.gauss(distance, self.distance_noise)
# apply steering drift
steering2 += self.steering_drift
# Execute motion
turn = np.tan(steering2) * distance2 / self.length
if abs(turn) < tolerance:
# approximate by straight line motion
self.x += distance2 * np.cos(self.orientation)
self.y += distance2 * np.sin(self.orientation)
self.orientation = (self.orientation + turn) % (2.0 * np.pi)
else:
# approximate bicycle model for motion
radius = distance2 / turn
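            # the centre of the turning circle lies a distance `radius` perpendicular
            # to the current heading (to the left for a positive, i.e. left, turn)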
cx = self.x - (np.sin(self.orientation) * radius)
cy = self.y + (np.cos(self.orientation) * radius)
self.orientation = (self.orientation + turn) % (2.0 * np.pi)
            self.x = cx + (np.sin(self.orientation) * radius)
from __future__ import print_function, division
import configargparse #pip install configargparse
import torch
import torch.nn as nn
import torch.optim as optim
from torch.optim import lr_scheduler
import numpy as np
import torchvision
from torchvision import datasets, models, transforms
import matplotlib.pyplot as plt
import time
import os
import copy
import PIL
import PIL.Image
print(torch.__version__)
from TorchClassifier.Datasetutil.Visutil import imshow, vistestresult
from TorchClassifier.Datasetutil.Torchdatasetutil import loadTorchdataset
from TorchClassifier.myTorchModels.TorchCNNmodels import createTorchCNNmodel
# from TFClassifier.Datasetutil.TFdatasetutil import loadTFdataset #loadtfds, loadkerasdataset, loadimagefolderdataset
# from TFClassifier.myTFmodels.CNNsimplemodels import createCNNsimplemodel
# from TFClassifier.Datasetutil.Visutil import plot25images, plot9imagesfromtfdataset, plot_history
# from TFClassifier.myTFmodels.optimizer_factory import build_learning_rate, setupTensorboardWriterforLR
model = None
device = None
# import logger
parser = configargparse.ArgParser(description='myTorchClassify')
parser.add_argument('--data_name', type=str, default='CIFAR10',
help='data name: hymenoptera_data, CIFAR10, flower_photos')
parser.add_argument('--data_type', default='torchvisiondataset', choices=['trainvalfolder', 'traintestfolder', 'torchvisiondataset'],
help='the type of data')
parser.add_argument('--data_path', type=str, default='./../ImageClassificationData',
help='path to get data') #/Developer/MyRepo/ImageClassificationData
parser.add_argument('--img_height', type=int, default=28,
help='resize to img height, 224')
parser.add_argument('--img_width', type=int, default=28,
help='resize to img width, 224')
parser.add_argument('--save_path', type=str, default='./outputs/',
help='path to save the model')
# network
parser.add_argument('--model_name', default='mobilenet', choices=['mlpmodel1', 'lenet', 'resnetmodel1', 'vggmodel1', 'cnnmodel1'],
help='the network')
parser.add_argument('--arch', default='Pytorch', choices=['Tensorflow', 'Pytorch'],
help='Model Name, default: Pytorch.')
parser.add_argument('--learningratename', default='warmupexpdecay', choices=['fixedstep', 'fixed', 'warmupexpdecay'],
help='learning rate name')
parser.add_argument('--optimizer', default='Adam', choices=['SGD', 'Adam'],
help='select the optimizer')
parser.add_argument('--batchsize', type=int, default=32,
help='batch size')
parser.add_argument('--epochs', type=int, default=15,
help='epochs')
parser.add_argument('--GPU', type=bool, default=True,
help='use GPU')
parser.add_argument('--TPU', type=bool, default=False,
help='use TPU')
parser.add_argument('--MIXED_PRECISION', type=bool, default=False,
help='use MIXED_PRECISION')
parser.add_argument('--TAG', default='0915',
help='setup the experimental TAG to differentiate different running results')
parser.add_argument('--reproducible', type=bool, default=False,
help='get reproducible results we can set the random seed for Python, Numpy and PyTorch')
args = parser.parse_args()
def test_model(model, dataloaders, class_names, criterion, batch_size):
numclasses = len(class_names)
# track test loss
test_loss = 0.0
class_correct = list(0. for i in range(numclasses))
class_total = list(0. for i in range(numclasses))
model.eval()
if 'test' in dataloaders.keys():
test_loader=dataloaders['test']
else:
print("test dataset not available")
return
# iterate over test data
bathindex = 0
for data, target in test_loader:
bathindex = bathindex +1
# move tensors to GPU if CUDA is available
# if train_on_gpu:
# data, target = data.cuda(), target.cuda()
data = data.to(device)
target = target.to(device)
# forward pass: compute predicted outputs by passing inputs to the model
outputs = model(data)
if type(outputs) is tuple: #model may output multiple tensors as tuple
outputs, _ = outputs
# calculate the batch loss
loss = criterion(outputs, target)
# update test loss
test_loss += loss.item()*data.size(0)
# convert output probabilities to predicted class
_, pred = torch.max(outputs, 1)
# compare predictions to true label
correct_tensor = pred.eq(target.data.view_as(pred))
train_on_gpu = torch.cuda.is_available()
correct = np.squeeze(correct_tensor.numpy()) if not train_on_gpu else np.squeeze(correct_tensor.cpu().numpy())
# calculate test accuracy for each object class
for i in range(batch_size):
if i<len(target.data):#the actual batch size of the last batch is smaller than the batch_size
label = target.data[i]
class_correct[label] += correct[i].item()
class_total[label] += 1
# average test loss
test_loss = test_loss/len(test_loader.dataset)
print('Test Loss: {:.6f}\n'.format(test_loss))
for i in range(numclasses):
if class_total[i] > 0:
print('Test Accuracy of %5s: %2d%% (%2d/%2d)' % (
class_names[i], 100 * class_correct[i] / class_total[i],
np.sum(class_correct[i]), np.sum(class_total[i])))
else:
print('Test Accuracy of %5s: N/A (no training examples)' % (class_names[i]))
print('\nTest Accuracy (Overall): %2d%% (%2d/%2d)' % (
100. * np.sum(class_correct) / np.sum(class_total),
np.sum(class_correct), np.sum(class_total)))
def visualize_model(model, dataloaders, class_names, num_images=6):
was_training = model.training
model.eval()
images_so_far = 0
fig = plt.figure()
with torch.no_grad():
for i, (inputs, labels) in enumerate(dataloaders['val']):
inputs = inputs.to(device)
labels = labels.to(device)
outputs = model(inputs)
if type(outputs) is tuple: #model may output multiple tensors as tuple
outputs, _ = outputs
_, preds = torch.max(outputs, 1)
for j in range(inputs.size()[0]):
images_so_far += 1
ax = plt.subplot(num_images//2, 2, images_so_far)
ax.axis('off')
ax.set_title('predicted: {}'.format(class_names[preds[j]]))
imshow(inputs.cpu().data[j])
if images_so_far == num_images:
model.train(mode=was_training)
return
model.train(mode=was_training)
def main():
print("Torch Version: ", torch.__version__)
print("Torchvision Version: ", torchvision.__version__)
args.save_path=args.save_path+args.data_name+'_'+args.model_name+'_'+args.TAG
print("Output path:", args.save_path)
if not os.path.exists(args.save_path):
os.makedirs(args.save_path)
if args.GPU:
num_gpu = torch.cuda.device_count()
print("Num GPUs:", num_gpu)
# Which GPU Is The Current GPU?
print(torch.cuda.current_device())
# Get the name of the current GPU
print(torch.cuda.get_device_name(torch.cuda.current_device()))
# Is PyTorch using a GPU?
print(torch.cuda.is_available())
global device
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
else:
print("No GPU and TPU enabled")
#Load dataset
dataloaders, dataset_sizes, class_names, img_shape = loadTorchdataset(args.data_name,args.data_type, args.data_path, args.img_height, args.img_width, args.batchsize)
numclasses =len(class_names)
model_ft = createTorchCNNmodel(args.model_name, numclasses, img_shape)
modelpath=os.path.join(args.save_path, 'model_best.pt')
print("modelpath ", modelpath)
model_ft.load_state_dict(torch.load(modelpath))
model_ft = model_ft.to(device)
criterion = nn.CrossEntropyLoss()
test_model(model_ft, dataloaders, class_names, criterion, args.batchsize)
if 'test' in dataloaders.keys():
test_loader=dataloaders['test']
# obtain one batch of test images
dataiter = iter(test_loader)
        images, labels = next(dataiter)
images.numpy()
images = images.to(device)
# get sample outputs
outputs = model_ft(images)#torch.Size([32, 10])
if type(outputs) is tuple: #model may output multiple tensors as tuple
outputs, _ = outputs
# convert output probabilities to predicted class
_, preds_tensor = torch.max(outputs, 1) #https://pytorch.org/docs/stable/generated/torch.max.html, dim=1, [32,10]->[32]
on_gpu = torch.cuda.is_available()
preds = np.squeeze(preds_tensor.numpy()) if not on_gpu else np.squeeze(preds_tensor.cpu().numpy()) #to numpy array list
#preds = np.squeeze(preds_tensor.cpu().numpy())
vistestresult(images, labels, preds, class_names, args.save_path)
#Start accuracy evaluation
test_loss, test_acc = evaluate(model_ft, dataloaders['test'], criterion, device)
print(f'Test Loss: {test_loss:.3f} | Test Acc: {test_acc*100:.2f}%')
images, labels, probs = get_predictions(model_ft, dataloaders['test'], device)
pred_labels = torch.argmax(probs, 1)
plot_confusion_matrix(labels, pred_labels)
corrects = torch.eq(labels, pred_labels)
#get all of the incorrect examples and sort them by descending confidence in their prediction
incorrect_examples = []
for image, label, prob, correct in zip(images, labels, probs, corrects):
if not correct:
incorrect_examples.append((image, label, prob))
incorrect_examples.sort(reverse = True, key = lambda x: torch.max(x[2], dim = 0).values)
N_IMAGES = 25
# plot_most_incorrect(incorrect_examples, N_IMAGES)
#plot the examples the model got wrong and was most confident about.
def plot_most_incorrect(incorrect, n_images):
rows = int(np.sqrt(n_images))
    cols = int(np.sqrt(n_images))
import numpy as np
import pandas as pd
from humidity_variability.utils import add_date_columns, jitter, add_GMT
from helpful_utilities.meteo import F_to_C
from helpful_utilities.general import lowpass_butter
def fit_seasonal_cycle(doy, data, nbases=5):
"""Fit seasonal cycle of daily data with specified number of Fourier bases.
Parameters
----------
doy : numpy.ndarray
Day of year for each data value
data : numpy.ndarray
Data values for seasonal fit
nbases : int
Number of Fourier bases to use. Default is 5.
Returns
-------
rec : numpy.ndarray
Reconstructed seasonal structure, of same length as data
residual : numpy.ndarray
The residual from the seasonal fit.
rec_ann : numpy.ndarray
The 365-day version of the seasonal cycle
"""
mu = np.mean(data)
data -= mu
t_basis = (doy - 0.5)/365
ann_basis = (np.arange(1, 366) - 0.5)/365
nt = len(t_basis)
bases = np.empty((nbases, nt), dtype=complex)
bases_ann = np.empty((nbases, 365), dtype=complex)
for counter in range(nbases):
bases[counter, :] = np.exp(2*(counter + 1)*np.pi*1j*t_basis)
bases_ann[counter, :] = np.exp(2*(counter + 1)*np.pi*1j*ann_basis)
coeff = 2/nt*(np.dot(bases, data))
rec = np.real(np.dot(np.conj(coeff), bases))
residual = data - rec
rec_ann = np.real(np.dot(np.conj(coeff), bases_ann)) + mu # add mean value back into climatology
return rec, residual, rec_ann
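# Small self-contained sketch (illustrative only) of the Fourier-based fit above.
def _demo_fit_seasonal_cycle():
    """Fit a synthetic annual cycle and return the residual.
    Note that fit_seasonal_cycle demeans its `data` argument in place, so a copy
    is passed here; the synthetic series below is arbitrary."""
    doy = np.tile(np.arange(1, 366), 3)
    data = 10. + 8.*np.cos(2.*np.pi*(doy - 200)/365.) + 0.5*np.random.randn(doy.size)
    rec, residual, rec_ann = fit_seasonal_cycle(doy, data.copy(), nbases=3)
    return residual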
def fit_seasonal_cycle_lowpass(doy, data, cut_freq=1/30):
"""Estimate the seasonal cycle by using a lowpass filter on the empirical seasonal cycle.
Parameters
----------
doy : numpy.ndarray
Day of year for each data value
data : numpy.ndarray
Data values for seasonal fit
cut_freq : float
Cutoff frequency (in 1/days) for the lowpass filter
Returns
-------
rec : numpy.ndarray
Reconstructed seasonal structure, of same length as data
residual : numpy.ndarray
The residual from the seasonal fit.
rec_ann : numpy.ndarray
The 365-day version of the seasonal cycle
"""
tmp_df = pd.DataFrame({'doy': doy, 'data': data})
empirical_sc = tmp_df.groupby('doy').mean()
ann_doy = empirical_sc.index
smooth_sc = lowpass_butter(1, cut_freq, 3, empirical_sc.values.flatten())
residual = np.empty_like(data)
for counter, this_doy in enumerate(ann_doy):
match_idx = doy == this_doy
smooth_sc_val = smooth_sc[counter]
residual[match_idx] = data[match_idx] - smooth_sc_val
rec = data - residual
rec_ann = smooth_sc
return rec, residual, rec_ann
def calculate_amplification_index2(df, meta, T0, half_width, grouping, fit_data, qs, this_q=0.05):
"""Calculate the fraction of hot days that are dry within a grouping of stations or gridboxes.
This version accounts for the temperature dependence of q5
Parameters
----------
df : pandas.DataFrame
Contains the temperature and humidity anomaly data for all stations/gridboxes and times of interest.
meta : pandas.DataFrame
Contains (at least) the weights for each station/gridbox, as well as the station_id
T0 : float
The middle percentile to define hot days
half_width : float
The half-width around T0 to consider a hot day
grouping : string
Can be 'month' or 'year': how are hot and hot/dry days grouped?
fit_data : string
Full path to npz file containing the relevant parameters from the quantile smoothing spline fit
qs : numpy.ndarray
The quantiles fit by the QSS model
this_q : float
The threshold below which to consider a day "dry"
Returns
-------
amplification : numpy.ndarray
Time series of the amplification index
"""
# names of temperature and humidity variables
humidity_var = 'Q'
temp_var = 'TMP'
# For each station or gridbox, map temperatures to percentiles
# Load fitted QR model
df_fit = np.load(fit_data)
q_quantiles = df_fit['s0_H'][:, qs == this_q, :].squeeze()
temperature_percentiles = df_fit['temperature_percentiles']
lat_vec = np.round(df_fit['lats'], decimals=3)
lon_vec = np.round(df_fit['lons'], decimals=3)
# Calculate percentiles at each station of temperature
df.loc[:, '%s_perc' % temp_var] = df.groupby('station_id')['%s_anom' % temp_var].rank(pct=True)
# Get humidity threshold for each temperature at each station
df_updated = []
for this_station in np.unique(df['station_id']):
tmp_df = df.loc[df['station_id'] == this_station].reset_index()
this_lat = np.round(tmp_df['lat'][0], decimals=3)
this_lon = np.round(tmp_df['lon'][0], decimals=3)
match_idx = (lat_vec == this_lat) & (lon_vec == this_lon)
this_quantile = q_quantiles[:, match_idx].squeeze()
# Interpolate
this_quantile_interp = np.interp(tmp_df['%s_perc' % temp_var],
temperature_percentiles/100,
this_quantile)
tmp_df.loc[:, '%s_cut' % humidity_var] = this_quantile_interp
df_updated.append(tmp_df)
df = pd.concat(df_updated).reset_index()
del df_updated
# Drop spare index columns
df = df.drop(df.columns[:2], axis='columns')
# Assign each day a binary index for whether it is hot, and whether it is hot and dry
df = df.assign(is_hot=np.nan*np.ones(len(df)))
df = df.assign(is_hot_dry=np.nan*np.ones(len(df)))
for station in np.unique(df['station_id']):
tmp_df = df.loc[df['station_id'] == station]
is_hot = ((tmp_df['%s_perc' % temp_var] > (T0 - half_width)/100) &
(tmp_df['%s_perc' % temp_var] < (T0 + half_width)/100))
is_hot_dry = (is_hot &
(tmp_df['%s_anom' % humidity_var] < tmp_df['%s_cut' % humidity_var])).astype(float)
is_hot = is_hot.astype(float)
df.loc[df['station_id'] == station, 'is_hot_dry'] = is_hot_dry
df.loc[df['station_id'] == station, 'is_hot'] = is_hot
weights = meta.set_index('station_id')
if grouping == 'month':
groupby_names = ['year', 'month', 'station_id']
elif grouping == 'year':
groupby_names = ['year', 'station_id']
hot_dry_weighted = df.groupby(groupby_names)['is_hot_dry'].sum()*weights['area_weights']
hot_weighted = df.groupby(groupby_names)['is_hot'].sum()*weights['area_weights']
# Since the amplification index is the ratio of hot, dry to hot, we don't need to normalize the weights
# The same number of stations are present in each month/year combo for both metrics, so will cancel out
# But note that any analysis of the hot, dry or hot time series alone has not been normalized
# appropriately
amplification = (hot_dry_weighted.groupby(groupby_names[:-1]).sum() /
hot_weighted.groupby(groupby_names[:-1]).sum())
return amplification
def preprocess_data(this_id, datadir, start_year, end_year, start_month, end_month, offset, spread):
"""
Preprocess GSOD data, getting rid of bad data, and removing stations with insufficient data.
Also subsets data to desired (start_year, end_year) and (start_month, end_month)
TODO: Move hard coded things to args.
Parameters
---------
this_id : str
GSOD station id
datadir : str
Location of GSOD csv files
start_year : int
First year of analysis. Will subset to this range.
end_year : int
Last year of analysis. Will subset to this range.
start_month : int
First month of analysis. Will subset to this range.
end_month : int
Last month of analysis. Will subset to this range.
offset : float
For jittering, should data be offset?
spread : float
For jittering, what is the uncertainty in the data?
Returns
-------
0 if insufficient data
otherwise
df : pandas.dataframe
Dataframe containing subset data for station
"""
f = '%s/%s.csv' % (datadir, this_id)
df = pd.read_csv(f)
# Perform data QC
# Drop missing data
df = df[~np.isnan(df['dewp'])]
df = df[~np.isnan(df['temp'])]
# Drop places where less than four obs were used for average
df = df[~((df['temp_c'] < 4) | (df['dewp_c'] < 4))]
# Drop places where dew point exceeds temperature
# Not strictly correct because both are daily averages, but unlikely to happen in valid data
df = df[df['temp'] >= df['dewp']]
# Add additional date columns
df = add_date_columns(df)
df_start_year = np.min(df['year'].values)
if df_start_year > start_year:
return 0
# add GMT anoms
df = add_GMT(df)
# Drop Feb 29, and rework day of year counters
leaps = np.arange(1904, 2020, 4) # leap years
for ll in leaps:
old_doy = df.loc[(df['year'] == ll) & (df['month'] > 2), 'doy'].values
df.loc[(df['year'] == ll) & (df['month'] > 2), 'doy'] = old_doy - 1
df = df[~((df['month'] == 2) & (df['doy'] == 60))]
# Add jitter
df = df.assign(temp=jitter(df['temp'], offset, spread))
df = df.assign(dewp=jitter(df['dewp'], offset, spread))
# convert to C
df = df.assign(dewp=F_to_C(df['dewp']))
df = df.assign(temp=F_to_C(df['temp']))
# Fit seasonal cycle with first three harmonics and remove
_, residual_T, _ = fit_seasonal_cycle(df['doy'], df['temp'], nbases=3)
# Dew point seasonal cycle requires 10 harmonics because rapid uptick in monsoon regions
_, residual_DP, _ = fit_seasonal_cycle(df['doy'], df['dewp'], nbases=10)
df = df.assign(dewp_anom=residual_DP)
df = df.assign(temp_anom=residual_T)
del residual_T, residual_DP
# Pull out JJAS
df = df.loc[(df['month'] >= start_month) & (df['month'] <= end_month)]
# Pull out correct year span
df = df[(df['year'] >= start_year) & (df['year'] <= end_year)]
# Check if sufficient data
yrs = np.arange(start_year, end_year + 1) # inclusive
frac_avail = np.zeros((len(yrs)))
for ct, yy in enumerate(yrs):
count = len(df[(df['year'] == yy)])
frac_avail[ct] = count/122 # 122 days in JJAS!
frac_with_80 = np.sum(frac_avail > 0.8)/len(frac_avail)
# Conditions to include station:
# (1) Overall, must have at least 80% of coverage over at least 80% of years
# (2) Must have data in first three and last three years of record
# (3) Can't have more than one missing year in a row
data_sufficient = ((np.mean(frac_avail[:3]) > 0) &
(np.mean(frac_avail[-3:]) > 0) &
(frac_with_80 > 0.8))
    no_data = np.where(frac_avail[:-1] == 0)
# Copyright 2019-2021 ETH Zurich and the DaCe authors. All rights reserved.
""" Tests lambda functions. """
import dace
import numpy as np
import pytest
def test_inline_lambda_tasklet():
@dace.program
def lamb(A: dace.float64[20], B: dace.float64[20], C: dace.float64[20]):
for i in dace.map[0:20]:
with dace.tasklet:
a >> A[i]
b << B[i]
c << C[i]
f = lambda a, b: a + b
a = f(b, c)
A = np.random.rand(20)
B = np.random.rand(20)
    C = np.random.rand(20)
"""
This module contains some auxilary functions
"""
import os
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision
import csv
import copy
import json
import random
import logging
from pathlib import Path
import pickle
from datetime import datetime
import numpy as np
from misc.tb_logger import Logger
from utils import img_utils
from misc.metrics import (
batch_dice,
batch_jaccard,
calculate_confusion_matrix_from_arrays,
calculate_dices,
calculate_jaccards,
evalExp,
pxEval_maximizeFMeasure
)
def count_params(model: nn.Module) -> (int, int):
"""
Calculates the total and trainable parameters in model.
"""
total = sum(p.numel() for p in model.parameters())
trainable = sum(p.numel() for p in model.parameters() if p.requires_grad)
return (total, trainable)
def to_gpu(x: torch.Tensor):
return x.cuda(non_blocking=True) if torch.cuda.is_available() else x
def save_model(model_path: str, model, best_jaccard, best_dice, best_uu_metrics, best_um_metrics, best_umm_metrics,
epoch):
torch.save({"best_jaccard": best_jaccard, "best_dice": best_dice, "best_uu_metrics": best_uu_metrics,
"best_um_metrics": best_um_metrics, "best_umm_metrics": best_umm_metrics, "epoch": epoch,
"model": model}, model_path)
def save_model_a2d2(model_path: str, model, best_jaccard, best_dice,epoch):
torch.save({"best_jaccard": best_jaccard, "best_dice": best_dice, "epoch": epoch,
"model": model}, model_path)
def make_info_string(sep=',', **kwargs):
"""
Construct an information string in the following view: key1: value1[sep]key2: value2[sep][(keyN: valueN)]
params:
sep : a separator between instances. Possible values: ',', '\n'
**kwargs : params
"""
if sep not in [',', '\n']:
ValueError("Wrong separator: {}. 'sep' must be: ',' or '\n'".format(sep))
info_str = ""
for key, value in kwargs.items():
info_str += "{0}: {1}{2} ".format(key, value, sep)
info_str = info_str[:-2] if info_str[-2] == ',' else info_str[:-1]
return info_str
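# Example: make_info_string(sep=',', epoch=3, train_loss=0.123)
# returns "epoch: 3, train_loss: 0.123" (the trailing separator is stripped).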
def save_runparams(params: dict, file_path: str):
"""
Perform saving run parameters into file.
"""
with open(str(file_path), "w") as file:
file.write(json.dumps(params, indent=True, sort_keys=True))
def write2csv(data, file_name, type_of_header):
"""
Perform writing info to a csv file.
params:
data : list of data
file_name : name of file
type_of_header :
"""
if type_of_header == "maxf":
header = "epoch,uu_MaxF,um_MaxF,umm_MaxF,mMaxf".split(",")
elif type_of_header == "avgprec":
header = "epoch,uu_AvgPrec,um_AvgPrec,umm_AvgPrec,mAvgPrec".split(",")
elif type_of_header == "prec":
header = "epoch,uu_PRE,um_PRE,umm_PRE,mPRE".split(",")
elif type_of_header == "rec":
header = "epoch,uu_REC,um_REC,umm_REC,mREC".split(",")
elif type_of_header == "loss":
header = "epoch,train_loss,valid_loss".split(",")
elif type_of_header == "jd":
header = "epoch,Jaccard,DICE".split(",")
else:
raise ValueError("Unknown type of header: {}".format(type_of_header))
data = [header] + data
with open(file_name, "w", newline='') as csv_file:
writer = csv.writer(csv_file, delimiter=',')
for line in data:
writer.writerow(line)
def train_routine(
args,
console_logger: logging.Logger,
root: str,
model: nn.Module,
criterion,
optimizer,
scheduler,
train_loader,
valid_loader,
fm_eval_dataset,
validation,
fold,
num_classes=1,
n_epochs=100,
status_every=5):
"""
General trainig routine.
params:
args : argument parser parameters for saving it
console_logger : logger object for logging
root : root dir where stores trained models
model : model for training
criterion : loss function
optimizer : SGD, Adam or other
scheduler : learning rate scheduler
train_loader :
valid_loader :
fm_eval_dataset : dataset for F-max evaluation
validation : validation routine
fold : number of fold
num_classes : number of classes
n_epochs : number of training epochs
status_every : the parameter which controls the frequency of status printing
"""
#Load model if it exists
root = Path(root)
root.mkdir(exist_ok=True, parents=True)
model_root = root / args.model_type / 'model{}'.format(fold)
model_root.mkdir(exist_ok=True, parents=True)
#CSV
csvs_path = model_root / 'csv'
csvs_path.mkdir(exist_ok=True, parents=True)
model_path = model_root / 'model.pt'
logging_path = model_root / 'train.log'
#run params saving
save_runparams(vars(args), file_path=(model_root / 'rparams.txt'))
#file logger definition
file_logger = logging.getLogger("file-logger")
file_logger.setLevel(logging.INFO)
fh = logging.FileHandler(str(logging_path), mode='w')
formatter = logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s")
fh.setFormatter(formatter)
file_logger.addHandler(fh)
#Logging to the TensorBoardX
tbx_logger = Logger(log_dir=str(model_root / "tbxlogs"))
if model_path.exists():
state = torch.load(str(model_path))
epoch = state["epoch"]
best_jaccard = state["best_jaccard"]
best_dice = state["best_dice"]
best_uu_metrics = state["best_uu_metrics"]
best_um_metrics = state["best_um_metrics"]
best_umm_metrics = state["best_umm_metrics"]
model.load_state_dict(state["model"])
console_logger.info("\nModel '{0}' was restored. Best Jaccard: {1}, Best DICE: {2}, Epoch: {3}".format(str(model_path), best_jaccard, best_dice, epoch))
else:
epoch = 1
best_jaccard = 0
best_dice = 0
best_uu_metrics = {"MaxF": 0, "AvgPrec": 0, "PRE": 0, "REC": 0}
best_um_metrics = {"MaxF": 0, "AvgPrec": 0, "PRE": 0, "REC": 0}
best_umm_metrics = {"MaxF": 0, "AvgPrec": 0, "PRE": 0, "REC": 0}
n_epochs = n_epochs + epoch
best_model = copy.deepcopy(model.state_dict())
train_losses = []
valid_losses = []
jaccards = []
dices = []
#CSV data for logging
maxf_csv_data = []
avgprec_csv_data = []
prec_csv_data = []
rec_csv_data = []
loss_csv_data = []
jacc_dice_csv_data = []
for epoch in range(epoch, n_epochs):
epoch_train_losses = []
#Train mode
model.train()
#scheduler step
scheduler.step()
try:
for i, (inputs, targets) in enumerate(train_loader):
inputs = to_gpu(inputs)
targets = to_gpu(targets)
optimizer.zero_grad()
with torch.set_grad_enabled(True):
outputs = model(inputs)
loss = criterion(targets, outputs)
loss.backward()
optimizer.step()
epoch_train_losses.append(loss.item())
#Train loss per epoch
epoch_train_loss = np.mean(epoch_train_losses).astype(dtype=np.float64)
#Validation
valid_dict = validation(model, criterion, valid_loader)
uu_metrics, um_metrics, umm_metrics = fmeasure_evaluation([model], fm_eval_dataset)
train_losses.append(epoch_train_loss)
valid_losses.append(valid_dict["val_loss"])
jaccards.append(valid_dict["val_jacc"])
dices.append(valid_dict["val_dice"])
if valid_dict["val_jacc"] > best_jaccard:
best_jaccard = valid_dict["val_jacc"]
best_dice = valid_dict["val_dice"]
best_uu_metrics = {"MaxF": uu_metrics["MaxF"], "AvgPrec": uu_metrics["AvgPrec"], "PRE": uu_metrics["PRE_wp"][0], "REC": uu_metrics["REC_wp"][0]}
best_um_metrics = {"MaxF": um_metrics["MaxF"], "AvgPrec": um_metrics["AvgPrec"], "PRE": um_metrics["PRE_wp"][0], "REC": um_metrics["REC_wp"][0]}
best_umm_metrics = {"MaxF": umm_metrics["MaxF"], "AvgPrec": umm_metrics["AvgPrec"], "PRE": umm_metrics["PRE_wp"][0], "REC": umm_metrics["REC_wp"][0]}
best_model = copy.deepcopy(model.state_dict())
if epoch and (epoch % status_every == 0):
info_str = "\nEpoch: {}, LR: {}\n".format(epoch, scheduler.get_lr())
info_str += "-"*30
info_str += "\nTrain loss: {0}".format(epoch_train_loss)
info_str += "\nValid loss: {0}".format(valid_dict["val_loss"])
info_str += "\nValid Jaccard: {0}".format(valid_dict["val_jacc"])
info_str += "\nValid DICE: {0}\n".format(valid_dict["val_dice"])
#MaxF, PRE, REC, AvgPrec printing
info_str += "\nUU_MaxF: {0}".format(uu_metrics["MaxF"])
info_str += "\nUU_AvgPrec: {0}".format(uu_metrics["AvgPrec"])
info_str += "\nUU_PRE: {0}".format(uu_metrics["PRE_wp"][0])
info_str += "\nUU_REC: {0}\n".format(uu_metrics["REC_wp"][0])
info_str += "\nUM_MaxF: {0}".format(um_metrics["MaxF"])
info_str += "\nUM_AvgPrec: {0}".format(um_metrics["AvgPrec"])
info_str += "\nUM_PRE: {0}".format(um_metrics["PRE_wp"][0])
info_str += "\nUM_REC: {0}\n".format(um_metrics["REC_wp"][0])
info_str += "\nUMM_MaxF: {0}".format(umm_metrics["MaxF"])
info_str += "\nUMM_AvgPrec: {0}".format(umm_metrics["AvgPrec"])
info_str += "\nUMM_PRE: {0}".format(umm_metrics["PRE_wp"][0])
info_str += "\nUMM_REC: {0}\n".format(umm_metrics["REC_wp"][0])
info_str += "-"*30
info_str += "\n"
console_logger.info(info_str)
#Log to file
info_str = "\nepoch: {}, ".format(epoch)
info_str += "train_loss: {}, ".format(epoch_train_loss)
info_str += "val_loss: {}, ".format(valid_dict["val_loss"])
info_str += "val_jaccard: {}, ".format(valid_dict["val_jacc"])
info_str += "val_dice: {}\n".format(valid_dict["val_dice"])
file_logger.info(info_str)
#Log to the tbX
tbx_logger.log_scalars(tag="losses", values={"train_loss": epoch_train_loss, "valid_loss": valid_dict["val_loss"]}, step=epoch)
tbx_logger.log_scalars(tag="metrics", values={"jaccard": valid_dict["val_jacc"], "DICE": valid_dict["val_dice"]}, step=epoch)
#MaxF
tbx_logger.log_scalars(tag="MaxF", values={"uu_maxF": uu_metrics["MaxF"], "um_maxF": um_metrics["MaxF"], "umm_maxF": umm_metrics["MaxF"], "mmaxF": (uu_metrics["MaxF"] + um_metrics["MaxF"] + umm_metrics["MaxF"])/3}, step=epoch)
#AvgPrec
tbx_logger.log_scalars(tag="AvgPrec", values={"uu_AvgPrec": uu_metrics["AvgPrec"], "um_AvgPrec": um_metrics["AvgPrec"], "umm_AvgPrec": umm_metrics["AvgPrec"], "mAvgPrec": (uu_metrics["AvgPrec"] + um_metrics["AvgPrec"] + umm_metrics["AvgPrec"])/3}, step=epoch)
#PRE
tbx_logger.log_scalars(tag="PRE", values={"uu_PRE": uu_metrics["PRE_wp"][0], "um_PRE": um_metrics["PRE_wp"][0], "umm_PRE": umm_metrics["PRE_wp"][0], "mPRE": (uu_metrics["PRE_wp"][0] + um_metrics["PRE_wp"][0] + umm_metrics["PRE_wp"][0])/3}, step=epoch)
#REC
tbx_logger.log_scalars(tag="REC", values={"uu_REC": uu_metrics["REC_wp"][0], "um_REC": um_metrics["REC_wp"][0], "umm_REC": umm_metrics["REC_wp"][0], "mREC": (uu_metrics["REC_wp"][0] + um_metrics["REC_wp"][0] + umm_metrics["REC_wp"][0])/3}, step=epoch)
#Log to csv
maxf_csv_data.append("{},{},{},{},{}".format(epoch, uu_metrics["MaxF"], um_metrics["MaxF"], umm_metrics["MaxF"], (uu_metrics["MaxF"] + um_metrics["MaxF"] + umm_metrics["MaxF"])/3).split(","))
avgprec_csv_data.append("{},{},{},{},{}".format(epoch, uu_metrics["AvgPrec"], um_metrics["AvgPrec"], umm_metrics["AvgPrec"], (uu_metrics["AvgPrec"] + um_metrics["AvgPrec"] + umm_metrics["AvgPrec"])/3).split(","))
prec_csv_data.append("{},{},{},{},{}".format(epoch, uu_metrics["PRE_wp"][0], um_metrics["PRE_wp"][0], umm_metrics["PRE_wp"][0], (uu_metrics["PRE_wp"][0] + um_metrics["PRE_wp"][0] + umm_metrics["PRE_wp"][0])/3).split(","))
rec_csv_data.append("{},{},{},{},{}".format(epoch, uu_metrics["REC_wp"][0], um_metrics["REC_wp"][0], umm_metrics["REC_wp"][0], (uu_metrics["REC_wp"][0] + um_metrics["REC_wp"][0] + umm_metrics["REC_wp"][0])/3).split(","))
loss_csv_data.append("{},{},{}".format(epoch, epoch_train_loss, valid_dict["val_loss"]).split(","))
jacc_dice_csv_data.append("{},{},{}".format(epoch, valid_dict["val_jacc"], valid_dict["val_dice"]).split(","))
except KeyboardInterrupt:
console_logger.info("KeyboardInterrupt, saving snapshot.")
save_model(str(model_path), best_model, best_jaccard, best_dice, best_uu_metrics, best_um_metrics, best_umm_metrics, epoch)
console_logger.info("Done!")
info_str = "\nTraining process is done!\n" + "*"*30
info_str += "\nTrain loss: {0}".format(np.mean(train_losses).astype(dtype=np.float64))
info_str += "\nValid loss: {0}".format(np.mean(valid_losses).astype(dtype=np.float64))
info_str += "\nMean Jaccard: {0}".format(np.mean(jaccards).astype(dtype=np.float64))
info_str += "\nMean DICE: {0}".format(np.mean(dices).astype(dtype=np.float64))
info_str += "\nBest Jaccard: {0}".format(best_jaccard)
info_str += "\nBest DICE: {0}".format(best_dice)
info_str += "\nBest UU_Metrics: {0}".format(best_uu_metrics)
info_str += "\nBest UM_Metrics: {0}".format(best_um_metrics)
info_str += "\nBest UMM_Metrics: {0}".format(best_umm_metrics)
info_str += "\nMean MaxF: {0}\n".format((best_uu_metrics["MaxF"] + best_um_metrics["MaxF"] + best_umm_metrics["MaxF"])/3)
info_str += "*"*30
console_logger.info(info_str)
file_logger.info(info_str)
#model saving
save_model(str(model_path), best_model, best_jaccard, best_dice, best_uu_metrics, best_um_metrics, best_umm_metrics, n_epochs)
#Save to CSV
write2csv(data=maxf_csv_data, file_name=str(csvs_path / "maxf.csv"), type_of_header="maxf")
write2csv(data=avgprec_csv_data, file_name=str(csvs_path / "avgprec.csv"), type_of_header="avgprec")
write2csv(data=prec_csv_data, file_name=str(csvs_path / "prec.csv"), type_of_header="prec")
write2csv(data=rec_csv_data, file_name=str(csvs_path / "rec.csv"), type_of_header="rec")
write2csv(data=loss_csv_data, file_name=str(csvs_path / "loss.csv"), type_of_header="loss")
write2csv(data=jacc_dice_csv_data, file_name=str(csvs_path / "jd.csv"), type_of_header="jd")
def train_routine_a2d2(
args,
console_logger: logging.Logger,
root: str,
model: nn.Module,
criterion,
optimizer,
scheduler,
train_loader,
valid_loader,
fm_eval_dataset,
validation,
fold,
num_classes=1,
n_epochs=100,
status_every=5):
"""
General training routine.
params:
args : parsed command-line arguments (saved as the run parameters)
console_logger : logger object for console logging
root : root dir where trained models are stored
model : model for training
criterion : loss function
optimizer : SGD, Adam or other
scheduler : learning rate scheduler
train_loader :
valid_loader :
fm_eval_dataset : dataset for F-max evaluation
validation : validation routine
fold : number of fold
num_classes : number of classes
n_epochs : number of training epochs
status_every : how often (in epochs) the training status is printed
"""
# Load model if it exists
root = Path(root)
root.mkdir(exist_ok=True, parents=True)
model_root = root / args.model_type / 'model{}'.format(fold)
model_root.mkdir(exist_ok=True, parents=True)
# CSV
csvs_path = model_root / 'csv'
csvs_path.mkdir(exist_ok=True, parents=True)
model_path = model_root / 'model.pt'
logging_path = model_root / 'train.log'
# run params saving
save_runparams(vars(args), file_path=(model_root / 'rparams.txt'))
# file logger definition
file_logger = logging.getLogger("file-logger")
file_logger.setLevel(logging.INFO)
fh = logging.FileHandler(str(logging_path), mode='w')
formatter = logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s")
fh.setFormatter(formatter)
file_logger.addHandler(fh)
# Logging to the TensorBoardX
tbx_logger = Logger(log_dir=str(model_root / "tbxlogs"))
if model_path.exists():
state = torch.load(str(model_path))
epoch = state["epoch"]
best_jaccard = state["best_jaccard"]
best_dice = state["best_dice"]
# best_metrics = state["best_metrics"]
model.load_state_dict(state["model"])
console_logger.info(
"\nModel '{0}' was restored. Best Jaccard: {1}, Best DICE: {2}, Epoch: {3}".format(str(model_path),
best_jaccard, best_dice,
epoch))
else:
epoch = 1
best_jaccard = 0
best_dice = 0
# best_metrics = {"MaxF": 0, "AvgPrec": 0, "PRE": 0, "REC": 0}
n_epochs = n_epochs + epoch
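# When resuming from a checkpoint the epoch counter is shifted, so the loop below
# always runs for n_epochs additional epochs starting from the restored epoch.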
best_model = copy.deepcopy(model.state_dict())
train_losses = []
valid_losses = []
jaccards = []
dices = []
# CSV data for logging
loss_csv_data = []
jacc_dice_csv_data = []
# maxf_csv_data = []
# avgprec_csv_data = []
# prec_csv_data = []
# rec_csv_data = []
for epoch in range(epoch, n_epochs):
epoch_train_losses = []
# Train mode
model.train()
# scheduler step
scheduler.step()
try:
optimizer.zero_grad()
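# Gradient accumulation: the loss is scaled down by args.batch_factor and
# optimizer.step() is only invoked every args.batch_factor iterations, which
# roughly emulates a batch size of batch_size * batch_factor on limited GPU memory.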
for i, (inputs1, inputs2, targets) in enumerate(train_loader):
inputs1 = to_gpu(inputs1)
inputs2 = to_gpu(inputs2)
targets = to_gpu(targets)
with torch.set_grad_enabled(True):
outputs = model(inputs1, inputs2)
loss = criterion(targets, outputs)
loss = loss/args.batch_factor
loss.backward()
if i % args.batch_factor == 0:
optimizer.step()
optimizer.zero_grad()
epoch_train_losses.append((loss*args.batch_factor).item())
# Train loss per epoch
epoch_train_loss = np.mean(epoch_train_losses).astype(dtype=np.float64)
# Validation
valid_dict, _, _ = validation(model, criterion, valid_loader, tbx_logger, epoch)
# metrics = fmeasure_evaluation([model], fm_eval_dataset)
train_losses.append(epoch_train_loss)
valid_losses.append(valid_dict["val_loss"])
jaccards.append(valid_dict["val_jacc"])
dices.append(valid_dict["val_dice"])
if valid_dict["val_jacc"] > best_jaccard:
best_jaccard = valid_dict["val_jacc"]
best_dice = valid_dict["val_dice"]
# best_metrics = {"MaxF": metrics["MaxF"], "AvgPrec": metrics["AvgPrec"],
# "PRE": metrics["PRE_wp"][0], "REC": metrics["REC_wp"][0]}
best_model = copy.deepcopy(model.state_dict())
if epoch and (epoch % status_every == 0):
info_str = "\nEpoch: {}, LR: {}\n".format(epoch, scheduler.get_lr())
info_str += "-" * 30
info_str += "\nTrain loss: {0}".format(epoch_train_loss)
info_str += "\nValid loss: {0}".format(valid_dict["val_loss"])
info_str += "\nValid Jaccard: {0}".format(valid_dict["val_jacc"])
info_str += "\nValid DICE: {0}\n".format(valid_dict["val_dice"])
# # MaxF, PRE, REC, AvgPrec printing
# info_str += "\nUU_MaxF: {0}".format(metrics["MaxF"])
# info_str += "\nUU_AvgPrec: {0}".format(metrics["AvgPrec"])
# info_str += "\nUU_PRE: {0}".format(metrics["PRE_wp"][0])
# info_str += "\nUU_REC: {0}\n".format(metrics["REC_wp"][0])
info_str += "-" * 30
info_str += "\n"
console_logger.info(info_str)
# Log to file
info_str = "\nepoch: {}, ".format(epoch)
info_str += "train_loss: {}, ".format(epoch_train_loss)
info_str += "val_loss: {}, ".format(valid_dict["val_loss"])
info_str += "val_jaccard: {}, ".format(valid_dict["val_jacc"])
info_str += "val_dice: {}\n".format(valid_dict["val_dice"])
file_logger.info(info_str)
# Log to the tbX
tbx_logger.log_scalars(tag="losses",
values={"train_loss": epoch_train_loss, "valid_loss": valid_dict["val_loss"]},
step=epoch)
tbx_logger.log_scalars(tag="metrics",
values={"jaccard": valid_dict["val_jacc"], "DICE": valid_dict["val_dice"]},
step=epoch)
console_logger.info("End of Epoch: Saving snapshot.")
save_model_a2d2(str(model_path), best_model, best_jaccard, best_dice, epoch)
except KeyboardInterrupt:
console_logger.info("KeyboardInterrupt, saving snapshot.")
#save_model(str(model_path), best_model, best_jaccard, best_dice, best_uu_metrics, best_um_metrics, best_umm_metrics, epoch)
save_model_a2d2(str(model_path), best_model, best_jaccard, best_dice, epoch)
console_logger.info("Done!")
info_str = "\nTraining process is done!\n" + "*" * 30
info_str += "\nTrain loss: {0}".format(np.mean(train_losses).astype(dtype=np.float64))
info_str += "\nValid loss: {0}".format(np.mean(valid_losses).astype(dtype=np.float64))
info_str += "\nMean Jaccard: {0}".format(np.mean(jaccards).astype(dtype=np.float64))
info_str += "\nMean DICE: {0}".format(np.mean(dices).astype(dtype=np.float64))
info_str += "\nBest Jaccard: {0}".format(best_jaccard)
info_str += "\nBest DICE: {0}\n".format(best_dice)
# info_str += "\nBest Metrics: {0}".format(best_metrics)
info_str += "*" * 30
console_logger.info(info_str)
file_logger.info(info_str)
# model saving
# save_model(str(model_path), best_model, best_jaccard, best_dice, best_uu_metrics, best_um_metrics, best_umm_metrics, n_epochs)
save_model_a2d2(str(model_path), best_model, best_jaccard, best_dice, n_epochs)
# Save to CSV
# write2csv(data=maxf_csv_data, file_name=str(csvs_path / "maxf.csv"), type_of_header="maxf")
# write2csv(data=avgprec_csv_data, file_name=str(csvs_path / "avgprec.csv"), type_of_header="avgprec")
# write2csv(data=prec_csv_data, file_name=str(csvs_path / "prec.csv"), type_of_header="prec")
# write2csv(data=rec_csv_data, file_name=str(csvs_path / "rec.csv"), type_of_header="rec")
write2csv(data=loss_csv_data, file_name=str(csvs_path / "loss.csv"), type_of_header="loss")
write2csv(data=jacc_dice_csv_data, file_name=str(csvs_path / "jd.csv"), type_of_header="jd")
def binary_validation_routine_a2d2(model: nn.Module, criterion, valid_loader, tbx_logger=None, epoch: int=0, \
save_image: bool=False):
"""
Given a criterion, a model and a validation loader, computes the Jaccard and DICE metrics together with the validation loss for the binary segmentation problem.
"""
with torch.set_grad_enabled(False):
valid_losses = []
jaccards = []
dices = []
target_images = []
output_images = []
model.eval()
for idx, (inputs1, inputs2, targets) in enumerate(valid_loader):
inputs1 = to_gpu(inputs1)
inputs2 = to_gpu(inputs2)
targets = to_gpu(targets)
outputs = model(inputs1, inputs2)
loss = criterion(targets, outputs)
valid_losses.append(loss.item())
jaccards += batch_jaccard(targets, (outputs > 0).float())
dices += batch_dice(targets, (outputs > 0).float())
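# The model outputs are raw logits here, so thresholding them at 0 is equivalent
# to thresholding the sigmoid probability at 0.5.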
if save_image:
# Fix tensors to be images
outputs = img_utils.getMaskFromTensor(outputs)
inputs1 = img_utils.UnNormalize_tensor(inputs1, mean=[0.63263481, 0.63265741, 0.62899464], std=[0.25661512, 0.25698695, 0.2594808])
# Log Images
zeros = to_gpu(torch.zeros(targets.size()))
mask_target = torch.cat((zeros, targets, zeros), 1)
mask_output = torch.cat((zeros, outputs, zeros), 1)
img_target = torch.add(inputs1 * 0.70, mask_target * 0.30)
img_output = torch.add(inputs1 * 0.70, mask_output * 0.30)
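# The single-channel masks are placed in the green channel (zeros in R and B) and
# alpha-blended with the de-normalized camera image (70% image / 30% mask) purely
# for visualization/logging purposes.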
img_target = img_utils.getImageFromUnitTensor(torch.squeeze(img_target).cpu())
img_output = img_utils.getImageFromUnitTensor(torch.squeeze(img_output).cpu())
target_images.append(img_target)
output_images.append(img_output)
# Calculates losses
valid_loss = np.mean(valid_losses).astype(dtype=np.float64)
valid_jaccard = np.mean(jaccards).astype(dtype=np.float64)
valid_dice = np.mean(dices).astype(dtype=np.float64)
if not save_image:
# Fix tensors to be images
outputs = img_utils.getMaskFromTensor(outputs)
inputs1 = img_utils.UnNormalize_tensor(inputs1, mean=[0.63263481, 0.63265741, 0.62899464], std=[0.25661512, 0.25698695, 0.2594808])
# Log Images
zeros = to_gpu(torch.zeros(targets.size()))
mask_target = torch.cat((zeros, targets, zeros), 1)
mask_output = torch.cat((zeros, outputs, zeros), 1)
img_target = torch.add(inputs1 * 0.70, mask_target * 0.30)
img_output = torch.add(inputs1 * 0.70, mask_output * 0.30)
tbx_logger.log_image( torch.squeeze(img_target), epoch, dataformats='CHW', title='target')
tbx_logger.log_image( torch.squeeze(img_output), epoch, dataformats='CHW', title='predicted')
#'Size of images not equal for grid formation'
# tbx_logger.log_image(torch.squeeze(inputs1), epoch, dataformats='CHW', title='input image')
# tbx_logger.log_image(torch.squeeze(targets), epoch, dataformats='HW', title='target_mask')
# tbx_logger.log_image(torch.squeeze(outputs), epoch, dataformats='HW', title='output_mask')
return {"val_loss": valid_loss, "val_jacc": valid_jaccard, "val_dice": valid_dice}, target_images, output_images
def fmeasure_evaluation_a2d2(model: nn.Module, valid_dataset):
"""
Given a model and a validation dataset, computes the F-max measure, precision, recall and other metrics.
"""
# Eval mode for all models
model.eval()
thresh = np.array(range(0, 256)) / 255.0
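# 256 evenly spaced probability thresholds in [0, 1]; the F-measure is maximized
# over this sweep (hence "MaxF").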
# UU
totalFP = np.zeros(thresh.shape)
totalFN = np.zeros(thresh.shape)
totalPosNum = 0
totalNegNum = 0
for idx, batch in enumerate(valid_dataset):
img, img2, mask = batch
img = to_gpu(img.unsqueeze(0).contiguous().float())
img2 = to_gpu(img2.unsqueeze(0).contiguous().float())
mask = mask.squeeze().data.cpu().numpy().astype(dtype=bool)  # np.bool is deprecated in modern NumPy
with torch.set_grad_enabled(False):
predict = model(img, img2)
predict = torch.sigmoid(predict)
probs = (predict).squeeze(0).squeeze(0).data.cpu().numpy().astype(dtype=np.float32)
FN, FP, posNum, negNum = evalExp(mask, probs, thresh, validMap=None, validArea=None)
totalFP += FP
totalFN += FN
totalPosNum += posNum
totalNegNum += negNum
metrics = pxEval_maximizeFMeasure(totalPosNum, totalNegNum, totalFN, totalFP, thresh=thresh)
return metrics
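# Usage sketch (illustrative only; the fusion model class and dataset construction
# are assumptions, not part of this module):
#
#   model = to_gpu(SomeFusionNet())          # any model taking (image, lidar) inputs
#   metrics = fmeasure_evaluation_a2d2(model, valid_dataset)
#   print(metrics["MaxF"], metrics["AvgPrec"])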
def train_routine_a2d2_no_lidar(
args,
console_logger: logging.Logger,
root: str,
model: nn.Module,
criterion,
optimizer,
scheduler,
train_loader,
valid_loader,
fm_eval_dataset,
validation,
fold,
num_classes=1,
n_epochs=100,
status_every=5):
"""
General training routine.
params:
args : parsed command-line arguments (saved as the run parameters)
console_logger : logger object for console logging
root : root dir where trained models are stored
model : model for training
criterion : loss function
optimizer : SGD, Adam or other
scheduler : learning rate scheduler
train_loader :
valid_loader :
fm_eval_dataset : dataset for F-max evaluation
validation : validation routine
fold : number of fold
num_classes : number of classes
n_epochs : number of training epochs
status_every : how often (in epochs) the training status is printed
"""
# Load model if it exists
root = Path(root)
root.mkdir(exist_ok=True, parents=True)
model_root = root / args.model_type / 'model{}'.format(fold)
model_root.mkdir(exist_ok=True, parents=True)
# CSV
csvs_path = model_root / 'csv'
csvs_path.mkdir(exist_ok=True, parents=True)
model_path = model_root / 'model.pt'
logging_path = model_root / 'train.log'
# run params saving
save_runparams(vars(args), file_path=(model_root / 'rparams.txt'))
# file logger definition
file_logger = logging.getLogger("file-logger")
file_logger.setLevel(logging.INFO)
fh = logging.FileHandler(str(logging_path), mode='w')
formatter = logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s")
fh.setFormatter(formatter)
file_logger.addHandler(fh)
# Logging to the TensorBoardX
tbx_logger = Logger(log_dir=str(model_root / "tbxlogs"))
if model_path.exists():
state = torch.load(str(model_path))
epoch = state["epoch"]
best_jaccard = state["best_jaccard"]
best_dice = state["best_dice"]
# best_metrics = state["best_metrics"]
model.load_state_dict(state["model"])
console_logger.info(
"\nModel '{0}' was restored. Best Jaccard: {1}, Best DICE: {2}, Epoch: {3}".format(str(model_path),
best_jaccard, best_dice,
epoch))
else:
epoch = 1
best_jaccard = 0
best_dice = 0
# best_metrics = {"MaxF": 0, "AvgPrec": 0, "PRE": 0, "REC": 0}
n_epochs = n_epochs + epoch
best_model = copy.deepcopy(model.state_dict())
train_losses = []
valid_losses = []
jaccards = []
dices = []
# CSV data for logging
loss_csv_data = []
jacc_dice_csv_data = []
# maxf_csv_data = []
# avgprec_csv_data = []
# prec_csv_data = []
# rec_csv_data = []
for epoch in range(epoch, n_epochs):
epoch_train_losses = []
# Train mode
model.train()
# scheduler step
scheduler.step()
try:
optimizer.zero_grad()
for i, (inputs, targets) in enumerate(train_loader):
inputs = to_gpu(inputs)
targets = to_gpu(targets)
with torch.set_grad_enabled(True):
outputs = model(inputs)
loss = criterion(targets, outputs)
loss = loss/args.batch_factor
loss.backward()
if i % args.batch_factor == 0:
optimizer.step()
optimizer.zero_grad()
epoch_train_losses.append((loss*args.batch_factor).item())
# Train loss per epoch
epoch_train_loss = np.mean(epoch_train_losses).astype(dtype=np.float64)
# Validation
valid_dict, _, _ = validation(model, criterion, valid_loader, tbx_logger, epoch)
# metrics = fmeasure_evaluation([model], fm_eval_dataset)
train_losses.append(epoch_train_loss)
valid_losses.append(valid_dict["val_loss"])
jaccards.append(valid_dict["val_jacc"])
dices.append(valid_dict["val_dice"])
if valid_dict["val_jacc"] > best_jaccard:
best_jaccard = valid_dict["val_jacc"]
best_dice = valid_dict["val_dice"]
# best_metrics = {"MaxF": metrics["MaxF"], "AvgPrec": metrics["AvgPrec"],
# "PRE": metrics["PRE_wp"][0], "REC": metrics["REC_wp"][0]}
best_model = copy.deepcopy(model.state_dict())
if epoch and (epoch % status_every == 0):
info_str = "\nEpoch: {}, LR: {}\n".format(epoch, scheduler.get_lr())
info_str += "-" * 30
info_str += "\nTrain loss: {0}".format(epoch_train_loss)
info_str += "\nValid loss: {0}".format(valid_dict["val_loss"])
info_str += "\nValid Jaccard: {0}".format(valid_dict["val_jacc"])
info_str += "\nValid DICE: {0}\n".format(valid_dict["val_dice"])
# # MaxF, PRE, REC, AvgPrec printing
# info_str += "\nUU_MaxF: {0}".format(uu_metrics["MaxF"])
# info_str += "\nUU_AvgPrec: {0}".format(uu_metrics["AvgPrec"])
# info_str += "\nUU_PRE: {0}".format(uu_metrics["PRE_wp"][0])
# info_str += "\nUU_REC: {0}\n".format(uu_metrics["REC_wp"][0])
# info_str += "\nUM_MaxF: {0}".format(um_metrics["MaxF"])
# info_str += "\nUM_AvgPrec: {0}".format(um_metrics["AvgPrec"])
# info_str += "\nUM_PRE: {0}".format(um_metrics["PRE_wp"][0])
# info_str += "\nUM_REC: {0}\n".format(um_metrics["REC_wp"][0])
# info_str += "\nUMM_MaxF: {0}".format(umm_metrics["MaxF"])
# info_str += "\nUMM_AvgPrec: {0}".format(umm_metrics["AvgPrec"])
# info_str += "\nUMM_PRE: {0}".format(umm_metrics["PRE_wp"][0])
# info_str += "\nUMM_REC: {0}\n".format(umm_metrics["REC_wp"][0])
info_str += "-" * 30
info_str += "\n"
console_logger.info(info_str)
# Log to file
info_str = "\nepoch: {}, ".format(epoch)
info_str += "train_loss: {}, ".format(epoch_train_loss)
info_str += "val_loss: {}, ".format(valid_dict["val_loss"])
info_str += "val_jaccard: {}, ".format(valid_dict["val_jacc"])
info_str += "val_dice: {}\n".format(valid_dict["val_dice"])
file_logger.info(info_str)
# Log to the tbX
tbx_logger.log_scalars(tag="losses",
values={"train_loss": epoch_train_loss, "valid_loss": valid_dict["val_loss"]},
step=epoch)
tbx_logger.log_scalars(tag="metrics",
values={"jaccard": valid_dict["val_jacc"], "DICE": valid_dict["val_dice"]},
step=epoch)
# # MaxF
# tbx_logger.log_scalars(tag="MaxF", values={"uu_maxF": uu_metrics["MaxF"], "um_maxF": um_metrics["MaxF"],
# "umm_maxF": umm_metrics["MaxF"], "mmaxF": (uu_metrics[
# "MaxF"] +
# um_metrics[
# "MaxF"] +
# umm_metrics[
# "MaxF"]) / 3},
# step=epoch)
# # AvgPrec
# tbx_logger.log_scalars(tag="AvgPrec",
# values={"uu_AvgPrec": uu_metrics["AvgPrec"], "um_AvgPrec": um_metrics["AvgPrec"],
# "umm_AvgPrec": umm_metrics["AvgPrec"], "mAvgPrec": (uu_metrics[
# "AvgPrec"] +
# um_metrics[
# "AvgPrec"] +
# umm_metrics[
# "AvgPrec"]) / 3},
# step=epoch)
# # PRE
# tbx_logger.log_scalars(tag="PRE",
# values={"uu_PRE": uu_metrics["PRE_wp"][0], "um_PRE": um_metrics["PRE_wp"][0],
# "umm_PRE": umm_metrics["PRE_wp"][0], "mPRE": (uu_metrics["PRE_wp"][0] +
# um_metrics["PRE_wp"][0] +
# umm_metrics["PRE_wp"][
# 0]) / 3}, step=epoch)
# # REC
# tbx_logger.log_scalars(tag="REC",
# values={"uu_REC": uu_metrics["REC_wp"][0], "um_REC": um_metrics["REC_wp"][0],
# "umm_REC": umm_metrics["REC_wp"][0], "mREC": (uu_metrics["REC_wp"][0] +
# um_metrics["REC_wp"][0] +
# umm_metrics["REC_wp"][
# 0]) / 3}, step=epoch)
#
# # Log to csv
# maxf_csv_data.append(
# "{},{},{},{},{}".format(epoch, uu_metrics["MaxF"], um_metrics["MaxF"], umm_metrics["MaxF"],
# (uu_metrics["MaxF"] + um_metrics["MaxF"] + umm_metrics["MaxF"]) / 3).split(
# ","))
# avgprec_csv_data.append(
# "{},{},{},{},{}".format(epoch, uu_metrics["AvgPrec"], um_metrics["AvgPrec"], umm_metrics["AvgPrec"],
# (uu_metrics["AvgPrec"] + um_metrics["AvgPrec"] + umm_metrics[
# "AvgPrec"]) / 3).split(","))
# prec_csv_data.append("{},{},{},{},{}".format(epoch, uu_metrics["PRE_wp"][0], um_metrics["PRE_wp"][0],
# umm_metrics["PRE_wp"][0], (
# uu_metrics["PRE_wp"][0] + um_metrics["PRE_wp"][
# 0] + umm_metrics["PRE_wp"][0]) / 3).split(","))
# rec_csv_data.append("{},{},{},{},{}".format(epoch, uu_metrics["REC_wp"][0], um_metrics["REC_wp"][0],
# umm_metrics["REC_wp"][0], (
# uu_metrics["REC_wp"][0] + um_metrics["REC_wp"][
# 0] + umm_metrics["REC_wp"][0]) / 3).split(","))
# loss_csv_data.append("{},{},{}".format(epoch, epoch_train_loss, valid_dict["val_loss"]).split(","))
# jacc_dice_csv_data.append(
# "{},{},{}".format(epoch, valid_dict["val_jacc"], valid_dict["val_dice"]).split(","))
console_logger.info("End of Epoch: Saving snapshot.")
save_model_a2d2(str(model_path), best_model, best_jaccard, best_dice, epoch)
except KeyboardInterrupt:
console_logger.info("KeyboardInterrupt, saving snapshot.")
#save_model(str(model_path), best_model, best_jaccard, best_dice, best_uu_metrics, best_um_metrics, best_umm_metrics, epoch)
save_model_a2d2(str(model_path), best_model, best_jaccard, best_dice, epoch)
console_logger.info("Done!")
info_str = "\nTraining process is done!\n" + "*" * 30
info_str += "\nTrain loss: {0}".format(np.mean(train_losses).astype(dtype=np.float64))
info_str += "\nValid loss: {0}".format(np.mean(valid_losses).astype(dtype=np.float64))
info_str += "\nMean Jaccard: {0}".format(np.mean(jaccards).astype(dtype=np.float64))
info_str += "\nMean DICE: {0}".format(np.mean(dices).astype(dtype=np.float64))
info_str += "\nBest Jaccard: {0}".format(best_jaccard)
info_str += "\nBest DICE: {0}\n".format(best_dice)
# info_str += "\nBest UU_Metrics: {0}".format(best_uu_metrics)
# info_str += "\nBest UM_Metrics: {0}".format(best_um_metrics)
# info_str += "\nBest UMM_Metrics: {0}".format(best_umm_metrics)
# info_str += "\nMean MaxF: {0}\n".format((best_uu_metrics["MaxF"] + best_um_metrics["MaxF"] + best_umm_metrics["MaxF"])/3)
info_str += "*" * 30
console_logger.info(info_str)
file_logger.info(info_str)
# model saving
# save_model(str(model_path), best_model, best_jaccard, best_dice, best_uu_metrics, best_um_metrics, best_umm_metrics, n_epochs)
save_model_a2d2(str(model_path), best_model, best_jaccard, best_dice, n_epochs)
# Save to CSV
# write2csv(data=maxf_csv_data, file_name=str(csvs_path / "maxf.csv"), type_of_header="maxf")
# write2csv(data=avgprec_csv_data, file_name=str(csvs_path / "avgprec.csv"), type_of_header="avgprec")
# write2csv(data=prec_csv_data, file_name=str(csvs_path / "prec.csv"), type_of_header="prec")
# write2csv(data=rec_csv_data, file_name=str(csvs_path / "rec.csv"), type_of_header="rec")
write2csv(data=loss_csv_data, file_name=str(csvs_path / "loss.csv"), type_of_header="loss")
write2csv(data=jacc_dice_csv_data, file_name=str(csvs_path / "jd.csv"), type_of_header="jd")
def binary_validation_routine_a2d2_no_lidar(model: nn.Module, criterion, valid_loader, tbx_logger=None, epoch: int=0, \
save_image: bool=False):
"""
Given a criterion, a model and a validation loader, computes the Jaccard and DICE metrics together with the validation loss for the binary segmentation problem.
"""
with torch.set_grad_enabled(False):
valid_losses = []
jaccards = []
dices = []
target_images = []
output_images = []
model.eval()
for idx, (inputs, targets) in enumerate(valid_loader):
inputs = to_gpu(inputs)
targets = to_gpu(targets)
outputs = model(inputs)
loss = criterion(targets, outputs)
valid_losses.append(loss.item())
jaccards += batch_jaccard(targets, (outputs > 0).float())
dices += batch_dice(targets, (outputs > 0).float())
if save_image:
# Fix tensors to be images
outputs = img_utils.getMaskFromTensor(outputs)
inputs = img_utils.UnNormalize_tensor(inputs, mean=[0.63263481, 0.63265741, 0.62899464], std=[0.25661512, 0.25698695, 0.2594808])
# Log Images
zeros = to_gpu(torch.zeros(targets.size()))
mask_target = torch.cat((zeros, targets, zeros), 1)
mask_output = torch.cat((zeros, outputs, zeros), 1)
img_target = torch.add(inputs * 0.70, mask_target * 0.30)
img_output = torch.add(inputs * 0.70, mask_output * 0.30)
img_target = img_utils.getImageFromUnitTensor(torch.squeeze(img_target).cpu())
img_output = img_utils.getImageFromUnitTensor(torch.squeeze(img_output).cpu())
target_images.append(img_target)
output_images.append(img_output)
# Calculates losses
valid_loss = np.mean(valid_losses).astype(dtype=np.float64)
valid_jaccard = np.mean(jaccards).astype(dtype=np.float64)
valid_dice = np.mean(dices).astype(dtype=np.float64)
if not save_image:
# Fix tensors to be images
outputs = img_utils.getMaskFromTensor(outputs)
inputs = img_utils.UnNormalize_tensor(inputs, mean=[0.63263481, 0.63265741, 0.62899464], std=[0.25661512, 0.25698695, 0.2594808])
# Log Images
zeros = to_gpu(torch.zeros(targets.size()))
mask_target = torch.cat((zeros, targets, zeros), 1)
mask_output = torch.cat((zeros, outputs, zeros), 1)
img_target = torch.add(inputs * 0.70, mask_target * 0.30)
img_output = torch.add(inputs * 0.70, mask_output * 0.30)
tbx_logger.log_image(torch.squeeze(img_target), epoch, dataformats='CHW', title='target')
tbx_logger.log_image(torch.squeeze(img_output), epoch, dataformats='CHW', title='predicted')
# 'Size of images not equal for grid formation'
# tbx_logger.log_image(torch.squeeze(inputs), epoch, dataformats='CHW', title='input image')
# tbx_logger.log_image(torch.squeeze(targets), epoch, dataformats='HW', title='target_mask')
# tbx_logger.log_image(torch.squeeze(outputs), epoch, dataformats='HW', title='output_mask')
return {"val_loss": valid_loss, "val_jacc": valid_jaccard, "val_dice": valid_dice}, target_images, output_images
def fmeasure_evaluation_a2d2_no_lidar(model: nn.Module, valid_dataset):
"""
Given a model and a validation dataset, computes the F-max measure, precision, recall and other metrics.
"""
# Eval mode for all models
model.eval()
thresh = np.array(range(0, 256)) / 255.0
# UU
totalFP = np.zeros(thresh.shape)
totalFN = np.zeros(thresh.shape)
totalPosNum = 0
totalNegNum = 0
for idx, batch in enumerate(valid_dataset):
img, mask = batch
img = to_gpu(img.unsqueeze(0).contiguous().float())
mask = mask.squeeze().data.cpu().numpy().astype(dtype=bool)  # np.bool is deprecated in modern NumPy
with torch.set_grad_enabled(False):
predict = model(img)
predict = torch.sigmoid(predict)
probs = (predict).squeeze(0).squeeze(0).data.cpu().numpy().astype(dtype=np.float32)
FN, FP, posNum, negNum = evalExp(mask, probs, thresh, validMap=None, validArea=None)
totalFP += FP
totalFN += FN
totalPosNum += posNum
totalNegNum += negNum
metrics = pxEval_maximizeFMeasure(totalPosNum, totalNegNum, totalFN, totalFP, thresh=thresh)
return metrics
def binary_validation_routine(model: nn.Module, criterion, valid_loader):
"""
Given a criterion, a model and a validation loader, computes the Jaccard and DICE metrics together with the validation loss for the binary segmentation problem.
"""
with torch.set_grad_enabled(False):
valid_losses = []
jaccards = []
dices = []
model.eval()
for idx, batch in enumerate(valid_loader):
inputs, targets = batch
inputs = to_gpu(inputs)
targets = to_gpu(targets)
outputs = model(inputs)
loss = criterion(targets, outputs)
valid_losses.append(loss.item())
jaccards += batch_jaccard(targets, (outputs > 0).float())
dices += batch_dice(targets, (outputs > 0).float())
# Calculates losses
valid_loss = np.mean(valid_losses).astype(dtype=np.float64)
valid_jaccard = np.mean(jaccards).astype(dtype=np.float64)
valid_dice = np.mean(dices).astype(dtype=np.float64)
return {"val_loss": valid_loss, "val_jacc": valid_jaccard, "val_dice": valid_dice}
def multi_validation_routine(model: nn.Module, criterion, valid_loader, num_classes=1):
"""
Given a criterion, a model and a validation loader, computes the Jaccard and DICE metrics together with the validation loss for a multi-class problem.
"""
with torch.set_grad_enabled(False):
valid_losses = []
# Eval mode
model.eval()
confusion_matrix = np.zeros((num_classes, num_classes), dtype=np.uint32)
for idx, batch in enumerate(valid_loader):
inputs, targets = batch
inputs = to_gpu(inputs)
targets = to_gpu(targets)
outputs = model(inputs)
loss = criterion(targets, outputs)
valid_losses.append(loss.item())
output = outputs.data.cpu().numpy().argmax(axis=1) # output classes
target = targets.data.cpu().numpy()
confusion_matrix += calculate_confusion_matrix_from_arrays(output, target, num_classes)
confusion_matrix = confusion_matrix[1:, 1:] # remove background
# Jaccards and Dices
jaccards = calculate_jaccards(confusion_matrix)
dices = calculate_dices(confusion_matrix)
mean_valid_loss = np.mean(valid_losses)
import os
import pathlib
import collections
import numpy as np
import torch
import torch.utils.data
import cv2 # pytype: disable=attribute-error
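# Helper referenced by EchoSet below; a minimal sketch of the usual definition
# (a named function rather than a lambda keeps the defaultdict picklable).
def _defaultdict_of_lists():
    return collections.defaultdict(list)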
class EchoSet(torch.utils.data.Dataset):
def __init__(self,
root,
split="train",
min_spacing=16,
max_length=128,
fixed_length=128,
pad=8,
random_clip=False,
dataset_mode='repeat',
SDmode = 'reg' # reg, cla
):
self.folder = pathlib.Path(root)
self.split = split
self.max_length = max_length
self.min_length = min_spacing
self.fixed_length = fixed_length
self.padding = pad
self.mode = dataset_mode # repeat, sample, full
self.random_clip = random_clip
self.attenuation = 3 # Exponent to smooth the labels, choose odd numbers, not too big
self.SDmode = SDmode #reg, cla
self.fnames = []
self.outcome = []
self.ejection = []
self.fps = []
if not os.path.exists(root):
raise ValueError("Path does not exist: "+root)
with open(self.folder / "FileList.csv") as f:
self.header = f.readline().strip().split(",")
filenameIndex = self.header.index("FileName")
splitIndex = self.header.index("Split")
efIndex = self.header.index("EF")
fpsIndex = self.header.index("FPS")
for line in f:
lineSplit = line.strip().split(',')
# Get name of the video file
fileName = os.path.splitext(lineSplit[filenameIndex])[0]+".avi"
# Get subset category (train, val, test)
fileMode = lineSplit[splitIndex].lower()
# Get EF
ef = lineSplit[efIndex]
fps = lineSplit[fpsIndex]
# Keep only entries where the video exist and "mode" corresponds to what is asked
if split in ["all", fileMode] and os.path.exists(self.folder / "Videos" / fileName):
self.fnames.append(fileName)
self.outcome.append(lineSplit)
self.ejection.append(float(ef))
self.fps.append(int(fps))
self.frames = collections.defaultdict(list)
self.trace = collections.defaultdict(_defaultdict_of_lists)
# Volume and frames metadata - not used in UVT
with open(self.folder / "VolumeTracings.csv") as f:
header = f.readline().strip().split(",")
assert header == ["FileName", "X1", "Y1", "X2", "Y2", "Frame"]
# Read lines one by one and store processed data
for line in f:
filename, x1, y1, x2, y2, frame = line.strip().split(',')
x1 = float(x1)
y1 = float(y1)
x2 = float(x2)
y2 = float(y2)
frame = int(frame)
filename = os.path.splitext(filename)[0]
# New frame index for the given filename
if frame not in self.trace[filename]:
self.frames[filename].append(frame)
# Add volume lines to trace
self.trace[filename][frame].append((x1, y1, x2, y2))
# Transform into numpy array
for filename in self.frames:
for frame in self.frames[filename]:
self.trace[filename][frame] = np.array(self.trace[filename][frame])
# Reject all files which do not have both ED and ES frames
keep = [(len(self.frames[os.path.splitext(f)[0]]) >= 2) and (abs(self.frames[os.path.splitext(f)[0]][0] - self.frames[os.path.splitext(f)[0]][-1]) > self.min_length) for f in self.fnames]
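# i.e. keep a video only if it has at least two traced frames (ED and ES) and the
# two keyframes are more than min_spacing frames apart.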
# Prepare for getitem
self.fnames = [f for (f, k) in zip(self.fnames, keep) if k]
self.outcome = [f for (f, k) in zip(self.outcome, keep) if k]
self.ejection = [f for (f, k) in zip(self.ejection, keep) if k]
self.fps = [f for (f, k) in zip(self.fps, keep) if k]
def __getitem__(self, index):
if self.mode == 'repeat':
path = os.path.join(self.folder, "Videos", self.fnames[index])
# Load video into np.array
video = loadvideo(path).astype(np.float32)
key = os.path.splitext(self.fnames[index])[0]
# Scale pixel values from 0-255 to 0-1
video /= 255.0
video = np.moveaxis(video, 0, 1)
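# Reorder from (channels, frames, H, W) to (frames, channels, H, W).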
samp_size = abs(self.frames[key][0]-self.frames[key][-1])
if samp_size > self.fixed_length//2:
video = video[::2,:,:,:]
large_key = int(self.frames[key][-1]//2)
small_key = int(self.frames[key][0]//2)
else:
large_key = self.frames[key][-1]
small_key = self.frames[key][0]
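# If the ED-ES span exceeds half of fixed_length, the video is subsampled by a
# factor of 2 in time and the keyframe indices are halved accordingly.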
# Frames, Channel, Height, Width
f, c, h, w = video.shape
first_poi = min(small_key, large_key)
last_poi = max(small_key, large_key)
label = np.zeros(f)
if self.SDmode == 'cla':
label[small_key] = 1 # End systole (small)
label[large_key] = 2 # End diastole (large)
elif self.SDmode == 'reg':
label[small_key] = -1 # End systole (small)
label[large_key] = 1 # End diastole (large)
dist = abs(small_key-large_key)
sign = -int((small_key-large_key)/dist)
center = min(small_key,large_key)+dist//2
label[small_key+sign:large_key:sign] = sign*np.power((np.arange(small_key+sign,large_key,sign)-center)/dist*2,self.attenuation)
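# Between the two keyframes the regression target is a smooth odd-power ramp
# ((i - center) / dist * 2) ** attenuation, going from -1 at end-systole to +1 at
# end-diastole (reg mode).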
nlabel = []
nvideo = []
repeat = 0
while len(nvideo) < self.fixed_length+1:
nvideo.append(video[first_poi])
nvideo.extend(video[first_poi+1:last_poi])
nvideo.append(video[last_poi])
nvideo.extend(video[last_poi-1:first_poi:-1])
nlabel.append(label[first_poi])
nlabel.extend(label[first_poi+1:last_poi])
nlabel.append(label[last_poi])
nlabel.extend(label[last_poi-1:first_poi:-1])
repeat += 1
nvideo = np.stack(nvideo)
nlabel = np.stack(nlabel)
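# 'repeat' mode: the clip between the two annotated frames is played forward and
# backward repeatedly until it holds at least fixed_length + 1 frames; a random
# starting index for a fixed_length window is then drawn below.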
start_index = np.random.randint(nvideo.shape[0]-self.fixed_length)
#!/usr/bin/env python3
###############################################################################
# #
# RMG - Reaction Mechanism Generator #
# #
# Copyright (c) 2002-2021 Prof. <NAME> (<EMAIL>), #
# Prof. <NAME> (<EMAIL>) and the RMG Team (<EMAIL>) #
# #
# Permission is hereby granted, free of charge, to any person obtaining a #
# copy of this software and associated documentation files (the 'Software'), #
# to deal in the Software without restriction, including without limitation #
# the rights to use, copy, modify, merge, publish, distribute, sublicense, #
# and/or sell copies of the Software, and to permit persons to whom the #
# Software is furnished to do so, subject to the following conditions: #
# #
# The above copyright notice and this permission notice shall be included in #
# all copies or substantial portions of the Software. #
# #
# THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR #
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, #
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE #
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER #
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING #
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER #
# DEALINGS IN THE SOFTWARE. #
# #
###############################################################################
"""
This module provides the :class:`StatMechJob` class, which represents a
statistical mechanics job used to compute and save the statistical mechanics
information for a single species or transition state.
"""
import logging
import math
import os
import matplotlib.pyplot as plt
import numpy as np
import rmgpy.constants as constants
from rmgpy.exceptions import InputError, ElementError, StatmechError
from rmgpy.molecule.molecule import Molecule
from rmgpy.species import TransitionState, Species
from rmgpy.statmech.ndTorsions import HinderedRotor2D, HinderedRotorClassicalND
from rmgpy.statmech.rotation import LinearRotor, NonlinearRotor
from rmgpy.statmech.torsion import HinderedRotor, FreeRotor
from rmgpy.statmech.translation import Translation, IdealGasTranslation
from rmgpy.statmech.vibration import HarmonicOscillator
from rmgpy.quantity import Quantity
from arkane.common import ArkaneSpecies, symbol_by_number, get_principal_moments_of_inertia
from arkane.encorr.corr import get_atom_correction, get_bac
from arkane.ess import ESSAdapter, ess_factory, _registered_ess_adapters, GaussianLog, QChemLog
from arkane.encorr.isodesmic import ErrorCancelingSpecies, IsodesmicRingScheme
from arkane.modelchem import LevelOfTheory, CompositeLevelOfTheory, standardize_name
from arkane.output import prettify
from arkane.encorr.reference import ReferenceDatabase
from arkane.thermo import ThermoJob
################################################################################
class ScanLog(object):
"""
Represent a text file containing a table of angles and corresponding
scan energies.
"""
angleFactors = {
'radians': 1.0,
'rad': 1.0,
'degrees': 180.0 / math.pi,
'deg': 180.0 / math.pi,
}
energyFactors = {
'J/mol': 1.0,
'kJ/mol': 1.0 / 1000.,
'cal/mol': 1.0 / 4.184,
'kcal/mol': 1.0 / 4184.,
'cm^-1': 1.0 / (constants.h * constants.c * 100. * constants.Na),
'hartree': 1.0 / (constants.E_h * constants.Na),
}
def __init__(self, path):
self.path = path
def load(self):
"""
Load the scan energies from the file. Returns arrays containing the
angles (in radians) and energies (in J/mol).
"""
angles, energies = [], []
angle_units, energy_units, angle_factor, energy_factor = None, None, None, None
with open(self.path, 'r') as stream:
for line in stream:
line = line.strip()
if line == '':
continue
tokens = line.split()
if angle_units is None or energy_units is None:
angle_units = tokens[1][1:-1]
energy_units = tokens[3][1:-1]
try:
angle_factor = ScanLog.angleFactors[angle_units]
except KeyError:
raise ValueError('Invalid angle units {0!r}.'.format(angle_units))
try:
energy_factor = ScanLog.energyFactors[energy_units]
except KeyError:
raise ValueError('Invalid energy units {0!r}.'.format(energy_units))
else:
angles.append(float(tokens[0]) / angle_factor)
energies.append(float(tokens[1]) / energy_factor)
angles = np.array(angles)
energies = np.array(energies)
energies -= energies[0]
return angles, energies
def save(self, angles, energies, angle_units='radians', energy_units='kJ/mol'):
"""
Save the scan energies to the file using the given `angles` in radians
and corresponding energies `energies` in J/mol. The file is created to
use the given `angle_units` for angles and `energy_units` for energies.
"""
assert len(angles) == len(energies)
try:
angle_factor = ScanLog.angleFactors[angle_units]
except KeyError:
raise ValueError('Invalid angle units {0!r}.'.format(angle_units))
try:
energy_factor = ScanLog.energyFactors[energy_units]
except KeyError:
raise ValueError('Invalid energy units {0!r}.'.format(energy_units))
with open(self.path, 'w') as stream:
stream.write('{0:>24} {1:>24}\n'.format(
'Angle ({0})'.format(angle_units),
'Energy ({0})'.format(energy_units),
))
for angle, energy in zip(angles, energies):
stream.write('{0:23.10f} {1:23.10f}\n'.format(angle * angle_factor, energy * energy_factor))
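# The resulting file looks roughly like (values illustrative):
#
#          Angle (radians)          Energy (kJ/mol)
#             0.0000000000             0.0000000000
#             0.1047197551             1.2345678901
#
# which is exactly the two-column layout that load() above expects.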
################################################################################
def hinderedRotor(scanLog, pivots, top, symmetry=None, fit='best'):
"""Read a hindered rotor directive, and return the attributes in a list"""
return [scanLog, pivots, top, symmetry, fit]
def freeRotor(pivots, top, symmetry):
"""Read a free rotor directive, and return the attributes in a list"""
return [pivots, top, symmetry]
def hinderedRotor2D(scandir, pivots1, top1, symmetry1, pivots2, top2, symmetry2, symmetry='none'):
"""Read a two dimensional hindered rotor directive, and return the attributes in a list"""
return [scandir, pivots1, top1, symmetry1, pivots2, top2, symmetry2, symmetry]
def hinderedRotorClassicalND(calcPath, pivots, tops, sigmas, semiclassical):
"""Read an N dimensional hindered rotor directive, and return the attributes in a list"""
return [calcPath, pivots, tops, sigmas, semiclassical]
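# In an Arkane species input file these directives are typically used like
# (sketch; file names, atom indices and symmetry numbers are illustrative):
#
#   rotors = [
#       HinderedRotor(scanLog=Log('scan_1_2.log'), pivots=[1, 2], top=[2, 5, 6], symmetry=3, fit='best'),
#       FreeRotor(pivots=[3, 4], top=[4, 7, 8], symmetry=1),
#   ]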
class StatMechJob(object):
"""
A representation of a Arkane statistical mechanics job. This job is used
to compute and save the statistical mechanics information for a single
species or transition state.
"""
def __init__(self, species, path):
self.species = species
self.path = path
self.level_of_theory = None
self.frequencyScaleFactor = 1.0
self.includeHinderedRotors = True
self.useIsodesmicReactions = False
self.isodesmicReactionList = None
self.referenceSets = None
self.applyAtomEnergyCorrections = True
self.applyBondEnergyCorrections = True
self.bondEnergyCorrectionType = 'p'
self.atomEnergies = None
self.bonds = None
self.arkane_species = ArkaneSpecies(species=species)
self.hindered_rotor_plots = []
def execute(self, output_directory=None, plot=False, pdep=False):
"""
Execute the statmech job, saving the results within
the `output_directory`.
If `plot` is True, then plots of the hindered rotor fits will be saved.
"""
self.load(pdep, plot)
if output_directory is not None:
try:
self.write_output(output_directory)
except Exception as e:
logging.warning("Could not write statmech output file due to error: "
"{0} in species {1}".format(e, self.species.label))
if plot:
hr_dir = os.path.join(output_directory, 'plots')
if not os.path.exists(hr_dir):
os.mkdir(hr_dir)
try:
self.save_hindered_rotor_figures(hr_dir)
except Exception as e:
logging.warning("Could not save hindered rotor scans due to error: "
"{0} in species {1}".format(e, self.species.label))
logging.debug('Finished statmech job for species {0}.'.format(self.species))
logging.debug(repr(self.species))
def load(self, pdep=False, plot=False):
"""
Load the statistical mechanics parameters for each conformer from
the associated files on disk. Creates :class:`Conformer` objects for
each conformer and appends them to the list of conformers on the
species object.
"""
path = self.path
directory = os.path.abspath(os.path.dirname(path))
def create_log(log_path, check_for_errors=True):
if not os.path.isfile(log_path):
modified_log_path = os.path.join(directory, log_path)
if not os.path.isfile(modified_log_path):
raise InputError('Could not find log file for species {0} '
'in the specified path {1}'.format(self.species.label, log_path))
else:
log_path = modified_log_path
return ess_factory(log_path, check_for_errors=check_for_errors)
is_ts = isinstance(self.species, TransitionState)
file_extension = os.path.splitext(path)[-1]
if file_extension in ['.yml', '.yaml']:
self.arkane_species.load_yaml(path=path, label=self.species.label, pdep=pdep)
self.species.conformer = self.arkane_species.conformer
if is_ts:
self.species.frequency = self.arkane_species.imaginary_frequency
else:
self.species.transport_data = self.arkane_species.transport_data
self.species.energy_transfer_model = self.arkane_species.energy_transfer_model
if self.arkane_species.adjacency_list is not None:
self.species.molecule = [Molecule().from_adjacency_list(adjlist=self.arkane_species.adjacency_list)]
elif self.arkane_species.inchi is not None:
self.species.molecule = [Molecule().from_inchi(inchistr=self.arkane_species.inchi)]
elif self.arkane_species.smiles is not None:
self.species.molecule = [Molecule().from_smiles(smilesstr=self.arkane_species.smiles)]
return
logging.info('Loading statistical mechanics parameters for {0}...'.format(self.species.label))
global_context = {
'__builtins__': None,
}
local_context = {
'__builtins__': None,
'True': True,
'False': False,
'HinderedRotor': hinderedRotor,
'FreeRotor': freeRotor,
'HinderedRotor2D': hinderedRotor2D,
'HinderedRotorClassicalND': hinderedRotorClassicalND,
'ScanLog': ScanLog,
'Log': create_log, # The Log class no longer exists, so route the path to ess_factory instead
'LevelOfTheory': LevelOfTheory,
'CompositeLevelOfTheory': CompositeLevelOfTheory,
}
local_context.update({ess_adapter_name: create_log for ess_adapter_name in _registered_ess_adapters.keys()})
with open(path, 'r') as f:
try:
exec(f.read(), global_context, local_context)
except (NameError, TypeError, SyntaxError):
logging.error('The species file {0} was invalid:'.format(path))
raise
if self.bonds is None:
try:
self.bonds = local_context['bonds']
except KeyError:
self.bonds = {}
try:
linear = local_context['linear']
except KeyError:
linear = None
try:
external_symmetry = local_context['externalSymmetry']
except KeyError:
external_symmetry = None
try:
spin_multiplicity = local_context['spin_multiplicity']
except KeyError:
spin_multiplicity = 0
try:
optical_isomers = local_context['opticalIsomers']
except KeyError:
logging.debug('No opticalIsomers provided, estimating them from the quantum file.')
optical_isomers = None
try:
energy = local_context['energy']
except KeyError:
raise InputError('Required attribute "energy" not found in species file {0!r}.'.format(path))
if isinstance(energy, dict):
# Standardize model chemistry names
energy = {standardize_name(k) if isinstance(k, str) else k: v for k, v in energy.items()}
freq_level = getattr(self.level_of_theory, 'freq', self.level_of_theory)
energy_level = getattr(self.level_of_theory, 'energy', self.level_of_theory)
try:
energy = energy[energy_level.to_model_chem()]
except KeyError:
try:
energy = energy[energy_level]
except KeyError:
raise InputError(f'{energy_level} not found in dictionary of energy values in species file {path}.')
else:
freq_level = energy_level = None
e0, e_electronic = None, None # E0 = e_electronic + ZPE
energy_log = None
if isinstance(energy, ESSAdapter):
energy_log = energy
# Update energy level of theory with software
if energy_level is not None:
energy_software = energy_log.get_software()
if energy_level.software is not None and energy_level.software != energy_software:
logging.warning(f'{energy_level.software} was specified as energy software but does not match'
f' detected software. Software will be updated to {energy_software}.')
energy_level = energy_level.update(software=energy_software)
elif isinstance(energy, float):
e_electronic = energy
elif isinstance(energy, tuple) and len(energy) == 2:
# this is likely meant to be a quantity object with ZPE already accounted for
energy = Quantity(energy)
e0 = energy.value_si # in J/mol
elif isinstance(energy, tuple) and len(energy) == 3:
if energy[2].lower() == 'e_electronic':
energy = Quantity(energy[:2])
e_electronic = energy.value_si / constants.E_h / constants.Na # convert J/mol to Hartree
elif energy[2].lower() in ['e0']:
energy = Quantity(energy[:2])
e0 = energy.value_si # in J/mol
else:
raise InputError('The third argument for the E0 energy value should be e_electronic (for energy w/o ZPE) '
'or E0 (including the ZPE). Got: {0}'.format(energy[2]))
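# Accordingly, 'energy' in the species file may be given, for example, as
# (values illustrative):
#   energy = Log('sp.out')                          # electronic energy read from an ESS log
#   energy = -123.456789                            # electronic energy in Hartree, no ZPE
#   energy = (-1500.0, 'kJ/mol')                    # value that already includes the ZPE (E0)
#   energy = (-1500.0, 'kJ/mol', 'E0')              # explicit E0
#   energy = (-1510.0, 'kJ/mol', 'e_electronic')    # explicit electronic energy w/o ZPE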
try:
statmech_log = local_context['frequencies']
except KeyError:
raise InputError('Required attribute "frequencies" not found in species file {0!r}.'.format(path))
try:
geom_log = local_context['geometry']
except KeyError:
geom_log = statmech_log
logging.debug("Reading geometry from the specified frequencies file.")
# Update frequency level of theory with software and set new composite level of theory
if freq_level is not None:
freq_software = statmech_log.get_software()
if freq_level.software is not None and freq_level.software != freq_software:
logging.warning(f'{freq_level.software} was specified as frequency software but does not match detected'
f' software. Software will be updated to {freq_software}.')
freq_level = freq_level.update(software=freq_software)
if freq_level is not None and energy_level is not None:
if energy_level == freq_level:
self.level_of_theory = energy_level
else:
self.level_of_theory = CompositeLevelOfTheory(freq=freq_level, energy=energy_level)
if 'frequencyScaleFactor' in local_context:
logging.warning('Ignoring frequency scale factor in species file {0!r}.'.format(path))
rotors = []
if self.includeHinderedRotors:
self.raw_hindered_rotor_data = []
try:
rotors = local_context['rotors']
except KeyError:
pass
# If hindered/free rotors are included in Statmech job, ensure that the same (freq) log file is used for
# both the species's optimized geometry and Hessian. This approach guarantees that the geometry and Hessian
# will be defined in the same Cartesian coordinate system ("Input Orientation", as opposed to
# "Standard Orientation", or something else). Otherwise, if the geometry and Hessian are read from different
# log files, it is very easy for them to be defined in different coordinate systems, unless the user is very
# careful. The current implementation only performs this check for Gaussian logs. If QChem logs are used, only
# a warning is output reminding the user to ensure the geometry and Hessian are defined in consistent
# coordinates.
if len(rotors) > 0:
if isinstance(statmech_log, GaussianLog):
if statmech_log.path != geom_log.path:
raise InputError('For {0!r}, the geometry log, {1!r}, and frequency log, {2!r}, are not the same. '
'In order to ensure the geometry and Hessian of {0!r} are defined in consistent '
'coordinate systems for hindered/free rotor projection, either use the frequency '
'log for both geometry and frequency, or remove rotors.'.format(
self.species.label, geom_log.path, statmech_log.path))
elif isinstance(statmech_log, QChemLog):
logging.warning('QChem log will be used for Hessian of {0!r}. Please verify that the geometry '
'and Hessian of {0!r} are defined in the same coordinate system'.format(
self.species.label))
logging.debug(' Reading molecular degrees of freedom...')
conformer, unscaled_frequencies = statmech_log.load_conformer(symmetry=external_symmetry,
spin_multiplicity=spin_multiplicity,
optical_isomers=optical_isomers,
label=self.species.label)
for mode in conformer.modes:
if isinstance(mode, (Translation, IdealGasTranslation)):
break
else:
# Sometimes the translational mode is not appended to modes for monoatomic species
conformer.modes.append(IdealGasTranslation(mass=self.species.molecular_weight))
if conformer.spin_multiplicity == 0:
raise ValueError("Could not read spin multiplicity from log file {0},\n"
"please specify the multiplicity in the input file.".format(self.path))
logging.debug(' Reading optimized geometry...')
coordinates, number, mass = geom_log.load_geometry()
if self.species.conformer is not None and len(self.species.conformer.modes):
# check that conformer has an IdealGasTranslation mode, append one if it doesn't
for mode in self.species.conformer.modes:
if isinstance(mode, IdealGasTranslation):
break
else:
self.species.conformer.modes.append(IdealGasTranslation(mass=(mass, "amu")))
# check that conformer has a LinearRotor or a NonlinearRotor mode, append one if it doesn't
for mode in self.species.conformer.modes:
if isinstance(mode, (LinearRotor, NonlinearRotor)):
break
else:
# get the moments of inertia and the external symmetry
moments_of_inertia = get_principal_moments_of_inertia(coords=self.species.conformer.coordinates,
numbers=self.species.conformer.number)
symmetry = geom_log.get_symmetry_properties()[1]
if any([moment_of_inertia == 0.0 for moment_of_inertia in moments_of_inertia]):
# this is a linear rotor
moments_of_inertia = [moment_of_inertia for moment_of_inertia in moments_of_inertia
if moment_of_inertia != 0.0]
if abs(moments_of_inertia[0] - moments_of_inertia[1]) > 0.01:
raise StatmechError(f'Expected two identical moments of inertia for a linear rigid rotor, '
f'but got {moments_of_inertia}')
self.species.conformer.modes.append(LinearRotor(inertia=(moments_of_inertia[0], "amu*angstrom^2"),
symmetry=symmetry))
else:
# this is a non-linear rotor
self.species.conformer.modes.append(NonlinearRotor(inertia=(moments_of_inertia, "amu*angstrom^2"),
symmetry=symmetry))
# Infer atoms from geometry
atoms = {}
for atom_num in number:
try:
symbol = symbol_by_number[atom_num]
except KeyError:
raise ElementError('Could not recognize element number {0}.'.format(atom_num))
atoms[symbol] = atoms.get(symbol, 0) + 1
# Save atoms for use in writing thermo output
if isinstance(self.species, Species):
self.species.props['element_counts'] = atoms
conformer.coordinates = (coordinates, "angstroms")
conformer.number = number
conformer.mass = (mass, "amu")
# The 1.014 factor represents the relationship between the harmonic frequencies scaling factor
# and the zero point energy scaling factor, see https://pubs.acs.org/doi/10.1021/ct100326h Section 3.1.3.
zpe_scale_factor = self.frequencyScaleFactor / 1.014
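# e.g. a harmonic frequency scale factor of 0.99 yields a ZPE scale factor of
# about 0.99 / 1.014 = 0.976.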
logging.debug(' Reading energy...')
if e0 is None:
if e_electronic is None:
# The energy read from the log file is without the ZPE
e_electronic = energy_log.load_energy(zpe_scale_factor) # in J/mol
else:
e_electronic *= constants.E_h * constants.Na # convert Hartree/particle into J/mol
# Make sure that isodesmic reactions are configured properly if requested
if self.useIsodesmicReactions: # Make sure atom and bond corrections are not applied
if not self.applyAtomEnergyCorrections:
logging.warning('Atom corrections not requested but MUST be used since isodesmic reactions are '
'being used')
self.applyAtomEnergyCorrections = True
if self.applyBondEnergyCorrections:
logging.warning('Bond corrections requested but will not be used since isodesmic reactions are '
'being used')
self.applyBondEnergyCorrections = False
# Apply atom corrections
if self.applyAtomEnergyCorrections:
atom_corrections = get_atom_correction(self.level_of_theory,
atoms, self.atomEnergies)
else:
atom_corrections = 0
logging.warning('Atom corrections are not being used. Do not trust energies and thermo.')
# Apply bond corrections
if self.applyBondEnergyCorrections:
if not self.bonds and hasattr(self.species, 'molecule') and self.species.molecule:
self.bonds = self.species.molecule[0].enumerate_bonds()
bond_corrections = get_bac(self.level_of_theory, self.bonds, coordinates, number,
bac_type=self.bondEnergyCorrectionType,
multiplicity=conformer.spin_multiplicity)
else:
bond_corrections = 0
e_electronic_with_corrections = e_electronic + atom_corrections + bond_corrections
# Get ZPE only for polyatomic species (monoatomic species don't have frequencies, so ZPE = 0)
zpe = statmech_log.load_zero_point_energy() * zpe_scale_factor if len(number) > 1 else 0
logging.debug('Scaled zero point energy (ZPE) is {0} J/mol'.format(zpe))
logging.debug(' Harmonic frequencies scaling factor used = {0:g}'.format(self.frequencyScaleFactor))
logging.debug(' Zero point energy scaling factor used = {0:g}'.format(zpe_scale_factor))
logging.debug(' Scaled ZPE (0 K) = {0:g} kcal/mol'.format(zpe / 4184.))
# If loading a transition state, also read the imaginary frequency
if is_ts:
neg_freq = statmech_log.load_negative_frequency()
self.species.frequency = (neg_freq * self.frequencyScaleFactor, "cm^-1")
# Read and fit the 1D hindered rotors if applicable
# If rotors are found, the vibrational frequencies are also
# recomputed with the torsional modes removed
hessian = statmech_log.load_force_constant_matrix()
if hessian is not None and len(mass) > 1 and len(rotors) > 0:
frequencies, rotors, conformer = self._fit_rotors(rotors, conformer, hessian, is_ts, linear, directory,
plot)
elif len(conformer.modes) > 2:
if len(rotors) > 0:
logging.warning('Force constant matrix is missing; ignoring rotors. If running Gaussian, add the '
                'keyword iop(7/33=1) to your Gaussian frequency job so that the force constant '
                'matrix is generated. If running Molpro, include the keyword print, hessian.')
frequencies = conformer.modes[2].frequencies.value_si
rotors = np.array([])
else:
if len(rotors) > 0:
logging.warning('Force constant matrix is missing; ignoring rotors. If running Gaussian, add the '
                'keyword iop(7/33=1) to your Gaussian frequency job so that the force constant '
                'matrix is generated. If running Molpro, include the keyword print, hessian.')
frequencies = np.array([])
rotors = np.array([])
for mode in conformer.modes:
if isinstance(mode, HarmonicOscillator):
mode.frequencies = (frequencies * self.frequencyScaleFactor, "cm^-1")
if self.useIsodesmicReactions:
# First, check that a species structure has been given
if not self.species.molecule:
raise InputError('A structure must be defined in the species block of the input file to perform '
'isodesmic reaction calculations. For example, append the following to the species '
'block: `structure=SMILES(CC)` using ethane as an example here.')
# Next, load the reference set database
reference_db = ReferenceDatabase()
reference_db.load(paths=self.referenceSets)
# Set the uncorrected value for E0 on the conformer object so that we can perform the uncorrected thermo job
conformer.E0 = ((e_electronic_with_corrections + zpe) * 0.001, 'kJ/mol')
self.species.conformer = conformer
uncorrected_thermo_job = ThermoJob(species=self.species, thermo_class='nasa')
uncorrected_thermo_job.generate_thermo()
uncorrected_thermo = self.species.thermo.get_enthalpy(298)
scheme = IsodesmicRingScheme(target=ErrorCancelingSpecies(self.species.molecule[0],
(uncorrected_thermo, 'J/mol'),
self.level_of_theory),
reference_set=reference_db.extract_level_of_theory(self.level_of_theory))
isodesmic_thermo, self.isodesmicReactionList = scheme.calculate_target_enthalpy()
# Set the difference as the isodesmic E0 correction
e_electronic_with_corrections += isodesmic_thermo.value_si - uncorrected_thermo
e0 = e_electronic_with_corrections + zpe
logging.debug(' E0 (0 K) = {0:g} kcal/mol'.format(e0 / 4184.))
conformer.E0 = (e0 * 0.001, "kJ/mol")
# save supporting information for calculation
self.supporting_info = [self.species.label]
optical_isomers_read, symmetry_read, point_group_read = statmech_log.get_symmetry_properties()
self.supporting_info.append(external_symmetry if external_symmetry else symmetry_read)
self.supporting_info.append(optical_isomers if optical_isomers else optical_isomers_read)
self.supporting_info.append(point_group_read)
for mode in conformer.modes:
if isinstance(mode, (LinearRotor, NonlinearRotor)):
self.supporting_info.append(mode)
break
else:
self.supporting_info.append(None)
if unscaled_frequencies:
self.supporting_info.append(unscaled_frequencies)
else:
self.supporting_info.append(None)
if is_ts:
self.supporting_info.append(neg_freq)
else:
self.supporting_info.append(None)
self.supporting_info.append(e_electronic)
self.supporting_info.append(e_electronic + zpe)
self.supporting_info.append(e0)
self.supporting_info.append(list([symbol_by_number[x] for x in number])) # atom symbols
self.supporting_info.append(coordinates)
try:
t1d = energy_log.get_T1_diagnostic()
except (NotImplementedError, AttributeError):
t1d = None
self.supporting_info.append(t1d)
try:
d1d = energy_log.get_D1_diagnostic()
except (NotImplementedError, AttributeError):
d1d = None
self.supporting_info.append(d1d)
# save conformer
self.species.conformer = conformer
def _fit_rotors(self, rotors, conformer, hessian, is_ts, linear, directory, plot):
logging.debug(' Fitting {0} hindered rotors...'.format(len(rotors)))
rotor_count = 0
for j, q in enumerate(rotors):
symmetry = None
if len(q) == 3:
# No potential scan is given, this is a free rotor
pivots, top, symmetry = q
inertia = conformer.get_internal_reduced_moment_of_inertia(pivots, top) * constants.Na * 1e23
rotor = FreeRotor(inertia=(inertia, "amu*angstrom^2"), symmetry=symmetry)
conformer.modes.append(rotor)
rotor_count += 1
elif len(q) == 8:
scan_dir, pivots1, top1, symmetry1, pivots2, top2, symmetry2, symmetry = q
logging.info("Calculating energy levels for 2D-HR, may take a while...")
rotor = HinderedRotor2D(name='r' + str(j), torsigma1=symmetry1, torsigma2=symmetry2,
symmetry=symmetry, calc_path=os.path.join(directory, scan_dir),
pivots1=pivots1, pivots2=pivots2, top1=top1, top2=top2)
rotor.run()
conformer.modes.append(rotor)
rotor_count += 2
elif len(q) == 5 and isinstance(q[1][0], list):
scan_dir, pivots, tops, sigmas, semiclassical = q
rotor = HinderedRotorClassicalND(pivots, tops, sigmas, calc_path=os.path.join(directory, scan_dir),
conformer=conformer, F=hessian,
semiclassical=semiclassical, is_linear=linear, is_ts=is_ts)
rotor.run()
conformer.modes.append(rotor)
rotor_count += len(pivots)
elif len(q) in [4, 5]:
# This is a hindered rotor
if len(q) == 5:
scan_log, pivots, top, symmetry, fit = q
elif len(q) == 4:
# the symmetry number will be derived from the scan
scan_log, pivots, top, fit = q
# Load the hindered rotor scan energies
if isinstance(scan_log, ScanLog):
if not os.path.isfile(scan_log.path):
modified_scan_path = os.path.join(directory, scan_log.path)
if not os.path.isfile(modified_scan_path):
raise InputError('Could not find scan energy file for species {0} '
'in the specified path {1}'.format(self.species.label, scan_log.path))
else:
scan_log.path = modified_scan_path
if isinstance(scan_log, (GaussianLog, QChemLog)):
v_list, angle = scan_log.load_scan_energies()
try:
pivot_atoms = scan_log.load_scan_pivot_atoms()
except Exception as e:
logging.warning("Unable to find pivot atoms in scan due to error: {}".format(e))
pivot_atoms = 'N/A'
try:
frozen_atoms = scan_log.load_scan_frozen_atoms()
except Exception as e:
logging.warning("Unable to find pivot atoms in scan due to error: {}".format(e))
frozen_atoms = 'N/A'
elif isinstance(scan_log, ScanLog):
angle, v_list = scan_log.load()
# no way to find pivot atoms or frozen atoms from ScanLog
pivot_atoms = 'N/A'
frozen_atoms = 'N/A'
else:
raise InputError('Invalid log file type {0} for scan log.'.format(scan_log.__class__))
if symmetry is None:
symmetry = determine_rotor_symmetry(v_list, self.species.label, pivots)
self.raw_hindered_rotor_data.append((self.species.label, rotor_count, symmetry, angle,
v_list, pivot_atoms, frozen_atoms))
inertia = conformer.get_internal_reduced_moment_of_inertia(pivots, top) * constants.Na * 1e23
cosine_rotor = HinderedRotor(inertia=(inertia, "amu*angstrom^2"), symmetry=symmetry)
cosine_rotor.fit_cosine_potential_to_data(angle, v_list)
fourier_rotor = HinderedRotor(inertia=(inertia, "amu*angstrom^2"), symmetry=symmetry)
fourier_rotor.fit_fourier_potential_to_data(angle, v_list)
Vlist_cosine = np.zeros_like(angle)
Vlist_fourier = np.zeros_like(angle)
for i in range(angle.shape[0]):
Vlist_cosine[i] = cosine_rotor.get_potential(angle[i])
Vlist_fourier[i] = fourier_rotor.get_potential(angle[i])
if fit == 'cosine':
rotor = cosine_rotor
rotor_count += 1
conformer.modes.append(rotor)
elif fit == 'fourier':
rotor = fourier_rotor
rotor_count += 1
conformer.modes.append(rotor)
elif fit == 'best':
rms_cosine = np.sqrt(np.sum((Vlist_cosine - v_list) * (Vlist_cosine - v_list)) /
(len(v_list) - 1)) / 4184.
rms_fourier = np.sqrt(np.sum((Vlist_fourier - v_list) * (Vlist_fourier - v_list)) /
(len(v_list) - 1)) / 4184.
# Keep the rotor with the most accurate potential
rotor = cosine_rotor if rms_cosine < rms_fourier else fourier_rotor
# However, keep the cosine rotor if it is accurate enough, the
# fourier rotor is not significantly more accurate, and the cosine
# rotor has the correct symmetry
if rms_cosine < 0.05 and rms_cosine / rms_fourier < 2.0 \
        and symmetry == cosine_rotor.symmetry:
rotor = cosine_rotor
conformer.modes.append(rotor)
if plot:
try:
self.create_hindered_rotor_figure(angle, v_list, cosine_rotor, fourier_rotor, rotor,
rotor_count)
except Exception as e:
logging.warning("Could not plot hindered rotor graph due to error: {0}".format(e))
rotor_count += 1
logging.debug(' Determining frequencies from reduced force constant matrix...')
frequencies = np.array(project_rotors(conformer, hessian, rotors, linear, is_ts, label=self.species.label))
return frequencies, rotors, conformer
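# Illustrative call (hypothetical variable names; a sketch, not the actual call site):
#     frequencies, rotors, conformer = self._fit_rotors(
#         rotors, conformer, hessian, is_ts, linear, directory, plot)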
def write_output(self, output_directory):
"""
Save the results of the statmech job to the `output.py` file located
in `output_directory`.
"""
output_file = os.path.join(output_directory, 'output.py')
logging.info('Saving statistical mechanics parameters for {0}...'.format(self.species.label))
f = open(output_file, 'a')
conformer = self.species.conformer
coordinates = conformer.coordinates.value_si * 1e10
number = conformer.number.value_si
f.write('# Coordinates for {0} in Input Orientation (angstroms):\n'.format(self.species.label))
for i in range(coordinates.shape[0]):
x = coordinates[i, 0]
y = coordinates[i, 1]
z = coordinates[i, 2]
f.write('# {0} {1:9.4f} {2:9.4f} {3:9.4f}\n'.format(symbol_by_number[number[i]], x, y, z))
result = 'conformer(label={0!r}, E0={1!r}, modes={2!r}, spin_multiplicity={3:d}, optical_isomers={4:d}'.format(
self.species.label,
conformer.E0,
conformer.modes,
conformer.spin_multiplicity,
conformer.optical_isomers,
)
try:
result += ', frequency={0!r}'.format(self.species.frequency)
except AttributeError:
pass
result += ')'
f.write('{0}\n\n'.format(prettify(result)))
if self.useIsodesmicReactions:
f.write('\n#Isodesmic Reactions Used:\n#------------------------\n#')
for i, rxn in enumerate(self.isodesmicReactionList):
thermo = rxn.calculate_target_thermo()
f.write('Reaction {0}: {1:9.3f} kcal/mol\n#'.format(i+1, thermo.value_si/4184.0))
reactant_string = '\tReactants:\n#\t\t1.0*{0}\n#'.format(rxn.target.molecule.to_smiles())
product_string = '\tProducts:\n#'
for spcs, v in rxn.species.items():
if v > 0: # Product
product_string += '\t\t{0}*{1}\n#'.format(v, spcs.molecule.to_smiles())
else: # Reactant
reactant_string += '\t\t{0}*{1}\n#'.format(abs(v), spcs.molecule.to_smiles())
f.write(reactant_string + product_string + '\n#')
f.write('\n')
f.close()
def create_hindered_rotor_figure(self, angle, v_list, cosine_rotor, fourier_rotor, rotor, rotor_index):
"""
Plot the potential for the rotor, along with its cosine and Fourier
series potential fits, and save it in the `hindered_rotor_plots` attribute.
"""
phi = np.arange(0, 6.3, 0.02, np.float64)
Vlist_cosine = np.zeros_like(phi)
Vlist_fourier = np.zeros_like(phi)
for i in range(phi.shape[0]):
Vlist_cosine[i] = cosine_rotor.get_potential(phi[i])
Vlist_fourier[i] = fourier_rotor.get_potential(phi[i])
fig = plt.figure(figsize=(6, 5))
plt.plot(angle, v_list / 4184., 'ok')
linespec = '-r' if rotor is cosine_rotor else '--r'
plt.plot(phi, Vlist_cosine / 4184., linespec)
linespec = '-b' if rotor is fourier_rotor else '--b'
plt.plot(phi, Vlist_fourier / 4184., linespec)
plt.legend(['scan', 'cosine', 'fourier'], loc=1)
plt.xlim(0, 2 * constants.pi)
plt.xlabel('Angle')
plt.ylabel('Potential (kcal/mol)')
plt.title('{0} hindered rotor #{1:d}'.format(self.species.label, rotor_index + 1))
axes = fig.get_axes()[0]
axes.set_xticks([float(j * constants.pi / 4) for j in range(0, 9)])
axes.set_xticks([float(j * constants.pi / 8) for j in range(0, 17)], minor=True)
axes.set_xticklabels(
['$0$', r'$\pi/4$', r'$\pi/2$', r'$3\pi/4$', r'$\pi$', r'$5\pi/4$', r'$3\pi/2$', r'$7\pi/4$', r'$2\pi$'])
self.hindered_rotor_plots.append((fig, rotor_index))
plt.close(fig)
def save_hindered_rotor_figures(self, directory):
"""
Save hindered rotor plots as a set of files of the form
``rotor_[species_label]_0.pdf`` in the specified directory.
"""
if hasattr(self, 'hindered_rotor_plots'):
for fig, rotor_index in self.hindered_rotor_plots:
fig.savefig(os.path.join(directory, 'rotor_{0}_{1:d}.pdf'.format(self.species.label, rotor_index)))
################################################################################
def is_linear(coordinates):
"""
Determine whether or not the species is linear from its 3D coordinates.
First, try to reduce the problem to just two dimensions; use 3D if the problem cannot be reduced.
`coordinates` is a numpy.array of the species' xyz coordinates.
"""
# epsilon is in degrees
# (from our experience, linear molecules have precisely 180.0 degrees between all atom triples)
epsilon = 0.1
number_of_atoms = len(coordinates)
if number_of_atoms == 1:
return False
if number_of_atoms == 2:
return True
# A tensor containing all distance vectors in the molecule
d = -np.array([c[:, np.newaxis] - c[np.newaxis, :] for c in coordinates.T])
for i in range(2, len(coordinates)):
u1 = d[:, 0, 1] / np.linalg.norm(d[:, 0, 1]) # unit vector between atoms 0 and 1
u2 = d[:, 1, i] / np.linalg.norm(d[:, 1, i]) # unit vector between atoms 1 and i
a = math.degrees(np.arccos(np.clip(np.dot(u1, u2), -1.0, 1.0))) # angle between atoms 0, 1, i
if abs(180 - a) > epsilon and abs(a) > epsilon:
return False
return True
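# Usage sketch (illustrative coordinates only):
#     is_linear(np.array([[0., 0., 0.], [0., 0., 1.16], [0., 0., 2.32]]))    # -> True (collinear)
#     is_linear(np.array([[0., 0., 0.], [0., 0.96, 0.], [0.93, -0.24, 0.]]))  # -> False (bent)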
def project_rotors(conformer, hessian, rotors, linear, is_ts, get_projected_out_freqs=False, label=None):
"""
For a given `conformer` with associated force constant matrix `hessian`, a list
of rotor information `rotors`, and the linearity of the molecule `linear`,
project out the nonvibrational modes from the force constant matrix and use the
result to determine the vibrational frequencies. The list of vibrational
frequencies is returned in cm^-1.
Refer to the Gaussian white paper (http://gaussian.com/vib/) for the procedure used to
calculate harmonic oscillator vibrational frequencies from the force constant matrix.
"""
n_rotors = 0
for rotor in rotors:
if len(rotor) == 8:
n_rotors += 2
elif len(rotor) == 5 and isinstance(rotor[1][0], list):
n_rotors += len(rotor[1])
else:
n_rotors += 1
mass = conformer.mass.value_si
coordinates = conformer.coordinates.value
if linear is None:
linear = is_linear(coordinates)
if linear:
logging.info('Determined species {0} to be linear.'.format(label))
n_atoms = len(conformer.mass.value)
n_vib = 3 * n_atoms - (5 if linear else 6) - n_rotors - (1 if is_ts else 0)
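# 3N total degrees of freedom, minus 3 translations, minus 2 (linear) or 3 (nonlinear)
# external rotations, minus one mode per projected internal rotor, minus the reaction
# coordinate if this is a transition state.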
# Put origin in center of mass
xm = 0.0
ym = 0.0
zm = 0.0
totmass = 0.0
for i in range(n_atoms):
xm += mass[i] * coordinates[i, 0]
ym += mass[i] * coordinates[i, 1]
zm += mass[i] * coordinates[i, 2]
totmass += mass[i]
xm /= totmass
ym /= totmass
zm /= totmass
for i in range(n_atoms):
coordinates[i, 0] -= xm
coordinates[i, 1] -= ym
coordinates[i, 2] -= zm
# Make vector with the root of the mass in amu for each atom
amass = np.sqrt(mass / constants.amu)
# Rotation matrix
inertia = conformer.get_moment_of_inertia_tensor()
inertia_xyz = np.linalg.eigh(inertia)[1]
external = 6
if linear:
external = 5
d = np.zeros((n_atoms * 3, external), np.float64)
# Transform the coordinates to the principal axes
p = np.dot(coordinates, inertia_xyz)
for i in range(n_atoms):
# Projection vectors for translation
d[3 * i + 0, 0] = amass[i]
d[3 * i + 1, 1] = amass[i]
d[3 * i + 2, 2] = amass[i]
# Construction of the projection vectors for external rotation
for i in range(n_atoms):
d[3 * i, 3] = (p[i, 1] * inertia_xyz[0, 2] - p[i, 2] * inertia_xyz[0, 1]) * amass[i]
d[3 * i + 1, 3] = (p[i, 1] * inertia_xyz[1, 2] - p[i, 2] * inertia_xyz[1, 1]) * amass[i]
d[3 * i + 2, 3] = (p[i, 1] * inertia_xyz[2, 2] - p[i, 2] * inertia_xyz[2, 1]) * amass[i]
d[3 * i, 4] = (p[i, 2] * inertia_xyz[0, 0] - p[i, 0] * inertia_xyz[0, 2]) * amass[i]
d[3 * i + 1, 4] = (p[i, 2] * inertia_xyz[1, 0] - p[i, 0] * inertia_xyz[1, 2]) * amass[i]
d[3 * i + 2, 4] = (p[i, 2] * inertia_xyz[2, 0] - p[i, 0] * inertia_xyz[2, 2]) * amass[i]
if not linear:
d[3 * i, 5] = (p[i, 0] * inertia_xyz[0, 1] - p[i, 1] * inertia_xyz[0, 0]) * amass[i]
d[3 * i + 1, 5] = (p[i, 0] * inertia_xyz[1, 1] - p[i, 1] * inertia_xyz[1, 0]) * amass[i]
d[3 * i + 2, 5] = (p[i, 0] * inertia_xyz[2, 1] - p[i, 1] * inertia_xyz[2, 0]) * amass[i]
# Make sure projection matrix is orthonormal
inertia = np.identity(n_atoms * 3, np.float64)
p = np.zeros((n_atoms * 3, 3 * n_atoms + external), np.float64)
p[:, 0:external] = d[:, 0:external]
p[:, external:external + 3 * n_atoms] = inertia[:, 0:3 * n_atoms]
for i in range(3 * n_atoms + external):
norm = 0.0
for j in range(3 * n_atoms):
norm += p[j, i] * p[j, i]
for j in range(3 * n_atoms):
if norm > 1E-15:
p[j, i] /= np.sqrt(norm)
else:
p[j, i] = 0.0
for j in range(i + 1, 3 * n_atoms + external):
proj = 0.0
for k in range(3 * n_atoms):
proj += p[k, i] * p[k, j]
for k in range(3 * n_atoms):
p[k, j] -= proj * p[k, i]
# Order p, there will be vectors that are 0.0
i = 0
while i < 3 * n_atoms:
norm = 0.0
for j in range(3 * n_atoms):
norm += p[j, i] * p[j, i]
if norm < 0.5:
p[:, i:3 * n_atoms + external - 1] = p[:, i + 1:3 * n_atoms + external]
else:
i += 1
# T is the transformation vector from cartesian to internal coordinates
T = np.zeros((n_atoms * 3, 3 * n_atoms - external), np.float64)
T[:, 0:3 * n_atoms - external] = p[:, external:3 * n_atoms]
# Generate mass-weighted force constant matrix
# This converts the axes to mass-weighted Cartesian axes
# Units of Fm are J/m^2*kg = 1/s^2
weighted_hessian = hessian.copy()
for i in range(n_atoms):
for j in range(n_atoms):
for u in range(3):
for v in range(3):
weighted_hessian[3 * i + u, 3 * j + v] /= math.sqrt(mass[i] * mass[j])
hessian_int = np.dot(T.T, np.dot(weighted_hessian, T))
# Get eigenvalues of internal force constant matrix, V = 3N-6 * 3N-6
eig, v = np.linalg.eigh(hessian_int)
logging.debug('Frequencies from internal Hessian')
for i in range(3 * n_atoms - external):
with np.warnings.catch_warnings():
np.warnings.filterwarnings('ignore', r'invalid value encountered in sqrt')
logging.debug(np.sqrt(eig[i]) / (2 * math.pi * constants.c * 100))
# Now we can start thinking about projecting out the internal rotations
d_int = np.zeros((3 * n_atoms, n_rotors), np.float64)
counter = 0
for i, rotor in enumerate(rotors):
if len(rotor) == 5 and isinstance(rotor[1][0], list):
scan_dir, pivots_list, tops, sigmas, semiclassical = rotor
elif len(rotor) == 5:
scanLog, pivots, top, symmetry, fit = rotor
pivots_list = [pivots]
tops = [top]
elif len(rotor) == 3:
pivots, top, symmetry = rotor
pivots_list = [pivots]
tops = [top]
elif len(rotor) == 8:
scan_dir, pivots1, top1, symmetry1, pivots2, top2, symmetry2, symmetry = rotor
pivots_list = [pivots1, pivots2]
tops = [top1, top2]
else:
raise ValueError("{} not a proper rotor format".format(rotor))
for k in range(len(tops)):
top = tops[k]
pivots = pivots_list[k]
# Determine pivot atom
if pivots[0] in top:
pivot1 = pivots[0]
pivot2 = pivots[1]
elif pivots[1] in top:
pivot1 = pivots[1]
pivot2 = pivots[0]
else:
raise ValueError('Could not determine pivot atom for rotor {}.'.format(label))
# Projection vectors for internal rotation
e12 = coordinates[pivot1 - 1, :] - coordinates[pivot2 - 1, :]
for j in range(n_atoms):
atom = j + 1
if atom in top:
e31 = coordinates[atom - 1, :] - coordinates[pivot1 - 1, :]
d_int[3 * (atom - 1):3 * (atom - 1) + 3, counter] = np.cross(e31, e12) * amass[atom - 1]
else:
e31 = coordinates[atom - 1, :] - coordinates[pivot2 - 1, :]
d_int[3 * (atom - 1):3 * (atom - 1) + 3, counter] = np.cross(e31, -e12) * amass[atom - 1]
counter += 1
# Normal modes in mass weighted cartesian coordinates
vmw = np.dot(T, v)
eigm = np.zeros((3 * n_atoms - external, 3 * n_atoms - external), np.float64)
for i in range(3 * n_atoms - external):
eigm[i, i] = eig[i]
fm = np.dot(vmw, np.dot(eigm, vmw.T))
# Internal rotations are not normal modes => project them on the normal modes and orthogonalize
# d_int_proj = (3N-6) x (3N) x (3N) x (Nrotors)
d_int_proj = np.dot(vmw.T, d_int)
# Reconstruct d_int
for i in range(n_rotors):
for j in range(3 * n_atoms):
d_int[j, i] = 0
for k in range(3 * n_atoms - external):
d_int[j, i] += d_int_proj[k, i] * vmw[j, k]
# Ortho normalize
for i in range(n_rotors):
norm = 0.0
for j in range(3 * n_atoms):
norm += d_int[j, i] * d_int[j, i]
for j in range(3 * n_atoms):
d_int[j, i] /= np.sqrt(norm)
for j in range(i + 1, n_rotors):
proj = 0.0
for k in range(3 * n_atoms):
proj += d_int[k, i] * d_int[k, j]
for k in range(3 * n_atoms):
d_int[k, j] -= proj * d_int[k, i]
# calculate the frequencies corresponding to the internal rotors
int_proj = np.dot(fm, d_int)
kmus = np.array([np.linalg.norm(int_proj[:, i]) for i in range(int_proj.shape[1])])
int_rotor_freqs = np.sqrt(kmus) / (2.0 * math.pi * constants.c * 100.0)
if get_projected_out_freqs:
return int_rotor_freqs
# Do the projection
d_int_proj = np.dot(vmw.T, d_int)
import lmfit
import numpy as np
from numpy.linalg import inv
import scipy as sp
import itertools
import matplotlib as mpl
from collections import OrderedDict, defaultdict
from pycqed.utilities import timer as tm_mod
from sklearn.mixture import GaussianMixture as GM
from sklearn.tree import DecisionTreeClassifier as DTC
from pycqed.analysis import fitting_models as fit_mods
from pycqed.analysis import analysis_toolbox as a_tools
import pycqed.analysis_v2.base_analysis as ba
import pycqed.analysis_v2.readout_analysis as roa
from pycqed.analysis_v2.readout_analysis import \
Singleshot_Readout_Analysis_Qutrit as SSROQutrit
import pycqed.analysis_v2.tomography_qudev as tomo
from pycqed.analysis.tools.plotting import SI_val_to_msg_str
from copy import deepcopy
from pycqed.measurement.sweep_points import SweepPoints
from pycqed.measurement.calibration.calibration_points import CalibrationPoints
import matplotlib.pyplot as plt
from pycqed.analysis.three_state_rotation import predict_proba_avg_ro
import logging
from pycqed.utilities import math
from pycqed.utilities.general import find_symmetry_index
import pycqed.measurement.waveform_control.segment as seg_mod
import datetime as dt
log = logging.getLogger(__name__)
try:
import qutip as qtp
except ImportError as e:
log.warning('Could not import qutip, tomography code will not work: {}'.format(e))
class AveragedTimedomainAnalysis(ba.BaseDataAnalysis):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.single_timestamp = True
self.params_dict = {
'value_names': 'value_names',
'measured_values': 'measured_values',
'measurementstring': 'measurementstring',
'exp_metadata': 'exp_metadata'}
self.numeric_params = []
if kwargs.get('auto', True):
self.run_analysis()
def process_data(self):
self.metadata = self.raw_data_dict.get('exp_metadata', {})
if self.metadata is None:
self.metadata = {}
cal_points = self.metadata.get('cal_points', None)
cal_points = self.options_dict.get('cal_points', cal_points)
cal_points_list = roa.convert_channel_names_to_index(
cal_points, len(self.raw_data_dict['measured_values'][0]),
self.raw_data_dict['value_names'])
self.proc_data_dict['cal_points_list'] = cal_points_list
measured_values = self.raw_data_dict['measured_values']
cal_idxs = self._find_calibration_indices()
scales = [np.std(x[cal_idxs]) for x in measured_values]
observable_vectors = np.zeros((len(cal_points_list),
len(measured_values)))
observable_vector_stds = np.ones_like(observable_vectors)
for i, observable in enumerate(cal_points_list):
for ch_idx, seg_idxs in enumerate(observable):
x = measured_values[ch_idx][seg_idxs] / scales[ch_idx]
if len(x) > 0:
observable_vectors[i][ch_idx] = np.mean(x)
if len(x) > 1:
observable_vector_stds[i][ch_idx] = np.std(x)
Omtx = (observable_vectors[1:] - observable_vectors[0]).T
d0 = observable_vectors[0]
corr_values = np.zeros(
(len(cal_points_list) - 1, len(measured_values[0])))
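# Least-squares fit of each segment onto the calibration-point basis:
# solve d ~= d0 + Omtx @ c for c, i.e. c = (Omtx^T Omtx)^-1 Omtx^T (d - d0).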
for i in range(len(measured_values[0])):
d = np.array([x[i] / scale for x, scale in zip(measured_values,
scales)])
corr_values[:, i] = inv(Omtx.T.dot(Omtx)).dot(Omtx.T).dot(d - d0)
self.proc_data_dict['corr_values'] = corr_values
def measurement_operators_and_results(self):
"""
Converts the calibration points to measurement operators. Assumes that
the calibration points are ordered the same as the basis states for
the tomography calculation (e.g. for two qubits |gg>, |ge>, |eg>, |ee>).
Also assumes that each calibration point in the passed cal_points uses
different segments.
Returns:
A tuple of
the measured values without the calibration points;
the measurement operators corresponding to each channel;
and the expected covariance matrix between the operators.
"""
d = len(self.proc_data_dict['cal_points_list'])
cal_point_idxs = [set() for _ in range(d)]
for i, idxs_lists in enumerate(self.proc_data_dict['cal_points_list']):
for idxs in idxs_lists:
cal_point_idxs[i].update(idxs)
cal_point_idxs = [sorted(list(idxs)) for idxs in cal_point_idxs]
cal_point_idxs = np.array(cal_point_idxs)
raw_data = self.raw_data_dict['measured_values']
means = [None] * d
residuals = [list() for _ in raw_data]
for i, cal_point_idx in enumerate(cal_point_idxs):
means[i] = [np.mean(ch_data[cal_point_idx]) for ch_data in raw_data]
for j, ch_residuals in enumerate(residuals):
ch_residuals += list(raw_data[j][cal_point_idx] - means[i][j])
means = np.array(means)
residuals = np.array(residuals)
Fs = [np.diag(ms) for ms in means.T]
Omega = residuals.dot(residuals.T) / len(residuals.T)
data_idxs = np.setdiff1d(np.arange(len(raw_data[0])),
cal_point_idxs.flatten())
data = np.array([ch_data[data_idxs] for ch_data in raw_data])
return data, Fs, Omega
def _find_calibration_indices(self):
cal_indices = set()
cal_points = self.options_dict['cal_points']
nr_segments = self.raw_data_dict['measured_values'].shape[-1]
for observable in cal_points:
if isinstance(observable, (list, np.ndarray)):
for idxs in observable:
cal_indices.update({idx % nr_segments for idx in idxs})
else: # assume dictionaries
for idxs in observable.values():
cal_indices.update({idx % nr_segments for idx in idxs})
return list(cal_indices)
def all_cal_points(d, nr_ch, reps=1):
"""
Generates a list of calibration points for a Hilbert space of dimension d,
with nr_ch channels and reps repetitions of each calibration point.
"""
return [[list(range(-reps*i, -reps*(i-1)))]*nr_ch for i in range(d, 0, -1)]
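# Example (illustrative): all_cal_points(2, 1, reps=2) returns
#     [[[-4, -3]], [[-2, -1]]]
# i.e. for a qubit (d=2) read out on one channel, the last four segments are the
# two calibration points, each repeated twice.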
class Single_Qubit_TimeDomainAnalysis(ba.BaseDataAnalysis):
def process_data(self):
"""
This takes care of rotating and normalizing the data if required.
This should work for several input types:
- I/Q values (2 quadratures + cal points)
- weight functions (1 quadrature + cal points)
- counts (no cal points)
There are several options possible to specify the normalization
using the options dict.
cal_points (tuple) of indices of the calibration points
zero_coord, one_coord
"""
cal_points = self.options_dict.get('cal_points', None)
zero_coord = self.options_dict.get('zero_coord', None)
one_coord = self.options_dict.get('one_coord', None)
if cal_points is None:
# default for all standard Timedomain experiments
cal_points = [list(range(-4, -2)), list(range(-2, 0))]
if len(self.raw_data_dict['measured_values']) == 1:
# if only one weight function is used rotation is not required
self.proc_data_dict['corr_data'] = a_tools.rotate_and_normalize_data_1ch(
self.raw_data_dict['measured_values'][0],
cal_zero_points=cal_points[0],
cal_one_points=cal_points[1])
else:
self.proc_data_dict['corr_data'], zero_coord, one_coord = \
a_tools.rotate_and_normalize_data(
data=self.raw_data_dict['measured_values'][0:2],
zero_coord=zero_coord,
one_coord=one_coord,
cal_zero_points=cal_points[0],
cal_one_points=cal_points[1])
# This should be added to the hdf5 datafile but cannot because of the
# way that the "new" analysis works.
# self.add_dataset_to_analysisgroup('Corrected data',
# self.proc_data_dict['corr_data'])
class MultiQubit_TimeDomain_Analysis(ba.BaseDataAnalysis):
"""
Base class for multi-qubit time-domain analyses.
Parameters that can be specified in the options dict:
- rotation_type: type of rotation to be done on the raw data.
Types of rotations supported by this class:
- 'cal_states' (default, no need to specify): rotation based on
CalibrationPoints for 1D and TwoD data. Supports 2 and 3 cal states
per qubit
- 'fixed_cal_points' (only for TwoD, with 2 cal states):
does PCA on the columns corresponding to the highest cal state
to find the indices of that cal state in the columns, then uses
those to get the data points for the other cal state. Does
rotation using the mean of the data points corresponding to the
two cal states as the zero and one coordinates to rotate
the data.
- 'PCA': ignores cal points and does pca; in the case of TwoD data it
does PCA row by row
- 'column_PCA': ignores cal points and does pca; in the case of TwoD data it
does PCA column by column
- 'global_PCA' (only for TwoD): does PCA on the whole 2D array
- main_sp (default: None): dict with keys qb_name used to specify which
sweep parameter should be used as axis label in plot
- functionality to split measurements with tiled sweep_points:
- split_params (default: None): list of strings with sweep parameters
names expected to be found in SweepPoints. Groups data by these
parameters and stores it in proc_data_dict['split_data_dict'].
- select_split (default: None): dict with keys qb_names and values
a tuple (sweep_param_name, value) or (sweep_param_name, index).
Stored in self.measurement_strings which specify the plot title.
The selected parameter must also be part of the split_params for
that qubit.
"""
def __init__(self,
qb_names: list=None, label: str='',
t_start: str=None, t_stop: str=None, data_file_path: str=None,
options_dict: dict=None, extract_only: bool=False,
do_fitting: bool=True, auto=True,
params_dict=None, numeric_params=None, **kwargs):
super().__init__(t_start=t_start, t_stop=t_stop, label=label,
data_file_path=data_file_path,
options_dict=options_dict,
extract_only=extract_only,
do_fitting=do_fitting, **kwargs)
self.qb_names = qb_names
self.params_dict = params_dict
if self.params_dict is None:
self.params_dict = {}
self.numeric_params = numeric_params
self.measurement_strings = {}
if self.numeric_params is None:
self.numeric_params = []
if not hasattr(self, "job"):
self.create_job(qb_names=qb_names, t_start=t_start, t_stop=t_stop,
label=label, data_file_path=data_file_path,
do_fitting=do_fitting, options_dict=options_dict,
extract_only=extract_only, params_dict=params_dict,
numeric_params=numeric_params, **kwargs)
if auto:
self.run_analysis()
def extract_data(self):
super().extract_data()
if self.qb_names is None:
self.qb_names = self.get_param_value('ro_qubits')
if self.qb_names is None:
raise ValueError('Provide the "qb_names".')
self.measurement_strings = {
qbn: self.raw_data_dict['measurementstring'] for qbn in
self.qb_names}
self.channel_map = self.get_param_value('meas_obj_value_names_map')
if self.channel_map is None:
# if the new name meas_obj_value_names_map is not found, try with
# the old name channel_map
self.channel_map = self.get_param_value('channel_map')
if self.channel_map is None:
value_names = self.raw_data_dict['value_names']
if np.ndim(value_names) > 0:
value_names = value_names
if 'w' in value_names[0]:
self.channel_map = a_tools.get_qb_channel_map_from_hdf(
self.qb_names, value_names=value_names,
file_path=self.raw_data_dict['folder'])
else:
self.channel_map = {}
for qbn in self.qb_names:
self.channel_map[qbn] = value_names
if len(self.channel_map) == 0:
raise ValueError('No qubit RO channels have been found.')
# creates self.sp
self.get_sweep_points()
def get_sweep_points(self):
self.sp = self.get_param_value('sweep_points')
if self.sp is not None:
self.sp = SweepPoints(self.sp)
def create_sweep_points_dict(self):
sweep_points_dict = self.get_param_value('sweep_points_dict')
hard_sweep_params = self.get_param_value('hard_sweep_params')
if self.sp is not None:
self.mospm = self.get_param_value('meas_obj_sweep_points_map')
main_sp = self.get_param_value('main_sp')
if self.mospm is None:
raise ValueError('When providing "sweep_points", '
'"meas_obj_sweep_points_map" has to be '
'provided in addition.')
if main_sp is not None:
self.proc_data_dict['sweep_points_dict'] = {}
for qbn, p in main_sp.items():
dim = self.sp.find_parameter(p)
if dim == 1:
log.warning(f"main_sp is only implemented for sweep "
f"dimension 0, but {p} is in dimension 1.")
self.proc_data_dict['sweep_points_dict'][qbn] = \
{'sweep_points': self.sp.get_sweep_params_property(
'values', dim, p)}
else:
self.proc_data_dict['sweep_points_dict'] = \
{qbn: {'sweep_points': self.sp.get_sweep_params_property(
'values', 0, self.mospm[qbn])[0]}
for qbn in self.qb_names}
elif sweep_points_dict is not None:
# assumed to be of the form {qbn1: swpts_array1, qbn2: swpts_array2}
self.proc_data_dict['sweep_points_dict'] = \
{qbn: {'sweep_points': sweep_points_dict[qbn]}
for qbn in self.qb_names}
elif hard_sweep_params is not None:
self.proc_data_dict['sweep_points_dict'] = \
{qbn: {'sweep_points': list(hard_sweep_params.values())[0][
'values']} for qbn in self.qb_names}
else:
self.proc_data_dict['sweep_points_dict'] = \
{qbn: {'sweep_points': self.data_filter(
self.raw_data_dict['hard_sweep_points'])}
for qbn in self.qb_names}
def create_sweep_points_2D_dict(self):
soft_sweep_params = self.get_param_value('soft_sweep_params')
if self.sp is not None:
self.proc_data_dict['sweep_points_2D_dict'] = OrderedDict()
for qbn in self.qb_names:
self.proc_data_dict['sweep_points_2D_dict'][qbn] = \
OrderedDict()
for pn in self.mospm[qbn]:
if pn in self.sp[1]:
self.proc_data_dict['sweep_points_2D_dict'][qbn][
pn] = self.sp[1][pn][0]
elif soft_sweep_params is not None:
self.proc_data_dict['sweep_points_2D_dict'] = \
{qbn: {pn: soft_sweep_params[pn]['values'] for
pn in soft_sweep_params}
for qbn in self.qb_names}
else:
if len(self.raw_data_dict['soft_sweep_points'].shape) == 1:
self.proc_data_dict['sweep_points_2D_dict'] = \
{qbn: {self.raw_data_dict['sweep_parameter_names'][1]:
self.raw_data_dict['soft_sweep_points']} for
qbn in self.qb_names}
else:
sspn = self.raw_data_dict['sweep_parameter_names'][1:]
self.proc_data_dict['sweep_points_2D_dict'] = \
{qbn: {sspn[i]: self.raw_data_dict['soft_sweep_points'][i]
for i in range(len(sspn))} for qbn in self.qb_names}
if self.get_param_value('percentage_done', 100) < 100:
# This indicates an interrupted measurement.
# Remove non-measured sweep points in that case.
# raw_data_dict['soft_sweep_points'] is obtained in
# BaseDataAnalysis.add_measured_data(), and its length should
# always correspond to the actual number of measured soft sweep
# points.
ssl = len(self.raw_data_dict['soft_sweep_points'])
for sps in self.proc_data_dict['sweep_points_2D_dict'].values():
for k, v in sps.items():
sps[k] = v[:ssl]
def create_meas_results_per_qb(self):
measured_RO_channels = list(self.raw_data_dict['measured_data'])
meas_results_per_qb_raw = {}
meas_results_per_qb = {}
for qb_name, RO_channels in self.channel_map.items():
meas_results_per_qb_raw[qb_name] = {}
meas_results_per_qb[qb_name] = {}
if isinstance(RO_channels, str):
meas_ROs_per_qb = [RO_ch for RO_ch in measured_RO_channels
if RO_channels in RO_ch]
for meas_RO in meas_ROs_per_qb:
meas_results_per_qb_raw[qb_name][meas_RO] = \
self.raw_data_dict[
'measured_data'][meas_RO]
meas_results_per_qb[qb_name][meas_RO] = \
self.data_filter(
meas_results_per_qb_raw[qb_name][meas_RO])
elif isinstance(RO_channels, list):
for qb_RO_ch in RO_channels:
meas_ROs_per_qb = [RO_ch for RO_ch in measured_RO_channels
if qb_RO_ch in RO_ch]
for meas_RO in meas_ROs_per_qb:
meas_results_per_qb_raw[qb_name][meas_RO] = \
self.raw_data_dict[
'measured_data'][meas_RO]
meas_results_per_qb[qb_name][meas_RO] = \
self.data_filter(
meas_results_per_qb_raw[qb_name][meas_RO])
else:
raise TypeError('The RO channels for {} must either be a list '
'or a string.'.format(qb_name))
self.proc_data_dict['meas_results_per_qb_raw'] = \
meas_results_per_qb_raw
self.proc_data_dict['meas_results_per_qb'] = \
meas_results_per_qb
def process_data(self):
super().process_data()
self.data_filter = self.get_param_value('data_filter')
prep_params = self.get_param_value('preparation_params',
default_value=dict())
self.data_with_reset = False
if self.data_filter is None:
if 'active' in prep_params.get('preparation_type', 'wait'):
reset_reps = prep_params.get('reset_reps', 1)
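# With active reset, each segment produces reset_reps reset readouts followed by
# the actual measurement; the slice below keeps only every (reset_reps + 1)-th
# readout, e.g. for reset_reps=3 it keeps indices 3, 7, 11, ...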
self.data_filter = lambda x: x[reset_reps::reset_reps+1]
self.data_with_reset = True
elif "preselection" in prep_params.get('preparation_type', 'wait'):
self.data_filter = lambda x: x[1::2] # filter preselection RO
if self.data_filter is None:
self.data_filter = lambda x: x
self.create_sweep_points_dict()
self.create_meas_results_per_qb()
# temporary fix for appending calibration points to x values but
# without breaking sequences not yet using this interface.
self.rotate = self.get_param_value('rotate', default_value=False)
cal_points = self.get_param_value('cal_points')
last_ge_pulses = self.get_param_value('last_ge_pulses',
default_value=False)
try:
self.cp = CalibrationPoints.from_string(cal_points)
# for now assuming the same for all qubits.
self.cal_states_dict = self.cp.get_indices(
self.qb_names, prep_params)[self.qb_names[0]]
cal_states_rots = self.cp.get_rotations(last_ge_pulses,
self.qb_names[0])[self.qb_names[0]] if self.rotate \
else None
self.cal_states_rotations = self.get_param_value(
'cal_states_rotations', default_value=cal_states_rots)
sweep_points_w_calpts = \
{qbn: {'sweep_points': self.cp.extend_sweep_points(
self.proc_data_dict['sweep_points_dict'][qbn][
'sweep_points'], qbn)} for qbn in self.qb_names}
self.proc_data_dict['sweep_points_dict'] = sweep_points_w_calpts
except TypeError as e:
log.error(e)
log.warning("Failed retrieving cal point objects or states. "
"Please update measurement to provide cal point object "
"in metadata. Trying to get them using the old way ...")
self.cal_states_rotations = self.get_param_value(
'cal_states_rotations', default_value=None) \
if self.rotate else None
self.cal_states_dict = self.get_param_value('cal_states_dict',
default_value={})
if self.get_param_value('global_PCA') is not None:
log.warning('Parameter "global_PCA" is deprecated. Please set '
'rotation_type="global_PCA" instead.')
self.rotation_type = self.get_param_value(
'rotation_type',
default_value='cal_states' if self.rotate else 'no_rotation')
# create projected_data_dict
self.data_to_fit = deepcopy(self.get_param_value('data_to_fit'))
if self.data_to_fit is None:
# If we have cal points, but data_to_fit is not specified,
# choose a reasonable default value. In cases with only two cal
# points, this decides which projected plot is generated. (In
# cases with three cal points, we will anyways get all three
# projected plots.)
if 'e' in self.cal_states_dict.keys():
self.data_to_fit = {qbn: 'pe' for qbn in self.qb_names}
elif 'g' in self.cal_states_dict.keys():
self.data_to_fit = {qbn: 'pg' for qbn in self.qb_names}
else:
self.data_to_fit = {}
# TODO: Steph 15.09.2020
# This is a hack to allow list inside data_to_fit. These lists are
# currently only supported by MultiCZgate_CalibAnalysis
for qbn in self.data_to_fit:
if isinstance(self.data_to_fit[qbn], (list, tuple)):
self.data_to_fit[qbn] = self.data_to_fit[qbn][0]
if self.rotate or self.rotation_type == 'global_PCA':
self.cal_states_analysis()
else:
# this assumes data obtained with classifier detector!
# ie pg, pe, pf are expected to be in the value_names
self.proc_data_dict['projected_data_dict'] = OrderedDict()
for qbn, data_dict in self.proc_data_dict[
'meas_results_per_qb'].items():
self.proc_data_dict['projected_data_dict'][qbn] = OrderedDict()
for state_prob in ['pg', 'pe', 'pf']:
self.proc_data_dict['projected_data_dict'][qbn].update(
{state_prob: data for key, data in data_dict.items()
if state_prob in key})
if self.cal_states_dict is None:
self.cal_states_dict = {}
self.num_cal_points = np.array(list(
self.cal_states_dict.values())).flatten().size
# correct probabilities given calibration matrix
if self.get_param_value("correction_matrix") is not None:
self.proc_data_dict['projected_data_dict_corrected'] = OrderedDict()
for qbn, data_dict in self.proc_data_dict[
'meas_results_per_qb'].items():
self.proc_data_dict['projected_data_dict_corrected'][qbn] = OrderedDict()
probas_raw = np.asarray([data_dict[k] for k in data_dict
for state_prob in ['pg', 'pe', 'pf'] if
state_prob in k])
corr_mtx = self.get_param_value("correction_matrix")[qbn]
probas_corrected = np.linalg.inv(corr_mtx).T @ probas_raw
self.proc_data_dict['projected_data_dict_corrected'][qbn].update(
    {state_prob: data for state_prob, data in
     zip(["pg", "pe", "pf"], probas_corrected)})
# get data_to_fit
self.proc_data_dict['data_to_fit'] = OrderedDict()
for qbn, prob_data in self.proc_data_dict[
'projected_data_dict'].items():
if qbn in self.data_to_fit:
self.proc_data_dict['data_to_fit'][qbn] = prob_data[
self.data_to_fit[qbn]]
# create msmt_sweep_points, sweep_points, cal_points_sweep_points
for qbn in self.qb_names:
if self.num_cal_points > 0:
self.proc_data_dict['sweep_points_dict'][qbn][
'msmt_sweep_points'] = \
self.proc_data_dict['sweep_points_dict'][qbn][
'sweep_points'][:-self.num_cal_points]
self.proc_data_dict['sweep_points_dict'][qbn][
'cal_points_sweep_points'] = \
self.proc_data_dict['sweep_points_dict'][qbn][
'sweep_points'][-self.num_cal_points::]
else:
self.proc_data_dict['sweep_points_dict'][qbn][
'msmt_sweep_points'] = self.proc_data_dict[
'sweep_points_dict'][qbn]['sweep_points']
self.proc_data_dict['sweep_points_dict'][qbn][
'cal_points_sweep_points'] = []
if self.options_dict.get('TwoD', False):
self.create_sweep_points_2D_dict()
# handle data splitting if needed
self.split_data()
def split_data(self):
def unique(l):
try:
return np.unique(l, return_inverse=True)
except Exception:
h = [repr(a) for a in l]
_, i, j = np.unique(h, return_index=True, return_inverse=True)
return l[i], j
split_params = self.get_param_value('split_params', [])
if not len(split_params):
return
pdd = self.proc_data_dict
pdd['split_data_dict'] = {}
for qbn in self.qb_names:
pdd['split_data_dict'][qbn] = {}
for p in split_params:
dim = self.sp.find_parameter(p)
sv = self.sp.get_sweep_params_property(
'values', param_names=p, dimension=dim)
usp, ind = unique(sv)
if len(usp) <= 1:
continue
svs = [self.sp.subset(ind == i, dim) for i in
range(len(usp))]
[s.remove_sweep_parameter(p) for s in svs]
sdd = {}
pdd['split_data_dict'][qbn][p] = sdd
for i in range(len(usp)):
subset = (np.concatenate(
[ind == i,
[True] * len(pdd['sweep_points_dict'][qbn][
'cal_points_sweep_points'])]))
sdd[i] = {}
sdd[i]['value'] = usp[i]
sdd[i]['sweep_points'] = svs[i]
d = pdd['sweep_points_dict'][qbn]
if dim == 0:
sdd[i]['sweep_points_dict'] = {
'sweep_points': d['sweep_points'][subset],
'msmt_sweep_points':
d['msmt_sweep_points'][ind == i],
'cal_points_sweep_points':
d['cal_points_sweep_points'],
}
sdd[i]['sweep_points_2D_dict'] = pdd[
'sweep_points_2D_dict'][qbn]
else:
sdd[i]['sweep_points_dict'] = \
pdd['sweep_points_dict'][qbn]
sdd[i]['sweep_points_2D_dict'] = {
k: v[ind == i] for k, v in pdd[
'sweep_points_2D_dict'][qbn].items()}
for d in ['projected_data_dict', 'data_to_fit']:
if isinstance(pdd[d][qbn], dict):
if dim == 0:
sdd[i][d] = {k: v[:, subset] for
k, v in pdd[d][qbn].items()}
else:
sdd[i][d] = {k: v[ind == i, :] for
k, v in pdd[d][qbn].items()}
else:
if dim == 0:
sdd[i][d] = pdd[d][qbn][:, subset]
else:
sdd[i][d] = pdd[d][qbn][ind == i, :]
select_split = self.get_param_value('select_split')
if select_split is not None:
for qbn, select in select_split.items():
p, v = select
if p not in pdd['split_data_dict'][qbn]:
    log.warning(f"Split parameter {p} for {qbn} not "
                f"found. Ignoring this selection.")
    continue
try:
ind = [a['value'] for a in pdd['split_data_dict'][
qbn][p].values()].index(v)
except ValueError:
ind = v
try:
pdd['split_data_dict'][qbn][p][ind]
except KeyError:
log.warning(f"Value {v} for split parameter {p} "
f"of {qbn} not found. Ignoring this "
f"selection.")
continue
for d in ['projected_data_dict', 'data_to_fit',
'sweep_points_dict', 'sweep_points_2D_dict']:
pdd[d][qbn] = pdd['split_data_dict'][qbn][p][ind][d]
self.measurement_strings[qbn] += f' ({p}: {v})'
def get_cal_data_points(self):
self.num_cal_points = np.array(list(
self.cal_states_dict.values())).flatten().size
do_PCA = self.rotation_type == 'PCA' or \
self.rotation_type == 'column_PCA'
self.cal_states_dict_for_rotation = OrderedDict()
states = False
cal_states_rotations = self.cal_states_rotations
for key in cal_states_rotations.keys():
if key == 'g' or key == 'e' or key == 'f':
states = True
for qbn in self.qb_names:
self.cal_states_dict_for_rotation[qbn] = OrderedDict()
if states:
cal_states_rot_qb = cal_states_rotations
else:
cal_states_rot_qb = cal_states_rotations[qbn]
for i in range(len(cal_states_rot_qb)):
cal_state = \
[k for k, idx in cal_states_rot_qb.items()
if idx == i][0]
self.cal_states_dict_for_rotation[qbn][cal_state] = \
None if do_PCA and self.num_cal_points != 3 else \
self.cal_states_dict[cal_state]
def cal_states_analysis(self):
self.get_cal_data_points()
self.proc_data_dict['projected_data_dict'] = OrderedDict(
{qbn: '' for qbn in self.qb_names})
for qbn in self.qb_names:
cal_states_dict = self.cal_states_dict_for_rotation[qbn]
if len(cal_states_dict) not in [0, 2, 3]:
raise NotImplementedError('Calibration states rotation is '
'currently only implemented for 0, '
'2, or 3 cal states per qubit.')
data_mostly_g = self.get_param_value('data_mostly_g',
default_value=True)
if self.get_param_value('TwoD', default_value=False):
if self.rotation_type == 'global_PCA':
self.proc_data_dict['projected_data_dict'].update(
self.global_pca_TwoD(
qbn, self.proc_data_dict['meas_results_per_qb'],
self.channel_map, self.data_to_fit,
data_mostly_g=data_mostly_g))
elif len(cal_states_dict) == 3:
self.proc_data_dict['projected_data_dict'].update(
self.rotate_data_3_cal_states_TwoD(
qbn, self.proc_data_dict['meas_results_per_qb'],
self.channel_map,
self.cal_states_dict_for_rotation))
elif self.rotation_type == 'fixed_cal_points':
rotated_data_dict, zero_coord, one_coord = \
self.rotate_data_TwoD_same_fixed_cal_idxs(
qbn, self.proc_data_dict['meas_results_per_qb'],
self.channel_map, self.cal_states_dict_for_rotation,
self.data_to_fit)
self.proc_data_dict['projected_data_dict'].update(
rotated_data_dict)
self.proc_data_dict['rotation_coordinates'] = \
[zero_coord, one_coord]
else:
self.proc_data_dict['projected_data_dict'].update(
self.rotate_data_TwoD(
qbn, self.proc_data_dict['meas_results_per_qb'],
self.channel_map, self.cal_states_dict_for_rotation,
self.data_to_fit, data_mostly_g=data_mostly_g,
column_PCA=self.rotation_type == 'column_PCA'))
else:
if len(cal_states_dict) == 3:
self.proc_data_dict['projected_data_dict'].update(
self.rotate_data_3_cal_states(
qbn, self.proc_data_dict['meas_results_per_qb'],
self.channel_map,
self.cal_states_dict_for_rotation))
else:
self.proc_data_dict['projected_data_dict'].update(
self.rotate_data(
qbn, self.proc_data_dict['meas_results_per_qb'],
self.channel_map, self.cal_states_dict_for_rotation,
self.data_to_fit, data_mostly_g=data_mostly_g))
@staticmethod
def rotate_data_3_cal_states(qb_name, meas_results_per_qb, channel_map,
cal_states_dict):
# FOR 3 CAL STATES
rotated_data_dict = OrderedDict()
meas_res_dict = meas_results_per_qb[qb_name]
rotated_data_dict[qb_name] = OrderedDict()
cal_pts_idxs = list(cal_states_dict[qb_name].values())
cal_points_data = np.zeros((len(cal_pts_idxs), 2))
if list(meas_res_dict) == channel_map[qb_name]:
raw_data = np.array([v for v in meas_res_dict.values()]).T
for i, cal_idx in enumerate(cal_pts_idxs):
cal_points_data[i, :] = np.mean(raw_data[cal_idx, :],
axis=0)
rotated_data = predict_proba_avg_ro(raw_data, cal_points_data)
for i, state in enumerate(list(cal_states_dict[qb_name])):
rotated_data_dict[qb_name][f'p{state}'] = rotated_data[:, i]
else:
raise NotImplementedError('Calibration states rotation with 3 '
'cal states only implemented for '
'2 readout channels per qubit.')
return rotated_data_dict
@staticmethod
def rotate_data(qb_name, meas_results_per_qb, channel_map,
cal_states_dict, data_to_fit, data_mostly_g=True):
# ONLY WORKS FOR 2 CAL STATES
meas_res_dict = meas_results_per_qb[qb_name]
rotated_data_dict = OrderedDict()
if len(cal_states_dict[qb_name]) == 0:
cal_zero_points = None
cal_one_points = None
else:
cal_zero_points = list(cal_states_dict[qb_name].values())[0]
cal_one_points = list(cal_states_dict[qb_name].values())[1]
rotated_data_dict[qb_name] = OrderedDict()
if len(meas_res_dict) == 1:
# one RO channel per qubit
if cal_zero_points is None and cal_one_points is None:
data = meas_res_dict[list(meas_res_dict)[0]]
data = (data - np.min(data))/(np.max(data) - np.min(data))
data = a_tools.set_majority_sign(
data, -1 if data_mostly_g else 1)
rotated_data_dict[qb_name][data_to_fit[qb_name]] = data
else:
rotated_data_dict[qb_name][data_to_fit[qb_name]] = \
a_tools.rotate_and_normalize_data_1ch(
data=meas_res_dict[list(meas_res_dict)[0]],
cal_zero_points=cal_zero_points,
cal_one_points=cal_one_points)
elif list(meas_res_dict) == channel_map[qb_name]:
# two RO channels per qubit
data, _, _ = a_tools.rotate_and_normalize_data_IQ(
data=np.array([v for v in meas_res_dict.values()]),
cal_zero_points=cal_zero_points,
cal_one_points=cal_one_points)
if cal_zero_points is None:
data = a_tools.set_majority_sign(
data, -1 if data_mostly_g else 1)
rotated_data_dict[qb_name][data_to_fit[qb_name]] = data
else:
# multiple readouts per qubit per channel
if isinstance(channel_map[qb_name], str):
qb_ro_ch0 = channel_map[qb_name]
else:
qb_ro_ch0 = channel_map[qb_name][0]
ro_suffixes = [s[len(qb_ro_ch0)+1::] for s in
list(meas_res_dict) if qb_ro_ch0 in s]
for i, ro_suf in enumerate(ro_suffixes):
if len(ro_suffixes) == len(meas_res_dict):
# one RO ch per qubit
if cal_zero_points is None and cal_one_points is None:
data = meas_res_dict[list(meas_res_dict)[i]]
data = (data - np.min(data))/(np.max(data) - np.min(data))
data = a_tools.set_majority_sign(
data, -1 if data_mostly_g else 1)
rotated_data_dict[qb_name][ro_suf] = data
else:
rotated_data_dict[qb_name][ro_suf] = \
a_tools.rotate_and_normalize_data_1ch(
data=meas_res_dict[list(meas_res_dict)[i]],
cal_zero_points=cal_zero_points,
cal_one_points=cal_one_points)
else:
# two RO ch per qubit
keys = [k for k in meas_res_dict if ro_suf in k]
correct_keys = [k for k in keys
if k[len(qb_ro_ch0)+1::] == ro_suf]
data_array = np.array([meas_res_dict[k]
for k in correct_keys])
data, _, _ = a_tools.rotate_and_normalize_data_IQ(
data=data_array,
cal_zero_points=cal_zero_points,
cal_one_points=cal_one_points)
if cal_zero_points is None:
data = a_tools.set_majority_sign(
data, -1 if data_mostly_g else 1)
rotated_data_dict[qb_name][ro_suf] = data
return rotated_data_dict
@staticmethod
def rotate_data_3_cal_states_TwoD(qb_name, meas_results_per_qb,
channel_map, cal_states_dict):
# FOR 3 CAL STATES
meas_res_dict = meas_results_per_qb[qb_name]
rotated_data_dict = OrderedDict()
rotated_data_dict[qb_name] = OrderedDict()
cal_pts_idxs = list(cal_states_dict[qb_name].values())
cal_points_data = np.zeros((len(cal_pts_idxs), 2))
if list(meas_res_dict) == channel_map[qb_name]:
# two RO channels per qubit
raw_data_arr = meas_res_dict[list(meas_res_dict)[0]]
for i, state in enumerate(list(cal_states_dict[qb_name])):
rotated_data_dict[qb_name][f'p{state}'] = np.zeros(
raw_data_arr.shape)
for col in range(raw_data_arr.shape[1]):
raw_data = np.concatenate([
v[:, col].reshape(len(v[:, col]), 1) for
v in meas_res_dict.values()], axis=1)
for i, cal_idx in enumerate(cal_pts_idxs):
cal_points_data[i, :] = np.mean(raw_data[cal_idx, :],
axis=0)
# rotated data is (raw_data_arr.shape[0], 3)
rotated_data = predict_proba_avg_ro(
raw_data, cal_points_data)
for i, state in enumerate(list(cal_states_dict[qb_name])):
rotated_data_dict[qb_name][f'p{state}'][:, col] = \
rotated_data[:, i]
else:
raise NotImplementedError('Calibration states rotation with 3 '
'cal states only implemented for '
'2 readout channels per qubit.')
# transpose data
for i, state in enumerate(list(cal_states_dict[qb_name])):
rotated_data_dict[qb_name][f'p{state}'] = \
rotated_data_dict[qb_name][f'p{state}'].T
return rotated_data_dict
@staticmethod
def global_pca_TwoD(qb_name, meas_results_per_qb, channel_map,
data_to_fit, data_mostly_g=True):
meas_res_dict = meas_results_per_qb[qb_name]
if list(meas_res_dict) != channel_map[qb_name]:
raise NotImplementedError('Global PCA is only implemented '
'for two-channel RO!')
raw_data_arr = meas_res_dict[list(meas_res_dict)[0]]
rotated_data_dict = OrderedDict({qb_name: OrderedDict()})
rotated_data_dict[qb_name][data_to_fit[qb_name]] = \
deepcopy(raw_data_arr.transpose())
data_array = np.array(
[v.T.flatten() for v in meas_res_dict.values()])
rot_flat_data, _, _ = \
a_tools.rotate_and_normalize_data_IQ(
data=data_array)
data = np.reshape(rot_flat_data, raw_data_arr.T.shape)
data = a_tools.set_majority_sign(data, -1 if data_mostly_g else 1)
rotated_data_dict[qb_name][data_to_fit[qb_name]] = data
return rotated_data_dict
@staticmethod
def rotate_data_TwoD(qb_name, meas_results_per_qb, channel_map,
cal_states_dict, data_to_fit,
column_PCA=False, data_mostly_g=True):
meas_res_dict = meas_results_per_qb[qb_name]
rotated_data_dict = OrderedDict()
if len(cal_states_dict[qb_name]) == 0:
cal_zero_points = None
cal_one_points = None
else:
cal_zero_points = list(cal_states_dict[qb_name].values())[0]
cal_one_points = list(cal_states_dict[qb_name].values())[1]
rotated_data_dict[qb_name] = OrderedDict()
if len(meas_res_dict) == 1:
# one RO channel per qubit
raw_data_arr = meas_res_dict[list(meas_res_dict)[0]]
rotated_data_dict[qb_name][data_to_fit[qb_name]] = \
deepcopy(raw_data_arr.transpose())
if column_PCA:
for row in range(raw_data_arr.shape[0]):
data = a_tools.rotate_and_normalize_data_1ch(
data=raw_data_arr[row, :],
cal_zero_points=cal_zero_points,
cal_one_points=cal_one_points)
data = a_tools.set_majority_sign(
data, -1 if data_mostly_g else 1)
rotated_data_dict[qb_name][data_to_fit[qb_name]][
:, row] = data
else:
for col in range(raw_data_arr.shape[1]):
data = a_tools.rotate_and_normalize_data_1ch(
data=raw_data_arr[:, col],
cal_zero_points=cal_zero_points,
cal_one_points=cal_one_points)
if cal_zero_points is None:
data = a_tools.set_majority_sign(
data, -1 if data_mostly_g else 1)
rotated_data_dict[qb_name][data_to_fit[qb_name]][col] = data
elif list(meas_res_dict) == channel_map[qb_name]:
# two RO channels per qubit
raw_data_arr = meas_res_dict[list(meas_res_dict)[0]]
rotated_data_dict[qb_name][data_to_fit[qb_name]] = \
deepcopy(raw_data_arr.transpose())
if column_PCA:
for row in range(raw_data_arr.shape[0]):
data_array = np.array(
[v[row, :] for v in meas_res_dict.values()])
data, _, _ = \
a_tools.rotate_and_normalize_data_IQ(
data=data_array,
cal_zero_points=cal_zero_points,
cal_one_points=cal_one_points)
data = a_tools.set_majority_sign(
data, -1 if data_mostly_g else 1)
rotated_data_dict[qb_name][data_to_fit[qb_name]][
:, row] = data
else:
for col in range(raw_data_arr.shape[1]):
data_array = np.array(
[v[:, col] for v in meas_res_dict.values()])
data, _, _ = a_tools.rotate_and_normalize_data_IQ(
data=data_array,
cal_zero_points=cal_zero_points,
cal_one_points=cal_one_points)
if cal_zero_points is None:
data = a_tools.set_majority_sign(
data, -1 if data_mostly_g else 1)
rotated_data_dict[qb_name][
data_to_fit[qb_name]][col] = data
else:
# multiple readouts per qubit per channel
if isinstance(channel_map[qb_name], str):
qb_ro_ch0 = channel_map[qb_name]
else:
qb_ro_ch0 = channel_map[qb_name][0]
ro_suffixes = [s[len(qb_ro_ch0)+1::] for s in
list(meas_res_dict) if qb_ro_ch0 in s]
for i, ro_suf in enumerate(ro_suffixes):
if len(ro_suffixes) == len(meas_res_dict):
# one RO ch per qubit
raw_data_arr = meas_res_dict[list(meas_res_dict)[i]]
rotated_data_dict[qb_name][ro_suf] = \
deepcopy(raw_data_arr.transpose())
for col in range(raw_data_arr.shape[1]):
data = a_tools.rotate_and_normalize_data_1ch(
data=raw_data_arr[:, col],
cal_zero_points=cal_zero_points,
cal_one_points=cal_one_points)
if cal_zero_points is None:
data = a_tools.set_majority_sign(
data, -1 if data_mostly_g else 1)
rotated_data_dict[qb_name][ro_suf][col] = data
else:
# two RO ch per qubit
raw_data_arr = meas_res_dict[list(meas_res_dict)[i]]
rotated_data_dict[qb_name][ro_suf] = \
deepcopy(raw_data_arr.transpose())
for col in range(raw_data_arr.shape[1]):
data_array = np.array(
[v[:, col] for k, v in meas_res_dict.items()
if ro_suf in k])
data, _, _ = a_tools.rotate_and_normalize_data_IQ(
data=data_array,
cal_zero_points=cal_zero_points,
cal_one_points=cal_one_points)
if cal_zero_points is None:
data = a_tools.set_majority_sign(
data, -1 if data_mostly_g else 1)
rotated_data_dict[qb_name][ro_suf][col] = data
return rotated_data_dict
@staticmethod
def rotate_data_TwoD_same_fixed_cal_idxs(qb_name, meas_results_per_qb,
channel_map, cal_states_dict,
data_to_fit):
meas_res_dict = meas_results_per_qb[qb_name]
if list(meas_res_dict) != channel_map[qb_name]:
raise NotImplementedError('rotate_data_TwoD_same_fixed_cal_idxs '
'only implemented for two-channel RO!')
if len(cal_states_dict[qb_name]) == 0:
cal_zero_points = None
cal_one_points = None
else:
cal_zero_points = list(cal_states_dict[qb_name].values())[0]
cal_one_points = list(cal_states_dict[qb_name].values())[1]
# do pca on the one cal states
raw_data_arr = meas_res_dict[list(meas_res_dict)[0]]
rot_dat_e = np.zeros(raw_data_arr.shape[1])
for row in cal_one_points:
rot_dat_e += a_tools.rotate_and_normalize_data_IQ(
data=np.array([v[row, :] for v in meas_res_dict.values()]),
cal_zero_points=None, cal_one_points=None)[0]
rot_dat_e /= len(cal_one_points)
# find the values of the zero and one cal points
col_idx = np.argmax(np.abs(rot_dat_e))
zero_coord = [np.mean([v[r, col_idx] for r in cal_zero_points])
for v in meas_res_dict.values()]
one_coord = [np.mean([v[r, col_idx] for r in cal_one_points])
for v in meas_res_dict.values()]
# rotate all data based on the fixed zero_coord and one_coord
rotated_data_dict = OrderedDict({qb_name: OrderedDict()})
rotated_data_dict[qb_name][data_to_fit[qb_name]] = \
deepcopy(raw_data_arr.transpose())
for col in range(raw_data_arr.shape[1]):
data_array = np.array(
[v[:, col] for v in meas_res_dict.values()])
rotated_data_dict[qb_name][
data_to_fit[qb_name]][col], _, _ = \
a_tools.rotate_and_normalize_data_IQ(
data=data_array,
zero_coord=zero_coord,
one_coord=one_coord)
return rotated_data_dict, zero_coord, one_coord
def get_xaxis_label_unit(self, qb_name):
hard_sweep_params = self.get_param_value('hard_sweep_params')
sweep_name = self.get_param_value('sweep_name')
sweep_unit = self.get_param_value('sweep_unit')
if self.sp is not None:
main_sp = self.get_param_value('main_sp', None)
if main_sp is not None and qb_name in main_sp:
param_names = [main_sp[qb_name]]
else:
param_names = self.mospm[qb_name]
_, xunit, xlabel = self.sp.get_sweep_params_description(
param_names=param_names, dimension=0)[0]
elif hard_sweep_params is not None:
xlabel = list(hard_sweep_params)[0]
xunit = list(hard_sweep_params.values())[0][
'unit']
elif (sweep_name is not None) and (sweep_unit is not None):
xlabel = sweep_name
xunit = sweep_unit
else:
xlabel = self.raw_data_dict['sweep_parameter_names']
xunit = self.raw_data_dict['sweep_parameter_units']
if np.ndim(xlabel) > 0:
xlabel = xlabel[0]
if np.ndim(xunit) > 0:
xunit = xunit[0]
return xlabel, xunit
@staticmethod
def get_cal_state_color(cal_state_label):
if cal_state_label == 'g' or cal_state_label == r'$|g\rangle$':
return 'k'
elif cal_state_label == 'e' or cal_state_label == r'$|e\rangle$':
return 'gray'
elif cal_state_label == 'f' or cal_state_label == r'$|f\rangle$':
return 'C8'
else:
return 'C4'
@staticmethod
def get_latex_prob_label(prob_label):
if '$' in prob_label:
return prob_label
elif 'p' in prob_label.lower():
return r'$|{}\rangle$'.format(prob_label[-1])
else:
return r'$|{}\rangle$'.format(prob_label)
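# Examples (illustrative): get_latex_prob_label('pe') -> r'$|e\rangle$',
# get_latex_prob_label('f') -> r'$|f\rangle$'.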
def prepare_plots(self):
if self.get_param_value('plot_proj_data', default_value=True):
select_split = self.get_param_value('select_split')
fig_name_suffix = self.get_param_value('fig_name_suffix', '')
title_suffix = self.get_param_value('title_suffix', '')
for qb_name, corr_data in self.proc_data_dict[
'projected_data_dict'].items():
fig_name = f'projected_plot_{qb_name}'
title_suf = title_suffix
if select_split is not None:
param, idx = select_split[qb_name]
# remove qb_name from param
p = '_'.join([e for e in param.split('_') if e != qb_name])
# create suffix
suf = f'({p}, {str(np.round(idx, 3))})'
# add suffix
fig_name += f'_{suf}'
title_suf = f'{suf}_{title_suf}' if \
len(title_suf) else suf
if isinstance(corr_data, dict):
for data_key, data in corr_data.items():
if not self.rotate:
data_label = data_key
plot_name_suffix = data_key
plot_cal_points = False
data_axis_label = 'Population'
else:
fn = f'{fig_name}_{data_key}'
data_label = 'Data'
plot_name_suffix = ''
tf = f'{data_key}_{title_suf}' if \
len(title_suf) else data_key
plot_cal_points = (
not self.options_dict.get('TwoD', False))
data_axis_label = \
'Strongest principal component (arb.)' if \
'pca' in self.rotation_type.lower() else \
'{} state population'.format(
self.get_latex_prob_label(data_key))
self.prepare_projected_data_plot(
fn, data, qb_name=qb_name,
data_label=data_label,
title_suffix=tf,
plot_name_suffix=plot_name_suffix,
fig_name_suffix=fig_name_suffix,
data_axis_label=data_axis_label,
plot_cal_points=plot_cal_points)
else:
fig_name = 'projected_plot_' + qb_name
self.prepare_projected_data_plot(
fig_name, corr_data, qb_name=qb_name,
plot_cal_points=(
not self.options_dict.get('TwoD', False)))
if self.get_param_value('plot_raw_data', default_value=True):
self.prepare_raw_data_plots(plot_filtered=False)
if 'preparation_params' in self.metadata:
if 'active' in self.metadata['preparation_params'].get(
'preparation_type', 'wait'):
self.prepare_raw_data_plots(plot_filtered=True)
def prepare_raw_data_plots(self, plot_filtered=False):
if plot_filtered or not self.data_with_reset:
key = 'meas_results_per_qb'
suffix = 'filtered' if self.data_with_reset else ''
func_for_swpts = lambda qb_name: self.proc_data_dict[
'sweep_points_dict'][qb_name]['sweep_points']
else:
key = 'meas_results_per_qb_raw'
suffix = ''
func_for_swpts = lambda qb_name: self.raw_data_dict[
'hard_sweep_points']
for qb_name, raw_data_dict in self.proc_data_dict[key].items():
if qb_name not in self.qb_names:
continue
sweep_points = func_for_swpts(qb_name)
if len(raw_data_dict) == 1:
numplotsx = 1
numplotsy = 1
elif len(raw_data_dict) == 2:
numplotsx = 1
numplotsy = 2
else:
numplotsx = 2
numplotsy = len(raw_data_dict) // 2 + len(raw_data_dict) % 2
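            # e.g. 5 acquisition channels give numplotsx = 2, numplotsy = 3
            # (the last subplot in the grid stays empty).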
plotsize = self.get_default_plot_params(set=False)['figure.figsize']
fig_title = (self.raw_data_dict['timestamp'] + ' ' +
self.raw_data_dict['measurementstring'] +
'\nRaw data ' + suffix + ' ' + qb_name)
plot_name = 'raw_plot_' + qb_name + suffix
xlabel, xunit = self.get_xaxis_label_unit(qb_name)
for ax_id, ro_channel in enumerate(raw_data_dict):
if self.get_param_value('TwoD', default_value=False):
if self.sp is None:
soft_sweep_params = self.get_param_value(
'soft_sweep_params')
if soft_sweep_params is not None:
yunit = list(soft_sweep_params.values())[0]['unit']
else:
yunit = self.raw_data_dict[
'sweep_parameter_units'][1]
if np.ndim(yunit) > 0:
yunit = yunit[0]
for pn, ssp in self.proc_data_dict['sweep_points_2D_dict'][
qb_name].items():
ylabel = pn
if self.sp is not None:
yunit = self.sp.get_sweep_params_property(
'unit', dimension=1, param_names=pn)
ylabel = self.sp.get_sweep_params_property(
'label', dimension=1, param_names=pn)
self.plot_dicts[f'{plot_name}_{ro_channel}_{pn}'] = {
'fig_id': plot_name + '_' + pn,
'ax_id': ax_id,
'plotfn': self.plot_colorxy,
'xvals': sweep_points,
'yvals': ssp,
'zvals': raw_data_dict[ro_channel].T,
'xlabel': xlabel,
'xunit': xunit,
'ylabel': ylabel,
'yunit': yunit,
'numplotsx': numplotsx,
'numplotsy': numplotsy,
'plotsize': (plotsize[0]*numplotsx,
plotsize[1]*numplotsy),
'title': fig_title,
'clabel': '{} (Vpeak)'.format(ro_channel)}
else:
self.plot_dicts[plot_name + '_' + ro_channel] = {
'fig_id': plot_name,
'ax_id': ax_id,
'plotfn': self.plot_line,
'xvals': sweep_points,
'xlabel': xlabel,
'xunit': xunit,
'yvals': raw_data_dict[ro_channel],
'ylabel': '{} (Vpeak)'.format(ro_channel),
'yunit': '',
'numplotsx': numplotsx,
'numplotsy': numplotsy,
'plotsize': (plotsize[0]*numplotsx,
plotsize[1]*numplotsy),
'title': fig_title}
if len(raw_data_dict) == 1:
self.plot_dicts[
plot_name + '_' + list(raw_data_dict)[0]]['ax_id'] = None
def prepare_projected_data_plot(
self, fig_name, data, qb_name, title_suffix='', sweep_points=None,
plot_cal_points=True, plot_name_suffix='', fig_name_suffix='',
data_label='Data', data_axis_label='', do_legend_data=True,
do_legend_cal_states=True):
if len(fig_name_suffix):
fig_name = f'{fig_name}_{fig_name_suffix}'
if data_axis_label == '':
data_axis_label = 'Strongest principal component (arb.)' if \
'pca' in self.rotation_type.lower() else \
'{} state population'.format(self.get_latex_prob_label(
self.data_to_fit[qb_name]))
plotsize = self.get_default_plot_params(set=False)['figure.figsize']
plotsize = (plotsize[0], plotsize[0]/1.25)
if sweep_points is None:
sweep_points = self.proc_data_dict['sweep_points_dict'][qb_name][
'sweep_points']
plot_names_cal = []
if plot_cal_points and self.num_cal_points != 0:
yvals = data[:-self.num_cal_points]
xvals = sweep_points[:-self.num_cal_points]
# plot cal points
for i, cal_pts_idxs in enumerate(
self.cal_states_dict.values()):
plot_dict_name_cal = fig_name + '_' + \
list(self.cal_states_dict)[i] + '_' + \
plot_name_suffix
plot_names_cal += [plot_dict_name_cal]
self.plot_dicts[plot_dict_name_cal] = {
'fig_id': fig_name,
'plotfn': self.plot_line,
'plotsize': plotsize,
'xvals': self.proc_data_dict['sweep_points_dict'][qb_name][
'cal_points_sweep_points'][cal_pts_idxs],
'yvals': data[cal_pts_idxs],
'setlabel': list(self.cal_states_dict)[i],
'do_legend': do_legend_cal_states,
'legend_bbox_to_anchor': (1, 0.5),
'legend_pos': 'center left',
'linestyle': 'none',
'line_kws': {'color': self.get_cal_state_color(
list(self.cal_states_dict)[i])}}
self.plot_dicts[plot_dict_name_cal+'_line'] = {
'fig_id': fig_name,
'plotsize': plotsize,
'plotfn': self.plot_hlines,
'y': np.mean(data[cal_pts_idxs]),
'xmin': self.proc_data_dict['sweep_points_dict'][qb_name][
'sweep_points'][0],
'xmax': self.proc_data_dict['sweep_points_dict'][qb_name][
'sweep_points'][-1],
'colors': 'gray'}
else:
yvals = data
xvals = sweep_points
title = (self.raw_data_dict['timestamp'] + ' ' +
self.raw_data_dict['measurementstring'])
title += '\n' + f'{qb_name}_{title_suffix}' if len(title_suffix) else \
' ' + qb_name
plot_dict_name = f'{fig_name}_{plot_name_suffix}'
xlabel, xunit = self.get_xaxis_label_unit(qb_name)
if self.get_param_value('TwoD', default_value=False):
if self.sp is None:
soft_sweep_params = self.get_param_value(
'soft_sweep_params')
if soft_sweep_params is not None:
yunit = list(soft_sweep_params.values())[0]['unit']
else:
yunit = self.raw_data_dict['sweep_parameter_units'][1]
if np.ndim(yunit) > 0:
yunit = yunit[0]
for pn, ssp in self.proc_data_dict['sweep_points_2D_dict'][
qb_name].items():
ylabel = pn
if self.sp is not None:
yunit = self.sp.get_sweep_params_property(
'unit', dimension=1, param_names=pn)
ylabel = self.sp.get_sweep_params_property(
'label', dimension=1, param_names=pn)
self.plot_dicts[f'{plot_dict_name}_{pn}'] = {
'plotfn': self.plot_colorxy,
'fig_id': fig_name + '_' + pn,
'xvals': xvals,
'yvals': ssp,
'zvals': yvals,
'xlabel': xlabel,
'xunit': xunit,
'ylabel': ylabel,
'yunit': yunit,
'zrange': self.get_param_value('zrange', None),
'title': title,
'clabel': data_axis_label}
else:
self.plot_dicts[plot_dict_name] = {
'plotfn': self.plot_line,
'fig_id': fig_name,
'plotsize': plotsize,
'xvals': xvals,
'xlabel': xlabel,
'xunit': xunit,
'yvals': yvals,
'ylabel': data_axis_label,
'yunit': '',
'setlabel': data_label,
'title': title,
'linestyle': 'none',
'do_legend': do_legend_data,
'legend_bbox_to_anchor': (1, 0.5),
'legend_pos': 'center left'}
# add plot_params to each plot dict
plot_params = self.get_param_value('plot_params', default_value={})
for plt_name in self.plot_dicts:
self.plot_dicts[plt_name].update(plot_params)
if len(plot_names_cal) > 0:
if do_legend_data and not do_legend_cal_states:
for plot_name in plot_names_cal:
plot_dict_cal = self.plot_dicts.pop(plot_name)
self.plot_dicts[plot_name] = plot_dict_cal
class Idling_Error_Rate_Analyisis(ba.BaseDataAnalysis):
def __init__(self, t_start: str=None, t_stop: str=None,
label: str='', data_file_path: str=None,
options_dict: dict=None, extract_only: bool=False,
do_fitting: bool=True, auto=True):
super().__init__(t_start=t_start, t_stop=t_stop,
label=label,
data_file_path=data_file_path,
options_dict=options_dict,
extract_only=extract_only, do_fitting=do_fitting)
self.params_dict = {'xlabel': 'sweep_name',
'xunit': 'sweep_unit',
'xvals': 'sweep_points',
'measurementstring': 'measurementstring',
'value_names': 'value_names',
'value_units': 'value_units',
'measured_values': 'measured_values'}
self.numeric_params = []
if auto:
self.run_analysis()
def process_data(self):
post_sel_th = self.options_dict.get('post_sel_th', 0.5)
raw_shots = self.raw_data_dict['measured_values'][0][0]
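        # Shots alternate between a post-selection readout and a data readout;
        # data shots whose preceding post-selection shot exceeds the threshold
        # are set to NaN and later ignored by np.nanmean. The surviving data
        # shots are assumed to cycle through the prepared states '0', '1', '+'.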
post_sel_shots = raw_shots[::2]
data_shots = raw_shots[1::2]
data_shots[np.where(post_sel_shots > post_sel_th)] = np.nan
states = ['0', '1', '+']
self.proc_data_dict['xvals'] = np.unique(self.raw_data_dict['xvals'])
for i, state in enumerate(states):
            self.proc_data_dict['shots_{}'.format(state)] = data_shots[i::3]
self.proc_data_dict['yvals_{}'.format(state)] = \
np.nanmean(np.reshape(self.proc_data_dict['shots_{}'.format(state)],
(len(self.proc_data_dict['xvals']), -1),
order='F'), axis=1)
def prepare_plots(self):
# assumes that value names are unique in an experiment
states = ['0', '1', '+']
for i, state in enumerate(states):
yvals = self.proc_data_dict['yvals_{}'.format(state)]
xvals = self.proc_data_dict['xvals']
self.plot_dicts['Prepare in {}'.format(state)] = {
'ax_id': 'main',
'plotfn': self.plot_line,
'xvals': xvals,
'xlabel': self.raw_data_dict['xlabel'][0],
'xunit': self.raw_data_dict['xunit'][0][0],
'yvals': yvals,
'ylabel': 'Counts',
'yrange': [0, 1],
'xrange': self.options_dict.get('xrange', None),
'yunit': 'frac',
'setlabel': 'Prepare in {}'.format(state),
                'do_legend': True,
'title': (self.raw_data_dict['timestamps'][0]+' - ' +
self.raw_data_dict['timestamps'][-1] + '\n' +
self.raw_data_dict['measurementstring'][0]),
'legend_pos': 'upper right'}
if self.do_fitting:
for state in ['0', '1', '+']:
self.plot_dicts['fit_{}'.format(state)] = {
'ax_id': 'main',
'plotfn': self.plot_fit,
'fit_res': self.fit_dicts['fit {}'.format(state)]['fit_res'],
'plot_init': self.options_dict['plot_init'],
'setlabel': 'fit |{}>'.format(state),
'do_legend': True,
'legend_pos': 'upper right'}
            self.plot_dicts['fit_text'] = {
                'ax_id': 'main',
                'box_props': 'fancy',
                'xpos': 1.05,
                'horizontalalignment': 'left',
                'plotfn': self.plot_text,
                'text_string': self.proc_data_dict['fit_msg']}
def analyze_fit_results(self):
        fit_msg = ''
states = ['0', '1', '+']
for state in states:
fr = self.fit_res['fit {}'.format(state)]
N1 = fr.params['N1'].value, fr.params['N1'].stderr
N2 = fr.params['N2'].value, fr.params['N2'].stderr
            fit_msg += ('Prep |{}> : \n\tN_1 = {:.2g} $\\pm$ {:.2g}'
                        '\n\tN_2 = {:.2g} $\\pm$ {:.2g}\n').format(
                            state, N1[0], N1[1], N2[0], N2[1])
self.proc_data_dict['fit_msg'] = fit_msg
def prepare_fitting(self):
self.fit_dicts = OrderedDict()
states = ['0', '1', '+']
for i, state in enumerate(states):
yvals = self.proc_data_dict['yvals_{}'.format(state)]
xvals = self.proc_data_dict['xvals']
mod = lmfit.Model(fit_mods.idle_error_rate_exp_decay)
mod.guess = fit_mods.idle_err_rate_guess.__get__(mod, mod.__class__)
# Done here explicitly so that I can overwrite a specific guess
guess_pars = mod.guess(N=xvals, data=yvals)
vary_N2 = self.options_dict.get('vary_N2', True)
if not vary_N2:
guess_pars['N2'].value = 1e21
guess_pars['N2'].vary = False
self.fit_dicts['fit {}'.format(states[i])] = {
'model': mod,
'fit_xvals': {'N': xvals},
'fit_yvals': {'data': yvals},
'guess_pars': guess_pars}
# Allows fixing the double exponential coefficient
class Grovers_TwoQubitAllStates_Analysis(ba.BaseDataAnalysis):
def __init__(self, t_start: str=None, t_stop: str=None,
label: str='', data_file_path: str=None,
options_dict: dict=None, extract_only: bool=False,
do_fitting: bool=True, auto=True):
super().__init__(t_start=t_start, t_stop=t_stop,
label=label,
data_file_path=data_file_path,
options_dict=options_dict,
extract_only=extract_only, do_fitting=do_fitting)
self.params_dict = {'xlabel': 'sweep_name',
'xunit': 'sweep_unit',
'xvals': 'sweep_points',
'measurementstring': 'measurementstring',
'value_names': 'value_names',
'value_units': 'value_units',
'measured_values': 'measured_values'}
self.numeric_params = []
if auto:
self.run_analysis()
def process_data(self):
self.proc_data_dict = OrderedDict()
normalize_to_cal_points = self.options_dict.get('normalize_to_cal_points', True)
cal_points = [
[[-4, -3], [-2, -1]],
[[-4, -2], [-3, -1]],
]
for idx in [0,1]:
yvals = list(self.raw_data_dict['measured_data'].values())[idx][0]
self.proc_data_dict['ylabel_{}'.format(idx)] = \
self.raw_data_dict['value_names'][0][idx]
self.proc_data_dict['yunit'] = self.raw_data_dict['value_units'][0][idx]
if normalize_to_cal_points:
yvals = a_tools.rotate_and_normalize_data_1ch(yvals,
cal_zero_points=cal_points[idx][0],
cal_one_points=cal_points[idx][1])
self.proc_data_dict['yvals_{}'.format(idx)] = yvals
y0 = self.proc_data_dict['yvals_0']
y1 = self.proc_data_dict['yvals_1']
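        # Success probability (hedged reading of the segment ordering): for
        # each of the four oracle settings take the probability that each
        # qubit is found in its expected outcome (y or 1 - y), multiply the
        # two, and average over the four settings.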
        p_success = ((y0[0] * y1[0]) +
                     (1 - y0[1]) * y1[1] +
                     (y0[2]) * (1 - y1[2]) +
                     (1 - y0[3]) * (1 - y1[3])) / 4
self.proc_data_dict['p_success'] = p_success
def prepare_plots(self):
# assumes that value names are unique in an experiment
for i in [0, 1]:
yvals = self.proc_data_dict['yvals_{}'.format(i)]
xvals = self.raw_data_dict['xvals'][0]
ylabel = self.proc_data_dict['ylabel_{}'.format(i)]
self.plot_dicts['main_{}'.format(ylabel)] = {
'plotfn': self.plot_line,
'xvals': self.raw_data_dict['xvals'][0],
'xlabel': self.raw_data_dict['xlabel'][0],
'xunit': self.raw_data_dict['xunit'][0][0],
'yvals': self.proc_data_dict['yvals_{}'.format(i)],
'ylabel': ylabel,
'yunit': self.proc_data_dict['yunit'],
'title': (self.raw_data_dict['timestamps'][0] + ' \n' +
self.raw_data_dict['measurementstring'][0]),
'do_legend': False,
'legend_pos': 'upper right'}
        self.plot_dicts['limit_text'] = {
            'ax_id': 'main_{}'.format(ylabel),
            'box_props': 'fancy',
            'xpos': 1.05,
            'horizontalalignment': 'left',
            'plotfn': self.plot_text,
            'text_string': 'P success = {:.3f}'.format(
                self.proc_data_dict['p_success'])}
class FlippingAnalysis(Single_Qubit_TimeDomainAnalysis):
def __init__(self, t_start: str=None, t_stop: str=None,
data_file_path: str=None,
options_dict: dict=None, extract_only: bool=False,
do_fitting: bool=True, auto=True):
super().__init__(t_start=t_start, t_stop=t_stop,
data_file_path=data_file_path,
options_dict=options_dict,
extract_only=extract_only, do_fitting=do_fitting)
self.single_timestamp = True
self.params_dict = {'xlabel': 'sweep_name',
'xunit': 'sweep_unit',
'measurementstring': 'measurementstring',
'sweep_points': 'sweep_points',
'value_names': 'value_names',
'value_units': 'value_units',
'measured_values': 'measured_values'}
# This analysis makes a hardcoded assumption on the calibration points
self.options_dict['cal_points'] = [list(range(-4, -2)),
list(range(-2, 0))]
self.numeric_params = []
if auto:
self.run_analysis()
def prepare_fitting(self):
self.fit_dicts = OrderedDict()
# Even though we expect an exponentially damped oscillation we use
# a simple cosine as this gives more reliable fitting and we are only
# interested in extracting the frequency of the oscillation
cos_mod = lmfit.Model(fit_mods.CosFunc)
guess_pars = fit_mods.Cos_guess(
model=cos_mod, t=self.raw_data_dict['sweep_points'][:-4],
data=self.proc_data_dict['corr_data'][:-4])
# This enforces the oscillation to start at the equator
# and ensures that any over/under rotation is absorbed in the
# frequency
guess_pars['amplitude'].value = 0.5
guess_pars['amplitude'].vary = False
guess_pars['offset'].value = 0.5
guess_pars['offset'].vary = False
self.fit_dicts['cos_fit'] = {
'fit_fn': fit_mods.CosFunc,
'fit_xvals': {'t': self.raw_data_dict['sweep_points'][:-4]},
'fit_yvals': {'data': self.proc_data_dict['corr_data'][:-4]},
'guess_pars': guess_pars}
# In the case there are very few periods we fall back on a small
# angle approximation to extract the drive detuning
poly_mod = lmfit.models.PolynomialModel(degree=1)
        # the detuning can be estimated using a small angle approximation
# c1 = d/dN (cos(2*pi*f N) ) evaluated at N = 0 -> c1 = -2*pi*f
poly_mod.set_param_hint('frequency', expr='-c1/(2*pi)')
guess_pars = poly_mod.guess(x=self.raw_data_dict['sweep_points'][:-4],
data=self.proc_data_dict['corr_data'][:-4])
# Constraining the line ensures that it will only give a good fit
# if the small angle approximation holds
guess_pars['c0'].vary = False
guess_pars['c0'].value = 0.5
self.fit_dicts['line_fit'] = {
'model': poly_mod,
'fit_xvals': {'x': self.raw_data_dict['sweep_points'][:-4]},
'fit_yvals': {'data': self.proc_data_dict['corr_data'][:-4]},
'guess_pars': guess_pars}
def analyze_fit_results(self):
sf_line = self._get_scale_factor_line()
sf_cos = self._get_scale_factor_cos()
self.proc_data_dict['scale_factor'] = self.get_scale_factor()
msg = 'Scale fact. based on '
if self.proc_data_dict['scale_factor'] == sf_cos:
msg += 'cos fit\n'
else:
msg += 'line fit\n'
msg += 'cos fit: {:.4f}\n'.format(sf_cos)
msg += 'line fit: {:.4f}'.format(sf_line)
self.raw_data_dict['scale_factor_msg'] = msg
# TODO: save scale factor to file
def get_scale_factor(self):
"""
Returns the scale factor that should correct for the error in the
pulse amplitude.
"""
# Model selection based on the Bayesian Information Criterion (BIC)
# as calculated by lmfit
if (self.fit_dicts['line_fit']['fit_res'].bic <
self.fit_dicts['cos_fit']['fit_res'].bic):
scale_factor = self._get_scale_factor_line()
else:
scale_factor = self._get_scale_factor_cos()
return scale_factor
def _get_scale_factor_cos(self):
# 1/period of the oscillation corresponds to the (fractional)
# over/under rotation error per gate
frequency = self.fit_dicts['cos_fit']['fit_res'].params['frequency']
# the square is needed to account for the difference between
# power and amplitude
scale_factor = (1+frequency)**2
phase = np.rad2deg(self.fit_dicts['cos_fit']['fit_res'].params['phase']) % 360
# phase ~90 indicates an under rotation so the scale factor
# has to be larger than 1. A phase ~270 indicates an over
# rotation so then the scale factor has to be smaller than one.
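        # Illustrative numbers (not from data): frequency = 0.01 with a phase
        # near 90 deg gives a scale factor of 1.01**2 ~ 1.020 (increase the
        # amplitude), while the same frequency with a phase near 270 deg
        # gives 1/1.01**2 ~ 0.980 (decrease it).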
if phase > 180:
scale_factor = 1/scale_factor
return scale_factor
def _get_scale_factor_line(self):
# 1/period of the oscillation corresponds to the (fractional)
# over/under rotation error per gate
frequency = self.fit_dicts['line_fit']['fit_res'].params['frequency']
scale_factor = (1+frequency)**2
# no phase sign check is needed here as this is contained in the
# sign of the coefficient
return scale_factor
def prepare_plots(self):
self.plot_dicts['main'] = {
'plotfn': self.plot_line,
'xvals': self.raw_data_dict['sweep_points'],
'xlabel': self.raw_data_dict['xlabel'],
'xunit': self.raw_data_dict['xunit'], # does not do anything yet
'yvals': self.proc_data_dict['corr_data'],
'ylabel': 'Excited state population',
'yunit': '',
'setlabel': 'data',
'title': (self.raw_data_dict['timestamp'] + ' ' +
self.raw_data_dict['measurementstring']),
'do_legend': True,
'legend_pos': 'upper right'}
if self.do_fitting:
self.plot_dicts['line_fit'] = {
'ax_id': 'main',
'plotfn': self.plot_fit,
'fit_res': self.fit_dicts['line_fit']['fit_res'],
'plot_init': self.options_dict['plot_init'],
'setlabel': 'line fit',
'do_legend': True,
'legend_pos': 'upper right'}
self.plot_dicts['cos_fit'] = {
'ax_id': 'main',
'plotfn': self.plot_fit,
'fit_res': self.fit_dicts['cos_fit']['fit_res'],
'plot_init': self.options_dict['plot_init'],
'setlabel': 'cos fit',
'do_legend': True,
'legend_pos': 'upper right'}
self.plot_dicts['text_msg'] = {
'ax_id': 'main',
'ypos': 0.15,
'plotfn': self.plot_text,
'box_props': 'fancy',
'text_string': self.raw_data_dict['scale_factor_msg']}
class Intersect_Analysis(Single_Qubit_TimeDomainAnalysis):
"""
Analysis to extract the intercept of two parameters.
relevant options_dict parameters
ch_idx_A (int) specifies first channel for intercept
ch_idx_B (int) specifies second channel for intercept if same as first
it will assume data was taken interleaved.
"""
def __init__(self, t_start: str=None, t_stop: str=None,
data_file_path: str=None,
options_dict: dict=None, extract_only: bool=False,
do_fitting: bool=True, auto=True):
super().__init__(t_start=t_start, t_stop=t_stop,
data_file_path=data_file_path,
options_dict=options_dict,
extract_only=extract_only, do_fitting=do_fitting)
self.single_timestamp = False
self.params_dict = {'xlabel': 'sweep_name',
'xvals': 'sweep_points',
'xunit': 'sweep_unit',
'measurementstring': 'measurementstring',
'value_names': 'value_names',
'value_units': 'value_units',
'measured_values': 'measured_values'}
self.numeric_params = []
if auto:
self.run_analysis()
def process_data(self):
"""
selects the relevant acq channel based on "ch_idx_A" and "ch_idx_B"
specified in the options dict. If ch_idx_A and ch_idx_B are the same
it will unzip the data.
"""
self.proc_data_dict = deepcopy(self.raw_data_dict)
# The channel containing the data must be specified in the options dict
ch_idx_A = self.options_dict.get('ch_idx_A', 0)
ch_idx_B = self.options_dict.get('ch_idx_B', 0)
self.proc_data_dict['ylabel'] = self.raw_data_dict['value_names'][0][ch_idx_A]
self.proc_data_dict['yunit'] = self.raw_data_dict['value_units'][0][ch_idx_A]
if ch_idx_A == ch_idx_B:
yvals = list(self.raw_data_dict['measured_data'].values())[ch_idx_A][0]
self.proc_data_dict['xvals_A'] = self.raw_data_dict['xvals'][0][::2]
self.proc_data_dict['xvals_B'] = self.raw_data_dict['xvals'][0][1::2]
self.proc_data_dict['yvals_A'] = yvals[::2]
self.proc_data_dict['yvals_B'] = yvals[1::2]
else:
self.proc_data_dict['xvals_A'] = self.raw_data_dict['xvals'][0]
self.proc_data_dict['xvals_B'] = self.raw_data_dict['xvals'][0]
self.proc_data_dict['yvals_A'] = list(self.raw_data_dict
['measured_data'].values())[ch_idx_A][0]
self.proc_data_dict['yvals_B'] = list(self.raw_data_dict
['measured_data'].values())[ch_idx_B][0]
def prepare_fitting(self):
self.fit_dicts = OrderedDict()
self.fit_dicts['line_fit_A'] = {
'model': lmfit.models.PolynomialModel(degree=2),
'fit_xvals': {'x': self.proc_data_dict['xvals_A']},
'fit_yvals': {'data': self.proc_data_dict['yvals_A']}}
self.fit_dicts['line_fit_B'] = {
'model': lmfit.models.PolynomialModel(degree=2),
'fit_xvals': {'x': self.proc_data_dict['xvals_B']},
'fit_yvals': {'data': self.proc_data_dict['yvals_B']}}
def analyze_fit_results(self):
fr_0 = self.fit_res['line_fit_A'].best_values
fr_1 = self.fit_res['line_fit_B'].best_values
c0 = (fr_0['c0'] - fr_1['c0'])
c1 = (fr_0['c1'] - fr_1['c1'])
c2 = (fr_0['c2'] - fr_1['c2'])
poly_coeff = [c0, c1, c2]
poly = np.polynomial.polynomial.Polynomial([fr_0['c0'],
fr_0['c1'], fr_0['c2']])
ic = np.polynomial.polynomial.polyroots(poly_coeff)
self.proc_data_dict['intersect_L'] = ic[0], poly(ic[0])
self.proc_data_dict['intersect_R'] = ic[1], poly(ic[1])
        if np.min(self.proc_data_dict['xvals']) < ic[0] < \
                np.max(self.proc_data_dict['xvals']):
            self.proc_data_dict['intersect'] = \
                self.proc_data_dict['intersect_L']
        else:
            self.proc_data_dict['intersect'] = \
                self.proc_data_dict['intersect_R']
def prepare_plots(self):
self.plot_dicts['main'] = {
'plotfn': self.plot_line,
'xvals': self.proc_data_dict['xvals_A'],
'xlabel': self.proc_data_dict['xlabel'][0],
'xunit': self.proc_data_dict['xunit'][0][0],
'yvals': self.proc_data_dict['yvals_A'],
'ylabel': self.proc_data_dict['ylabel'],
'yunit': self.proc_data_dict['yunit'],
'setlabel': 'A',
'title': (self.proc_data_dict['timestamps'][0] + ' \n' +
self.proc_data_dict['measurementstring'][0]),
'do_legend': True,
'yrange': (0,1),
'legend_pos': 'upper right'}
self.plot_dicts['on'] = {
'plotfn': self.plot_line,
'ax_id': 'main',
'xvals': self.proc_data_dict['xvals_B'],
'xlabel': self.proc_data_dict['xlabel'][0],
'xunit': self.proc_data_dict['xunit'][0][0],
'yvals': self.proc_data_dict['yvals_B'],
'ylabel': self.proc_data_dict['ylabel'],
'yunit': self.proc_data_dict['yunit'],
'setlabel': 'B',
'do_legend': True,
'legend_pos': 'upper right'}
if self.do_fitting:
self.plot_dicts['line_fit_A'] = {
'ax_id': 'main',
'plotfn': self.plot_fit,
'fit_res': self.fit_dicts['line_fit_A']['fit_res'],
'plot_init': self.options_dict['plot_init'],
'setlabel': 'Fit A',
'do_legend': True}
self.plot_dicts['line_fit_B'] = {
'ax_id': 'main',
'plotfn': self.plot_fit,
'fit_res': self.fit_dicts['line_fit_B']['fit_res'],
'plot_init': self.options_dict['plot_init'],
'setlabel': 'Fit B',
'do_legend': True}
ic, ic_unit = SI_val_to_msg_str(
self.proc_data_dict['intersect'][0],
self.proc_data_dict['xunit'][0][0], return_type=float)
self.plot_dicts['intercept_message'] = {
'ax_id': 'main',
'plotfn': self.plot_line,
'xvals': [self.proc_data_dict['intersect'][0]],
'yvals': [self.proc_data_dict['intersect'][1]],
'line_kws': {'alpha': .5, 'color':'gray',
'markersize':15},
'marker': 'o',
'setlabel': 'Intercept: {:.1f} {}'.format(ic, ic_unit),
'do_legend': True}
def get_intersect(self):
return self.proc_data_dict['intersect']
class CZ_1QPhaseCal_Analysis(ba.BaseDataAnalysis):
"""
Analysis to extract the intercept for a single qubit phase calibration
experiment
N.B. this is a less generic version of "Intersect_Analysis" and should
be deprecated (MAR Dec 2017)
"""
def __init__(self, t_start: str=None, t_stop: str=None,
data_file_path: str=None,
options_dict: dict=None, extract_only: bool=False,
do_fitting: bool=True, auto=True):
super().__init__(t_start=t_start, t_stop=t_stop,
data_file_path=data_file_path,
options_dict=options_dict,
extract_only=extract_only, do_fitting=do_fitting)
self.single_timestamp = False
self.params_dict = {'xlabel': 'sweep_name',
'xunit': 'sweep_unit',
'xvals': 'sweep_points',
'measurementstring': 'measurementstring',
'value_names': 'value_names',
'value_units': 'value_units',
'measured_values': 'measured_values'}
self.numeric_params = []
if auto:
self.run_analysis()
def process_data(self):
"""
selects the relevant acq channel based on "ch_idx" in options dict and
        then splits the data for the CZ-off and CZ-on cases.
"""
self.proc_data_dict = OrderedDict()
# The channel containing the data must be specified in the options dict
ch_idx = self.options_dict['ch_idx']
yvals = list(self.raw_data_dict['measured_data'].values())[ch_idx][0]
self.proc_data_dict['ylabel'] = self.raw_data_dict['value_names'][0][ch_idx]
self.proc_data_dict['yunit'] = self.raw_data_dict['value_units'][0][ch_idx]
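        # The sweep interleaves the two cases: even-indexed points are taken
        # with the CZ gate off, odd-indexed points with the CZ gate on.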
self.proc_data_dict['xvals_off'] = self.raw_data_dict['xvals'][0][::2]
self.proc_data_dict['xvals_on'] = self.raw_data_dict['xvals'][0][1::2]
self.proc_data_dict['yvals_off'] = yvals[::2]
self.proc_data_dict['yvals_on'] = yvals[1::2]
def prepare_fitting(self):
self.fit_dicts = OrderedDict()
self.fit_dicts['line_fit_off'] = {
'model': lmfit.models.PolynomialModel(degree=1),
'fit_xvals': {'x': self.proc_data_dict['xvals_off']},
'fit_yvals': {'data': self.proc_data_dict['yvals_off']}}
self.fit_dicts['line_fit_on'] = {
'model': lmfit.models.PolynomialModel(degree=1),
'fit_xvals': {'x': self.proc_data_dict['xvals_on']},
'fit_yvals': {'data': self.proc_data_dict['yvals_on']}}
def analyze_fit_results(self):
fr_0 = self.fit_res['line_fit_off'].best_values
fr_1 = self.fit_res['line_fit_on'].best_values
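        # Intersection of the two linear fits: solving
        # c0_off + c1_off*x = c0_on + c1_on*x for x gives
        # x = -(c0_off - c0_on) / (c1_off - c1_on).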
ic = -(fr_0['c0'] - fr_1['c0'])/(fr_0['c1'] - fr_1['c1'])
self.proc_data_dict['zero_phase_diff_intersect'] = ic
def prepare_plots(self):
self.plot_dicts['main'] = {
'plotfn': self.plot_line,
'xvals': self.proc_data_dict['xvals_off'],
'xlabel': self.raw_data_dict['xlabel'][0],
'xunit': self.raw_data_dict['xunit'][0][0],
'yvals': self.proc_data_dict['yvals_off'],
'ylabel': self.proc_data_dict['ylabel'],
'yunit': self.proc_data_dict['yunit'],
'setlabel': 'CZ off',
'title': (self.raw_data_dict['timestamps'][0] + ' \n' +
self.raw_data_dict['measurementstring'][0]),
'do_legend': True,
'yrange': (0,1),
'legend_pos': 'upper right'}
self.plot_dicts['on'] = {
'plotfn': self.plot_line,
'ax_id': 'main',
'xvals': self.proc_data_dict['xvals_on'],
'xlabel': self.raw_data_dict['xlabel'][0],
'xunit': self.raw_data_dict['xunit'][0][0],
'yvals': self.proc_data_dict['yvals_on'],
'ylabel': self.proc_data_dict['ylabel'],
'yunit': self.proc_data_dict['yunit'],
'setlabel': 'CZ on',
'do_legend': True,
'legend_pos': 'upper right'}
if self.do_fitting:
self.plot_dicts['line_fit_off'] = {
'ax_id': 'main',
'plotfn': self.plot_fit,
'fit_res': self.fit_dicts['line_fit_off']['fit_res'],
'plot_init': self.options_dict['plot_init'],
'setlabel': 'Fit CZ off',
'do_legend': True}
self.plot_dicts['line_fit_on'] = {
'ax_id': 'main',
'plotfn': self.plot_fit,
'fit_res': self.fit_dicts['line_fit_on']['fit_res'],
'plot_init': self.options_dict['plot_init'],
'setlabel': 'Fit CZ on',
'do_legend': True}
ic, ic_unit = SI_val_to_msg_str(
self.proc_data_dict['zero_phase_diff_intersect'],
self.raw_data_dict['xunit'][0][0], return_type=float)
self.plot_dicts['intercept_message'] = {
'ax_id': 'main',
'plotfn': self.plot_line,
'xvals': [self.proc_data_dict['zero_phase_diff_intersect']],
'yvals': [np.mean(self.proc_data_dict['xvals_on'])],
'line_kws': {'alpha': 0},
'setlabel': 'Intercept: {:.1f} {}'.format(ic, ic_unit),
'do_legend': True}
def get_zero_phase_diff_intersect(self):
return self.proc_data_dict['zero_phase_diff_intersect']
class Oscillation_Analysis(ba.BaseDataAnalysis):
"""
Very basic analysis to determine the phase of a single oscillation
that has an assumed period of 360 degrees.
"""
def __init__(self, t_start: str=None, t_stop: str=None,
data_file_path: str=None,
label: str='',
options_dict: dict=None, extract_only: bool=False,
do_fitting: bool=True, auto=True):
super().__init__(t_start=t_start, t_stop=t_stop,
label=label,
data_file_path=data_file_path,
options_dict=options_dict,
extract_only=extract_only, do_fitting=do_fitting)
self.single_timestamp = False
self.params_dict = {'xlabel': 'sweep_name',
'xunit': 'sweep_unit',
'xvals': 'sweep_points',
'measurementstring': 'measurementstring',
'value_names': 'value_names',
'value_units': 'value_units',
'measured_values': 'measured_values'}
self.numeric_params = []
if auto:
self.run_analysis()
def process_data(self):
self.proc_data_dict = OrderedDict()
idx = 1
self.proc_data_dict['yvals'] = list(self.raw_data_dict['measured_data'].values())[idx][0]
self.proc_data_dict['ylabel'] = self.raw_data_dict['value_names'][0][idx]
self.proc_data_dict['yunit'] = self.raw_data_dict['value_units'][0][idx]
def prepare_fitting(self):
self.fit_dicts = OrderedDict()
cos_mod = fit_mods.CosModel
guess_pars = fit_mods.Cos_guess(
model=cos_mod, t=self.raw_data_dict['xvals'][0],
data=self.proc_data_dict['yvals'], freq_guess=1/360)
guess_pars['frequency'].value = 1/360
guess_pars['frequency'].vary = False
self.fit_dicts['cos_fit'] = {
'fit_fn': fit_mods.CosFunc,
'fit_xvals': {'t': self.raw_data_dict['xvals'][0]},
'fit_yvals': {'data': self.proc_data_dict['yvals']},
'guess_pars': guess_pars}
def analyze_fit_results(self):
fr = self.fit_res['cos_fit'].best_values
self.proc_data_dict['phi'] = np.rad2deg(fr['phase'])
def prepare_plots(self):
self.plot_dicts['main'] = {
'plotfn': self.plot_line,
'xvals': self.raw_data_dict['xvals'][0],
'xlabel': self.raw_data_dict['xlabel'][0],
'xunit': self.raw_data_dict['xunit'][0][0],
'yvals': self.proc_data_dict['yvals'],
'ylabel': self.proc_data_dict['ylabel'],
'yunit': self.proc_data_dict['yunit'],
'title': (self.raw_data_dict['timestamps'][0] + ' \n' +
self.raw_data_dict['measurementstring'][0]),
'do_legend': True,
# 'yrange': (0,1),
'legend_pos': 'upper right'}
if self.do_fitting:
self.plot_dicts['cos_fit'] = {
'ax_id': 'main',
'plotfn': self.plot_fit,
'fit_res': self.fit_dicts['cos_fit']['fit_res'],
'plot_init': self.options_dict['plot_init'],
'setlabel': 'Fit',
'do_legend': True}
class Conditional_Oscillation_Analysis(ba.BaseDataAnalysis):
"""
Analysis to extract quantities from a conditional oscillation.
"""
def __init__(self, t_start: str=None, t_stop: str=None,
data_file_path: str=None,
label: str='',
options_dict: dict=None, extract_only: bool=False,
do_fitting: bool=True, auto=True):
super().__init__(t_start=t_start, t_stop=t_stop,
label=label,
data_file_path=data_file_path,
options_dict=options_dict,
extract_only=extract_only, do_fitting=do_fitting)
self.single_timestamp = False
self.params_dict = {'xlabel': 'sweep_name',
'xunit': 'sweep_unit',
'xvals': 'sweep_points',
'measurementstring': 'measurementstring',
'value_names': 'value_names',
'value_units': 'value_units',
'measured_values': 'measured_values'}
self.numeric_params = []
if auto:
self.run_analysis()
def process_data(self):
"""
selects the relevant acq channel based on "ch_idx_osc" and
"ch_idx_spec" in the options dict and then splits the data for the
off and on cases
"""
self.proc_data_dict = OrderedDict()
# The channel containing the data must be specified in the options dict
ch_idx_spec = self.options_dict.get('ch_idx_spec', 0)
ch_idx_osc = self.options_dict.get('ch_idx_osc', 1)
normalize_to_cal_points = self.options_dict.get('normalize_to_cal_points', True)
cal_points = [
[[-4, -3], [-2, -1]],
[[-4, -2], [-3, -1]],
]
i = 0
for idx, type_str in zip([ch_idx_osc, ch_idx_spec], ['osc', 'spec']):
yvals = list(self.raw_data_dict['measured_data'].values())[idx][0]
self.proc_data_dict['ylabel_{}'.format(type_str)] = self.raw_data_dict['value_names'][0][idx]
self.proc_data_dict['yunit'] = self.raw_data_dict['value_units'][0][idx]
if normalize_to_cal_points:
yvals = a_tools.rotate_and_normalize_data_1ch(yvals,
cal_zero_points=cal_points[i][0],
cal_one_points=cal_points[i][1])
                i += 1
self.proc_data_dict['yvals_{}_off'.format(type_str)] = yvals[::2]
self.proc_data_dict['yvals_{}_on'.format(type_str)] = yvals[1::2]
self.proc_data_dict['xvals_off'] = self.raw_data_dict['xvals'][0][::2]
self.proc_data_dict['xvals_on'] = self.raw_data_dict['xvals'][0][1::2]
else:
self.proc_data_dict['yvals_{}_off'.format(type_str)] = yvals[::2]
self.proc_data_dict['yvals_{}_on'.format(type_str)] = yvals[1::2]
self.proc_data_dict['xvals_off'] = self.raw_data_dict['xvals'][0][::2]
self.proc_data_dict['xvals_on'] = self.raw_data_dict['xvals'][0][1::2]
def prepare_fitting(self):
self.fit_dicts = OrderedDict()
cos_mod = fit_mods.CosModel
guess_pars = fit_mods.Cos_guess(
model=cos_mod, t=self.proc_data_dict['xvals_off'][:-2],
data=self.proc_data_dict['yvals_osc_off'][:-2],
freq_guess=1/360)
guess_pars['frequency'].value = 1/360
guess_pars['frequency'].vary = False
self.fit_dicts['cos_fit_off'] = {
'fit_fn': fit_mods.CosFunc,
'fit_xvals': {'t': self.proc_data_dict['xvals_off'][:-2]},
'fit_yvals': {'data': self.proc_data_dict['yvals_osc_off'][:-2]},
'guess_pars': guess_pars}
cos_mod = fit_mods.CosModel
guess_pars = fit_mods.Cos_guess(
model=cos_mod, t=self.proc_data_dict['xvals_on'][:-2],
data=self.proc_data_dict['yvals_osc_on'][:-2],
freq_guess=1/360)
guess_pars['frequency'].value = 1/360
guess_pars['frequency'].vary = False
self.fit_dicts['cos_fit_on'] = {
'fit_fn': fit_mods.CosFunc,
'fit_xvals': {'t': self.proc_data_dict['xvals_on'][:-2]},
'fit_yvals': {'data': self.proc_data_dict['yvals_osc_on'][:-2]},
'guess_pars': guess_pars}
def analyze_fit_results(self):
fr_0 = self.fit_res['cos_fit_off'].params
fr_1 = self.fit_res['cos_fit_on'].params
phi0 = np.rad2deg(fr_0['phase'].value)
phi1 = np.rad2deg(fr_1['phase'].value)
phi0_stderr = np.rad2deg(fr_0['phase'].stderr)
phi1_stderr = np.rad2deg(fr_1['phase'].stderr)
self.proc_data_dict['phi_0'] = phi0, phi0_stderr
self.proc_data_dict['phi_1'] = phi1, phi1_stderr
phi_cond_stderr = (phi0_stderr**2+phi1_stderr**2)**.5
        self.proc_data_dict['phi_cond'] = (phi1 - phi0), phi_cond_stderr
        osc_amp = np.mean([fr_0['amplitude'], fr_1['amplitude']])
        osc_amp_stderr = np.sqrt(fr_0['amplitude'].stderr**2 +
                                 fr_1['amplitude'].stderr**2) / 2
self.proc_data_dict['osc_amp_0'] = (fr_0['amplitude'].value,
fr_0['amplitude'].stderr)
self.proc_data_dict['osc_amp_1'] = (fr_1['amplitude'].value,
fr_1['amplitude'].stderr)
self.proc_data_dict['osc_offs_0'] = (fr_0['offset'].value,
fr_0['offset'].stderr)
self.proc_data_dict['osc_offs_1'] = (fr_1['offset'].value,
fr_1['offset'].stderr)
offs_stderr = (fr_0['offset'].stderr**2+fr_1['offset'].stderr**2)**.5
self.proc_data_dict['offs_diff'] = (
fr_1['offset'].value - fr_0['offset'].value, offs_stderr)
# self.proc_data_dict['osc_amp'] = (osc_amp, osc_amp_stderr)
self.proc_data_dict['missing_fraction'] = (
np.mean(self.proc_data_dict['yvals_spec_on'][:-2]) -
np.mean(self.proc_data_dict['yvals_spec_off'][:-2]))
def prepare_plots(self):
self._prepare_main_oscillation_figure()
self._prepare_spectator_qubit_figure()
def _prepare_main_oscillation_figure(self):
self.plot_dicts['main'] = {
'plotfn': self.plot_line,
'xvals': self.proc_data_dict['xvals_off'],
'xlabel': self.raw_data_dict['xlabel'][0],
'xunit': self.raw_data_dict['xunit'][0][0],
'yvals': self.proc_data_dict['yvals_osc_off'],
'ylabel': self.proc_data_dict['ylabel_osc'],
'yunit': self.proc_data_dict['yunit'],
'setlabel': 'CZ off',
'title': (self.raw_data_dict['timestamps'][0] + ' \n' +
self.raw_data_dict['measurementstring'][0]),
'do_legend': True,
# 'yrange': (0,1),
'legend_pos': 'upper right'}
self.plot_dicts['on'] = {
'plotfn': self.plot_line,
'ax_id': 'main',
'xvals': self.proc_data_dict['xvals_on'],
'xlabel': self.raw_data_dict['xlabel'][0],
'xunit': self.raw_data_dict['xunit'][0][0],
'yvals': self.proc_data_dict['yvals_osc_on'],
'ylabel': self.proc_data_dict['ylabel_osc'],
'yunit': self.proc_data_dict['yunit'],
'setlabel': 'CZ on',
'do_legend': True,
'legend_pos': 'upper right'}
if self.do_fitting:
self.plot_dicts['cos_fit_off'] = {
'ax_id': 'main',
'plotfn': self.plot_fit,
'fit_res': self.fit_dicts['cos_fit_off']['fit_res'],
'plot_init': self.options_dict['plot_init'],
'setlabel': 'Fit CZ off',
'do_legend': True}
self.plot_dicts['cos_fit_on'] = {
'ax_id': 'main',
'plotfn': self.plot_fit,
'fit_res': self.fit_dicts['cos_fit_on']['fit_res'],
'plot_init': self.options_dict['plot_init'],
'setlabel': 'Fit CZ on',
'do_legend': True}
# offset as a guide for the eye
y = self.fit_res['cos_fit_off'].params['offset'].value
            self.plot_dicts['cos_off_offset'] = {
                'plotfn': self.plot_matplot_ax_method,
                'ax_id': 'main',
                'func': 'axhline',
                'plot_kws': {
                    'y': y, 'color': 'C0', 'linestyle': 'dotted'}}
phase_message = (
                'Phase diff.: {:.1f} $\\pm$ {:.1f} deg\n'
                'Phase off: {:.1f} $\\pm$ {:.1f} deg\n'
                'Phase on: {:.1f} $\\pm$ {:.1f} deg\n'
                'Osc. amp. off: {:.4f} $\\pm$ {:.4f}\n'
                'Osc. amp. on: {:.4f} $\\pm$ {:.4f}\n'
                'Offs. diff.: {:.4f} $\\pm$ {:.4f}\n'
                'Osc. offs. off: {:.4f} $\\pm$ {:.4f}\n'
                'Osc. offs. on: {:.4f} $\\pm$ {:.4f}'.format(
self.proc_data_dict['phi_cond'][0],
self.proc_data_dict['phi_cond'][1],
self.proc_data_dict['phi_0'][0],
self.proc_data_dict['phi_0'][1],
self.proc_data_dict['phi_1'][0],
self.proc_data_dict['phi_1'][1],
self.proc_data_dict['osc_amp_0'][0],
self.proc_data_dict['osc_amp_0'][1],
self.proc_data_dict['osc_amp_1'][0],
self.proc_data_dict['osc_amp_1'][1],
self.proc_data_dict['offs_diff'][0],
self.proc_data_dict['offs_diff'][1],
self.proc_data_dict['osc_offs_0'][0],
self.proc_data_dict['osc_offs_0'][1],
self.proc_data_dict['osc_offs_1'][0],
self.proc_data_dict['osc_offs_1'][1]))
self.plot_dicts['phase_message'] = {
'ax_id': 'main',
'ypos': 0.9,
'xpos': 1.45,
'plotfn': self.plot_text,
'box_props': 'fancy',
'line_kws': {'alpha': 0},
'text_string': phase_message}
def _prepare_spectator_qubit_figure(self):
self.plot_dicts['spectator_qubit'] = {
'plotfn': self.plot_line,
'xvals': self.proc_data_dict['xvals_off'],
'xlabel': self.raw_data_dict['xlabel'][0],
'xunit': self.raw_data_dict['xunit'][0][0],
'yvals': self.proc_data_dict['yvals_spec_off'],
'ylabel': self.proc_data_dict['ylabel_spec'],
'yunit': self.proc_data_dict['yunit'],
'setlabel': 'CZ off',
'title': (self.raw_data_dict['timestamps'][0] + ' \n' +
self.raw_data_dict['measurementstring'][0]),
'do_legend': True,
# 'yrange': (0,1),
'legend_pos': 'upper right'}
self.plot_dicts['spec_on'] = {
'plotfn': self.plot_line,
'ax_id': 'spectator_qubit',
'xvals': self.proc_data_dict['xvals_on'],
'xlabel': self.raw_data_dict['xlabel'][0],
'xunit': self.raw_data_dict['xunit'][0][0],
'yvals': self.proc_data_dict['yvals_spec_on'],
'ylabel': self.proc_data_dict['ylabel_spec'],
'yunit': self.proc_data_dict['yunit'],
'setlabel': 'CZ on',
'do_legend': True,
'legend_pos': 'upper right'}
if self.do_fitting:
leak_msg = (
'Missing fraction: {:.2f} % '.format(
self.proc_data_dict['missing_fraction']*100))
self.plot_dicts['leak_msg'] = {
'ax_id': 'spectator_qubit',
'ypos': 0.7,
'plotfn': self.plot_text,
'box_props': 'fancy',
'line_kws': {'alpha': 0},
'text_string': leak_msg}
# offset as a guide for the eye
y = self.fit_res['cos_fit_on'].params['offset'].value
            self.plot_dicts['cos_on_offset'] = {
                'plotfn': self.plot_matplot_ax_method,
                'ax_id': 'main',
                'func': 'axhline',
                'plot_kws': {
                    'y': y, 'color': 'C1', 'linestyle': 'dotted'}}
class StateTomographyAnalysis(ba.BaseDataAnalysis):
"""
Analyses the results of the state tomography experiment and calculates
the corresponding quantum state.
Possible options that can be passed in the options_dict parameter:
cal_points: A data structure specifying the indices of the calibration
points. See the AveragedTimedomainAnalysis for format.
The calibration points need to be in the same order as the
used basis for the result.
data_type: 'averaged' or 'singleshot'. For singleshot data each
measurement outcome is saved and arbitrary order correlations
between the states can be calculated.
meas_operators: (optional) A list of qutip operators or numpy 2d arrays.
This overrides the measurement operators otherwise
found from the calibration points.
covar_matrix: (optional) The covariance matrix of the measurement
operators as a 2d numpy array. Overrides the one found
from the calibration points.
use_covariance_matrix (bool): Flag to define whether to use the
covariance matrix
basis_rots_str: A list of standard PycQED pulse names that were
applied to qubits before measurement
basis_rots: As an alternative to single_qubit_pulses, the basis
rotations applied to the system as qutip operators or numpy
matrices can be given.
mle: True/False, whether to do maximum likelihood fit. If False, only
least squares fit will be done, which could give negative
eigenvalues for the density matrix.
imle: True/False, whether to do iterative maximum likelihood fit. If
True, it takes preference over maximum likelihood method. Otherwise
least squares fit will be done, then 'mle' option will be checked.
pauli_raw: True/False, extracts Pauli expected values from a measurement
without assignment correction based on calibration data. If True,
takes preference over other methods except pauli_corr.
pauli_values: True/False, extracts Pauli expected values from a
measurement with assignment correction based on calibration data.
If True, takes preference over other methods.
iterations (optional): maximum number of iterations allowed in imle.
Tomographies with more qubits require more iterations to converge.
tolerance (optional): minimum change across iterations allowed in imle.
The iteration will stop if it goes under this value. Tomographies
with more qubits require smaller tolerance to converge.
rho_target (optional): A qutip density matrix that the result will be
compared to when calculating fidelity.
"""
def __init__(self, *args, **kwargs):
auto = kwargs.pop('auto', True)
super().__init__(*args, **kwargs)
kwargs['auto'] = auto
self.single_timestamp = True
self.params_dict = {'exp_metadata': 'exp_metadata'}
self.numeric_params = []
self.data_type = self.options_dict['data_type']
if self.data_type == 'averaged':
self.base_analysis = AveragedTimedomainAnalysis(*args, **kwargs)
elif self.data_type == 'singleshot':
self.base_analysis = roa.MultiQubit_SingleShot_Analysis(
*args, **kwargs)
else:
raise KeyError("Invalid tomography data mode: '" + self.data_type +
"'. Valid modes are 'averaged' and 'singleshot'.")
if kwargs.get('auto', True):
self.run_analysis()
def process_data(self):
tomography_qubits = self.options_dict.get('tomography_qubits', None)
data, Fs, Omega = self.base_analysis.measurement_operators_and_results(
tomography_qubits)
if 'data_filter' in self.options_dict:
data = self.options_dict['data_filter'](data.T).T
data = data.T
for i, v in enumerate(data):
data[i] = v / v.sum()
data = data.T
Fs = self.options_dict.get('meas_operators', Fs)
Fs = [qtp.Qobj(F) for F in Fs]
d = Fs[0].shape[0]
self.proc_data_dict['d'] = d
Omega = self.options_dict.get('covar_matrix', Omega)
if Omega is None:
Omega = np.diag(np.ones(len(Fs)))
elif len(Omega.shape) == 1:
Omega = np.diag(Omega)
metadata = self.raw_data_dict.get('exp_metadata',
self.options_dict.get(
'exp_metadata', {}))
if metadata is None:
metadata = {}
self.raw_data_dict['exp_metadata'] = metadata
basis_rots_str = metadata.get('basis_rots_str', None)
basis_rots_str = self.options_dict.get('basis_rots_str', basis_rots_str)
if basis_rots_str is not None:
nr_qubits = int(np.round(np.log2(d)))
pulse_list = list(itertools.product(basis_rots_str,
repeat=nr_qubits))
rotations = tomo.standard_qubit_pulses_to_rotations(pulse_list)
else:
rotations = metadata.get('basis_rots', None)
rotations = self.options_dict.get('basis_rots', rotations)
if rotations is None:
raise KeyError("Either 'basis_rots_str' or 'basis_rots' "
"parameter must be passed in the options "
"dictionary or in the experimental metadata.")
rotations = [qtp.Qobj(U) for U in rotations]
all_Fs = tomo.rotated_measurement_operators(rotations, Fs)
        all_Fs = list(itertools.chain(*np.array(all_Fs, dtype=object).T))
all_mus = np.array(list(itertools.chain(*data.T)))
all_Omegas = sp.linalg.block_diag(*[Omega] * len(data[0]))
self.proc_data_dict['meas_operators'] = all_Fs
self.proc_data_dict['covar_matrix'] = all_Omegas
self.proc_data_dict['meas_results'] = all_mus
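        # all_Fs now holds one measurement operator per (basis rotation,
        # readout operator) combination, all_mus the matching flattened
        # measurement results and all_Omegas the block-diagonal covariance
        # matrix; these three quantities feed the reconstructions below.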
if self.options_dict.get('pauli_values', False):
            rho_pauli = tomo.pauli_values_tomography(all_mus, Fs,
                                                     basis_rots_str)
self.proc_data_dict['rho_raw'] = rho_pauli
self.proc_data_dict['rho'] = rho_pauli
elif self.options_dict.get('pauli_raw', False):
pauli_raw = self.generate_raw_pauli_set()
rho_raw = tomo.pauli_set_to_density_matrix(pauli_raw)
self.proc_data_dict['rho_raw'] = rho_raw
self.proc_data_dict['rho'] = rho_raw
elif self.options_dict.get('imle', False):
it = metadata.get('iterations', None)
it = self.options_dict.get('iterations', it)
tol = metadata.get('tolerance', None)
tol = self.options_dict.get('tolerance', tol)
rho_imle = tomo.imle_tomography(
all_mus, all_Fs, it, tol)
self.proc_data_dict['rho_imle'] = rho_imle
self.proc_data_dict['rho'] = rho_imle
else:
rho_ls = tomo.least_squares_tomography(
all_mus, all_Fs,
all_Omegas if self.get_param_value('use_covariance_matrix', False)
else None )
self.proc_data_dict['rho_ls'] = rho_ls
self.proc_data_dict['rho'] = rho_ls
if self.options_dict.get('mle', False):
rho_mle = tomo.mle_tomography(
all_mus, all_Fs,
all_Omegas if self.get_param_value('use_covariance_matrix', False) else None,
rho_guess=rho_ls)
self.proc_data_dict['rho_mle'] = rho_mle
self.proc_data_dict['rho'] = rho_mle
rho = self.proc_data_dict['rho']
self.proc_data_dict['purity'] = (rho * rho).tr().real
rho_target = metadata.get('rho_target', None)
rho_target = self.options_dict.get('rho_target', rho_target)
if rho_target is not None:
self.proc_data_dict['fidelity'] = tomo.fidelity(rho, rho_target)
if d == 4:
self.proc_data_dict['concurrence'] = tomo.concurrence(rho)
else:
self.proc_data_dict['concurrence'] = 0
def prepare_plots(self):
self.prepare_density_matrix_plot()
d = self.proc_data_dict['d']
if 2 ** (d.bit_length() - 1) == d:
# dimension is power of two, plot expectation values of pauli
# operators
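            # e.g. d = 4 (two qubits): 2**(3-1) == 4, so the Pauli plot is
            # made; d = 3 (a qutrit subspace) would be skipped.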
self.prepare_pauli_basis_plot()
def prepare_density_matrix_plot(self):
self.tight_fig = self.options_dict.get('tight_fig', False)
rho_target = self.raw_data_dict['exp_metadata'].get('rho_target', None)
rho_target = self.options_dict.get('rho_target', rho_target)
d = self.proc_data_dict['d']
xtick_labels = self.options_dict.get('rho_ticklabels', None)
ytick_labels = self.options_dict.get('rho_ticklabels', None)
if 2 ** (d.bit_length() - 1) == d:
nr_qubits = d.bit_length() - 1
fmt_string = '{{:0{}b}}'.format(nr_qubits)
labels = [fmt_string.format(i) for i in range(2 ** nr_qubits)]
if xtick_labels is None:
xtick_labels = ['$|' + lbl + r'\rangle$' for lbl in labels]
if ytick_labels is None:
ytick_labels = [r'$\langle' + lbl + '|$' for lbl in labels]
color = (0.5 * np.angle(self.proc_data_dict['rho'].full()) / np.pi) % 1.
cmap = self.options_dict.get('rho_colormap', self.default_phase_cmap())
if self.options_dict.get('pauli_raw', False):
title = 'Density matrix reconstructed from the Pauli (raw) set\n'
elif self.options_dict.get('pauli_values', False):
title = 'Density matrix reconstructed from the Pauli set\n'
elif self.options_dict.get('mle', False):
title = 'Maximum likelihood fit of the density matrix\n'
        elif self.options_dict.get('imle', False):
title = 'Iterative maximum likelihood fit of the density matrix\n'
else:
title = 'Least squares fit of the density matrix\n'
empty_artist = mpl.patches.Rectangle((0, 0), 0, 0, visible=False)
legend_entries = [(empty_artist,
r'Purity, $Tr(\rho^2) = {:.1f}\%$'.format(
100 * self.proc_data_dict['purity']))]
if rho_target is not None:
legend_entries += [
(empty_artist, r'Fidelity, $F = {:.1f}\%$'.format(
100 * self.proc_data_dict['fidelity']))]
if d == 4:
legend_entries += [
(empty_artist, r'Concurrence, $C = {:.2f}$'.format(
self.proc_data_dict['concurrence']))]
meas_string = self.base_analysis.\
raw_data_dict['measurementstring']
if isinstance(meas_string, list):
if len(meas_string) > 1:
meas_string = meas_string[0] + ' to ' + meas_string[-1]
else:
meas_string = meas_string[0]
self.plot_dicts['density_matrix'] = {
'plotfn': self.plot_bar3D,
'3d': True,
'3d_azim': -35,
'3d_elev': 35,
'xvals': np.arange(d),
'yvals': np.arange(d),
'zvals': np.abs(self.proc_data_dict['rho'].full()),
'zrange': (0, 1),
'color': color,
'colormap': cmap,
'bar_widthx': 0.5,
'bar_widthy': 0.5,
'xtick_loc': np.arange(d),
'xtick_labels': xtick_labels,
'ytick_loc': np.arange(d),
'ytick_labels': ytick_labels,
'ctick_loc': np.linspace(0, 1, 5),
'ctick_labels': ['$0$', r'$\frac{1}{2}\pi$', r'$\pi$',
r'$\frac{3}{2}\pi$', r'$2\pi$'],
'clabel': 'Phase (rad)',
'title': (title + self.raw_data_dict['timestamp'] + ' ' +
meas_string),
'do_legend': True,
'legend_entries': legend_entries,
'legend_kws': dict(loc='upper left', bbox_to_anchor=(0, 0.94))
}
if rho_target is not None:
rho_target = qtp.Qobj(rho_target)
if rho_target.type == 'ket':
rho_target = rho_target * rho_target.dag()
elif rho_target.type == 'bra':
rho_target = rho_target.dag() * rho_target
self.plot_dicts['density_matrix_target'] = {
'plotfn': self.plot_bar3D,
'3d': True,
'3d_azim': -35,
'3d_elev': 35,
'xvals': np.arange(d),
'yvals': np.arange(d),
'zvals': np.abs(rho_target.full()),
'zrange': (0, 1),
'color': (0.5 * np.angle(rho_target.full()) / np.pi) % 1.,
'colormap': cmap,
'bar_widthx': 0.5,
'bar_widthy': 0.5,
'xtick_loc': np.arange(d),
'xtick_labels': xtick_labels,
'ytick_loc': np.arange(d),
'ytick_labels': ytick_labels,
'ctick_loc': np.linspace(0, 1, 5),
'ctick_labels': ['$0$', r'$\frac{1}{2}\pi$', r'$\pi$',
r'$\frac{3}{2}\pi$', r'$2\pi$'],
'clabel': 'Phase (rad)',
'title': ('Target density matrix\n' +
self.raw_data_dict['timestamp'] + ' ' +
meas_string),
'bar_kws': dict(zorder=1),
}
def generate_raw_pauli_set(self):
nr_qubits = self.proc_data_dict['d'].bit_length() - 1
pauli_raw_values = []
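        # For each Pauli operator P, average the measured values of all
        # measurement operators F with Tr(F*P) != 0, signed by that trace,
        # and rescale by 2**nr_qubits. One-qubit sketch (illustrative): for
        # P = Z with F0 = |0><0| and F1 = |1><1|, Tr(F0*Z) = +1 and
        # Tr(F1*Z) = -1, so <Z> ~ 2*(p0 - p1)/2 = p0 - p1.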
for op in tomo.generate_pauli_set(nr_qubits)[1]:
nr_terms = 0
sum_terms = 0.
for meas_op, meas_res in zip(self.proc_data_dict['meas_operators'],
self.proc_data_dict['meas_results']):
trace = (meas_op*op).tr().real
clss = int(trace*2)
if clss < 0:
sum_terms -= meas_res
nr_terms += 1
elif clss > 0:
sum_terms += meas_res
nr_terms += 1
pauli_raw_values.append(2**nr_qubits*sum_terms/nr_terms)
return pauli_raw_values
    def generate_corr_pauli_set(self, Fs, rotations):
nr_qubits = self.proc_data_dict['d'].bit_length() - 1
Fs_corr = []
assign_corr = []
        for i, F in enumerate(Fs):
new_op = np.zeros(2**nr_qubits)
new_op[i] = 1
Fs_corr.append(qtp.Qobj(np.diag(new_op)))
assign_corr.append(np.diag(F.full()))
pauli_Fs = tomo.rotated_measurement_operators(rotations, Fs_corr)
        pauli_Fs = list(itertools.chain(*np.array(pauli_Fs, dtype=object).T))
mus = self.proc_data_dict['meas_results']
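        # Assignment-error correction (sketch of the assumption used here):
        # the diagonals of the calibrated measurement operators form an
        # assignment matrix, and each block of 2**nr_qubits averaged results
        # is multiplied by its inverse before the Pauli expectation values
        # are computed.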
        pauli_mus = np.reshape(mus, [-1, 2**nr_qubits])
        for i, raw_mus in enumerate(pauli_mus):
            pauli_mus[i] = np.matmul(np.linalg.inv(assign_corr),
                                     np.array(raw_mus))
pauli_mus = pauli_mus.flatten()
pauli_values = []
for op in tomo.generate_pauli_set(nr_qubits)[1]:
nr_terms = 0
sum_terms = 0.
for meas_op, meas_res in zip(pauli_Fs,pauli_mus):
trace = (meas_op*op).tr().real
clss = int(trace*2)
if clss < 0:
sum_terms -= meas_res
nr_terms += 1
elif clss > 0:
sum_terms += meas_res
nr_terms += 1
pauli_values.append(2**nr_qubits*sum_terms/nr_terms)
return pauli_values
def prepare_pauli_basis_plot(self):
yexp = tomo.density_matrix_to_pauli_basis(self.proc_data_dict['rho'])
nr_qubits = self.proc_data_dict['d'].bit_length() - 1
labels = list(itertools.product(*[['I', 'X', 'Y', 'Z']]*nr_qubits))
labels = [''.join(label_list) for label_list in labels]
if nr_qubits == 1:
order = [1, 2, 3]
elif nr_qubits == 2:
order = [1, 2, 3, 4, 8, 12, 5, 6, 7, 9, 10, 11, 13, 14, 15]
elif nr_qubits == 3:
order = [1, 2, 3, 4, 8, 12, 16, 32, 48] + \
[5, 6, 7, 9, 10, 11, 13, 14, 15] + \
[17, 18, 19, 33, 34, 35, 49, 50, 51] + \
[20, 24, 28, 36, 40, 44, 52, 56, 60] + \
[21, 22, 23, 25, 26, 27, 29, 30, 31] + \
[37, 38, 39, 41, 42, 43, 45, 46, 47] + \
[53, 54, 55, 57, 58, 59, 61, 62, 63]
else:
order = np.arange(4**nr_qubits)[1:]
if self.options_dict.get('pauli_raw', False):
fit_type = 'raw counts'
elif self.options_dict.get('pauli_values', False):
fit_type = 'corrected counts'
elif self.options_dict.get('mle', False):
fit_type = 'maximum likelihood estimation'
elif self.options_dict.get('imle', False):
fit_type = 'iterative maximum likelihood estimation'
else:
fit_type = 'least squares fit'
meas_string = self.base_analysis. \
raw_data_dict['measurementstring']
if np.ndim(meas_string) > 0:
if len(meas_string) > 1:
meas_string = meas_string[0] + ' to ' + meas_string[-1]
else:
meas_string = meas_string[0]
self.plot_dicts['pauli_basis'] = {
'plotfn': self.plot_bar,
'xcenters': np.arange(len(order)),
'xwidth': 0.4,
'xrange': (-1, len(order)),
'yvals': np.array(yexp)[order],
'xlabel': r'Pauli operator, $\hat{O}$',
'ylabel': r'Expectation value, $\mathrm{Tr}(\hat{O} \hat{\rho})$',
'title': 'Pauli operators, ' + fit_type + '\n' +
self.raw_data_dict['timestamp'] + ' ' + meas_string,
'yrange': (-1.1, 1.1),
'xtick_loc': np.arange(4**nr_qubits - 1),
'xtick_rotation': 90,
'xtick_labels': np.array(labels)[order],
'bar_kws': dict(zorder=10),
'setlabel': 'Fit to experiment',
'do_legend': True
}
if nr_qubits > 2:
self.plot_dicts['pauli_basis']['plotsize'] = (10, 5)
rho_target = self.raw_data_dict['exp_metadata'].get('rho_target', None)
rho_target = self.options_dict.get('rho_target', rho_target)
if rho_target is not None:
rho_target = qtp.Qobj(rho_target)
ytar = tomo.density_matrix_to_pauli_basis(rho_target)
self.plot_dicts['pauli_basis_target'] = {
'plotfn': self.plot_bar,
'ax_id': 'pauli_basis',
'xcenters': np.arange(len(order)),
'xwidth': 0.8,
'yvals': np.array(ytar)[order],
'xtick_loc': np.arange(len(order)),
'xtick_labels': np.array(labels)[order],
'bar_kws': dict(color='0.8', zorder=0),
'setlabel': 'Target values',
'do_legend': True
}
purity_str = r'Purity, $Tr(\rho^2) = {:.1f}\%$'.format(
100 * self.proc_data_dict['purity'])
if rho_target is not None:
fidelity_str = '\n' + r'Fidelity, $F = {:.1f}\%$'.format(
100 * self.proc_data_dict['fidelity'])
else:
fidelity_str = ''
if self.proc_data_dict['d'] == 4:
concurrence_str = '\n' + r'Concurrence, $C = {:.1f}\%$'.format(
100 * self.proc_data_dict['concurrence'])
else:
concurrence_str = ''
self.plot_dicts['pauli_info_labels'] = {
'ax_id': 'pauli_basis',
'plotfn': self.plot_line,
'xvals': [0],
'yvals': [0],
'line_kws': {'alpha': 0},
'setlabel': purity_str + fidelity_str,
'do_legend': True
}
def default_phase_cmap(self):
cols = np.array(((41, 39, 231), (61, 130, 163), (208, 170, 39),
(209, 126, 4), (181, 28, 20), (238, 76, 152),
(251, 130, 242), (162, 112, 251))) / 255
n = len(cols)
cdict = {
'red': [[i/n, cols[i%n][0], cols[i%n][0]] for i in range(n+1)],
'green': [[i/n, cols[i%n][1], cols[i%n][1]] for i in range(n+1)],
'blue': [[i/n, cols[i%n][2], cols[i%n][2]] for i in range(n+1)],
}
return mpl.colors.LinearSegmentedColormap('DMDefault', cdict)
class ReadoutROPhotonsAnalysis(Single_Qubit_TimeDomainAnalysis):
"""
Analyses the photon number in the RO based on the
readout_photons_in_resonator function
function specific options for options dict:
    kappa_effective (required)
    chi (required)
    T2echo (required)
    f_qubit
    artif_detuning (optional, default 0)
    print_fit_results (optional)
"""
def __init__(self, t_start: str=None, t_stop: str=None,
label: str='', data_file_path: str=None,
close_figs: bool=False, options_dict: dict=None,
extract_only: bool=False, do_fitting: bool=False,
auto: bool=True):
super().__init__(t_start=t_start, t_stop=t_stop,
data_file_path=data_file_path,
options_dict=options_dict,
close_figs=close_figs, label=label,
extract_only=extract_only, do_fitting=do_fitting)
if self.options_dict.get('TwoD', None) is None:
self.options_dict['TwoD'] = True
self.label = label
self.params_dict = {
'measurementstring': 'measurementstring',
'sweep_points': 'sweep_points',
'sweep_points_2D': 'sweep_points_2D',
'value_names': 'value_names',
'value_units': 'value_units',
'measured_values': 'measured_values'}
self.numeric_params = self.options_dict.get('numeric_params',
OrderedDict())
self.kappa = self.options_dict.get('kappa_effective', None)
self.chi = self.options_dict.get('chi', None)
self.T2 = self.options_dict.get('T2echo', None)
self.artif_detuning = self.options_dict.get('artif_detuning', 0)
if (self.kappa is None) or (self.chi is None) or (self.T2 is None):
raise ValueError('kappa_effective, chi and T2echo must be passed to '
'the options_dict.')
if auto:
self.run_analysis()
def process_data(self):
self.proc_data_dict = OrderedDict()
self.proc_data_dict['qubit_state'] = [[],[]]
self.proc_data_dict['delay_to_relax'] = self.raw_data_dict[
'sweep_points_2D'][0]
self.proc_data_dict['ramsey_times'] = []
for i,x in enumerate(np.transpose(self.raw_data_dict[
'measured_data']['raw w0 _measure'][0])):
self.proc_data_dict['qubit_state'][0].append([])
self.proc_data_dict['qubit_state'][1].append([])
for j,y in enumerate(np.transpose(self.raw_data_dict[
'measured_data']['raw w0 _measure'][0])[i]):
if j%2 == 0:
self.proc_data_dict['qubit_state'][0][i].append(y)
else:
self.proc_data_dict['qubit_state'][1][i].append(y)
for i,x in enumerate( self.raw_data_dict['sweep_points'][0]):
if i % 2 == 0:
self.proc_data_dict['ramsey_times'].append(x)
        # TODO: chi still needs to be passed here explicitly.
def prepare_fitting(self):
self.proc_data_dict['photon_number'] = [[],[]]
self.proc_data_dict['fit_results'] = []
self.proc_data_dict['ramsey_fit_results'] = [[],[]]
for i,tau in enumerate(self.proc_data_dict['delay_to_relax']):
            self.proc_data_dict['ramsey_fit_results'][0].append(self.fit_Ramsey(
                self.proc_data_dict['ramsey_times'][:-4],
                np.array(self.proc_data_dict['qubit_state'][0][i][:-4]) /
                max(self.proc_data_dict['qubit_state'][0][i][:-4]),
                state=0,
                kw=self.options_dict))
            self.proc_data_dict['ramsey_fit_results'][1].append(self.fit_Ramsey(
                self.proc_data_dict['ramsey_times'][:-4],
                np.array(self.proc_data_dict['qubit_state'][1][i][:-4]) /
                max(self.proc_data_dict['qubit_state'][1][i][:-4]),
                state=1,
                kw=self.options_dict))
n01 = self.proc_data_dict['ramsey_fit_results'
][0][i][0].params['n0'].value
n02 = self.proc_data_dict['ramsey_fit_results'
][1][i][0].params['n0'].value
self.proc_data_dict['photon_number'][0].append(n01)
self.proc_data_dict['photon_number'][1].append(n02)
def run_fitting(self):
        print_fit_results = self.options_dict.get('print_fit_results', False)
exp_dec_mod = lmfit.Model(fit_mods.ExpDecayFunc)
exp_dec_mod.set_param_hint('n',
value=1,
vary=False)
exp_dec_mod.set_param_hint('offset',
value=0,
min=0,
vary=True)
exp_dec_mod.set_param_hint('tau',
value=self.proc_data_dict[
'delay_to_relax'][-1],
min=1e-11,
vary=True)
exp_dec_mod.set_param_hint('amplitude',
value=1,
min=0,
vary=True)
params = exp_dec_mod.make_params()
self.fit_res = OrderedDict()
self.fit_res['ground_state'] = exp_dec_mod.fit(
data=self.proc_data_dict['photon_number'][0],
params=params,
t=self.proc_data_dict['delay_to_relax'])
self.fit_res['excited_state'] = exp_dec_mod.fit(
data=self.proc_data_dict['photon_number'][1],
params=params,
t=self.proc_data_dict['delay_to_relax'])
if print_fit_results:
print(self.fit_res['ground_state'].fit_report())
print(self.fit_res['excited_state'].fit_report())
def fit_Ramsey(self, x, y, state, **kw):
x = np.array(x)
y = np.array(y)
exp_dec_p_mod = lmfit.Model(fit_mods.ExpDecayPmod)
comb_exp_dec_mod = lmfit.Model(fit_mods.CombinedOszExpDecayFunc)
average = np.mean(y)
ft_of_data = np.fft.fft(y)
index_of_fourier_maximum = np.argmax(np.abs(
ft_of_data[1:len(ft_of_data) // 2])) + 1
max_ramsey_delay = x[-1] - x[0]
fft_axis_scaling = 1 / max_ramsey_delay
freq_est = fft_axis_scaling * index_of_fourier_maximum
n_est = (freq_est-self.artif_detuning)/(2 * self.chi)
exp_dec_p_mod.set_param_hint('T2echo',
value=self.T2,
vary=False)
exp_dec_p_mod.set_param_hint('offset',
value=average,
min=0,
vary=True)
exp_dec_p_mod.set_param_hint('delta',
value=self.artif_detuning,
vary=False)
exp_dec_p_mod.set_param_hint('amplitude',
value=1,
min=0,
vary=True)
exp_dec_p_mod.set_param_hint('kappa',
value=self.kappa[state],
vary=False)
exp_dec_p_mod.set_param_hint('chi',
value=self.chi,
vary=False)
exp_dec_p_mod.set_param_hint('n0',
value=n_est,
min=0,
vary=True)
exp_dec_p_mod.set_param_hint('phase',
value=0,
vary=True)
comb_exp_dec_mod.set_param_hint('tau',
value=self.T2,
vary=True)
comb_exp_dec_mod.set_param_hint('offset',
value=average,
min=0,
vary=True)
comb_exp_dec_mod.set_param_hint('oscillation_offset',
value=average,
min=0,
vary=True)
comb_exp_dec_mod.set_param_hint('amplitude',
value=1,
min=0,
vary=True)
comb_exp_dec_mod.set_param_hint('tau_gauss',
value=self.kappa[state],
vary=True)
comb_exp_dec_mod.set_param_hint('n0',
value=n_est,
min=0,
vary=True)
comb_exp_dec_mod.set_param_hint('phase',
value=0,
vary=True)
comb_exp_dec_mod.set_param_hint('delta',
value=self.artif_detuning,
vary=False)
comb_exp_dec_mod.set_param_hint('chi',
value=self.chi,
vary=False)
if (np.average(y[:4]) >
np.average(y[4:8])):
phase_estimate = 0
else:
phase_estimate = np.pi
exp_dec_p_mod.set_param_hint('phase',
value=phase_estimate, vary=True)
comb_exp_dec_mod.set_param_hint('phase',
value=phase_estimate, vary=True)
amplitude_guess = 0.5
if np.all(np.logical_and(y >= 0, y <= 1)):
exp_dec_p_mod.set_param_hint('amplitude',
value=amplitude_guess,
min=0.00,
max=4.0,
vary=True)
comb_exp_dec_mod.set_param_hint('amplitude',
value=amplitude_guess,
min=0.00,
max=4.0,
vary=True)
else:
print('data is not normalized, varying amplitude')
exp_dec_p_mod.set_param_hint('amplitude',
value=max(y),
min=0.00,
max=4.0,
vary=True)
comb_exp_dec_mod.set_param_hint('amplitude',
value=max(y),
min=0.00,
max=4.0,
vary=True)
fit_res_1 = exp_dec_p_mod.fit(data=y,
t=x,
params= exp_dec_p_mod.make_params())
fit_res_2 = comb_exp_dec_mod.fit(data=y,
t=x,
params= comb_exp_dec_mod.make_params())
if fit_res_1.chisqr > .35:
log.warning('Fit did not converge, varying phase')
fit_res_lst = []
for phase_estimate in np.linspace(0, 2*np.pi, 10):
for i, del_amp in enumerate(np.linspace(
-max(y)/10, max(y)/10, 10)):
exp_dec_p_mod.set_param_hint('phase',
value=phase_estimate,
vary=False)
exp_dec_p_mod.set_param_hint('amplitude',
value=max(y)+ del_amp)
fit_res_lst += [exp_dec_p_mod.fit(
data=y,
t=x,
params= exp_dec_p_mod.make_params())]
            chisqr_lst = [fr.chisqr for fr in fit_res_lst]
fit_res_1 = fit_res_lst[np.argmin(chisqr_lst)]
if fit_res_2.chisqr > .35:
log.warning('Fit did not converge, varying phase')
fit_res_lst = []
for phase_estimate in np.linspace(0, 2*np.pi, 10):
for i, del_amp in enumerate(np.linspace(
-max(y)/10, max(y)/10, 10)):
comb_exp_dec_mod.set_param_hint('phase',
value=phase_estimate,
vary=False)
comb_exp_dec_mod.set_param_hint('amplitude',
value=max(y)+ del_amp)
fit_res_lst += [comb_exp_dec_mod.fit(
data=y,
t=x,
params= comb_exp_dec_mod.make_params())]
            chisqr_lst = [fr.chisqr for fr in fit_res_lst]
fit_res_2 = fit_res_lst[np.argmin(chisqr_lst)]
if fit_res_1.chisqr < fit_res_2.chisqr:
self.proc_data_dict['params'] = exp_dec_p_mod.make_params()
return [fit_res_1,fit_res_1,fit_res_2]
else:
self.proc_data_dict['params'] = comb_exp_dec_mod.make_params()
return [fit_res_2,fit_res_1,fit_res_2]
def prepare_plots(self):
self.prepare_2D_sweep_plot()
self.prepare_photon_number_plot()
self.prepare_ramsey_plots()
def prepare_2D_sweep_plot(self):
self.plot_dicts['off_full_data_'+self.label] = {
'title': 'Raw data |g>',
'plotfn': self.plot_colorxy,
'xvals': self.proc_data_dict['ramsey_times'],
'xlabel': 'Ramsey delays',
'xunit': 's',
'yvals': self.proc_data_dict['delay_to_relax'],
'ylabel': 'Delay after first RO-pulse',
'yunit': 's',
'zvals': np.array(self.proc_data_dict['qubit_state'][0]) }
self.plot_dicts['on_full_data_'+self.label] = {
'title': 'Raw data |e>',
'plotfn': self.plot_colorxy,
'xvals': self.proc_data_dict['ramsey_times'],
'xlabel': 'Ramsey delays',
'xunit': 's',
'yvals': self.proc_data_dict['delay_to_relax'],
'ylabel': 'Delay after first RO-pulse',
'yunit': 's',
'zvals': np.array(self.proc_data_dict['qubit_state'][1]) }
def prepare_ramsey_plots(self):
x_fit = np.linspace(self.proc_data_dict['ramsey_times'][0],
max(self.proc_data_dict['ramsey_times']),101)
for i in range(len(self.proc_data_dict['ramsey_fit_results'][0])):
self.plot_dicts['off_'+str(i)] = {
'title': 'Ramsey w t_delay = '+\
str(self.proc_data_dict['delay_to_relax'][i])+ \
' s, in |g> state',
'ax_id':'ramsey_off_'+str(i),
'plotfn': self.plot_line,
'xvals': self.proc_data_dict['ramsey_times'],
'xlabel': 'Ramsey delays',
'xunit': 's',
                'yvals': np.array(self.proc_data_dict['qubit_state'][0][i]) /
                         max(self.proc_data_dict['qubit_state'][0][i][:-4]),
'ylabel': 'Measured qubit state',
'yunit': '',
'marker': 'o',
'setlabel': '|g> data_'+str(i),
'do_legend': True }
self.plot_dicts['off_fit_'+str(i)] = {
'title': 'Ramsey w t_delay = '+ \
str(self.proc_data_dict['delay_to_relax'][i])+ \
' s, in |g> state',
'ax_id':'ramsey_off_'+str(i),
'plotfn': self.plot_line,
'xvals': x_fit,
'yvals': self.proc_data_dict['ramsey_fit_results'][0][i][1].eval(
self.proc_data_dict['ramsey_fit_results'][0][i][1].params,
t=x_fit),
'linestyle': '-',
'marker': '',
'setlabel': '|g> fit_model'+str(i),
'do_legend': True }
self.plot_dicts['off_fit_2_'+str(i)] = {
'title': 'Ramsey w t_delay = '+ \
str(self.proc_data_dict['delay_to_relax'][i])+ \
' s, in |g> state',
'ax_id':'ramsey_off_'+str(i),
'plotfn': self.plot_line,
'xvals': x_fit,
'yvals': self.proc_data_dict['ramsey_fit_results'][0][i][2].eval(
self.proc_data_dict['ramsey_fit_results'][0][i][2].params,
t=x_fit),
'linestyle': '-',
'marker': '',
                'setlabel': '|g> fit_simple_model'+str(i),
'do_legend': True }
self.plot_dicts['hidden_g_'+str(i)] = {
'ax_id':'ramsey_off_'+str(i),
'plotfn': self.plot_line,
'xvals': [0],
'yvals': [0],
'color': 'w',
'setlabel': 'Residual photon count = '
''+str(self.proc_data_dict['photon_number'][0][i]),
'do_legend': True }
self.plot_dicts['on_'+str(i)] = {
'title': 'Ramsey w t_delay = '+ \
str(self.proc_data_dict['delay_to_relax'][i])+ \
' s, in |e> state',
'ax_id':'ramsey_on_'+str(i),
'plotfn': self.plot_line,
'xvals': self.proc_data_dict['ramsey_times'],
'xlabel': 'Ramsey delays',
'xunit': 's',
                'yvals': np.array(self.proc_data_dict['qubit_state'][1][i]) /
                         max(self.proc_data_dict['qubit_state'][1][i][:-4]),
'ylabel': 'Measured qubit state',
'yunit': '',
'marker': 'o',
'setlabel': '|e> data_'+str(i),
'do_legend': True }
self.plot_dicts['on_fit_'+str(i)] = {
'title': 'Ramsey w t_delay = '+ \
str(self.proc_data_dict['delay_to_relax'][i])+ \
' s, in |e> state',
'ax_id':'ramsey_on_'+str(i),
'plotfn': self.plot_line,
'xvals': x_fit,
'yvals': self.proc_data_dict['ramsey_fit_results'][1][i][1].eval(
self.proc_data_dict['ramsey_fit_results'][1][i][1].params,
t=x_fit),
'linestyle': '-',
'marker': '',
'setlabel': '|e> fit_model'+str(i),
'do_legend': True }
self.plot_dicts['on_fit_2_'+str(i)] = {
'title': 'Ramsey w t_delay = '+ \
str(self.proc_data_dict['delay_to_relax'][i])+ \
' s, in |e> state',
'ax_id':'ramsey_on_'+str(i),
'plotfn': self.plot_line,
'xvals': x_fit,
'yvals': self.proc_data_dict['ramsey_fit_results'][1][i][2].eval(
self.proc_data_dict['ramsey_fit_results'][1][i][2].params,
t=x_fit),
'linestyle': '-',
'marker': '',
                'setlabel': '|e> fit_simple_model'+str(i),
'do_legend': True }
self.plot_dicts['hidden_e_'+str(i)] = {
'ax_id':'ramsey_on_'+str(i),
'plotfn': self.plot_line,
'xvals': [0],
'yvals': [0],
'color': 'w',
'setlabel': 'Residual photon count = '
''+str(self.proc_data_dict['photon_number'][1][i]),
'do_legend': True }
def prepare_photon_number_plot(self):
ylabel = 'Average photon number'
yunit = ''
x_fit = np.linspace(min(self.proc_data_dict['delay_to_relax']),
max(self.proc_data_dict['delay_to_relax']),101)
minmax_data = [min(min(self.proc_data_dict['photon_number'][0]),
min(self.proc_data_dict['photon_number'][1])),
max(max(self.proc_data_dict['photon_number'][0]),
max(self.proc_data_dict['photon_number'][1]))]
minmax_data[0] -= minmax_data[0]/5
minmax_data[1] += minmax_data[1]/5
self.plot_dicts['Photon number count'] = {
'plotfn': self.plot_line,
'xlabel': 'Delay after first RO-pulse',
'ax_id': 'Photon number count ',
'xunit': 's',
'xvals': self.proc_data_dict['delay_to_relax'],
'yvals': self.proc_data_dict['photon_number'][0],
'ylabel': ylabel,
'yunit': yunit,
'yrange': minmax_data,
'title': 'Residual photon number',
'color': 'b',
'linestyle': '',
'marker': 'o',
'setlabel': '|g> data',
'func': 'semilogy',
'do_legend': True}
self.plot_dicts['main2'] = {
'plotfn': self.plot_line,
'xunit': 's',
'xvals': x_fit,
'yvals': self.fit_res['ground_state'].eval(
self.fit_res['ground_state'].params,
t=x_fit),
'yrange': minmax_data,
'ax_id': 'Photon number count ',
'color': 'b',
'linestyle': '-',
'marker': '',
'setlabel': '|g> fit',
'func': 'semilogy',
'do_legend': True}
self.plot_dicts['main3'] = {
'plotfn': self.plot_line,
'xunit': 's',
'xvals': self.proc_data_dict['delay_to_relax'],
'yvals': self.proc_data_dict['photon_number'][1],
'yrange': minmax_data,
'ax_id': 'Photon number count ',
'color': 'r',
'linestyle': '',
'marker': 'o',
'setlabel': '|e> data',
'func': 'semilogy',
'do_legend': True}
self.plot_dicts['main4'] = {
'plotfn': self.plot_line,
'xunit': 's',
'ax_id': 'Photon number count ',
'xvals': x_fit,
'yvals': self.fit_res['excited_state'].eval(
self.fit_res['excited_state'].params,
t=x_fit),
'yrange': minmax_data,
'ylabel': ylabel,
'color': 'r',
'linestyle': '-',
'marker': '',
'setlabel': '|e> fit',
'func': 'semilogy',
'do_legend': True}
self.plot_dicts['hidden_1'] = {
'ax_id': 'Photon number count ',
'plotfn': self.plot_line,
'yrange': minmax_data,
'xvals': [0],
'yvals': [0],
'color': 'w',
'setlabel': 'tau_g = '
''+str("%.3f" %
(self.fit_res['ground_state'].params['tau'].value*1e9))+''
' ns',
'do_legend': True }
self.plot_dicts['hidden_2'] = {
'ax_id': 'Photon number count ',
'plotfn': self.plot_line,
'yrange': minmax_data,
'xvals': [0],
'yvals': [0],
'color': 'w',
'setlabel': 'tau_e = '
''+str("%.3f" %
(self.fit_res['excited_state'].params['tau'].value*1e9))+''
' ns',
'do_legend': True}
class RODynamicPhaseAnalysis(MultiQubit_TimeDomain_Analysis):
def __init__(self, qb_names: list=None, t_start: str=None, t_stop: str=None,
data_file_path: str=None, single_timestamp: bool=False,
options_dict: dict=None, extract_only: bool=False,
do_fitting: bool=True, auto=True):
super().__init__(qb_names=qb_names, t_start=t_start, t_stop=t_stop,
data_file_path=data_file_path,
options_dict=options_dict,
extract_only=extract_only,
do_fitting=do_fitting,
auto=False)
if auto:
self.run_analysis()
def process_data(self):
super().process_data()
if 'qbp_name' in self.metadata:
self.pulsed_qbname = self.metadata['qbp_name']
else:
self.pulsed_qbname = self.options_dict.get('pulsed_qbname')
self.measured_qubits = [qbn for qbn in self.channel_map if
qbn != self.pulsed_qbname]
def prepare_fitting(self):
self.fit_dicts = OrderedDict()
for qbn in self.measured_qubits:
ro_dict = self.proc_data_dict['projected_data_dict'][qbn]
sweep_points = self.proc_data_dict['sweep_points_dict'][qbn][
'msmt_sweep_points']
for ro_suff, data in ro_dict.items():
cos_mod = lmfit.Model(fit_mods.CosFunc)
if self.num_cal_points != 0:
data = data[:-self.num_cal_points]
guess_pars = fit_mods.Cos_guess(
model=cos_mod,
t=sweep_points,
data=data)
guess_pars['amplitude'].vary = True
guess_pars['offset'].vary = True
guess_pars['frequency'].vary = True
guess_pars['phase'].vary = True
key = 'cos_fit_{}{}'.format(qbn, ro_suff)
self.fit_dicts[key] = {
'fit_fn': fit_mods.CosFunc,
'fit_xvals': {'t': sweep_points},
'fit_yvals': {'data': data},
'guess_pars': guess_pars}
def analyze_fit_results(self):
self.dynamic_phases = OrderedDict()
for meas_qbn in self.measured_qubits:
self.dynamic_phases[meas_qbn] = \
(self.fit_dicts['cos_fit_{}_measure'.format(meas_qbn)][
'fit_res'].best_values['phase'] -
self.fit_dicts['cos_fit_{}_ref_measure'.format(meas_qbn)][
'fit_res'].best_values['phase'])*180/np.pi
def prepare_plots(self):
super().prepare_plots()
if self.do_fitting:
for meas_qbn in self.measured_qubits:
sweep_points_dict = self.proc_data_dict['sweep_points_dict'][
meas_qbn]
if self.num_cal_points != 0:
yvals = [self.proc_data_dict['projected_data_dict'][meas_qbn][
'_ref_measure'][:-self.num_cal_points],
self.proc_data_dict['projected_data_dict'][meas_qbn][
'_measure'][:-self.num_cal_points]]
sweep_points = sweep_points_dict['msmt_sweep_points']
# plot cal points
for i, cal_pts_idxs in enumerate(
self.cal_states_dict.values()):
key = list(self.cal_states_dict)[i] + meas_qbn
self.plot_dicts[key] = {
'fig_id': 'dyn_phase_plot_' + meas_qbn,
'plotfn': self.plot_line,
'xvals': np.mean([
sweep_points_dict['cal_points_sweep_points'][
cal_pts_idxs],
sweep_points_dict['cal_points_sweep_points'][
cal_pts_idxs]],
axis=0),
'yvals': np.mean([
self.proc_data_dict['projected_data_dict'][meas_qbn][
'_ref_measure'][cal_pts_idxs],
self.proc_data_dict['projected_data_dict'][meas_qbn][
'_measure'][cal_pts_idxs]],
axis=0),
'setlabel': list(self.cal_states_dict)[i],
'do_legend': True,
'legend_bbox_to_anchor': (1, 0.5),
'legend_pos': 'center left',
'linestyle': 'none',
'line_kws': {'color': self.get_cal_state_color(
list(self.cal_states_dict)[i])}}
else:
yvals = [self.proc_data_dict['projected_data_dict'][meas_qbn][
'_ref_measure'],
self.proc_data_dict['projected_data_dict'][meas_qbn][
'_measure']]
sweep_points = sweep_points_dict['sweep_points']
self.plot_dicts['dyn_phase_plot_' + meas_qbn] = {
'plotfn': self.plot_line,
'xvals': [sweep_points, sweep_points],
'xlabel': self.raw_data_dict['xlabel'][0],
'xunit': self.raw_data_dict['xunit'][0][0],
'yvals': yvals,
'ylabel': 'Excited state population',
'yunit': '',
'setlabel': ['with measurement', 'no measurement'],
'title': (self.raw_data_dict['timestamps'][0] + ' ' +
self.raw_data_dict['measurementstring'][0]),
'linestyle': 'none',
'do_legend': True,
'legend_bbox_to_anchor': (1, 0.5),
'legend_pos': 'center left'}
self.plot_dicts['cos_fit_' + meas_qbn + '_ref_measure'] = {
'fig_id': 'dyn_phase_plot_' + meas_qbn,
'plotfn': self.plot_fit,
'fit_res': self.fit_dicts['cos_fit_{}_ref_measure'.format(
meas_qbn)]['fit_res'],
'setlabel': 'cos fit',
'do_legend': True,
'legend_bbox_to_anchor': (1, 0.5),
'legend_pos': 'center left'}
self.plot_dicts['cos_fit_' + meas_qbn + '_measure'] = {
'fig_id': 'dyn_phase_plot_' + meas_qbn,
'plotfn': self.plot_fit,
'fit_res': self.fit_dicts['cos_fit_{}_measure'.format(
meas_qbn)]['fit_res'],
'setlabel': 'cos fit',
'do_legend': True,
'legend_bbox_to_anchor': (1, 0.5),
'legend_pos': 'center left'}
textstr = 'Dynamic phase = {:.2f}'.format(
self.dynamic_phases[meas_qbn]) + r'$^{\circ}$'
self.plot_dicts['text_msg_' + meas_qbn] = {
'fig_id': 'dyn_phase_plot_' + meas_qbn,
'ypos': -0.175,
'xpos': 0.5,
'horizontalalignment': 'center',
'verticalalignment': 'top',
'plotfn': self.plot_text,
'text_string': textstr}
class FluxAmplitudeSweepAnalysis(MultiQubit_TimeDomain_Analysis):
def __init__(self, qb_names, *args, **kwargs):
self.mask_freq = kwargs.pop('mask_freq', None)
self.mask_amp = kwargs.pop('mask_amp', None)
super().__init__(qb_names, *args, **kwargs)
def extract_data(self):
super().extract_data()
        # Set some default values specific to FluxAmplitudeSweepAnalysis if the
# respective options have not been set by the user or in the metadata.
# (We do not do this in the init since we have to wait until
# metadata has been extracted.)
if self.get_param_value('rotation_type', default_value=None) is None:
self.options_dict['rotation_type'] = 'global_PCA'
if self.get_param_value('TwoD', default_value=None) is None:
self.options_dict['TwoD'] = True
def process_data(self):
super().process_data()
pdd = self.proc_data_dict
nr_sp = {qb: len(pdd['sweep_points_dict'][qb]['sweep_points'])
for qb in self.qb_names}
nr_sp2d = {qb: len(list(pdd['sweep_points_2D_dict'][qb].values())[0])
for qb in self.qb_names}
nr_cp = self.num_cal_points
# make matrix out of vector
data_reshaped = {qb: np.reshape(deepcopy(
pdd['data_to_fit'][qb]).T.flatten(), (nr_sp[qb], nr_sp2d[qb]))
for qb in self.qb_names}
pdd['data_reshaped'] = data_reshaped
# remove calibration points from data to fit
data_no_cp = {qb: np.array([pdd['data_reshaped'][qb][i, :]
for i in range(nr_sp[qb]-nr_cp)])
for qb in self.qb_names}
# apply mask
for qb in self.qb_names:
if self.mask_freq is None:
self.mask_freq = [True]*nr_sp2d[qb] # by default, no point is masked
if self.mask_amp is None:
self.mask_amp = [True]*(nr_sp[qb]-nr_cp)
pdd['freqs_masked'] = {}
pdd['amps_masked'] = {}
pdd['data_masked'] = {}
for qb in self.qb_names:
sp_param = [k for k in self.mospm[qb] if 'freq' in k][0]
pdd['freqs_masked'][qb] = \
pdd['sweep_points_2D_dict'][qb][sp_param][self.mask_freq]
pdd['amps_masked'][qb] = \
pdd['sweep_points_dict'][qb]['sweep_points'][
:-self.num_cal_points][self.mask_amp]
data_masked = data_no_cp[qb][self.mask_amp,:]
pdd['data_masked'][qb] = data_masked[:, self.mask_freq]
def prepare_fitting(self):
pdd = self.proc_data_dict
self.fit_dicts = OrderedDict()
# Gaussian fit of amplitude slices
gauss_mod = fit_mods.GaussianModel_v2()
for qb in self.qb_names:
for i in range(len(pdd['amps_masked'][qb])):
data = pdd['data_masked'][qb][i,:]
self.fit_dicts[f'gauss_fit_{qb}_{i}'] = {
'model': gauss_mod,
'fit_xvals': {'x': pdd['freqs_masked'][qb]},
'fit_yvals': {'data': data}
}
def analyze_fit_results(self):
pdd = self.proc_data_dict
pdd['gauss_center'] = {}
pdd['gauss_center_err'] = {}
pdd['filtered_center'] = {}
pdd['filtered_amps'] = {}
for qb in self.qb_names:
pdd['gauss_center'][qb] = np.array([
self.fit_res[f'gauss_fit_{qb}_{i}'].best_values['center']
for i in range(len(pdd['amps_masked'][qb]))])
pdd['gauss_center_err'][qb] = np.array([
self.fit_res[f'gauss_fit_{qb}_{i}'].params['center'].stderr
for i in range(len(pdd['amps_masked'][qb]))])
# filter out points with stderr > 1e6 Hz
pdd['filtered_center'][qb] = np.array([])
pdd['filtered_amps'][qb] = np.array([])
for i, stderr in enumerate(pdd['gauss_center_err'][qb]):
try:
if stderr < 1e6:
pdd['filtered_center'][qb] = \
np.append(pdd['filtered_center'][qb],
pdd['gauss_center'][qb][i])
pdd['filtered_amps'][qb] = \
np.append(pdd['filtered_amps'][qb],
pdd['sweep_points_dict'][qb]\
['sweep_points'][:-self.num_cal_points][i])
                except TypeError:
continue
# if gaussian fitting does not work (i.e. all points were filtered
# out above) use max value of data to get an estimate of freq
            if len(pdd['filtered_amps'][qb]) == 0:
                freqs = np.array([])
                for i in range(pdd['data_masked'][qb].shape[0]):
                    freqs = np.append(freqs, pdd['freqs_masked'][qb][
                        np.argmax(pdd['data_masked'][qb][i, :])])
                pdd['filtered_center'][qb] = freqs
                pdd['filtered_amps'][qb] = pdd['amps_masked'][qb]
# fit the freqs to the qubit model
self.fit_func = self.get_param_value('fit_func', fit_mods.Qubit_dac_to_freq)
if self.fit_func == fit_mods.Qubit_dac_to_freq_precise:
fit_guess_func = fit_mods.Qubit_dac_arch_guess_precise
else:
fit_guess_func = fit_mods.Qubit_dac_arch_guess
freq_mod = lmfit.Model(self.fit_func)
fixed_params = \
self.get_param_value("fixed_params_for_fit", {}).get(qb, None)
if fixed_params is None:
fixed_params = dict(E_c=0)
freq_mod.guess = fit_guess_func.__get__(
freq_mod, freq_mod.__class__)
self.fit_dicts[f'freq_fit_{qb}'] = {
'model': freq_mod,
'fit_xvals': {'dac_voltage': pdd['filtered_amps'][qb]},
'fit_yvals': {'data': pdd['filtered_center'][qb]},
"guessfn_pars": {"fixed_params": fixed_params}}
self.run_fitting()
def prepare_plots(self):
pdd = self.proc_data_dict
rdd = self.raw_data_dict
for qb in self.qb_names:
sp_param = [k for k in self.mospm[qb] if 'freq' in k][0]
self.plot_dicts[f'data_2d_{qb}'] = {
'title': rdd['measurementstring'] +
'\n' + rdd['timestamp'],
'ax_id': f'data_2d_{qb}',
'plotfn': self.plot_colorxy,
'xvals': pdd['sweep_points_dict'][qb]['sweep_points'],
'yvals': pdd['sweep_points_2D_dict'][qb][sp_param],
'zvals': np.transpose(pdd['data_reshaped'][qb]),
'xlabel': r'Flux pulse amplitude',
'xunit': 'V',
'ylabel': r'Qubit drive frequency',
'yunit': 'Hz',
'zlabel': 'Excited state population',
}
if self.do_fitting:
if self.options_dict.get('scatter', True):
label = f'freq_scatter_{qb}_scatter'
self.plot_dicts[label] = {
'title': rdd['measurementstring'] +
'\n' + rdd['timestamp'],
'ax_id': f'data_2d_{qb}',
'plotfn': self.plot_line,
'linestyle': '',
'marker': 'o',
'xvals': pdd['filtered_amps'][qb],
'yvals': pdd['filtered_center'][qb],
'xlabel': r'Flux pulse amplitude',
'xunit': 'V',
'ylabel': r'Qubit drive frequency',
'yunit': 'Hz',
'color': 'white',
}
amps = pdd['sweep_points_dict'][qb]['sweep_points'][
:-self.num_cal_points]
label = f'freq_scatter_{qb}'
self.plot_dicts[label] = {
'title': rdd['measurementstring'] +
'\n' + rdd['timestamp'],
'ax_id': f'data_2d_{qb}',
'plotfn': self.plot_line,
'linestyle': '-',
'marker': '',
'xvals': amps,
'yvals': self.fit_func(amps,
**self.fit_res[f'freq_fit_{qb}'].best_values),
'color': 'red',
}
class T1FrequencySweepAnalysis(MultiQubit_TimeDomain_Analysis):
def process_data(self):
super().process_data()
pdd = self.proc_data_dict
nr_cp = self.num_cal_points
self.lengths = OrderedDict()
self.amps = OrderedDict()
self.freqs = OrderedDict()
for qbn in self.qb_names:
len_key = [pn for pn in self.mospm[qbn] if 'length' in pn]
if len(len_key) == 0:
                raise KeyError('Could not find sweep points corresponding to '
                               'flux pulse length.')
self.lengths[qbn] = self.sp.get_sweep_params_property(
'values', 0, len_key[0])
amp_key = [pn for pn in self.mospm[qbn] if 'amp' in pn]
            if len(amp_key) == 0:
                raise KeyError('Could not find sweep points corresponding to '
                               'flux pulse amplitude.')
self.amps[qbn] = self.sp.get_sweep_params_property(
'values', 1, amp_key[0])
freq_key = [pn for pn in self.mospm[qbn] if 'freq' in pn]
if len(freq_key) == 0:
self.freqs[qbn] = None
else:
                self.freqs[qbn] = self.sp.get_sweep_params_property(
'values', 1, freq_key[0])
nr_amps = len(self.amps[self.qb_names[0]])
nr_lengths = len(self.lengths[self.qb_names[0]])
# make matrix out of vector
data_reshaped_no_cp = {qb: np.reshape(deepcopy(
pdd['data_to_fit'][qb][
:, :pdd['data_to_fit'][qb].shape[1]-nr_cp]).flatten(),
(nr_amps, nr_lengths)) for qb in self.qb_names}
pdd['data_reshaped_no_cp'] = data_reshaped_no_cp
        pdd['mask'] = {qb: np.ones(nr_amps, dtype=bool)
for qb in self.qb_names}
def prepare_fitting(self):
pdd = self.proc_data_dict
self.fit_dicts = OrderedDict()
exp_mod = fit_mods.ExponentialModel()
for qb in self.qb_names:
for i, data in enumerate(pdd['data_reshaped_no_cp'][qb]):
self.fit_dicts[f'exp_fit_{qb}_amp_{i}'] = {
'model': exp_mod,
'fit_xvals': {'x': self.lengths[qb]},
'fit_yvals': {'data': data}}
def analyze_fit_results(self):
pdd = self.proc_data_dict
pdd['T1'] = {}
pdd['T1_err'] = {}
for qb in self.qb_names:
pdd['T1'][qb] = np.array([
abs(self.fit_res[f'exp_fit_{qb}_amp_{i}'].best_values['decay'])
for i in range(len(self.amps[qb]))])
pdd['T1_err'][qb] = np.array([
self.fit_res[f'exp_fit_{qb}_amp_{i}'].params['decay'].stderr
for i in range(len(self.amps[qb]))])
for i in range(len(self.amps[qb])):
try:
if pdd['T1_err'][qb][i] >= 10 * pdd['T1'][qb][i]:
pdd['mask'][qb][i] = False
except TypeError:
pdd['mask'][qb][i] = False
def prepare_plots(self):
pdd = self.proc_data_dict
rdd = self.raw_data_dict
for qb in self.qb_names:
for p, param_values in enumerate([self.amps, self.freqs]):
                if param_values[qb] is None:
continue
suffix = '_amp' if p == 0 else '_freq'
mask = pdd['mask'][qb]
xlabel = r'Flux pulse amplitude' if p == 0 else \
r'Derived qubit frequency'
if self.do_fitting:
# Plot T1 vs flux pulse amplitude
label = f'T1_fit_{qb}{suffix}'
self.plot_dicts[label] = {
'title': rdd['measurementstring'] + '\n' + rdd['timestamp'],
'plotfn': self.plot_line,
'linestyle': '-',
'xvals': param_values[qb][mask],
'yvals': pdd['T1'][qb][mask],
'yerr': pdd['T1_err'][qb][mask],
'xlabel': xlabel,
'xunit': 'V' if p == 0 else 'Hz',
'ylabel': r'T1',
'yunit': 's',
'color': 'blue',
}
# Plot rotated integrated average in dependece of flux pulse
# amplitude and length
label = f'T1_color_plot_{qb}{suffix}'
self.plot_dicts[label] = {
'title': rdd['measurementstring'] + '\n' + rdd['timestamp'],
'plotfn': self.plot_colorxy,
'linestyle': '-',
'xvals': param_values[qb][mask],
'yvals': self.lengths[qb],
'zvals': np.transpose(pdd['data_reshaped_no_cp'][qb][mask]),
'xlabel': xlabel,
'xunit': 'V' if p == 0 else 'Hz',
'ylabel': r'Flux pulse length',
'yunit': 's',
'zlabel': r'Excited state population'
}
# Plot population loss for the first flux pulse length as a
# function of flux pulse amplitude
label = f'Pop_loss_{qb}{suffix}'
self.plot_dicts[label] = {
'title': rdd['measurementstring'] + '\n' + rdd['timestamp'],
'plotfn': self.plot_line,
'linestyle': '-',
'xvals': param_values[qb][mask],
'yvals': 1 - pdd['data_reshaped_no_cp'][qb][:, 0][mask],
'xlabel': xlabel,
'xunit': 'V' if p == 0 else 'Hz',
'ylabel': r'Pop. loss @ {:.0f} ns'.format(
self.lengths[qb][0]/1e-9
),
'yunit': '',
}
# Plot all fits in single figure
if self.options_dict.get('all_fits', False) and self.do_fitting:
colormap = self.options_dict.get('colormap', mpl.cm.plasma)
for i in range(len(self.amps[qb])):
color = colormap(i/(len(self.amps[qb])-1))
label = f'exp_fit_{qb}_amp_{i}'
fitid = param_values[qb][i]
self.plot_dicts[label] = {
'title': rdd['measurementstring'] + '\n' + rdd['timestamp'],
'fig_id': f'T1_fits_{qb}',
'xlabel': r'Flux pulse length',
'xunit': 's',
'ylabel': r'Excited state population',
'plotfn': self.plot_fit,
'fit_res': self.fit_res[label],
'plot_init': self.options_dict.get('plot_init', False),
'color': color,
'setlabel': f'freq={fitid:.4f}' if p == 1
else f'amp={fitid:.4f}',
'do_legend': False,
'legend_bbox_to_anchor': (1, 1),
'legend_pos': 'upper left',
}
label = f'freq_scatter_{qb}_{i}'
self.plot_dicts[label] = {
'fig_id': f'T1_fits_{qb}',
'plotfn': self.plot_line,
'xvals': self.lengths[qb],
'linestyle': '',
'yvals': pdd['data_reshaped_no_cp'][qb][i, :],
'color': color,
'setlabel': f'freq={fitid:.4f}' if p == 1
else f'amp={fitid:.4f}',
}
class T2FrequencySweepAnalysis(MultiQubit_TimeDomain_Analysis):
def process_data(self):
super().process_data()
pdd = self.proc_data_dict
nr_cp = self.num_cal_points
nr_amps = len(self.metadata['amplitudes'])
nr_lengths = len(self.metadata['flux_lengths'])
nr_phases = len(self.metadata['phases'])
# make matrix out of vector
data_reshaped_no_cp = {qb: np.reshape(
deepcopy(pdd['data_to_fit'][qb][
:, :pdd['data_to_fit'][qb].shape[1]-nr_cp]).flatten(),
(nr_amps, nr_lengths, nr_phases)) for qb in self.qb_names}
pdd['data_reshaped_no_cp'] = data_reshaped_no_cp
if self.metadata['use_cal_points']:
pdd['cal_point_data'] = {qb: deepcopy(
pdd['data_to_fit'][qb][
len(pdd['data_to_fit'][qb])-nr_cp:]) for qb in self.qb_names}
        pdd['mask'] = {qb: np.ones(nr_amps, dtype=bool)
for qb in self.qb_names}
def prepare_fitting(self):
pdd = self.proc_data_dict
self.fit_dicts = OrderedDict()
nr_amps = len(self.metadata['amplitudes'])
for qb in self.qb_names:
for i in range(nr_amps):
for j, data in enumerate(pdd['data_reshaped_no_cp'][qb][i]):
cos_mod = fit_mods.CosModel
guess_pars = fit_mods.Cos_guess(
model=cos_mod, t=self.metadata['phases'],
data=data,
freq_guess=1/360)
guess_pars['frequency'].value = 1/360
guess_pars['frequency'].vary = False
self.fit_dicts[f'cos_fit_{qb}_{i}_{j}'] = {
'fit_fn': fit_mods.CosFunc,
'fit_xvals': {'t': self.metadata['phases']},
'fit_yvals': {'data': data},
'guess_pars': guess_pars}
def analyze_fit_results(self):
pdd = self.proc_data_dict
pdd['T2'] = {}
pdd['T2_err'] = {}
pdd['phase_contrast'] = {}
nr_lengths = len(self.metadata['flux_lengths'])
nr_amps = len(self.metadata['amplitudes'])
for qb in self.qb_names:
pdd['phase_contrast'][qb] = {}
exp_mod = fit_mods.ExponentialModel()
for i in range(nr_amps):
pdd['phase_contrast'][qb][f'amp_{i}'] = np.array([self.fit_res[
f'cos_fit_{qb}_{i}_{j}'
].best_values['amplitude']
for j in
range(nr_lengths)])
self.fit_dicts[f'exp_fit_{qb}_{i}'] = {
'model': exp_mod,
'fit_xvals': {'x': self.metadata['flux_lengths']},
'fit_yvals': {'data': np.array([self.fit_res[
f'cos_fit_{qb}_{i}_{j}'
].best_values['amplitude']
for j in
range(nr_lengths)])}}
self.run_fitting()
pdd['T2'][qb] = np.array([
abs(self.fit_res[f'exp_fit_{qb}_{i}'].best_values['decay'])
for i in range(len(self.metadata['amplitudes']))])
            pdd['mask'][qb] = np.ones(len(self.metadata['amplitudes']),
                                      dtype=bool)
for i in range(len(self.metadata['amplitudes'])):
try:
if self.fit_res[f'exp_fit_{qb}_{i}']\
.params['decay'].stderr >= 1e-5:
pdd['mask'][qb][i] = False
except TypeError:
pdd['mask'][qb][i] = False
def prepare_plots(self):
pdd = self.proc_data_dict
rdd = self.raw_data_dict
for qb in self.qb_names:
mask = pdd['mask'][qb]
label = f'T2_fit_{qb}'
xvals = self.metadata['amplitudes'][mask] if \
self.metadata['frequencies'] is None else \
self.metadata['frequencies'][mask]
xlabel = r'Flux pulse amplitude' if \
self.metadata['frequencies'] is None else \
r'Derived qubit frequency'
self.plot_dicts[label] = {
'plotfn': self.plot_line,
'linestyle': '-',
'xvals': xvals,
'yvals': pdd['T2'][qb][mask],
'xlabel': xlabel,
'xunit': 'V' if self.metadata['frequencies'] is None else 'Hz',
'ylabel': r'T2',
'yunit': 's',
'color': 'blue',
}
# Plot all fits in single figure
if not self.options_dict.get('all_fits', False):
continue
colormap = self.options_dict.get('colormap', mpl.cm.plasma)
for i in range(len(self.metadata['amplitudes'])):
                color = colormap(i/(len(self.metadata['amplitudes'])-1))
label = f'exp_fit_{qb}_amp_{i}'
freqs = self.metadata['frequencies'] is not None
fitid = self.metadata.get('frequencies',
self.metadata['amplitudes'])[i]
self.plot_dicts[label] = {
'title': rdd['measurementstring'] +
'\n' + rdd['timestamp'],
'ax_id': f'T2_fits_{qb}',
'xlabel': r'Flux pulse length',
'xunit': 's',
'ylabel': r'Excited state population',
'plotfn': self.plot_fit,
'fit_res': self.fit_res[label],
'plot_init': self.options_dict.get('plot_init', False),
'color': color,
'setlabel': f'freq={fitid:.4f}' if freqs
else f'amp={fitid:.4f}',
'do_legend': False,
'legend_bbox_to_anchor': (1, 1),
'legend_pos': 'upper left',
}
label = f'freq_scatter_{qb}_{i}'
self.plot_dicts[label] = {
'ax_id': f'T2_fits_{qb}',
'plotfn': self.plot_line,
'xvals': self.metadata['phases'],
'linestyle': '',
'yvals': pdd['data_reshaped_no_cp'][qb][i,:],
'color': color,
'setlabel': f'freq={fitid:.4f}' if freqs
else f'amp={fitid:.4f}',
}
class MeasurementInducedDephasingAnalysis(MultiQubit_TimeDomain_Analysis):
def process_data(self):
super().process_data()
rdd = self.raw_data_dict
pdd = self.proc_data_dict
pdd['data_reshaped'] = {qb: [] for qb in pdd['data_to_fit']}
pdd['amps_reshaped'] = np.unique(self.metadata['hard_sweep_params']['ro_amp_scale']['values'])
pdd['phases_reshaped'] = []
for amp in pdd['amps_reshaped']:
mask = self.metadata['hard_sweep_params']['ro_amp_scale']['values'] == amp
pdd['phases_reshaped'].append(self.metadata['hard_sweep_params']['phase']['values'][mask])
for qb in self.qb_names:
pdd['data_reshaped'][qb].append(pdd['data_to_fit'][qb][:len(mask)][mask])
def prepare_fitting(self):
pdd = self.proc_data_dict
rdd = self.raw_data_dict
self.fit_dicts = OrderedDict()
for qb in self.qb_names:
for i, data in enumerate(pdd['data_reshaped'][qb]):
cos_mod = fit_mods.CosModel
guess_pars = fit_mods.Cos_guess(
model=cos_mod, t=pdd['phases_reshaped'][i],
data=data, freq_guess=1/360)
guess_pars['frequency'].value = 1/360
guess_pars['frequency'].vary = False
self.fit_dicts[f'cos_fit_{qb}_{i}'] = {
'fit_fn': fit_mods.CosFunc,
'fit_xvals': {'t': pdd['phases_reshaped'][i]},
'fit_yvals': {'data': data},
'guess_pars': guess_pars}
def analyze_fit_results(self):
pdd = self.proc_data_dict
pdd['phase_contrast'] = {}
pdd['phase_offset'] = {}
pdd['sigma'] = {}
pdd['sigma_err'] = {}
pdd['a'] = {}
pdd['a_err'] = {}
pdd['c'] = {}
pdd['c_err'] = {}
for qb in self.qb_names:
pdd['phase_contrast'][qb] = np.array([
self.fit_res[f'cos_fit_{qb}_{i}'].best_values['amplitude']
for i, _ in enumerate(pdd['data_reshaped'][qb])])
pdd['phase_offset'][qb] = np.array([
self.fit_res[f'cos_fit_{qb}_{i}'].best_values['phase']
for i, _ in enumerate(pdd['data_reshaped'][qb])])
pdd['phase_offset'][qb] += np.pi * (pdd['phase_contrast'][qb] < 0)
pdd['phase_offset'][qb] = (pdd['phase_offset'][qb] + np.pi) % (2 * np.pi) - np.pi
pdd['phase_offset'][qb] = 180*np.unwrap(pdd['phase_offset'][qb])/np.pi
pdd['phase_contrast'][qb] = np.abs(pdd['phase_contrast'][qb])
gauss_mod = lmfit.models.GaussianModel()
self.fit_dicts[f'phase_contrast_fit_{qb}'] = {
'model': gauss_mod,
'guess_dict': {'center': {'value': 0, 'vary': False}},
'fit_xvals': {'x': pdd['amps_reshaped']},
'fit_yvals': {'data': pdd['phase_contrast'][qb]}}
quadratic_mod = lmfit.models.QuadraticModel()
self.fit_dicts[f'phase_offset_fit_{qb}'] = {
'model': quadratic_mod,
'guess_dict': {'b': {'value': 0, 'vary': False}},
'fit_xvals': {'x': pdd['amps_reshaped']},
'fit_yvals': {'data': pdd['phase_offset'][qb]}}
self.run_fitting()
self.save_fit_results()
pdd['sigma'][qb] = self.fit_res[f'phase_contrast_fit_{qb}'].best_values['sigma']
pdd['sigma_err'][qb] = self.fit_res[f'phase_contrast_fit_{qb}'].params['sigma']. \
stderr
pdd['a'][qb] = self.fit_res[f'phase_offset_fit_{qb}'].best_values['a']
pdd['a_err'][qb] = self.fit_res[f'phase_offset_fit_{qb}'].params['a'].stderr
pdd['c'][qb] = self.fit_res[f'phase_offset_fit_{qb}'].best_values['c']
pdd['c_err'][qb] = self.fit_res[f'phase_offset_fit_{qb}'].params['c'].stderr
pdd['sigma_err'][qb] = float('nan') if pdd['sigma_err'][qb] is None \
else pdd['sigma_err'][qb]
pdd['a_err'][qb] = float('nan') if pdd['a_err'][qb] is None else pdd['a_err'][qb]
pdd['c_err'][qb] = float('nan') if pdd['c_err'][qb] is None else pdd['c_err'][qb]
def prepare_plots(self):
pdd = self.proc_data_dict
rdd = self.raw_data_dict
phases_equal = True
for phases in pdd['phases_reshaped'][1:]:
if not np.all(phases == pdd['phases_reshaped'][0]):
phases_equal = False
break
for qb in self.qb_names:
if phases_equal:
self.plot_dicts[f'data_2d_{qb}'] = {
'title': rdd['measurementstring'] +
'\n' + rdd['timestamp'],
'plotfn': self.plot_colorxy,
'xvals': pdd['phases_reshaped'][0],
'yvals': pdd['amps_reshaped'],
'zvals': pdd['data_reshaped'][qb],
'xlabel': r'Pulse phase, $\phi$',
'xunit': 'deg',
'ylabel': r'Readout pulse amplitude scale, $V_{RO}/V_{ref}$',
'yunit': '',
'zlabel': 'Excited state population',
}
colormap = self.options_dict.get('colormap', mpl.cm.plasma)
for i, amp in enumerate(pdd['amps_reshaped']):
color = colormap(i/(len(pdd['amps_reshaped'])-1))
label = f'cos_data_{qb}_{i}'
self.plot_dicts[label] = {
'title': rdd['measurementstring'] +
'\n' + rdd['timestamp'],
'ax_id': f'amplitude_crossections_{qb}',
'plotfn': self.plot_line,
'xvals': pdd['phases_reshaped'][i],
'yvals': pdd['data_reshaped'][qb][i],
'xlabel': r'Pulse phase, $\phi$',
'xunit': 'deg',
'ylabel': 'Excited state population',
'linestyle': '',
'color': color,
'setlabel': f'amp={amp:.4f}',
'do_legend': True,
'legend_bbox_to_anchor': (1, 1),
'legend_pos': 'upper left',
}
if self.do_fitting:
for i, amp in enumerate(pdd['amps_reshaped']):
color = colormap(i/(len(pdd['amps_reshaped'])-1))
label = f'cos_fit_{qb}_{i}'
self.plot_dicts[label] = {
'ax_id': f'amplitude_crossections_{qb}',
'plotfn': self.plot_fit,
'fit_res': self.fit_res[label],
'plot_init': self.options_dict.get('plot_init', False),
'color': color,
'setlabel': f'fit, amp={amp:.4f}',
}
# Phase contrast
self.plot_dicts[f'phase_contrast_data_{qb}'] = {
'title': rdd['measurementstring'] +
'\n' + rdd['timestamp'],
'ax_id': f'phase_contrast_{qb}',
'plotfn': self.plot_line,
'xvals': pdd['amps_reshaped'],
'yvals': 200*pdd['phase_contrast'][qb],
'xlabel': r'Readout pulse amplitude scale, $V_{RO}/V_{ref}$',
'xunit': '',
'ylabel': 'Phase contrast',
'yunit': '%',
'linestyle': '',
'color': 'k',
'setlabel': 'data',
'do_legend': True,
}
self.plot_dicts[f'phase_contrast_fit_{qb}'] = {
'ax_id': f'phase_contrast_{qb}',
'plotfn': self.plot_line,
'xvals': pdd['amps_reshaped'],
'yvals': 200*self.fit_res[f'phase_contrast_fit_{qb}'].best_fit,
'color': 'r',
'marker': '',
'setlabel': 'fit',
'do_legend': True,
}
self.plot_dicts[f'phase_contrast_labels_{qb}'] = {
'ax_id': f'phase_contrast_{qb}',
'plotfn': self.plot_line,
'xvals': pdd['amps_reshaped'],
'yvals': 200*pdd['phase_contrast'][qb],
'marker': '',
'linestyle': '',
'setlabel': r'$\sigma = ({:.5f} \pm {:.5f})$ V'.
format(pdd['sigma'][qb], pdd['sigma_err'][qb]),
'do_legend': True,
'legend_bbox_to_anchor': (1, 1),
'legend_pos': 'upper left',
}
# Phase offset
self.plot_dicts[f'phase_offset_data_{qb}'] = {
'title': rdd['measurementstring'] +
'\n' + rdd['timestamp'],
'ax_id': f'phase_offset_{qb}',
'plotfn': self.plot_line,
'xvals': pdd['amps_reshaped'],
'yvals': pdd['phase_offset'][qb],
'xlabel': r'Readout pulse amplitude scale, $V_{RO}/V_{ref}$',
'xunit': '',
'ylabel': 'Phase offset',
'yunit': 'deg',
'linestyle': '',
'color': 'k',
'setlabel': 'data',
'do_legend': True,
}
self.plot_dicts[f'phase_offset_fit_{qb}'] = {
'ax_id': f'phase_offset_{qb}',
'plotfn': self.plot_line,
'xvals': pdd['amps_reshaped'],
'yvals': self.fit_res[f'phase_offset_fit_{qb}'].best_fit,
'color': 'r',
'marker': '',
'setlabel': 'fit',
'do_legend': True,
}
self.plot_dicts[f'phase_offset_labels_{qb}'] = {
'ax_id': f'phase_offset_{qb}',
'plotfn': self.plot_line,
'xvals': pdd['amps_reshaped'],
'yvals': pdd['phase_offset'][qb],
'marker': '',
'linestyle': '',
'setlabel': r'$a = {:.0f} \pm {:.0f}$ deg/V${{}}^2$'.
format(pdd['a'][qb], pdd['a_err'][qb]) + '\n' +
r'$c = {:.1f} \pm {:.1f}$ deg'.
format(pdd['c'][qb], pdd['c_err'][qb]),
'do_legend': True,
'legend_bbox_to_anchor': (1, 1),
'legend_pos': 'upper left',
}
class DriveCrosstalkCancellationAnalysis(MultiQubit_TimeDomain_Analysis):
def process_data(self):
super().process_data()
if self.sp is None:
raise ValueError('This analysis needs a SweepPoints '
'class instance.')
pdd = self.proc_data_dict
# get the ramsey phases as the values of the first sweep parameter
# in the 2nd sweep dimension.
# !!! This assumes all qubits have the same ramsey phases !!!
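        # Assumed SweepPoints layout (illustrative only):
        #   dimension 0: one '<qb>.<param>' entry per qubit whose cancellation
        #                parameter is swept (a plain 'phase' entry is skipped)
        #   dimension 1: the Ramsey phases, shared by all qubits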
pdd['ramsey_phases'] = self.sp.get_sweep_params_property('values', 1)
pdd['qb_sweep_points'] = {}
pdd['qb_sweep_param'] = {}
for k, v in self.sp.get_sweep_dimension(0).items():
if k == 'phase':
continue
qb, param = k.split('.')
pdd['qb_sweep_points'][qb] = v[0]
pdd['qb_sweep_param'][qb] = (param, v[1], v[2])
pdd['qb_msmt_vals'] = {}
pdd['qb_cal_vals'] = {}
for qb, data in pdd['data_to_fit'].items():
pdd['qb_msmt_vals'][qb] = data[:, :-self.num_cal_points].reshape(
len(pdd['qb_sweep_points'][qb]), len(pdd['ramsey_phases']))
pdd['qb_cal_vals'][qb] = data[0, -self.num_cal_points:]
def prepare_fitting(self):
pdd = self.proc_data_dict
self.fit_dicts = OrderedDict()
for qb in self.qb_names:
for i, data in enumerate(pdd['qb_msmt_vals'][qb]):
cos_mod = fit_mods.CosModel
guess_pars = fit_mods.Cos_guess(
model=cos_mod, t=pdd['ramsey_phases'],
data=data, freq_guess=1/360)
guess_pars['frequency'].value = 1/360
guess_pars['frequency'].vary = False
self.fit_dicts[f'cos_fit_{qb}_{i}'] = {
'fit_fn': fit_mods.CosFunc,
'fit_xvals': {'t': pdd['ramsey_phases']},
'fit_yvals': {'data': data},
'guess_pars': guess_pars}
def analyze_fit_results(self):
pdd = self.proc_data_dict
pdd['phase_contrast'] = {}
pdd['phase_offset'] = {}
for qb in self.qb_names:
pdd['phase_contrast'][qb] = np.array([
2*self.fit_res[f'cos_fit_{qb}_{i}'].best_values['amplitude']
for i, _ in enumerate(pdd['qb_msmt_vals'][qb])])
pdd['phase_offset'][qb] = np.array([
self.fit_res[f'cos_fit_{qb}_{i}'].best_values['phase']
for i, _ in enumerate(pdd['qb_msmt_vals'][qb])])
pdd['phase_offset'][qb] *= 180/np.pi
pdd['phase_offset'][qb] += 180 * (pdd['phase_contrast'][qb] < 0)
pdd['phase_offset'][qb] = (pdd['phase_offset'][qb] + 180) % 360 - 180
pdd['phase_contrast'][qb] = np.abs(pdd['phase_contrast'][qb])
def prepare_plots(self):
pdd = self.proc_data_dict
rdd = self.raw_data_dict
for qb in self.qb_names:
self.plot_dicts[f'data_2d_{qb}'] = {
'title': rdd['measurementstring'] +
'\n' + rdd['timestamp'] + '\n' + qb,
'plotfn': self.plot_colorxy,
'xvals': pdd['ramsey_phases'],
'yvals': pdd['qb_sweep_points'][qb],
'zvals': pdd['qb_msmt_vals'][qb],
'xlabel': r'Ramsey phase, $\phi$',
'xunit': 'deg',
'ylabel': pdd['qb_sweep_param'][qb][2],
'yunit': pdd['qb_sweep_param'][qb][1],
'zlabel': 'Excited state population',
}
colormap = self.options_dict.get('colormap', mpl.cm.plasma)
for i, pval in enumerate(pdd['qb_sweep_points'][qb]):
if i == len(pdd['qb_sweep_points'][qb]) - 1:
legendlabel='data, ref.'
else:
legendlabel = f'data, {pdd["qb_sweep_param"][qb][0]}='\
f'{pval:.4f}{pdd["qb_sweep_param"][qb][1]}'
color = colormap(i/(len(pdd['qb_sweep_points'][qb])-1))
label = f'cos_data_{qb}_{i}'
self.plot_dicts[label] = {
'title': rdd['measurementstring'] +
'\n' + rdd['timestamp'] + '\n' + qb,
'ax_id': f'param_crossections_{qb}',
'plotfn': self.plot_line,
'xvals': pdd['ramsey_phases'],
'yvals': pdd['qb_msmt_vals'][qb][i],
'xlabel': r'Ramsey phase, $\phi$',
'xunit': 'deg',
'ylabel': 'Excited state population',
'linestyle': '',
'color': color,
'setlabel': legendlabel,
'do_legend': False,
'legend_bbox_to_anchor': (1, 1),
'legend_pos': 'upper left',
}
if self.do_fitting:
for i, pval in enumerate(pdd['qb_sweep_points'][qb]):
if i == len(pdd['qb_sweep_points'][qb]) - 1:
legendlabel = 'fit, ref.'
else:
legendlabel = f'fit, {pdd["qb_sweep_param"][qb][0]}='\
f'{pval:.4f}{pdd["qb_sweep_param"][qb][1]}'
color = colormap(i/(len(pdd['qb_sweep_points'][qb])-1))
label = f'cos_fit_{qb}_{i}'
self.plot_dicts[label] = {
'ax_id': f'param_crossections_{qb}',
'plotfn': self.plot_fit,
'fit_res': self.fit_res[label],
'plot_init': self.options_dict.get('plot_init', False),
'color': color,
'do_legend': False,
# 'setlabel': legendlabel
}
# Phase contrast
self.plot_dicts[f'phase_contrast_data_{qb}'] = {
'title': rdd['measurementstring'] +
'\n' + rdd['timestamp'] + '\n' + qb,
'ax_id': f'phase_contrast_{qb}',
'plotfn': self.plot_line,
'xvals': pdd['qb_sweep_points'][qb][:-1],
'yvals': pdd['phase_contrast'][qb][:-1] * 100,
'xlabel': pdd['qb_sweep_param'][qb][2],
'xunit': pdd['qb_sweep_param'][qb][1],
'ylabel': 'Phase contrast',
'yunit': '%',
'linestyle': '-',
'marker': 'o',
'color': 'C0',
'setlabel': 'data',
'do_legend': True,
}
self.plot_dicts[f'phase_contrast_ref_{qb}'] = {
'ax_id': f'phase_contrast_{qb}',
'plotfn': self.plot_hlines,
'xmin': pdd['qb_sweep_points'][qb][:-1].min(),
'xmax': pdd['qb_sweep_points'][qb][:-1].max(),
'y': pdd['phase_contrast'][qb][-1] * 100,
'linestyle': '--',
'colors': '0.6',
'setlabel': 'ref',
'do_legend': True,
}
# Phase offset
self.plot_dicts[f'phase_offset_data_{qb}'] = {
'title': rdd['measurementstring'] +
'\n' + rdd['timestamp'] + '\n' + qb,
'ax_id': f'phase_offset_{qb}',
'plotfn': self.plot_line,
'xvals': pdd['qb_sweep_points'][qb][:-1],
'yvals': pdd['phase_offset'][qb][:-1],
'xlabel': pdd['qb_sweep_param'][qb][2],
'xunit': pdd['qb_sweep_param'][qb][1],
'ylabel': 'Phase offset',
'yunit': 'deg',
'linestyle': '-',
'marker': 'o',
'color': 'C0',
'setlabel': 'data',
'do_legend': True,
}
self.plot_dicts[f'phase_offset_ref_{qb}'] = {
'ax_id': f'phase_offset_{qb}',
'plotfn': self.plot_hlines,
'xmin': pdd['qb_sweep_points'][qb][:-1].min(),
'xmax': pdd['qb_sweep_points'][qb][:-1].max(),
'y': pdd['phase_offset'][qb][-1],
'linestyle': '--',
'colors': '0.6',
'setlabel': 'ref',
'do_legend': True,
}
class FluxlineCrosstalkAnalysis(MultiQubit_TimeDomain_Analysis):
"""Analysis for the measure_fluxline_crosstalk measurement.
The measurement involves Ramsey measurements on a set of crosstalk qubits,
which have been brought to a flux-sensitive position with a flux pulse.
The first dimension is the ramsey-phase of these qubits.
In the second sweep dimension, the amplitude of a flux pulse on another
(target) qubit is swept.
The analysis extracts the change in Ramsey phase offset, which gets
converted to a frequency offset due to the flux pulse on the target qubit.
The frequency offset is then converted to a flux offset, which is a measure
of the crosstalk between the target fluxline and the crosstalk qubit.
The measurement is hard-compressed, meaning the raw data is inherently 1d,
with one set of calibration points as the final segments. The experiment
part of the measured values are reshaped to the correct 2d shape for
the analysis. The sweep points passed into the analysis should still reflect
the 2d nature of the measurement, meaning the ramsey phase values should be
passed in the first dimension and the target fluxpulse amplitudes in the
second sweep dimension.
"""
def __init__(self, qb_names, *args, **kwargs):
params_dict = {f'{qbn}.amp_to_freq_model':
f'Instrument settings.{qbn}.fit_ge_freq_from_flux_pulse_amp'
for qbn in qb_names}
kwargs['params_dict'] = kwargs.get('params_dict', {})
kwargs['params_dict'].update(params_dict)
super().__init__(qb_names, *args, **kwargs)
def process_data(self):
super().process_data()
if self.sp is None:
raise ValueError('This analysis needs a SweepPoints '
'class instance.')
pdd = self.proc_data_dict
pdd['ramsey_phases'] = self.sp.get_sweep_params_property('values', 0)
pdd['target_amps'] = self.sp.get_sweep_params_property('values', 1)
pdd['target_fluxpulse_length'] = \
self.get_param_value('target_fluxpulse_length')
pdd['crosstalk_qubits_amplitudes'] = \
self.get_param_value('crosstalk_qubits_amplitudes')
pdd['qb_msmt_vals'] = {qb:
pdd['data_to_fit'][qb][:, :-self.num_cal_points].reshape(
len(pdd['target_amps']), len(pdd['ramsey_phases']))
for qb in self.qb_names}
pdd['qb_cal_vals'] = {
qb: pdd['data_to_fit'][qb][0, -self.num_cal_points:]
for qb in self.qb_names}
def prepare_fitting(self):
pdd = self.proc_data_dict
self.fit_dicts = OrderedDict()
cos_mod = lmfit.Model(fit_mods.CosFunc)
cos_mod.guess = fit_mods.Cos_guess.__get__(cos_mod, cos_mod.__class__)
for qb in self.qb_names:
for i, data in enumerate(pdd['qb_msmt_vals'][qb]):
self.fit_dicts[f'cos_fit_{qb}_{i}'] = {
'model': cos_mod,
'guess_dict': {'frequency': {'value': 1 / 360,
'vary': False}},
'fit_xvals': {'t': pdd['ramsey_phases']},
'fit_yvals': {'data': data}}
def analyze_fit_results(self):
pdd = self.proc_data_dict
pdd['phase_contrast'] = {}
pdd['phase_offset'] = {}
pdd['freq_offset'] = {}
pdd['freq'] = {}
self.skip_qb_freq_fits = self.get_param_value('skip_qb_freq_fits', False)
if not self.skip_qb_freq_fits:
pdd['flux'] = {}
for qb in self.qb_names:
pdd['phase_contrast'][qb] = np.array([
2 * self.fit_res[f'cos_fit_{qb}_{i}'].best_values['amplitude']
for i, _ in enumerate(pdd['qb_msmt_vals'][qb])])
pdd['phase_offset'][qb] = np.array([
self.fit_res[f'cos_fit_{qb}_{i}'].best_values['phase']
for i, _ in enumerate(pdd['qb_msmt_vals'][qb])])
pdd['phase_offset'][qb] *= 180 / np.pi
pdd['phase_offset'][qb] += 180 * (pdd['phase_contrast'][qb] < 0)
pdd['phase_offset'][qb] = (pdd['phase_offset'][qb] + 180) % 360 - 180
pdd['phase_offset'][qb] = \
np.unwrap(pdd['phase_offset'][qb] / 180 * np.pi) * 180 / np.pi
pdd['phase_contrast'][qb] = np.abs(pdd['phase_contrast'][qb])
pdd['freq_offset'][qb] = pdd['phase_offset'][qb] / 360 / pdd[
'target_fluxpulse_length']
fr = lmfit.Model(lambda a, f_a=1, f0=0: a * f_a + f0).fit(
data=pdd['freq_offset'][qb], a=pdd['target_amps'])
pdd['freq_offset'][qb] -= fr.best_values['f0']
if not self.skip_qb_freq_fits:
mpars = eval(self.raw_data_dict[f'{qb}.amp_to_freq_model'])
freq_idle = fit_mods.Qubit_dac_to_freq(
pdd['crosstalk_qubits_amplitudes'].get(qb, 0), **mpars)
pdd['freq'][qb] = pdd['freq_offset'][qb] + freq_idle
mpars.update({'V_per_phi0': 1, 'dac_sweet_spot': 0})
pdd['flux'][qb] = fit_mods.Qubit_freq_to_dac(
pdd['freq'][qb], **mpars)
# fit fitted results to linear models
lin_mod = lmfit.Model(lambda x, a=1, b=0: a*x + b)
def guess(model, data, x, **kwargs):
a_guess = (data[-1] - data[0])/(x[-1] - x[0])
b_guess = data[0] - x[0]*a_guess
return model.make_params(a=a_guess, b=b_guess)
lin_mod.guess = guess.__get__(lin_mod, lin_mod.__class__)
keys_to_fit = []
for qb in self.qb_names:
for param in ['phase_offset', 'freq_offset', 'flux']:
if param == 'flux' and self.skip_qb_freq_fits:
continue
key = f'{param}_fit_{qb}'
self.fit_dicts[key] = {
'model': lin_mod,
'fit_xvals': {'x': pdd['target_amps']},
'fit_yvals': {'data': pdd[param][qb]}}
keys_to_fit.append(key)
self.run_fitting(keys_to_fit=keys_to_fit)
def prepare_plots(self):
pdd = self.proc_data_dict
rdd = self.raw_data_dict
for qb in self.qb_names:
self.plot_dicts[f'data_2d_{qb}'] = {
'title': rdd['measurementstring'] +
'\n' + rdd['timestamp'] + '\n' + qb,
'plotfn': self.plot_colorxy,
'xvals': pdd['ramsey_phases'],
'yvals': pdd['target_amps'],
'zvals': pdd['qb_msmt_vals'][qb],
'xlabel': r'Ramsey phase, $\phi$',
'xunit': 'deg',
'ylabel': self.sp.get_sweep_params_property('label', 1,
'target_amp'),
'yunit': self.sp.get_sweep_params_property('unit', 1,
'target_amp'),
'zlabel': 'Excited state population',
}
colormap = self.options_dict.get('colormap', mpl.cm.plasma)
for i, pval in enumerate(pdd['target_amps']):
legendlabel = f'data, amp. = {pval:.4f} V'
color = colormap(i / (len(pdd['target_amps']) - 1))
label = f'cos_data_{qb}_{i}'
self.plot_dicts[label] = {
'title': rdd['measurementstring'] +
'\n' + rdd['timestamp'] + '\n' + qb,
'ax_id': f'param_crossections_{qb}',
'plotfn': self.plot_line,
'xvals': pdd['ramsey_phases'],
'yvals': pdd['qb_msmt_vals'][qb][i],
'xlabel': r'Ramsey phase, $\phi$',
'xunit': 'deg',
'ylabel': 'Excited state population',
'linestyle': '',
'color': color,
'setlabel': legendlabel,
'do_legend': False,
'legend_bbox_to_anchor': (1, 1),
'legend_pos': 'upper left',
}
if self.do_fitting:
for i, pval in enumerate(pdd['target_amps']):
legendlabel = f'fit, amp. = {pval:.4f} V'
color = colormap(i / (len(pdd['target_amps']) - 1))
label = f'cos_fit_{qb}_{i}'
self.plot_dicts[label] = {
'ax_id': f'param_crossections_{qb}',
'plotfn': self.plot_fit,
'fit_res': self.fit_res[label],
'plot_init': self.options_dict.get('plot_init', False),
'color': color,
'setlabel': legendlabel,
'do_legend': False,
}
# Phase contrast
self.plot_dicts[f'phase_contrast_data_{qb}'] = {
'title': rdd['measurementstring'] +
'\n' + rdd['timestamp'] + '\n' + qb,
'ax_id': f'phase_contrast_{qb}',
'plotfn': self.plot_line,
'xvals': pdd['target_amps'],
'yvals': pdd['phase_contrast'][qb] * 100,
'xlabel':self.sp.get_sweep_params_property('label', 1,
'target_amp'),
'xunit': self.sp.get_sweep_params_property('unit', 1,
'target_amp'),
'ylabel': 'Phase contrast',
'yunit': '%',
'linestyle': '-',
'marker': 'o',
'color': 'C0',
}
# Phase offset
self.plot_dicts[f'phase_offset_data_{qb}'] = {
'title': rdd['measurementstring'] +
'\n' + rdd['timestamp'] + '\n' + qb,
'ax_id': f'phase_offset_{qb}',
'plotfn': self.plot_line,
'xvals': pdd['target_amps'],
'yvals': pdd['phase_offset'][qb],
'xlabel':self.sp.get_sweep_params_property('label', 1,
'target_amp'),
'xunit': self.sp.get_sweep_params_property('unit', 1,
'target_amp'),
'ylabel': 'Phase offset',
'yunit': 'deg',
'linestyle': 'none',
'marker': 'o',
'color': 'C0',
}
# Frequency offset
self.plot_dicts[f'freq_offset_data_{qb}'] = {
'title': rdd['measurementstring'] +
'\n' + rdd['timestamp'] + '\n' + qb,
'ax_id': f'freq_offset_{qb}',
'plotfn': self.plot_line,
'xvals': pdd['target_amps'],
'yvals': pdd['freq_offset'][qb],
'xlabel':self.sp.get_sweep_params_property('label', 1,
'target_amp'),
'xunit': self.sp.get_sweep_params_property('unit', 1,
'target_amp'),
'ylabel': 'Freq. offset, $\\Delta f$',
'yunit': 'Hz',
'linestyle': 'none',
'marker': 'o',
'color': 'C0',
}
if not self.skip_qb_freq_fits:
# Flux
self.plot_dicts[f'flux_data_{qb}'] = {
'title': rdd['measurementstring'] +
'\n' + rdd['timestamp'] + '\n' + qb,
'ax_id': f'flux_{qb}',
'plotfn': self.plot_line,
'xvals': pdd['target_amps'],
'yvals': pdd['flux'][qb],
'xlabel': self.sp[1]['target_amp'][2],
'xunit': self.sp[1]['target_amp'][1],
'ylabel': 'Flux, $\\Phi$',
'yunit': '$\\Phi_0$',
'linestyle': 'none',
'marker': 'o',
'color': 'C0',
}
for param in ['phase_offset', 'freq_offset', 'flux']:
if param == 'flux' and self.skip_qb_freq_fits:
continue
self.plot_dicts[f'{param}_fit_{qb}'] = {
'ax_id': f'{param}_{qb}',
'plotfn': self.plot_fit,
'fit_res': self.fit_res[f'{param}_fit_{qb}'],
'plot_init': self.options_dict.get('plot_init', False),
'linestyle': '-',
'marker': '',
'color': 'C1',
}
class RabiAnalysis(MultiQubit_TimeDomain_Analysis):
def __init__(self, qb_names, *args, **kwargs):
params_dict = {}
for qbn in qb_names:
s = 'Instrument settings.'+qbn
for trans_name in ['ge', 'ef']:
params_dict[f'{trans_name}_amp180_'+qbn] = \
s+f'.{trans_name}_amp180'
params_dict[f'{trans_name}_amp90scale_'+qbn] = \
s+f'.{trans_name}_amp90_scale'
kwargs['params_dict'] = params_dict
kwargs['numeric_params'] = list(params_dict)
super().__init__(qb_names, *args, **kwargs)
def prepare_fitting(self):
self.fit_dicts = OrderedDict()
for qbn in self.qb_names:
data = self.proc_data_dict['data_to_fit'][qbn]
sweep_points = self.proc_data_dict['sweep_points_dict'][qbn][
'msmt_sweep_points']
if self.num_cal_points != 0:
data = data[:-self.num_cal_points]
cos_mod = lmfit.Model(fit_mods.CosFunc)
guess_pars = fit_mods.Cos_guess(
model=cos_mod, t=sweep_points, data=data)
guess_pars['amplitude'].vary = True
guess_pars['amplitude'].min = -10
guess_pars['offset'].vary = True
guess_pars['frequency'].vary = True
guess_pars['phase'].vary = True
self.set_user_guess_pars(guess_pars)
key = 'cos_fit_' + qbn
self.fit_dicts[key] = {
'fit_fn': fit_mods.CosFunc,
'fit_xvals': {'t': sweep_points},
'fit_yvals': {'data': data},
'guess_pars': guess_pars}
def analyze_fit_results(self):
self.proc_data_dict['analysis_params_dict'] = OrderedDict()
for qbn in self.qb_names:
fit_res = self.fit_dicts['cos_fit_' + qbn]['fit_res']
sweep_points = self.proc_data_dict['sweep_points_dict'][qbn][
'msmt_sweep_points']
self.proc_data_dict['analysis_params_dict'][qbn] = \
self.get_amplitudes(fit_res=fit_res, sweep_points=sweep_points)
self.save_processed_data(key='analysis_params_dict')
def get_amplitudes(self, fit_res, sweep_points):
# Extract the best fitted frequency and phase.
freq_fit = fit_res.best_values['frequency']
phase_fit = fit_res.best_values['phase']
freq_std = fit_res.params['frequency'].stderr
phase_std = fit_res.params['phase'].stderr
# If the fitted phase is very close to zero, snap it to exactly 0.
if np.abs(phase_fit) < 0.1:
phase_fit = 0
# If phase_fit<1, the piHalf amplitude<0.
if phase_fit < 1:
log.info('The data could not be fitted correctly. '
'The fitted phase "%s" <1, which gives '
'negative piHalf '
'amplitude.' % phase_fit)
stepsize = sweep_points[1] - sweep_points[0]
if freq_fit > 2 * stepsize:
log.info('The data could not be fitted correctly. The '
'frequency "%s" is too high.' % freq_fit)
n = np.arange(-2, 10)
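# The original method is truncated here.  A hedged sketch of how the pi and
# pi/2 pulse amplitudes would follow from the fitted cosine (an assumption,
# not the original implementation): for data ~ cos(2*pi*f*x + phi), population
# inversion (pi pulse) occurs where the argument is an odd multiple of pi, and
# the equal-superposition point (pi/2 pulse) where it is an odd multiple of pi/2:
#
#   pi_amps   = ((2 * n + 1) * np.pi - phase_fit) / (2 * np.pi * freq_fit)
#   half_amps = ((2 * n + 1) * np.pi / 2 - phase_fit) / (2 * np.pi * freq_fit)
#
# with n = np.arange(-2, 10) as above; the candidates lying inside the swept
# amplitude range would then be selected.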
import numpy as np
from rltf.memory import BaseBuffer
class PGBuffer(BaseBuffer):
"""Fixed-size data buffer. Supports both image observations and low-level observations.
"""
def __init__(self, size, state_shape, obs_dtype, act_shape, act_dtype, obs_len=1):
"""
Args: See `BaseBuffer.store()`
"""
super().__init__(size, state_shape, obs_dtype, act_shape, act_dtype, obs_len)
# Create a buffer for the value function
vf = np.empty([self.max_size+1], dtype=np.float32)
self.vf = vf[:-1]
self.next_vf = vf[1:]
self.gae_lambda = np.empty([self.max_size], dtype=np.float32)
self.td_lambda = np.empty([self.max_size], dtype=np.float32)
self.logp = np.empty([self.max_size], dtype=np.float32)
self.it = None
#pylint: disable=arguments-differ
def store(self, obs_t, act_t, rew_tp1, done_tp1, vf_t, logp_t):
"""Store the observed transition defined as: Given `obs_t`, action `act_t` was taken.
Then reward `rew_tp1` was observed. If after action `act_t` the episode terminated,
then `done_tp1` will be `True`, otherwise `False`. Note that the observation after taking
`act_t` should be passed as `obs_t` on the next call to `store()`. NOTE: if `done_tp1 == True`,
then there is no need to call `store()` on `obs_tp1`: we do NOT need to know it since we never
use it in computing the backup value
Args:
obs_t: See `BaseBuffer.store()`
act_t: See `BaseBuffer.store()`
rew_tp1: See `BaseBuffer.store()`
done_tp1: See `BaseBuffer.store()`
vf_t: float. Value function estimate for `obs_t`
logp_t: float. Log probability of action `act_t`
"""
# Store these before advancing next_idx in BaseBuffer
self.vf[self.next_idx] = vf_t
self.logp[self.next_idx] = logp_t
super().store(obs_t, act_t, rew_tp1, done_tp1)
def __iter__(self):
self.it = -1
return self
def __next__(self):
if self.it >= self.size_now - 1:
raise StopIteration
else:
self.it += 1
return self.__getitem__(self.it)
def __getitem__(self, i):
# If low-level observations or single frames
if self.obs_len == 1:
obs = self.obs[i]
else:
obs = self._encode_img_observation(i)
return obs, self.action[i], self.reward[i], self.done[i], self.vf[i], self.next_vf[i]
def compute_estimates(self, gamma, lam, next_vf=0):
"""Compute the advantage estimates using the GAE(gamma, lambda) estimator and
the value function targets using the TD(lambda) estimator
Args:
gamma: float. The value of gamma for GAE(gamma, lambda)
lam: float. The value of lambda for GAE(gamma, lambda) and TD(lambda)
next_vf: float. The value function estimate for the observation encountered after the
last step. Must be 0 if the episode was done
"""
# Assert that the buffer is exactly filled
assert self.next_idx == 0
self.next_vf[-1] = next_vf
gae_t = 0
# Compute GAE(gamma, lambda)
# pylint: disable=redefined-argument-from-local
for t, (_, _, rew, done, vf, next_vf) in zip(reversed(range(self.size_now)), reversed(self)):
delta_t = rew + (1 - done) * gamma * next_vf - vf
gae_t = delta_t + (1 - done) * gamma * lam * gae_t
self.gae_lambda[t] = gae_t
# Compute TD(lambda)
self.td_lambda = self.gae_lambda + self.vf
def get_data(self):
"""Return all data"""
return self._batch_samples(np.arange(0, self.size_now))
def iterate(self, batch_size, shuffle=True):
size = (self.size_now // batch_size) * batch_size
inds = np.arange(0, self.size_now)
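# Hedged usage sketch (added; not part of the original file).  The environment
# and policy objects below are assumptions used only to illustrate the intended
# call order: fill the buffer exactly to `max_size`, then compute GAE/TD(lambda).
#
#   buf = PGBuffer(size=2048, state_shape=(4,), obs_dtype=np.float32,
#                  act_shape=(), act_dtype=np.int32)
#   obs = env.reset()
#   for _ in range(2048):
#       act, vf, logp = policy.step(obs)
#       next_obs, rew, done, _ = env.step(act)
#       buf.store(obs, act, rew, done, vf, logp)
#       obs = env.reset() if done else next_obs
#   buf.compute_estimates(gamma=0.99, lam=0.95, next_vf=0 if done else policy.value(obs))
#   batch = buf.get_data()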
# -*- encoding: utf-8 -*-
import numba
import numpy as np
import healpy as hp
from scipy.interpolate import griddata
from scipy.ndimage.interpolation import zoom
@numba.jit(nopython=True)
def binned_map(signal, pixidx, mappixels, hits, reset_map=True):
"""Project a TOD onto a map.
This function implements a simple binner to project a TOD into a
map.
Args:
* signal: A TOD containing the signal to be projected (1D vector)
* pixidx: A TOD containing the index of the pixels (1D vector, same
length as `signal`)
* mappixels: A Healpix map that will contain the projected signal
* hits: A Healpix map of the same resolution as mappixels that will
contain the number of hits
"""
assert len(mappixels) == len(hits)
assert len(signal) == len(pixidx)
if reset_map:
mappixels[:] = 0.0
hits[:] = 0
for i in range(len(signal)):
mappixels[pixidx[i]] += signal[i]
hits[pixidx[i]] += 1
for i in range(len(mappixels)):
if hits[i] > 0:
mappixels[i] /= hits[i]
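# Hedged example (added for illustration; not part of the original module):
# two samples that fall in the same pixel should average to their mean.
def _binned_map_example(nside=16):
    mappix = np.zeros(hp.nside2npix(nside))
    hits = np.zeros(mappix.size, dtype=np.int64)
    binned_map(np.array([1.0, 3.0]), np.array([7, 7]), mappix, hits)
    return mappix[7], hits[7]  # expected: (2.0, 2)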
def img2map(
img, resultmap, resulthits, delta_theta, delta_phi, rot=np.eye(3), reset_map=True
):
"""Project a 2D image on a Healpix map.
Args:
* img: A 2D matrix containing the image to be projected on the map
* resultmap: A Healpix map where to project the image
* resulthits: A Healpix map of the same side as `resultmap`, which
will store the hit count per pixel of `resultmap`
* delta_theta: the width of the image along the meridian, in degrees
* delta_phi: the height of the image along the meridian, in degrees
* rot: Either a 3×3 matrix or a `healpy.rotator.Rotator` object
* reset_map: If True, both `resultmap` and `resulthits` will be zeroed
before doing the projection
Returns:
Nothing: the projected signal and the hit counts are written in place
into `resultmap` and `resulthits`.
"""
assert img.ndim == 2
assert len(resultmap) == len(resulthits)
assert delta_theta > 0.0
assert delta_phi > 0.0
nside = hp.npix2nside(len(resultmap))
delta_theta, delta_phi = [np.deg2rad(x) for x in (delta_theta, delta_phi)]
if type(rot) is hp.rotator.Rotator:
rotmatr = rot.mat
else:
rotmatr = rot
# We fire a number of rays close enough not to miss pixels within
# the image frame. We use as a rule of thumb a spacing that is
# half the resolution of the map.
map_resolution = 0.5 * hp.nside2resol(nside, arcmin=False)
nx, ny = [max(1, int(span / map_resolution)) for span in (delta_theta, delta_phi)]
theta_proj = np.linspace((np.pi - delta_theta) / 2, (np.pi + delta_theta) / 2, nx)
phi_proj = np.linspace(delta_phi / 2, -delta_phi / 2, ny)
# In order to fire so many rays, we need to interpolate between
# adjacent pixels in the image matrix
proj_img = zoom(img, (nx / img.shape[1], ny / img.shape[0]), order=0)
# This 2D mesh grid contains the direction of all the rays we're
# going to fire around position (θ=π/2, φ=0).
theta_proj, phi_proj = np.meshgrid(theta_proj, phi_proj)
# The shape of "dirs" is nx × ny × 3
dirs = hp.ang2vec(theta_proj, phi_proj)
# "rotdirs" has the same shape as "dirs". With this operation, we
# apply the rotation matrix to the rays around position (θ=π/2,
# φ=0).
rotdirs = np.tensordot(dirs, rotmatr, (2, 1))
# "theta" and "phi" are linear vectors
theta, phi = hp.vec2ang(np.reshape(rotdirs, (-1, 3)))
pixidx = hp.ang2pix(nside, theta, phi)
# Run a simple map-maker
binned_map(np.ravel(proj_img), pixidx, resultmap, resulthits)
# We're returning nothing, as the result is in "resultmap" and
# "resulthits"
def img2healpix(img, nside, delta_theta, delta_phi, rot=np.eye(3)):
"""Projection of a 2D image on a Healpix map.
This function is a wrapper to :func:`nnhealpix.img2map`. Use the
latter function if you have already allocated a map.
Args:
* img: A 2D matrix containing the image to be projected on the map
* nside (int): The resolution of the Healpix map
* delta_theta (float): the width of the image along the meridian, in degrees
* delta_phi (float): the height of the image along the meridian, in degrees
* rot: Either a 3×3 matrix or a `healpy.rotator.Rotator` object
Returns:
A tuple containing the map and the hit map. Unseen pixels in
the map are set to zero.
"""
assert hp.isnsideok(nside)
assert delta_theta < 180.0
assert delta_phi < 180.0
result = np.zeros(hp.nside2npix(nside))
hits = np.zeros(result.size, dtype="int")
img2map(img, result, hits, delta_theta, delta_phi, rot, reset_map=False)
return result, hits
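# Hedged usage sketch (added; not in the original module): project a random
# 28x28 image spanning 30 deg x 30 deg onto an nside=64 map.
def _img2healpix_example():
    img = np.random.rand(28, 28)
    pixels, hits = img2healpix(img, nside=64, delta_theta=30.0, delta_phi=30.0)
    # hp.mollview(pixels)  # uncomment to visualize the projected patch
    return pixels, hits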
class projectimages:
"""Project a randomly chosen set of 2D images on Healpix maps.
This class returns an iterator that produces a set of Healpix maps
given a number of 2D images. It can be used in :code:`for` loops
to produce datasets for training convolutional neural networks.
Args:
* images (array): 3D tensor with shape ``[n, width, height]``,
where ``n`` is the number of images and ``width×width`` is
the size of each 2D image
* nside (int): resolution of the Healpix maps
* delta_theta (float, or 2-tuple of floats): Either the size
along the theta axis of the image (before applying any
rotation), or a range ``(min, max)``. In the latter case,
each map will have delta_theta picked randomly within the
range.
* delta_phi (float, or 2-tuple of floats): Same as
:param:`delta_theta`, but along the phi direction
* num (int): If specified, the iterator will run "num"
times. Otherwise, it will loop forever.
* rot: Either a 3×3 matrix or a ``healpy.rotator.Rotator``
object (optional)
Returns:
Each iteration returns a pair (num, pixels) containing the
index of the image projected on the map and the pixels of the
map itself. The value of "num" is always in the range [0,
images.shape[0]).
Example::
import nnhealpix as nnh
import numpy as np
import healpy
from tensorflow.keras.datasets import mnist
(x_train, y_train), (x_test, y_test) = mnist.load_data()
for idx, pixels in nnh.projectimages(x_train, 64, 30.0, 30.0, num=5):
print('Image index: {0}, digit is {1}'.format(idx, y_train[idx]))
healpy.mollview(pixels)
"""
def __init__(self, images, nside, delta_theta, delta_phi, rot=None, num=None):
self.images = images
self.nside = nside
self.delta_theta = delta_theta
self.delta_phi = delta_phi
self.num = num
self.idx = 0
self.hitmap = np.zeros(hp.nside2npix(self.nside), dtype="int")
self.rot = rot
def __iter__(self):
return self
def _get_angle(self, value):
if isinstance(value, (list, tuple, np.ndarray)):
assert len(value) == 2
start, stop = value
result = np.random.rand() * (stop - start) + start
else:
result = float(value)
return result
def _get_delta_theta(self):
return self._get_angle(self.delta_theta)
def _get_delta_phi(self):
return self._get_angle(self.delta_phi)
def __next__(self):
if self.num and self.idx >= self.num:
raise StopIteration()
delta_theta = self._get_delta_theta()
delta_phi = self._get_delta_phi()
imgidx = np.random.choice(self.images.shape[0])
if self.rot:
rotation = self.rot
else:
rotation = hp.rotator.Rotator(
rot=(
np.random.rand() * 360.0,
np.random.rand() * 360.0,
np.random.rand() * 360.0,
#!/usr/bin/python
from __future__ import division
from __future__ import print_function
import sys
import os
import re
import datetime
import zipfile
import tempfile
import argparse
import math
import warnings
import json
import csv
import numpy as np
import scipy.stats as scp
from lxml import etree as et
def get_rdml_lib_version():
"""Return the version string of the RDML library.
Returns:
The version string of the RDML library.
"""
return "1.0.0"
class NpEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, np.integer):
return int(obj)
elif isinstance(obj, np.floating):
return float(obj)
elif isinstance(obj, np.bool_):
return bool(obj)
elif isinstance(obj, np.ndarray):
return obj.tolist()
else:
return super(NpEncoder, self).default(obj)
class RdmlError(Exception):
"""Basic exception for errors raised by the RDML-Python library"""
def __init__(self, message):
Exception.__init__(self, message)
pass
class secondError(RdmlError):
"""Just to have, not used yet"""
def __init__(self, message):
RdmlError.__init__(self, message)
pass
def _get_first_child(base, tag):
"""Get a child element of the base node with a given tag.
Args:
base: The base node element. (lxml node)
tag: Child elements group tag used to select the elements. (string)
Returns:
The first child lxml node element found or None.
"""
for node in base:
if node.tag.replace("{http://www.rdml.org}", "") == tag:
return node
return None
def _get_first_child_text(base, tag):
"""Get a child element of the base node with a given tag.
Args:
base: The base node element. (lxml node)
tag: Child elements group tag used to select the elements. (string)
Returns:
The text of first child node element found or an empty string.
"""
for node in base:
if node.tag.replace("{http://www.rdml.org}", "") == tag:
return node.text
return ""
def _get_first_child_bool(base, tag, triple=True):
"""Get a child element of the base node with a given tag.
Args:
base: The base node element. (lxml node)
tag: Child elements group tag used to select the elements. (string)
triple: If True, None is returned if not found, if False, False
Returns:
The bool value of the tag, or None if triple is True and the tag is missing.
"""
for node in base:
if node.tag.replace("{http://www.rdml.org}", "") == tag:
return _string_to_bool(node.text, triple)
if triple is False:
return False
else:
return None
def _get_step_sort_nr(elem):
"""Get the number of the step eg. for sorting.
Args:
elem: The node element. (lxml node)
Returns:
The int value of the step node's "nr" element.
"""
if elem is None:
raise RdmlError('A step element must be provided for sorting.')
ret = _get_first_child_text(elem, "nr")
if ret == "":
raise RdmlError('A step element must have a \"nr\" element for sorting.')
return int(ret)
def _sort_list_int(elem):
"""Get the first element of the array as int. for sorting.
Args:
elem: The 2d list
Returns:
The int value of the first list element.
"""
return int(elem[0])
def _sort_list_float(elem):
"""Get the first element of the array as float. for sorting.
Args:
elem: The 2d list
Returns:
The float value of the first list element.
"""
return float(elem[0])
def _sort_list_digital_PCR(elem):
"""Get the first column of the list as int. for sorting.
Args:
elem: The list
Returns:
A tuple of the first column as int and the fifth column as string.
"""
arr = elem.split("\t")
return int(arr[0]), arr[4]
def _string_to_bool(value, triple=True):
"""Translates a string into bool value or None.
Args:
value: The string value to evaluate. (string)
triple: If True, None is returned if not found, if False, False
Returns:
The bool value of the string, or None if triple is True and the value is empty.
"""
if value is None or value == "":
if triple is True:
return None
else:
return False
if type(value) is bool:
return value
if type(value) is int:
if value != 0:
return True
else:
return False
if type(value) is str:
if value.lower() in ['false', '0', 'f', '-', 'n', 'no']:
return False
else:
return True
return
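# Illustrative examples (added; not part of the original library):
#   _string_to_bool("no")             -> False
#   _string_to_bool("TRUE")           -> True
#   _string_to_bool("", triple=True)  -> None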
def _value_to_booldic(value):
"""Translates a string, list or dic to a dictionary with true/false.
Args:
value: The string value to evaluate. (string)
Returns:
A dictionary mapping each given value to a bool.
"""
ret = {}
if type(value) is str:
ret[value] = True
if type(value) is list:
for ele in value:
ret[ele] = True
if type(value) is dict:
for key, val in value.items():
ret[key] = _string_to_bool(val, triple=False)
return ret
def _get_first_child_by_pos_or_id(base, tag, by_id, by_pos):
"""Get a child element of the base node with a given tag and position or id.
Args:
base: The base node element. (lxml node)
tag: Child elements group tag used to select the elements. (string)
by_id: The unique id to search for. (string)
by_pos: The position of the element in the list (int)
Returns:
The child node element found or raise error.
"""
if by_id is None and by_pos is None:
raise RdmlError('Either an ' + tag + ' id or a position must be provided.')
if by_id is not None and by_pos is not None:
raise RdmlError('Only an ' + tag + ' id or a position can be provided.')
allChildren = _get_all_children(base, tag)
if by_id is not None:
for node in allChildren:
if node.get('id') == by_id:
return node
raise RdmlError('The ' + tag + ' id: ' + by_id + ' was not found in RDML file.')
if by_pos is not None:
if by_pos < 0 or by_pos > len(allChildren) - 1:
raise RdmlError('The ' + tag + ' position ' + by_pos + ' is out of range.')
return allChildren[by_pos]
def _add_first_child_to_dic(base, dic, opt, tag):
"""Adds the first child element with a given tag to a dictionary.
Args:
base: The base node element. (lxml node)
dic: The dictionary to add the element to (dictionary)
opt: If false and id is not found in base, the element is added with an empty string (Bool)
tag: Child elements group tag used to select the elements. (string)
Returns:
The dictionary with the added element.
"""
for node in base:
if node.tag.replace("{http://www.rdml.org}", "") == tag:
dic[tag] = node.text
return dic
if not opt:
dic[tag] = ""
return dic
def _get_all_children(base, tag):
"""Get a list of all child elements with a given tag.
Args:
base: The base node element. (lxml node)
tag: Child elements group tag used to select the elements. (string)
Returns:
A list with all child node elements found or an empty list.
"""
ret = []
for node in base:
if node.tag.replace("{http://www.rdml.org}", "") == tag:
ret.append(node)
return ret
def _get_all_children_id(base, tag):
"""Get a list of ids of all child elements with a given tag.
Args:
base: The base node element. (lxml node)
tag: Child elements group tag used to select the elements. (string)
Returns:
A list with all child id strings found or an empty list.
"""
ret = []
for node in base:
if node.tag.replace("{http://www.rdml.org}", "") == tag:
ret.append(node.get('id'))
return ret
def _get_number_of_children(base, tag):
"""Count all child elements with a given tag.
Args:
base: The base node element. (lxml node)
tag: Child elements group tag used to select the elements. (string)
Returns:
A int number of the found child elements with the id.
"""
counter = 0
for node in base:
if node.tag.replace("{http://www.rdml.org}", "") == tag:
counter += 1
return counter
def _check_unique_id(base, tag, id):
"""Find all child elements with a given group and check if the id is already used.
Args:
base: The base node element. (lxml node)
tag: Child elements group tag used to select the elements. (string)
id: The unique id to search for. (string)
Returns:
False if the id is already used, True if not.
"""
for node in base:
if node.tag.replace("{http://www.rdml.org}", "") == tag:
if node.get('id') == id:
return False
return True
def _create_new_element(base, tag, id):
"""Create a new element with a given tag and id.
Args:
base: The base node element. (lxml node)
tag: Child elements group tag. (string)
id: The unique id of the new element. (string)
Returns:
The new lxml node element with the given tag and id.
"""
if id is None or id == "":
raise RdmlError('An ' + tag + ' id must be provided.')
if not _check_unique_id(base, tag, id):
raise RdmlError('The ' + tag + ' id "' + id + '" must be unique.')
return et.Element(tag, id=id)
def _add_new_subelement(base, basetag, tag, text, opt):
"""Create a new element with a given tag and id.
Args:
base: The base node element. (lxml node)
basetag: Child elements group tag. (string)
tag: Child elements own tag, to be created. (string)
text: The text content of the new element. (string)
opt: If true, the element is optional (Bool)
Returns:
Nothing, the base lxml element is modified.
"""
if opt is False:
if text is None or text == "":
raise RdmlError('An ' + basetag + ' ' + tag + ' must be provided.')
et.SubElement(base, tag).text = text
else:
if text is not None and text != "":
et.SubElement(base, tag).text = text
def _change_subelement(base, tag, xmlkeys, value, opt, vtype, id_as_element=False):
"""Change the value of the element with a given tag.
Args:
base: The base node element. (lxml node)
tag: Child elements own tag, to be created. (string)
xmlkeys: The list of possible keys in the right order for xml (list strings)
value: The text content of the new element.
opt: If true, the element is optional (Bool)
vtype: The type of the value ("string", "int", "float" or "bool")
id_as_element: If true, handle tag "id" as element, else as attribute
Returns:
Nothing, the base lxml element is modified.
"""
# Todo validate values with vtype
goodVal = value
if vtype == "bool":
ev = _string_to_bool(value, triple=True)
if ev is None or ev == "":
goodVal = ""
else:
if ev:
goodVal = "true"
else:
goodVal = "false"
if opt is False:
if goodVal is None or goodVal == "":
raise RdmlError('A value for ' + tag + ' must be provided.')
if tag == "id" and id_as_element is False:
if base.get('id') != goodVal:
par = base.getparent()
groupTag = base.tag.replace("{http://www.rdml.org}", "")
if not _check_unique_id(par, groupTag, goodVal):
raise RdmlError('The ' + groupTag + ' id "' + goodVal + '" is not unique.')
base.attrib['id'] = goodVal
return
# Check if the tag already exists
elem = _get_first_child(base, tag)
if elem is not None:
if goodVal is None or goodVal == "":
base.remove(elem)
else:
elem.text = goodVal
else:
if goodVal is not None and goodVal != "":
new_node = et.Element(tag)
new_node.text = goodVal
place = _get_tag_pos(base, tag, xmlkeys, 0)
base.insert(place, new_node)
def _get_or_create_subelement(base, tag, xmlkeys):
"""Get element with a given tag, if not present, create it.
Args:
base: The base node element. (lxml node)
tag: Child elements own tag, to be created. (string)
xmlkeys: The list of possible keys in the right order for xml (list strings)
Returns:
The node element with the tag.
"""
# Check if the tag already exists
if _get_first_child(base, tag) is None:
new_node = et.Element(tag)
place = _get_tag_pos(base, tag, xmlkeys, 0)
base.insert(place, new_node)
return _get_first_child(base, tag)
def _remove_irrelevant_subelement(base, tag):
"""If element with a given tag has no children, remove it.
Args:
base: The base node element. (lxml node)
tag: Child elements own tag, to be created. (string)
Returns:
The node element with the tag.
"""
# Check if the tag already exists
elem = _get_first_child(base, tag)
if elem is None:
return
if len(elem) == 0:
base.remove(elem)
def _move_subelement(base, tag, id, xmlkeys, position):
"""Change the value of the element with a given tag.
Args:
base: The base node element. (lxml node)
tag: Child elements group tag used to select the elements. (string)
id: The unique id of the element to move. (string)
xmlkeys: The list of possible keys in the right order for xml (list strings)
position: the new position of the element (int)
Returns:
Nothing, the base lxml element is modified.
"""
pos = _get_tag_pos(base, tag, xmlkeys, position)
ele = _get_first_child_by_pos_or_id(base, tag, id, None)
base.insert(pos, ele)
def _move_subelement_pos(base, tag, oldpos, xmlkeys, position):
"""Change the value of the element with a given tag.
Args:
base: The base node element. (lxml node)
tag: Child elements group tag used to select the elements. (string)
oldpos: The current position of the element to move. (int)
xmlkeys: The list of possible keys in the right order for xml (list strings)
position: the new position of the element (int)
Returns:
Nothing, the base lxml element is modified.
"""
pos = _get_tag_pos(base, tag, xmlkeys, position)
ele = _get_first_child_by_pos_or_id(base, tag, None, oldpos)
base.insert(pos, ele)
def _get_tag_pos(base, tag, xmlkeys, pos):
"""Returns a position were to add a subelement with the given tag inc. pos offset.
Args:
base: The base node element. (lxml node)
tag: Child elements group tag. (string)
xmlkeys: The list of possible keys in the right order for xml (list strings)
pos: The position relative to the tag elements (int)
Returns:
The int number of were to add the element with the tag.
"""
count = _get_number_of_children(base, tag)
offset = pos
if pos is None or pos < 0:
offset = 0
pos = 0
if pos > count:
offset = count
return _get_first_tag_pos(base, tag, xmlkeys) + offset
def _get_first_tag_pos(base, tag, xmlkeys):
"""Returns a position were to add a subelement with the given tag.
Args:
base: The base node element. (lxml node)
tag: Child elements group tag. (string)
xmlkeys: The list of possible keys in the right order for xml (list strings)
Returns:
The int number of were to add the element with the tag.
"""
listrest = xmlkeys[xmlkeys.index(tag):]
counter = 0
for node in base:
if node.tag.replace("{http://www.rdml.org}", "") in listrest:
return counter
counter += 1
return counter
def _writeFileInRDML(rdmlName, fileName, data):
"""Writes a file in the RDML zip, even if it existed before.
Args:
rdmlName: The name of the RDML zip file
fileName: The name of the file to write into the zip
data: The data string of the file
Returns:
Nothing, modifies the RDML file.
"""
needRewrite = False
if os.path.isfile(rdmlName):
with zipfile.ZipFile(rdmlName, 'r') as RDMLin:
for item in RDMLin.infolist():
if item.filename == fileName:
needRewrite = True
if needRewrite:
tempFolder, tempName = tempfile.mkstemp(dir=os.path.dirname(rdmlName))
os.close(tempFolder)
# copy everything except the filename
with zipfile.ZipFile(rdmlName, 'r') as RDMLin:
with zipfile.ZipFile(tempName, mode='w', compression=zipfile.ZIP_DEFLATED) as RDMLout:
RDMLout.comment = RDMLin.comment
for item in RDMLin.infolist():
if item.filename != fileName:
RDMLout.writestr(item, RDMLin.read(item.filename))
if data != "":
RDMLout.writestr(fileName, data)
os.remove(rdmlName)
os.rename(tempName, rdmlName)
else:
with zipfile.ZipFile(rdmlName, mode='a', compression=zipfile.ZIP_DEFLATED) as RDMLout:
RDMLout.writestr(fileName, data)
def _lrp_linReg(xIn, yUse):
"""A function which calculates the slope or the intercept by linear regression.
Args:
xIn: The numpy array of the cycles
yUse: The numpy array that contains the fluorescence
Returns:
An array with the slope and intercept.
"""
counts = np.ones(yUse.shape)
xUse = xIn.copy()
xUse[np.isnan(yUse)] = 0
counts[np.isnan(yUse)] = 0
cycSqared = xUse * xUse
cycFluor = xUse * yUse
sumCyc = np.nansum(xUse, axis=1)
sumFluor = np.nansum(yUse, axis=1)
sumCycSquared = np.nansum(cycSqared, axis=1)
sumCycFluor = np.nansum(cycFluor, axis=1)
n = np.nansum(counts, axis=1)
ssx = sumCycSquared - (sumCyc * sumCyc) / n
sxy = sumCycFluor - (sumCyc * sumFluor) / n
slope = sxy / ssx
intercept = (sumFluor / n) - slope * (sumCyc / n)
return [slope, intercept]
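# Hedged sanity check (added for illustration): on NaN-free data the vectorised
# regression above reproduces the known slope/intercept of each row.
def _lrp_linReg_example():
    cycles = np.tile(np.arange(1.0, 6.0), (2, 1))       # 2 curves, cycles 1..5
    fluor = np.vstack([2.0 * cycles[0] + 1.0,            # slope 2.0, intercept 1.0
                       -0.5 * cycles[0] + 4.0])          # slope -0.5, intercept 4.0
    slope, intercept = _lrp_linReg(cycles, fluor)
    return slope, intercept   # approx [2.0, -0.5], [1.0, 4.0]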
def _lrp_findStopCyc(fluor, aRow):
"""Find the stop cycle of the log lin phase in fluor.
Args:
fluor: The array with the fluorescence values
aRow: The row to work on
Returns:
An int with the stop cycle.
"""
# Take care of nan values
validTwoLessCyc = 3 # Cycles so +1 to array
while (validTwoLessCyc <= fluor.shape[1] and
(np.isnan(fluor[aRow, validTwoLessCyc - 1]) or
np.isnan(fluor[aRow, validTwoLessCyc - 2]) or
np.isnan(fluor[aRow, validTwoLessCyc - 3]))):
validTwoLessCyc += 1
# First and Second Derivative values calculation
fluorShift = np.roll(fluor[aRow], 1, axis=0) # Shift to right - real position is -0.5
fluorShift[0] = np.nan
firstDerivative = fluor[aRow] - fluorShift
if np.isfinite(firstDerivative).any():
FDMaxCyc = np.nanargmax(firstDerivative, axis=0) + 1 # Cycles so +1 to array
else:
return fluor.shape[1]
firstDerivativeShift = np.roll(firstDerivative, -1, axis=0) # Shift to left
firstDerivativeShift[-1] = np.nan
secondDerivative = firstDerivativeShift - firstDerivative
if FDMaxCyc + 2 <= fluor.shape[1]:
# Only add two cycles if there is an increase without nan
if (not np.isnan(fluor[aRow, FDMaxCyc - 1]) and
not np.isnan(fluor[aRow, FDMaxCyc]) and
not np.isnan(fluor[aRow, FDMaxCyc + 1]) and
fluor[aRow, FDMaxCyc + 1] > fluor[aRow, FDMaxCyc] > fluor[aRow, FDMaxCyc - 1]):
FDMaxCyc += 2
else:
FDMaxCyc = fluor.shape[1]
maxMeanSD = 0.0
stopCyc = fluor.shape[1]
for cycInRange in range(validTwoLessCyc, FDMaxCyc):
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=RuntimeWarning)
tempMeanSD = np.mean(secondDerivative[cycInRange - 2: cycInRange + 1], axis=0)
# The > 0.000000000001 is to avoid float differences to the pascal version
if not np.isnan(tempMeanSD) and (tempMeanSD - maxMeanSD) > 0.000000000001:
maxMeanSD = tempMeanSD
stopCyc = cycInRange
if stopCyc + 2 >= fluor.shape[1]:
stopCyc = fluor.shape[1]
return stopCyc
def _lrp_findStartCyc(fluor, aRow, stopCyc):
"""A function which finds the start cycle of the log lin phase in fluor.
Args:
fluor: The array with the fluorescence values
aRow: The row to work on
stopCyc: The stop cycle
Returns:
An array [int, int] with the start cycle and the fixed start cycle.
"""
startCyc = stopCyc - 1
# startCyc might be NaN, so shift it to the first value
firstNotNaN = 1 # Cycles so +1 to array
while np.isnan(fluor[aRow, firstNotNaN - 1]) and firstNotNaN < startCyc:
firstNotNaN += 1
while startCyc > firstNotNaN and np.isnan(fluor[aRow, startCyc - 1]):
startCyc -= 1
# As long as there are no NaN and new values are increasing
while (startCyc > firstNotNaN and
not np.isnan(fluor[aRow, startCyc - 2]) and
fluor[aRow, startCyc - 2] <= fluor[aRow, startCyc - 1]):
startCyc -= 1
startCycFix = startCyc
if (not np.isnan(fluor[aRow, startCyc]) and
not np.isnan(fluor[aRow, startCyc - 1]) and
not np.isnan(fluor[aRow, stopCyc - 1]) and
not np.isnan(fluor[aRow, stopCyc - 2])):
startStep = np.log10(fluor[aRow, startCyc]) - np.log10(fluor[aRow, startCyc - 1])
stopStep = np.log10(fluor[aRow, stopCyc - 1]) - np.log10(fluor[aRow, stopCyc - 2])
if startStep > 1.1 * stopStep:
startCycFix += 1
return [startCyc, startCycFix]
def _lrp_testSlopes(fluor, aRow, stopCyc, startCycFix):
"""Splits the values and calculates a slope for the upper and the lower half.
Args:
fluor: The array with the fluorescence values
aRow: The row to work on
stopCyc: The stop cycle
startCycFix: The start cycle
Returns:
An array with [slopelow, slopehigh].
"""
# Both start with full range
loopStart = [startCycFix[aRow], stopCyc[aRow]]
loopStop = [startCycFix[aRow], stopCyc[aRow]]
# Now find the center ignoring nan
while True:
loopStart[1] -= 1
loopStop[0] += 1
while (loopStart[1] - loopStop[0]) > 1 and np.isnan(fluor[aRow, loopStart[1] - 1]):
loopStart[1] -= 1
while (loopStart[1] - loopStop[0]) > 1 and np.isnan(fluor[aRow, loopStop[1] - 1]):
loopStop[0] += 1
if (loopStart[1] - loopStop[0]) <= 1:
break
# basic regression per group
ssx = [0, 0]
sxy = [0, 0]
slope = [0, 0]
for j in range(0, 2):
sumx = 0.0
sumy = 0.0
sumx2 = 0.0
sumxy = 0.0
nincl = 0.0
for i in range(loopStart[j], loopStop[j] + 1):
if not np.isnan(fluor[aRow, i - 1]):
sumx += i
sumy += np.log10(fluor[aRow, i - 1])
sumx2 += i * i
sumxy += i * np.log10(fluor[aRow, i - 1])
nincl += 1
ssx[j] = sumx2 - sumx * sumx / nincl
sxy[j] = sumxy - sumx * sumy / nincl
slope[j] = sxy[j] / ssx[j]
return [slope[0], slope[1]]
def _lrp_lastCycMeanMax(fluor, vecSkipSample, vecNoPlateau):
"""A function which calculates the mean of the max fluor in the last ten cycles.
Args:
fluor: The array with the fluorescence values
vecSkipSample: Skip the sample
vecNoPlateau: Sample has no plateau
Returns:
An float with the max mean.
"""
maxFlour = np.nanmax(fluor[:, -11:], axis=1)
maxFlour[vecSkipSample] = np.nan
maxFlour[vecNoPlateau] = np.nan
# Ignore all nan slices, to fix them below
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=RuntimeWarning)
maxMean = np.nanmean(maxFlour)
if np.isnan(maxMean):
maxMean = np.nanmax(maxFlour)
return maxMean
def _lrp_meanPcrEff(tarGroup, vecTarget, pcrEff, vecSkipSample, vecNoPlateau, vecShortLogLin):
"""A function which calculates the mean efficiency of the selected target group excluding bad ones.
Args:
tarGroup: The target number
vecTarget: The vector with the targets numbers
pcrEff: The array with the PCR efficiencies
vecSkipSample: Skip the sample
vecNoPlateau: True if there is no plateau
vecShortLogLin: True indicates a short log lin phase
Returns:
An array with [meanPcrEff, pcrEffVar].
"""
cnt = 0
sumEff = 0.0
sumEff2 = 0.0
for j in range(0, len(pcrEff)):
if tarGroup is None or tarGroup == vecTarget[j]:
if (not (vecSkipSample[j] or vecNoPlateau[j] or vecShortLogLin[j])) and pcrEff[j] > 1.0:
cnt += 1
sumEff += pcrEff[j]
sumEff2 += pcrEff[j] * pcrEff[j]
if cnt > 1:
meanPcrEff = sumEff / cnt
pcrEffVar = (sumEff2 - (sumEff * sumEff) / cnt) / (cnt - 1)
else:
meanPcrEff = 1.0
pcrEffVar = 100
return [meanPcrEff, pcrEffVar]
def _lrp_startStopInWindow(fluor, aRow, upWin, lowWin):
"""Find the start and the stop of the part of the curve which is inside the window.
Args:
fluor: The array with the fluorescence values
aRow: The row to work on
upWin: The upper limit of the window
lowWin: The lower limit of the window
Returns:
The int startWinCyc, stopWinCyc and the bool notInWindow.
"""
startWinCyc = 0
stopWinCyc = 0
# Find the stopCyc and the startCyc cycle of the log lin phase
stopCyc = _lrp_findStopCyc(fluor, aRow)
[startCyc, startCycFix] = _lrp_findStartCyc(fluor, aRow, stopCyc)
if np.isfinite(fluor[aRow, startCycFix - 1:]).any():
stopMaxCyc = np.nanargmax(fluor[aRow, startCycFix - 1:]) + startCycFix
else:
return startCyc, startCyc, True
# If is true if outside the window
if fluor[aRow, startCyc - 1] > upWin or fluor[aRow, stopMaxCyc - 1] < lowWin:
notInWindow = True
if fluor[aRow, startCyc - 1] > upWin:
startWinCyc = startCyc
stopWinCyc = startCyc
if fluor[aRow, stopMaxCyc - 1] < lowWin:
startWinCyc = stopMaxCyc
stopWinCyc = stopMaxCyc
else:
notInWindow = False
# look for stopWinCyc
if fluor[aRow, stopMaxCyc - 1] < upWin:
stopWinCyc = stopMaxCyc
else:
for i in range(stopMaxCyc, startCyc, -1):
if fluor[aRow, i - 1] > upWin > fluor[aRow, i - 2]:
stopWinCyc = i - 1
# look for startWinCyc
if fluor[aRow, startCycFix - 1] > lowWin:
startWinCyc = startCycFix
else:
for i in range(stopMaxCyc, startCyc, -1):
if fluor[aRow, i - 1] > lowWin > fluor[aRow, i - 2]:
startWinCyc = i
return startWinCyc, stopWinCyc, notInWindow
def _lrp_paramInWindow(fluor, aRow, upWin, lowWin):
"""Calculates slope, nNull, PCR efficiency and mean x/y for the curve part in the window.
Args:
fluor: The array with the fluorescence values
aRow: The row to work on
upWin: The upper limit of the window
lowWin: The lower limit of the window
Returns:
The calculated values: indMeanX, indMeanY, pcrEff, nnulls, ninclu, correl.
"""
startWinCyc, stopWinCyc, notInWindow = _lrp_startStopInWindow(fluor, aRow, upWin, lowWin)
sumx = 0.0
sumy = 0.0
sumx2 = 0.0
sumy2 = 0.0
sumxy = 0.0
nincl = 0.0
ssx = 0.0
ssy = 0.0
sxy = 0.0
for i in range(startWinCyc, stopWinCyc + 1):
fluorSamp = fluor[aRow, i - 1]
if not np.isnan(fluorSamp):
logFluorSamp = np.log10(fluorSamp)
sumx += i
sumy += logFluorSamp
sumx2 += i * i
sumy2 += logFluorSamp * logFluorSamp
sumxy += i * logFluorSamp
nincl += 1
if nincl > 1:
ssx = sumx2 - sumx * sumx / nincl
ssy = sumy2 - sumy * sumy / nincl
sxy = sumxy - sumx * sumy / nincl
if ssx > 0.0 and ssy > 0.0 and nincl > 0.0:
cslope = sxy / ssx
cinterc = sumy / nincl - cslope * sumx / nincl
correl = sxy / np.sqrt(ssx * ssy)
indMeanX = sumx / nincl
indMeanY = sumy / nincl
pcrEff = np.power(10, cslope)
nnulls = np.power(10, cinterc)
else:
correl = np.nan
indMeanX = np.nan
indMeanY = np.nan
pcrEff = np.nan
nnulls = np.nan
if notInWindow:
ninclu = 0
else:
ninclu = stopWinCyc - startWinCyc + 1
return indMeanX, indMeanY, pcrEff, nnulls, ninclu, correl
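# Note on the model behind the window fit above (added comment): within the
# log-linear phase the fluorescence is assumed to follow F_c = N0 * E**c, so
# log10(F_c) = log10(N0) + c * log10(E).  The regression slope is therefore
# log10(E), and pcrEff = 10**slope recovers the PCR efficiency E, while the
# intercept gives nnulls = 10**intercept, an estimate of the starting fluorescence.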
def _lrp_allParamInWindow(fluor, tarGroup, vecTarget, indMeanX, indMeanY, pcrEff, nnulls, ninclu, correl, upWin, lowWin, vecNoAmplification, vecBaselineError):
"""A function which calculates the mean of the max fluor in the last ten cycles.
Args:
fluor: The array with the fluorescence values
tarGroup: The target number
vecTarget: The vector with the targets numbers
indMeanX: The vector with the x mean position
indMeanY: The vector with the y mean position
pcrEff: The array with the PCR efficiencies
nnulls: The array with the calculated nnulls
ninclu: The array with the calculated ninclu
correl: The array with the calculated correl
upWin: The upper limit of the window
lowWin: The lower limit of the window
vecNoAmplification: True if there is a amplification error
vecBaselineError: True if there is a baseline error
Returns:
An array with [indMeanX, indMeanY, pcrEff, nnulls, ninclu, correl].
"""
for row in range(0, fluor.shape[0]):
if tarGroup is None or tarGroup == vecTarget[row]:
if not (vecNoAmplification[row] or vecBaselineError[row]):
if tarGroup is None:
indMeanX[row], indMeanY[row], pcrEff[row], nnulls[row], ninclu[row], correl[row] = _lrp_paramInWindow(fluor, row, upWin[0], lowWin[0])
else:
indMeanX[row], indMeanY[row], pcrEff[row], nnulls[row], ninclu[row], correl[row] = _lrp_paramInWindow(fluor, row, upWin[tarGroup], lowWin[tarGroup])
else:
correl[row] = np.nan
indMeanX[row] = np.nan
indMeanY[row] = np.nan
pcrEff[row] = np.nan
nnulls[row] = np.nan
ninclu[row] = 0
return indMeanX, indMeanY, pcrEff, nnulls, ninclu, correl
def _lrp_meanStopFluor(fluor, tarGroup, vecTarget, stopCyc, vecSkipSample, vecNoPlateau):
"""Return the mean of the stop fluor or the max fluor if all rows have no plateau.
Args:
fluor: The array with the fluorescence values
tarGroup: The target number
vecTarget: The vector with the targets numbers
stopCyc: The vector with the stop cycle of the log lin phase
vecSkipSample: Skip the sample
vecNoPlateau: True if there is no plateau
Returns:
The meanMax fluorescence.
"""
meanMax = 0.0
maxFluor = 0.0000001
cnt = 0
if tarGroup is None:
for aRow in range(0, fluor.shape[0]):
if not vecSkipSample[aRow]:
if not vecNoPlateau[aRow]:
cnt += 1
meanMax += fluor[aRow, stopCyc[aRow] - 1]
else:
for i in range(0, fluor.shape[1]):
if fluor[aRow, i] > maxFluor:
maxFluor = fluor[aRow, i]
else:
for aRow in range(0, fluor.shape[0]):
if tarGroup == vecTarget[aRow] and not vecSkipSample[aRow]:
if not vecNoPlateau[aRow]:
cnt += 1
meanMax += fluor[aRow, stopCyc[aRow] - 1]
else:
for i in range(0, fluor.shape[1]):
if fluor[aRow, i] > maxFluor:
maxFluor = fluor[aRow, i]
if cnt > 0:
meanMax = meanMax / cnt
else:
meanMax = maxFluor
return meanMax
def _lrp_maxStartFluor(fluor, tarGroup, vecTarget, startCyc, vecSkipSample):
"""Return the maximum of the start fluorescence
Args:
fluor: The array with the fluorescence values
tarGroup: The target number
vecTarget: The vector with the targets numbers
startCyc: The vector with the start cycle of the log lin phase
vecSkipSample: Skip the sample
Returns:
The maxStart fluorescence.
"""
maxStart = -10.0
if tarGroup is None:
for aRow in range(0, fluor.shape[0]):
if not vecSkipSample[aRow]:
if fluor[aRow, startCyc[aRow] - 1] > maxStart:
maxStart = fluor[aRow, startCyc[aRow] - 1]
else:
for aRow in range(0, fluor.shape[0]):
if tarGroup == vecTarget[aRow] and not vecSkipSample[aRow]:
if fluor[aRow, startCyc[aRow] - 1] > maxStart:
maxStart = fluor[aRow, startCyc[aRow] - 1]
return 0.999 * maxStart
def _lrp_setLogWin(tarGroup, newUpWin, foldWidth, upWin, lowWin, maxFluorTotal, minFluorTotal):
"""Sets a new window and ensures its within the total fluorescence values.
Args:
tarGroup: The target number
newUpWin: The new upper window
foldWidth: The foldWith to the lower window
upWin: The upper window fluorescence
lowWin: The lower window fluorescence
maxFluorTotal: The maximum fluorescence over all rows
minFluorTotal: The minimum fluorescence over all rows
Returns:
The updated upWin and lowWin arrays.
"""
# No rounding needed, only present for exact identical output with Pascal version
tempUpWin = np.power(10, np.round(1000 * newUpWin) / 1000)
tempLowWin = np.power(10, np.round(1000 * (newUpWin - foldWidth)) / 1000)
tempUpWin = np.minimum(tempUpWin, maxFluorTotal)
tempUpWin = np.maximum(tempUpWin, minFluorTotal)
tempLowWin = np.minimum(tempLowWin, maxFluorTotal)
tempLowWin = np.maximum(tempLowWin, minFluorTotal)
if tarGroup is None:
upWin[0] = tempUpWin
lowWin[0] = tempLowWin
else:
upWin[tarGroup] = tempUpWin
lowWin[tarGroup] = tempLowWin
return upWin, lowWin
def _lrp_logStepStop(fluor, tarGroup, vecTarget, stopCyc, vecSkipSample, vecNoPlateau):
"""Calculates the log of the fluorescence increase at the stop cycle.
Args:
fluor: The array with the fluorescence values
tarGroup: The target number
vecTarget: The vector with the targets numbers
stopCyc: The vector with the stop cycle of the log lin phase
vecSkipSample: True if row should be skipped
vecNoPlateau: True if there is no plateau
Returns:
The mean log10 fluorescence increase at the stop cycle.
"""
cnt = 0
step = 0.0
for aRow in range(0, fluor.shape[0]):
if (tarGroup is None or tarGroup == vecTarget[aRow]) and not (vecSkipSample[aRow] or vecNoPlateau[aRow]):
cnt += 1
step += np.log10(fluor[aRow, stopCyc[aRow] - 1]) - np.log10(fluor[aRow, stopCyc[aRow] - 2])
if cnt > 0:
step = step / cnt
else:
step = np.log10(1.8)
return step
def _lrp_setWoL(fluor, tarGroup, vecTarget, pointsInWoL, indMeanX, indMeanY, pcrEff, nNulls, nInclu, correl,
upWin, lowWin, maxFluorTotal, minFluorTotal, stopCyc, startCyc, threshold,
vecNoAmplification, vecBaselineError, vecSkipSample, vecNoPlateau, vecShortLogLin, vecIsUsedInWoL):
"""Find the window with the lowest variation in PCR efficiency and calculate its values.
Args:
fluor: The array with the fluorescence values
tarGroup: The target number
vecTarget: The vector with the targets numbers
pointsInWoL: The number of points in the window
indMeanX: The vector with the x mean position
indMeanY: The vector with the y mean position
pcrEff: The array with the PCR efficiencies
nNulls: The array with the calculated nNulls
nInclu: The array with the calculated nInclu
correl: The array with the calculated correl
upWin: The upper limit of the window
lowWin: The lower limit of the window
maxFluorTotal: The maximum fluorescence over all rows
minFluorTotal: The minimum fluorescence over all rows
stopCyc: The vector with the stop cycle of the log lin phase
startCyc: The vector with the start cycle of the log lin phase
threshold: The threshold fluorescence
vecNoAmplification: True if there is a amplification error
vecBaselineError: True if there is a baseline error
vecSkipSample: Skip the sample
vecNoPlateau: True if there is no plateau
vecShortLogLin: True indicates a short log lin phase
vecIsUsedInWoL: True if used in the WoL
Returns:
The values indMeanX, indMeanY, pcrEff, nNulls, nInclu, correl, upWin, lowWin, threshold, vecIsUsedInWoL.
"""
skipGroup = False
stepSize = 0.2 # was 0.5, smaller steps help in finding WoL
# Keep 60 calculated results
memVarEff = np.zeros(60, dtype=np.float64)
memUpWin = np.zeros(60, dtype=np.float64)
memFoldWidth = np.zeros(60, dtype=np.float64)
maxFluorWin = _lrp_meanStopFluor(fluor, tarGroup, vecTarget, stopCyc, vecSkipSample, vecNoPlateau)
if maxFluorWin > 0.0:
maxFluorWin = np.log10(maxFluorWin)
else:
skipGroup = True
minFluorLim = _lrp_maxStartFluor(fluor, tarGroup, vecTarget, startCyc, vecSkipSample)
if minFluorLim > 0.0:
minFluorLim = np.log10(minFluorLim)
else:
skipGroup = True
checkMeanEff = 1.0
if not skipGroup:
foldWidth = pointsInWoL * _lrp_logStepStop(fluor, tarGroup, vecTarget, stopCyc, vecSkipSample, vecNoPlateau)
upWin, lowWin = _lrp_setLogWin(tarGroup, maxFluorWin, foldWidth, upWin, lowWin, maxFluorTotal, minFluorTotal)
_unused, _unused2, checkPcrEff, _unused3, _unused4, _unused5 = _lrp_allParamInWindow(fluor, tarGroup, vecTarget,
indMeanX, indMeanY, pcrEff,
nNulls, nInclu, correl,
upWin, lowWin,
vecNoAmplification,
vecBaselineError)
[checkMeanEff, _unused] = _lrp_meanPcrEff(tarGroup, vecTarget, checkPcrEff,
vecSkipSample, vecNoPlateau, vecShortLogLin)
if checkMeanEff < 1.001:
skipGroup = True
if skipGroup:
if tarGroup is None:
threshold[0] = (0.5 * np.round(1000 * upWin[0]) / 1000)
else:
threshold[tarGroup] = (0.5 * np.round(1000 * upWin[tarGroup]) / 1000)
if not skipGroup:
foldWidth = np.log10(np.power(checkMeanEff, pointsInWoL))
counter = -1
maxVarEff = 0.0
maxVarEffStep = -1
lastUpWin = 2 + maxFluorWin
while True:
counter += 1
step = np.log10(checkMeanEff)
newUpWin = maxFluorWin - counter * stepSize * step
if newUpWin < lastUpWin:
upWin, lowWin = _lrp_setLogWin(tarGroup, newUpWin, foldWidth, upWin, lowWin, maxFluorTotal, minFluorTotal)
_unused, _unused2, checkPcrEff, _unused3, _unused4, _unused5 = _lrp_allParamInWindow(fluor, tarGroup,
vecTarget, indMeanX,
indMeanY, pcrEff,
nNulls, nInclu,
correl,
upWin, lowWin,
vecNoAmplification,
vecBaselineError)
[checkMeanEff, _unused] = _lrp_meanPcrEff(tarGroup, vecTarget, checkPcrEff,
vecSkipSample, vecNoPlateau, vecShortLogLin)
foldWidth = np.log10(np.power(checkMeanEff, pointsInWoL))
if foldWidth < 0.5:
foldWidth = 0.5 # to avoid width = 0 above stopCyc
upWin, lowWin = _lrp_setLogWin(tarGroup, newUpWin, foldWidth, upWin, lowWin, maxFluorTotal, minFluorTotal)
_unused, _unused2, checkPcrEff, _unused3, _unused4, _unused5 = _lrp_allParamInWindow(fluor, tarGroup,
vecTarget, indMeanX,
indMeanY, pcrEff,
nNulls, nInclu,
correl,
upWin, lowWin,
vecNoAmplification,
vecBaselineError)
[checkMeanEff, checkVarEff] = _lrp_meanPcrEff(tarGroup, vecTarget, checkPcrEff,
vecSkipSample, vecNoPlateau, vecShortLogLin)
if checkVarEff > 0.0:
memVarEff[counter] = np.sqrt(checkVarEff)
# Copyright 2016 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: <NAME> <<EMAIL>>
# License: Apache License, Version 2.0
# Create: 2016-12-02
"""Utilities for the SurrogateModel test process."""
from math import isnan
import numpy as np
def assert_rel_error(test_case, actual, desired, tolerance):
"""
Determine that the relative error between `actual` and `desired`
is within `tolerance`. If `desired` is zero, then use absolute error.
Args
----
test_case : :class:`unittest.TestCase`
TestCase instance used for assertions.
actual : float
The value from the test.
desired : float
The value expected.
tolerance : float
Maximum relative error ``(actual - desired) / desired``.
"""
try:
actual[0]
except (TypeError, IndexError):
if isnan(actual) and not isnan(desired):
test_case.fail('actual nan, desired %s, rel error nan, tolerance %s'
% (desired, tolerance))
if desired != 0:
error = (actual - desired) / desired
else:
error = actual
if abs(error) > tolerance:
test_case.fail('actual %s, desired %s, rel error %s, tolerance %s'
% (actual, desired, error, tolerance))
else: # array values
if not np.all(np.isnan(actual) == np.isnan(desired)):
test_case.fail('actual and desired values have non-matching nan values')
if np.linalg.norm(desired) == 0:
error = np.linalg.norm(actual)
else:
error = np.linalg.norm(actual - desired) / np.linalg.norm(desired)
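# Hedged usage sketch (added; not part of the original module):
#
#   import unittest
#   class DemoTest(unittest.TestCase):
#       def test_close_enough(self):
#           assert_rel_error(self, actual=1.001, desired=1.0, tolerance=1e-2)
#           assert_rel_error(self, np.array([1.0, 2.0]),
#                            np.array([1.0, 2.02]), tolerance=2e-2)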
#!/usr/bin/env python
# coding: utf-8
# updated!!!!
'''
For more information and details about the algorithm, please refer to PhD thesis of <NAME>
LEARNING AND AGGREGATION OF FUZZY COGNITIVE MAPS – AN
EVOLUTIONARY APPROACH
by
<NAME>
'''
import numpy as np
import copy
import tqdm.auto as tq
import matplotlib.pylab as plt
import matplotlib
#matplotlib.use("TkAgg") # nice feature, it will plot and update fitness function during learning process !!!! do NOT use in the jupyter notebook !!!
class rcga:
'''
RCGA algorithm for creating an FCM based on the sample values,
nConcepts - number of concepts (nodes), concepts: initial concepts values,
Pmutation: probability of mutation (default 0.5), Precombination: probability of crossover (0.9),
population_size (default 100), max_generations: max number of steps (def 100000),
numberofsteps - number of simulation steps, should be the same as in the historical data,
maxfitness - fitness value after which learning process can be stopped
'''
def __init__(self, concepts, Pmutation=None, Precombination=None, population_size=None,
max_generations=None, historicaldata=None, fcm=None,
numberofsteps=None, tournamentP=None, tournamentK=None, lbd=None,maxfitness=None):
# GENERAL PARAMS
# types of mutations are randomly choosen according to authors of the article W.Stach et al. 2005
self.mutation_methods = ['random', 'nonuniform', 'Muhlenbein']
# types of selection are randomly choosen according to authors of the article W.Stach et al. 2005
self.selection_methods = ['rulette', 'tournament']
# probability of a cell mutating
self.prob_mutation = 0.5 if Pmutation is None else Pmutation
self.prob_recombination = 0.9 if Precombination is None else Precombination
self.tournamentP = 1 if tournamentP is None else tournamentP
self.tournamentK = 5 if tournamentK is None else tournamentK # or 10....
self.lbd = 1 if lbd is None else lbd # this is the operator of the sigmoid function, in a lot of papers it's set to 1 (elpiniki), Stach suggested 5
# GENERATION PROPERTIES
# size of the population, number of chromosomes in each population
self.population_size = 100 if population_size is None else population_size
if self.population_size % 2 != 0:
raise ValueError('Population size must be an EVEN number')
# nmax number of generations
self.max_generations = 100000 # 300000 if max_generations is None else max_generations
self.current_gen = 0
self.generations = np.zeros((self.population_size, len(concepts[0]), len(concepts[0]) - 1))
self.nConcepts = len(concepts[0])
# HISTORICAL DATA
# historical data obtained from fcm simulations or observations (in the format columns - concepts, rows - simulation steps)
if historicaldata is None and fcm is None:
raise ValueError('Cannot run the learning process without previous FCM architecture or historical data!!!')
self.data = historicaldata
# fcm which we are optimizing
self.fcm = fcm
# FITNESS FUNCTION
self.generation_fitness = np.zeros((1, self.population_size))
self.maxfitness = 0.999 if maxfitness is None else maxfitness
self.concepts_for_testing = concepts
# number of steps we have to run the simulation in order to calculate the fitness function (in Stach paper - 1 step)
self.numberofsteps = 2 # 5 if numberofsteps is None else numberofsteps # suggested 1
# termination conditions
self.termination = False
def intitialize(self):
# initialize 1st population
self.generations = np.random.uniform(low=-1, high=1,
size=(self.population_size, self.nConcepts, self.nConcepts - 1))
# -------------------- FITNESS OF THE GENERATION --------------------------------------
def simulateFCM(self, concepts, weights, nsteps):
'''
we have to simulate fcm with current weights in order to calculate fitness function
concepts should be given as a np.array((1,nConcepts))
:param concepts: conept vector
:param weights: weight array
:param nsteps: number of time step for the FCM simulation
:return: concepts values after nsteps
'''
# VERY IMPORTANT
# weights as np.array((nConcepts,nConcepts-1)) !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
assert weights.shape == (self.nConcepts, self.nConcepts - 1), 'wrong encoding'
for j in range(1, nsteps):
newvalues = np.zeros((concepts.shape[0]))
for i in range(concepts.shape[0]):
idx = list(range(concepts.shape[0]))
idx.remove(i)
newvalues[i] = round(1 / (1 + np.exp(-(concepts[i] + concepts[idx] @ weights[i]))), 8)
concepts = newvalues
return concepts
def calculate_fitness(self, weights):
'''
calculate fitness for each of the chromosome
:param weights: generated weight array, then tested
:return: fitness of the chromosome (how well this weight matrix did)
'''
# difference
alpha = 1 / ((self.numberofsteps - 1) * self.nConcepts* self.data.shape[0])
# we are counting the L1 error
# let's say we have both historical data and fcm, so we can simply
# simulate with new weights and calculate difference to obtain the fitness function
error = 0
for row, testcase in zip(self.data,self.concepts_for_testing):
error += np.sum(
np.abs(np.subtract(row, self.simulateFCM(testcase, weights, self.numberofsteps))))
return 1 / (100 * alpha*error + 1)
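# Worked example of the normalisation above (added comment): with 2 test cases,
# 5 concepts and numberofsteps=2, alpha = 1/((2-1)*5*2) = 0.1.  A total L1 error
# of 0.05 then gives fitness = 1/(100*0.1*0.05 + 1) = 1/1.5 ~ 0.667, while a
# perfect reproduction (error = 0) gives the maximum fitness of 1.0.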
# -------------------- CROSSOVER --------------------------------------
def crossover(self):
'''
crossover - swapping the values between the chromosomes in the generation, e.g. weights 0:15 from weights1 are swapped with
weights 15:: in weights2
:return: crossedovered pair
'''
crossover_pairs = self.generations
a = list(np.random.choice([False, True], p=[1 - self.prob_recombination, self.prob_recombination],
size=self.population_size).astype(int) * range(self.population_size))
a = list(filter(lambda x: x != 0, a))
# we are applying one-point crossover and mixing 1st with 2nd, 3rd with 4th and so on...
for i in range(0, len(a), 2): # population size (default 100), every even idx
# choosing if the crossover will happen
# 1 take two crossover pairs
chromA = crossover_pairs[i]
chromB = crossover_pairs[i + 1]
# 2 flatten them
chromA = np.reshape(chromA, (self.nConcepts * (self.nConcepts - 1)))
chromB = np.reshape(chromB, (self.nConcepts * (self.nConcepts - 1)))
# 3 randomly choose the 'crossing point'
point = np.random.choice(range(self.nConcepts * (self.nConcepts - 1)))
# 4 swap the values
chromA[point:] = chromB[point:]
chromB[:point] = chromA[:point]
# 5 reshape to (nconcepts,nconcepts)
chromA = np.reshape(chromA, (self.nConcepts, self.nConcepts - 1))
chromB = np.reshape(chromB, (self.nConcepts, self.nConcepts - 1))
# after crossover, crossover_pairs are the latest generation
self.generations = crossover_pairs
# -------------------- MUTATION --------------------------------------
def mutation(self):
'''
randomly chooses one of the implemented mutation techniques and applies it to the weight matrix;
both random and non-uniform mutation use techniques described in "Genetic learning of fuzzy cognitive maps"
<NAME>, <NAME>∗, <NAME>, <NAME>
:return:
'''
mut = np.random.choice(['random','nonuniform'])
if mut =='random':
self.randommutation()
elif mut =='nonuniform':
self.numutation()
def randommutation(self):
'''
random mutation: replaces one randomly chosen weight of the selected chromosomes with a value drawn uniformly from [-1, 1];
technique described in "Genetic learning of fuzzy cognitive maps"
<NAME>, <NAME>∗, <NAME>, <NAME>
:return:
'''
# applying mutation
# choosing x % indexes for mutation
a = list(np.random.choice([False, True], p=[1 - self.prob_mutation, self.prob_mutation], size=self.population_size).astype(int) * range(self.population_size))
a = list(filter(lambda x: x != 0, a))
for i in a:
# muation is happening with probability
# random method
j = np.random.choice(range(self.nConcepts), size=1)
k = np.random.choice(range(self.nConcepts - 1), size=1)
self.generations[i, j,k] = np.random.uniform(-1,1)
def numutation(self):
'''
non-uniform mutation: mutates a number of weights that decreases as the generation count grows;
technique described in "Genetic learning of fuzzy cognitive maps"
<NAME>, <NAME>∗, <NAME>, <NAME>
:return:
'''
# choosing p % of chromosomes in the generation
a = list(np.random.choice([False, True], p=[1 - self.prob_mutation, self.prob_mutation],
size=self.population_size).astype(int) * range(self.population_size))
a = list(filter(lambda x: x != 0, a))
# randomly choose max 3 elements in the chromosome and change their vals
d = round((self.max_generations-self.current_gen)/(self.max_generations/2))
for i in a:
# randomly choosing d% of the elements to mutate, it decreases with the n of generations
for change in range(d):
j = np.random.choice(range(self.nConcepts), size=1)
k = np.random.choice(range(self.nConcepts - 1), size=1)
self.generations[i, j, k] = np.random.uniform(-1, 1)
# -------------------- SELECTION OF THE BEST CANDIDATES FOR THE NEXT GENERATION --------------------------------------
def selection(self):
'''
selecting the candidates from the last generation to the new generation
as the paper suggests, the selection method is chosen at random
ref: Genetic learning of fuzzy cognitive maps
<NAME>, <NAME>∗, <NAME>, <NAME>
calls one of the selection methods rullete or tournament
'''
cross = np.random.choice(['rulette', 'tournament'])
if cross == 'rulette':
crossover_pairs = self.rulette()
elif cross == 'tournament':
crossover_pairs = self.tournament()
def rulette(self):
'''
choosing candidates for crossover with probability according to the fitness function of each chromosome
more information https://en.wikipedia.org/wiki/Selection_(genetic_algorithm)
:return:
'''
selection = np.zeros((self.population_size, self.nConcepts, self.nConcepts - 1))
# initial probability list
p = self.generation_fitness[-2] / np.sum(self.generation_fitness[-2])
import numpy as np
from scipy.optimize import curve_fit
import matplotlib.pyplot as plt
#Data:
#From table 1:
tabela1x = np.arange(1,21)
tabela1y = np.array([9, 16, 21, 25, 33, 48, 61, 75, 91, 27, 148, 175, 244,375,
450, 62, 674, 860, 1100, 1331 ], float)
#From table 2:
tabela2x = np.arange(21,41)
tabela2y = np.array([1696, 1995, 2314, 2606, 3024, 3523, 4032, 5387,
6507, 7560, 8078, 8911, 10328, 10869, 12210, 13197, 13832, 14393,
14967,15729], float)
#Linearized (natural-log) versions of y, for tables 1 and 2 respectively:
tabela1Log = np.array([2.19722, 2.77259, 3.04452, 3.21888, 3.49651,
3.8712, 4.11087, 4.31749, 4.51086, 4.84419, 4.99721, 5.16479, 5.49717,
5.92693, 6.10925, 6.3315, 6.51323, 6.75693, 7.00307, 7.19369], float)
tabela2Log = np.array([7.43603, 7.5984, 7.74673, 7.86557, 8.01434,
8.16707, 8.30202, 8.59174, 8.78063, 8.93063, 8.9969, 9.09504, 9.24261,
9.29367, 9.41001, 9.48774, 9.53474, 9.5745, 9.6136, 9.66326], float)
'''
#Implementing the least-squares method (MMQ) for a degree-1 polynomial:
#With data from table 1:
m = len(tabela1x)
n = 1
A = np.zeros((n+1,n+1))
B = np.zeros(n+1)
a = np.zeros(n+1)
for row in range(n+1):
for col in range(n+1):
if row == 0 and col == 0:
A[row,col] = m
continue
A[row, col] = np.sum(tabela1x**(row+col))
B[row] = np.sum(tabela1x**row * tabela1Log)
a = np.linalg.solve(A, B)
print('For table 1: ')
print('f(x) = %.3f'%a[0])
for i in range(1, n+1):
print(' %+.3f x^%d' % (a[i],i))
#For table 2:
m = len(tabela2x)
n = 1
A = np.zeros((n+1,n+1))
B = np.zeros(n+1)
a = np.zeros(n+1)
for row in range(n+1):
for col in range(n+1):
if row == 0 and col == 0:
A[row,col] = m
continue
A[row, col] = np.sum(tabela2x**(row+col))
B[row] = np.sum(tabela2x**row * tabela2Log)
a = np.linalg.solve(A, B)
print('For table 2: ')
print('f(x) = %.3f'%a[0])
for i in range(1, n+1):
print(' %+.3f x^%d' % (a[i],i))
'''
#Defines a degree-1 polynomial function:
def f(x,a0,a1):
return a0 + a1*x
#Fits the function to the data and returns the adjusted coefficients:
a,_ = curve_fit(f, tabela1x, tabela1Log)
#print('Equation for table 1:')
#print('y = (%.3f) + (%.3f)x'%(a[0],a[1]))
c,_ = curve_fit(f, tabela2x, tabela2Log)
#print('Equation for table 2:')
#print('y = (%.3f) + (%.3f)x' % (c[0],c[1]))
'''
#Plotting the graph (line 1 vs line 2):
plt.plot(tabela1x, f(tabela1x, *a), 'r-',
         label='line 1: %5.2f + %5.2f x' % tuple(a))
plt.plot(tabela2x, f(tabela2x, *c), 'b-',
         label='line 2: %5.2f + %5.2f x' % tuple(c))
plt.xlabel('x')
plt.ylabel('y')
plt.legend()
plt.show()
'''
#Obtaining the coefficients of the exponential curves:
#For curve 1:
c1 = np.exp(a[0])
c2 = np.exp(a[1])
#For curve 2:
c3 = np.exp(c[0])
c4 = np.exp(c[1])
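#Sanity check of the back-transform above (illustrative): the fitted line is
#ln(y) ~ a0 + a1*x, so y ~ exp(a0) * exp(a1)**x; c1 and c3 are therefore the
#multiplicative constants and c2 and c4 the per-step growth factors of the two
#exponential curves.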
#Defining the intervals and functions:
x1 = np.arange(21)
x2 = | np.arange(21, 41) | numpy.arange |
import numpy as np
from scipy.spatial import distance
from ccdc import protein
def centroid(arr):
length = arr.shape[0]
sum_x = np.sum(arr[:, 0])
sum_y = | np.sum(arr[:, 1]) | numpy.sum |
from typing import Tuple, Dict, List, Union
import numpy as np
from jina.executors.crafters import BaseSegmenter
from .helper import _crop_image, _move_channel_axis, _load_image
class RandomImageCropper(BaseSegmenter):
"""
    :class:`RandomImageCropper` crops the image with a random crop box. The coordinates use the same coordinate
    system as :py:mod:`PIL.Image`.
"""
def __init__(self,
target_size: Union[Tuple[int], int] = 224,
num_patches: int = 1,
channel_axis: int = -1,
*args,
**kwargs):
"""
:param target_size: desired output size. If size is a sequence like (h, w), the output size will be matched to
this. If size is an int, the output will have the same height and width as the `target_size`.
"""
super().__init__(*args, **kwargs)
self.target_size = target_size
self.num_patches = num_patches
self.channel_axis = channel_axis
def craft(self, blob: 'np.ndarray', *args, **kwargs) -> List[Dict]:
"""
Crop the input image array.
:param blob: the ndarray of the image
:return: a list of chunk dicts with the cropped images
"""
raw_img = _load_image(blob, self.channel_axis)
result = []
for i in range(self.num_patches):
_img, top, left = _crop_image(raw_img, self.target_size, how='random')
img = _move_channel_axis( | np.asarray(_img) | numpy.asarray |
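# --- Illustrative usage sketch (assumptions: the package's helper module is importable
# and craft() completes as started above; the image values below are made up) ---
#   import numpy as np
#   cropper = RandomImageCropper(target_size=64, num_patches=3, channel_axis=-1)
#   blob = np.random.randint(0, 255, size=(224, 224, 3)).astype('float32')
#   chunks = cropper.craft(blob)   # per the docstring, a list of chunk dicts, one per random crop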
# -*- coding: utf-8 -*-
# Unit tests for ppn functions
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import unittest
import numpy as np
import tensorflow as tf
from faster_particles.ppn_utils import generate_anchors, \
top_R_pixels, clip_pixels, \
compute_positives_ppn1, compute_positives_ppn2, assign_gt_pixels, \
include_gt_pixels, predicted_pixels, crop_pool_layer, \
all_combinations, slice_rois, \
nms_step, nms
def generate_anchors_np(im_shape, repeat=1):
dim = len(im_shape)
anchors = np.indices(im_shape).transpose(tuple(range(1, dim+1)) + (0,))
anchors = anchors + 0.5
anchors = np.reshape(anchors, (-1, dim))
return np.repeat(anchors, repeat, axis=0)
def clip_pixels_np(pixels, im_shape):
"""
pixels shape: [None, 2]
Clip pixels (x, y) to [0, im_shape[0]) x [0, im_shape[1])
"""
dim = len(im_shape)
for i in range(dim):
pixels[:, i] = np.clip(pixels[:, i], 0, im_shape[i])
return pixels
class Test(unittest.TestCase):
def generate_anchors(self, im_shape, repeat):
anchors_np = generate_anchors_np(im_shape, repeat=repeat)
with tf.Session():
anchors_tf = generate_anchors(im_shape, repeat=repeat)
return np.array_equal(anchors_tf, anchors_np)
def test_generate_anchors_2d(self):
im_shape = (2, 2)
repeat = 3
return self.generate_anchors(im_shape, repeat)
def test_generate_anchors_3d(self):
im_shape = (2, 2, 2)
repeat = 3
return self.generate_anchors(im_shape, repeat)
def clip_pixels(self, im_shape, proposals_np):
pixels_np = clip_pixels_np(proposals_np, im_shape)
with tf.Session() as sess:
proposals = tf.constant(proposals_np, dtype=tf.float32)
pixels = clip_pixels(proposals, im_shape)
pixels_tf = sess.run(pixels)
return np.allclose(pixels_np, pixels_tf)
def test_clip_pixels_2d(self):
im_shape = (3, 3)
proposals_np = np.array([[-0.5, 1.0], [0.01, 3.4], [2.5, 2.99]])
return self.clip_pixels(im_shape, proposals_np)
def test_clip_pixels_3d(self):
im_shape = (2, 2, 2)
proposals_np = np.random.rand(5, 3)*4-1
return self.clip_pixels(im_shape, proposals_np)
def top_R_pixels(self, R, threshold, proposals_np, scores_np):
threshold_indices = np.nonzero(scores_np > threshold)
scores_np = scores_np[threshold_indices]
proposals_np = proposals_np[threshold_indices]
sorted_indices = np.argsort(scores_np)
roi_scores_np = scores_np[sorted_indices][::-1][:R]
rois_np = proposals_np[sorted_indices][::-1][:R]
with tf.Session() as sess:
proposals = tf.constant(proposals_np, dtype=tf.float32)
scores = tf.constant(scores_np, dtype=tf.float32)
rois, roi_scores = top_R_pixels(proposals, scores, R=R, threshold=threshold)
rois_tf, roi_scores_tf = sess.run([rois, roi_scores])
return np.allclose(rois_tf, rois_np) and np.allclose(roi_scores_np, roi_scores_tf)
def test_top_R_pixels_2d(self):
R = 3
threshold = 0.5
# Shape N*N x 2
proposals_np = np.array([[0.0, 1.0], [0.5, 0.7], [0.3, 0.88], [-0.2, 0.76], [0.23, 0.47], [0.33, 0.56], [0.0, 0.4], [-0.6, 0.3], [0.27, -0.98]])
# Shape N*N x 1
scores_np = np.array([0.1, 0.5, 0.7, 0.45, 0.65, 0.01, 0.78, 0.98, 0.72])
return self.top_R_pixels(R, threshold, proposals_np, scores_np)
def test_top_R_pixels_3d(self):
R = 3
threshold = 0.5
# shape N*N x 3
proposals_np = np.array([[0.0, 1.0, 0.3], [0.87, 0.1, -0.34], [0.45, 0.68, 0.09],
[0.34, 0.21, -0.6], [0.12, -0.4, 0.8], [0.48, 0.43, -0.79], [0.89, 0.05, -0.02], [0.9, 0.04, 1.0]])
# shape N*N x 1
scores_np = np.array([0.1, 0.5, 0.7, 0.45, 0.65, 0.01, 0.78, 0.98])
return self.top_R_pixels(R, threshold, proposals_np, scores_np)
def predicted_pixels(self, im_shape, repeat, rpn_cls_prob_np, rpn_bbox_pred_np):
dim = len(im_shape)
anchors_np = generate_anchors_np(im_shape, repeat=repeat)
scores = rpn_cls_prob_np[..., 1:]
roi_scores_np = np.reshape(scores, (-1, scores.shape[-1]))
anchors_np = np.reshape(anchors_np, (-1,) + (rpn_cls_prob_np.shape[1],) * dim + (dim,))
proposals = anchors_np + rpn_bbox_pred_np
proposals = np.reshape(proposals, (-1, dim))
# clip predicted pixels to the image
proposals = clip_pixels_np(proposals, im_shape) # FIXME np function
rois_np = proposals.astype(float)
with tf.Session() as sess:
anchors_tf = generate_anchors(im_shape, repeat=repeat)
rpn_cls_prob_tf = tf.constant(rpn_cls_prob_np, dtype=tf.float32)
rpn_bbox_pred_tf = tf.constant(rpn_bbox_pred_np, dtype=tf.float32)
rois, roi_scores = predicted_pixels(rpn_cls_prob_tf, rpn_bbox_pred_tf, anchors_tf, im_shape)
rois_tf, roi_scores_tf = sess.run([rois, roi_scores])
return np.allclose(rois_tf, rois_np) and np.allclose(roi_scores_tf, roi_scores_np)
def test_predicted_pixels1_2d(self): # for PPN1
im_shape = (2, 2)
repeat = 1
# Shape [None, N, N, n] where n = 2 (background/signal)
rpn_cls_prob_np = np.array([[[[0.1, 0.9], [0.3, 0.7]], [[0.5, 0.5], [0.8, 0.2]]]])
# Shape [None, N, N, 2]
rpn_bbox_pred_np = np.array([[[[0.1, 0.1], [0.5, 0.2]], [[0.9, -0.5], [0.1, -0.4]]]])
return self.predicted_pixels(im_shape, repeat, rpn_cls_prob_np, rpn_bbox_pred_np)
def test_predicted_pixels1_3d(self):
im_shape = (2, 2, 2)
repeat = 1
rpn_cls_prob_np = np.random.rand(1, 2, 2, 2, 2)
rpn_bbox_pred_np = np.random.rand(1, 2, 2, 2, 3)*2-1
return self.predicted_pixels(im_shape, repeat, rpn_cls_prob_np, rpn_bbox_pred_np)
def test_predicted_pixels2_2d(self): # for PPN2
im_shape = (2, 2)
repeat = 1
# Shape [None, N, N, n] where n = num_classes
rpn_cls_prob_np = np.array([[[[0.1, 0.8, 0.1], [0.3, 0.65, 0.05]], [[0.5, 0.02, 0.48], [0.8, 0.18, 0.02]]]])
# Shape [None, N, N, 2]
rpn_bbox_pred_np = np.array([[[[0.1, 0.1], [0.5, 0.2]], [[0.9, -0.5], [0.1, -0.4]]]])
return self.predicted_pixels(im_shape, repeat, rpn_cls_prob_np, rpn_bbox_pred_np)
def test_predicted_pixels2_3d(self):
im_shape = (2, 2, 2)
repeat = 1
rpn_cls_prob_np = np.random.rand(1, 2, 2, 2, 3)
rpn_bbox_pred_np = np.random.rand(1, 2, 2, 2, 3)*2-1
return self.predicted_pixels(im_shape, repeat, rpn_cls_prob_np, rpn_bbox_pred_np)
def include_gt_pixels(self, rois_np, gt_pixels_np, dim1, dim2):
dim = gt_pixels_np.shape[-1]
# convert to F3 coordinates
gt_pixels_coord = np.floor(gt_pixels_np / dim1)
# Get 3x3 pixels around this in F3
gt_pixels_coord = gt_pixels_coord[:, np.newaxis, :]
gt_pixels_coord = np.tile(gt_pixels_coord, [1, 3**dim, 1]) # shape N x 9 x 2
shifts = all_combinations(([-1, 0, 1],) * dim)
update = np.tile(shifts[np.newaxis, :, :], [gt_pixels_coord.shape[0], 1, 1])
gt_pixels_coord = gt_pixels_coord + update
gt_pixels_coord = np.reshape(gt_pixels_coord, (-1, dim)) # Shape N*9, 2
# Go back to F5 coordinates
gt_pixels_coord = gt_pixels_coord / dim2
rois_result_np = np.vstack([np.floor(rois_np), gt_pixels_coord]) # shape [None, 2]
with tf.Session() as sess:
rois_tf = tf.constant(rois_np, dtype=tf.float32)
gt_pixels_tf = tf.constant(gt_pixels_np, dtype=tf.float32)
rois_tf = include_gt_pixels(rois_tf, gt_pixels_tf, dim1, dim2)
rois_result_tf = sess.run(rois_tf)
return np.allclose(rois_result_tf, rois_result_np)
def test_include_gt_pixels_2d(self):
dim1, dim2 = 8.0, 4.0
# [None, 2] in F5 coordinates
rois_np = np.array([[0, 3], [15, 2], [3, 4], [5.6, 9.1]])
# [None, 2]
gt_pixels_np = np.array([[2.4, 2.3], [3, 4], [6.4, 1.2]])
return self.include_gt_pixels(rois_np, gt_pixels_np, dim1, dim2)
def test_include_gt_pixels_3d(self):
dim1, dim2 = 8.0, 4.0
rois_np = np.random.rand(10, 3)
gt_pixels_np = np.random.rand(4, 3)*dim1*dim2
return self.include_gt_pixels(rois_np, gt_pixels_np, dim1, dim2)
def compute_positives_ppn1(self, gt_pixels_test, N3, dim1, dim2):
        dim = gt_pixels_test.shape[-1]
classes_np = np.zeros((N3,)*dim)
gt_pixels_np = np.floor(gt_pixels_test / (dim1 * dim2)).astype(int)
gt_pixels_np = tuple(zip(*gt_pixels_np))
classes_np[gt_pixels_np] = 1.
classes_mask_np = classes_np.reshape(-1,1).astype(bool) # shape (16*16, 1)
with tf.Session() as sess:
gt_pixels_tf = tf.constant(gt_pixels_test, dtype=tf.float32)
classes_mask_tf = compute_positives_ppn1(gt_pixels_tf, N3, dim1, dim2)
classes_mask_tf = sess.run([classes_mask_tf])
return np.allclose(classes_mask_np, classes_mask_tf)
def test_compute_positives_ppn1_2d(self):
dim1, dim2, N3 = 8.0, 4.0, 16
# Dummy input for testing, num of gt pixels = N = 3
gt_pixels_test = np.array([[5.5, 7.7], [511.1, 433.3], [320, 320]])
return self.compute_positives_ppn1(gt_pixels_test, N3, dim1, dim2)
def test_compute_positives_ppn1_3d(self):
dim1, dim2, N3 = 8.0, 4.0, 16
gt_pixels_test = np.array([[5.5, 7.7, 45.9], [511.1, 433.3, 5.6], [320, 320, 201]])
return self.compute_positives_ppn1(gt_pixels_test, N3, dim1, dim2)
def compute_positives_ppn2(self, closest_gt_distance_test, thres_test):
pixel_count = closest_gt_distance_test.shape[0]
common_shape_np = np.array([pixel_count, 1])
mask_np = np.where(np.greater(closest_gt_distance_test, thres_test), False, True)
mask_np[np.argmin(closest_gt_distance_test)] = True
with tf.Session() as sess:
mask_tf = compute_positives_ppn2(closest_gt_distance_test, threshold=thres_test)
mask_tf = sess.run([mask_tf])
return np.allclose(mask_np, mask_tf)
def test_compute_positives_ppn2_2d(self):
nb_rois, N = 5, 16
closest_gt_distance_test = np.arange(nb_rois*N*N).reshape(-1, 1)
thres_test = 2
return self.compute_positives_ppn2(closest_gt_distance_test, thres_test)
def test_compute_positives_ppn2_3d(self):
nb_rois, N = 5, 16
closest_gt_distance_test = np.arange(nb_rois*N*N*N).reshape(-1, 1)
thres_test = 2
return self.compute_positives_ppn2(closest_gt_distance_test, thres_test)
# TODO test rois option too
def assign_gt_pixels(self, gt_pixels_np, proposals_np, dim1, dim2, rois=None):
dim = proposals_np.shape[-1]
gt_pixels = gt_pixels_np[:, :-1]
gt_pixels = gt_pixels[np.newaxis, :, :]
if rois is not None:
proposals = (proposals_np * dim2 * rois) * dim1
else:
proposals = proposals_np * dim1 * dim2
all_gt_pixels = np.tile(gt_pixels, [proposals_np.shape[0], 1, 1])
proposals = proposals[:, np.newaxis, :]
distances = np.sqrt(np.sum(np.power(proposals - all_gt_pixels, 2), axis=2))
closest_gt = np.argmin(distances, axis=1)
closest_gt_distance = np.amin(distances, axis=1)
gt_pixels_labels = gt_pixels_np[:, -1]
closest_gt_label = [gt_pixels_labels[i] for i in closest_gt]
with tf.Session() as sess:
gt_pixels_tf = tf.constant(gt_pixels_np, dtype=tf.float32)
proposals_tf = tf.constant(proposals_np, dtype=tf.float32)
closest_gt_tf, closest_gt_distance_tf, closest_gt_label_tf = assign_gt_pixels(gt_pixels_tf, proposals_tf, dim1, dim2, rois=rois)
closest_gt_result, closest_gt_distance_result, closest_gt_label_result = sess.run([closest_gt_tf, closest_gt_distance_tf, closest_gt_label_tf])
return np.allclose(closest_gt_result, closest_gt) and np.allclose(closest_gt_distance_result, closest_gt_distance) and np.allclose(closest_gt_label_result, closest_gt_label)
def test_assign_gt_pixels_2d(self):
dim1, dim2 = 8.0, 4.0
gt_pixels_np = np.array([[0.5, 5.6, 1], [53, 76, 2]])
proposals_np = np.array([[1.0, 1.0], [7, 75], [98, 10], [5, 34]])
return self.assign_gt_pixels(gt_pixels_np, proposals_np, dim1, dim2)
def test_assign_gt_pixels_3d(self):
dim1, dim2 = 8.0, 4.0
gt_pixels_np = np.array([[0.5, 5.6, 45, 1], [53, 76, 102, 2]])
proposals_np = np.array([[1.0, 1.0, 0.43], [7, 75, 2.3], [98, 10, 45], [5, 34, 72]])
return self.assign_gt_pixels(gt_pixels_np, proposals_np, dim1, dim2)
def crop_pool_layer(self, net, rois_np, dim2, dim):
rois = np.array(rois_np * dim2).astype(int)
nb_channels = net.shape[-1]
if dim == 2:
rois = [net[:, i[0], i[1], :] for i in rois]
elif dim == 3:
rois = [net[:, i[0], i[1], i[2], :] for i in rois]
rois = np.reshape(rois, (-1,) + (1,) * dim + (nb_channels,))
with tf.Session() as sess:
rois_tf = crop_pool_layer(tf.constant(net, dtype=tf.float32), tf.constant(rois_np, dtype=tf.float32), dim2, dim)
rois_result = sess.run(rois_tf)
return np.allclose(rois, rois_result)
def test_crop_pool_layer_2d(self):
dim2, dim = 4.0, 2
net = np.random.rand(1, 64, 64, 16)
rois_np = np.random.rand(10, 2)*16
return self.crop_pool_layer(net, rois_np, dim2, dim)
def test_crop_pool_layer_3d(self):
dim2, dim = 4.0, 3
net = np.random.rand(1, 64, 64, 64, 16)
rois_np = np.random.rand(10, 3)*16
return self.crop_pool_layer(net, rois_np, dim2, dim)
def test_all_combinations(self):
return np.allclose(all_combinations(([0, 1], [0, 1])), np.array([[0, 0], [0, 1], [1, 0], [1, 1]]))
def slice_rois(self, rois_np, dim2):
dim = rois_np.shape[-1]
rois_slice = []
for i in range(dim):
rois_slice.append(np.multiply(rois_np[:, i], dim2))
rois_slice = np.array(rois_slice)[..., np.newaxis, np.newaxis]
indices = ([-2, -1, 0, 1],) * dim
shifts = all_combinations(indices).T[:, np.newaxis, np.newaxis, :]
all_rois = np.add(rois_slice, shifts)
rois = np.reshape(all_rois, (-1, dim)) / dim2
with tf.Session() as sess:
rois_tf = slice_rois(tf.constant(rois_np, dtype=tf.float32), dim2)
rois_result = sess.run(rois_tf)
return np.allclose(rois, rois_result)
def test_slice_rois_2d(self):
dim2 = 4.0
rois_np = np.random.rand(10, 2) * 64
return self.slice_rois(rois_np, dim2)
def test_slice_rois_3d(self):
dim2 = 4.0
rois_np = np.random.rand(10, 3) * 64
return self.slice_rois(rois_np, dim2)
def test_nms_step(self):
order = np.array([1, 2, 0])
x1 = np.array([0, 2, 3])
x2 = | np.array([1, 3, 4]) | numpy.array |
import cv2
import os
import sys
import numpy as np
import pandas as pd
from glob import glob
import itertools
from visionfuncs.io import sorted_glob
from visionfuncs import cbcalib
from visionfuncs import geometry
from epypes.compgraph import CompGraphRunner
from .io import open_images_all
from .graph import CGCalibrateStereoBase
def prepare_points_for_all_images(runner_prepare, imfiles_1, imfiles_2):
all_images_1 = open_images_all(imfiles_1)
all_images_2 = open_images_all(imfiles_2)
runner_prepare.run(
calibration_images_1=all_images_1,
calibration_images_2=all_images_2
)
def create_runner_calib(im_wh):
cg_calib = CGCalibrateStereoBase()
params_calib = {'im_wh': im_wh}
return CompGraphRunner(cg_calib, params_calib)
def run_calib(impoints_1, impoints_2, indices_subset, pattern_points, im_wh):
runner_calib = create_runner_calib(im_wh)
imp_1 = [impoints_1[idx] for idx in indices_subset]
imp_2 = [impoints_2[idx] for idx in indices_subset]
obp = cbcalib.make_list_of_identical_pattern_points(len(indices_subset), pattern_points)
runner_calib.run(
image_points_1=imp_1,
image_points_2=imp_2,
object_points=obp
)
return runner_calib
def all_images_reprojection_error_for_subsets(calib_runners, runner_prepare):
"""
    For each calibration runner in calib_runners (each obtained by calibrating
    on a subset of the images), solve the PnP problem with the resulting
    intrinsics and compute the reprojection error over all images.
    Return two NumPy arrays of equal length (one per camera), where each
    element is the reprojection error over all images for the intrinsics
    coming from one specific image subset.
"""
rms_list_1 = []
rms_list_2 = []
# for all images
impoints_1 = runner_prepare['image_points_1']
impoints_2 = runner_prepare['image_points_2']
pattern_points = runner_prepare['pattern_points']
def multiple_pnp(impoints, cm, dc): # capturing object_points
rvecs = []
tvecs = []
for imp in impoints:
_, rvec, tvec = cv2.solvePnP(pattern_points, imp, cm, dc)
rvecs.append(rvec)
tvecs.append(tvec)
return rvecs, tvecs
object_points = cbcalib.make_list_of_identical_pattern_points(len(impoints_1), pattern_points)
for rcalib in calib_runners:
cm1 = rcalib['cm_1']
dc1 = rcalib['dc_1']
cm2 = rcalib['cm_2']
dc2 = rcalib['dc_2']
rvecs1, tvecs1 = multiple_pnp(impoints_1, cm1, dc1)
rvecs2, tvecs2 = multiple_pnp(impoints_2, cm2, dc2)
rms1 = cbcalib.reproject_and_measure_error(impoints_1, object_points, rvecs1, tvecs1, cm1, dc1)
rms2 = cbcalib.reproject_and_measure_error(impoints_2, object_points, rvecs2, tvecs2, cm2, dc2)
rms_list_1.append(rms1)
rms_list_2.append(rms2)
return np.array(rms_list_1), | np.array(rms_list_2) | numpy.array |
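# Illustrative sketch (not part of the original module): evaluating several
# calibration subsets in one go. The helper name, the subset index lists and the
# default image size are assumptions; `runner_prepare` is assumed to have been
# filled beforehand via prepare_points_for_all_images above.
def evaluate_reprojection_for_subsets(runner_prepare, subsets, im_wh=(1280, 1024)):
    impoints_1 = runner_prepare['image_points_1']
    impoints_2 = runner_prepare['image_points_2']
    pattern_points = runner_prepare['pattern_points']
    # one stereo calibration per subset of image indices
    calib_runners = [run_calib(impoints_1, impoints_2, idx, pattern_points, im_wh)
                     for idx in subsets]
    # reprojection error over *all* images for each subset's intrinsics
    return all_images_reprojection_error_for_subsets(calib_runners, runner_prepare)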
import numpy as np
import os
import re
import requests
import sys
import time
from netCDF4 import Dataset
import pandas as pd
from bs4 import BeautifulSoup
from tqdm import tqdm
# setup constants used to access the data from the different M2M interfaces
BASE_URL = 'https://ooinet.oceanobservatories.org/api/m2m/' # base M2M URL
SENSOR_URL = '12576/sensor/inv/' # Sensor Information
# setup access credentials
AUTH = ['OOIAPI-853A3LA6QI3L62', '<KEY>']
def M2M_Call(uframe_dataset_name, start_date, end_date):
options = '?beginDT=' + start_date + '&endDT=' + end_date + '&format=application/netcdf'
r = requests.get(BASE_URL + SENSOR_URL + uframe_dataset_name + options, auth=(AUTH[0], AUTH[1]))
if r.status_code == requests.codes.ok:
data = r.json()
else:
return None
# wait until the request is completed
print('Waiting for OOINet to process and prepare data request, this may take up to 20 minutes')
url = [url for url in data['allURLs'] if re.match(r'.*async_results.*', url)][0]
check_complete = url + '/status.txt'
with tqdm(total=400, desc='Waiting') as bar:
for i in range(400):
r = requests.get(check_complete)
bar.update(1)
            elapsed = (i * 3) / 60
            if r.status_code == requests.codes.ok:
                bar.n = 400
                bar.last_print_n = 400
                bar.refresh()
                print('\nrequest completed in %f minutes.' % elapsed)
                break
            else:
                time.sleep(3)
return data
def M2M_Files(data, tag=''):
"""
Use a regex tag combined with the results of the M2M data request to collect the data from the THREDDS catalog.
Collected data is gathered into an xarray dataset for further processing.
:param data: JSON object returned from M2M data request with details on where the data is to be found for download
:param tag: regex tag to use in discriminating the data files, so we only collect the correct ones
:return: the collected data as an xarray dataset
"""
# Create a list of the files from the request above using a simple regex as a tag to discriminate the files
url = [url for url in data['allURLs'] if re.match(r'.*thredds.*', url)][0]
files = list_files(url, tag)
return files
def list_files(url, tag=''):
"""
Function to create a list of the NetCDF data files in the THREDDS catalog created by a request to the M2M system.
:param url: URL to user's THREDDS catalog specific to a data request
:param tag: regex pattern used to distinguish files of interest
:return: list of files in the catalog with the URL path set relative to the catalog
"""
page = requests.get(url).text
soup = BeautifulSoup(page, 'html.parser')
pattern = re.compile(tag)
return [node.get('href') for node in soup.find_all('a', text=pattern)]
def M2M_Data(nclist,variables):
thredds = 'https://opendap.oceanobservatories.org/thredds/dodsC/ooi/'
#nclist is going to contain more than one url eventually
for jj in range(len(nclist)):
url=nclist[jj]
url=url[25:]
dap_url = thredds + url + '#fillmismatch'
openFile = Dataset(dap_url,'r')
for ii in range(len(variables)):
dum = openFile.variables[variables[ii].name]
variables[ii].data = np.append(variables[ii].data, dum[:].data)
tmp = variables[0].data/60/60/24
time_converted = pd.to_datetime(tmp, unit='D', origin=pd.Timestamp('1900-01-01'))
return variables, time_converted
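# Illustrative end-to-end sketch (hypothetical helper name, dates and regex tag;
# assumes AUTH holds valid credentials and network access is available). It is
# wrapped in a function so nothing runs at import time.
def _example_m2m_fetch():
    # declare just the time variable of a METBK dataset (stream name copied from M2M_URLs below)
    var_list = structtype()
    var_list[0].name = 'time'
    var_list[0].units = 'seconds since 1900-01-01'
    # request one hypothetical month of data and collect the matching NetCDF files
    request = M2M_Call('CE02SHSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument',
                       '2019-01-01T00:00:00.000Z', '2019-01-31T00:00:00.000Z')
    files = M2M_Files(request, r'.*METBK.*\.nc$')
    variables, time_converted = M2M_Data(files, var_list)
    return variables, time_converted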
class var(object):
def __init__(self):
"""A Class that generically holds data with a variable name
and the units as attributes"""
self.name = ''
self.data = np.array([])
self.units = ''
def __repr__(self):
return_str = "name: " + self.name + '\n'
return_str += "units: " + self.units + '\n'
return_str += "data: size: " + str(self.data.shape)
return return_str
class structtype(object):
def __init__(self):
""" A class that imitates a Matlab structure type
"""
self._data = []
def __getitem__(self, index):
"""implement index behavior in the struct"""
if index == len(self._data):
self._data.append(var())
return self._data[index]
def __len__(self):
return len(self._data)
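# Quick illustration of the containers above (illustrative only): indexing a
# structtype at its current length appends a fresh var(), so fields can be
# declared positionally exactly as M2M_URLs does below.
#
#   vl = structtype()
#   vl[0].name, vl[0].units = 'time', 'seconds since 1900-01-01'
#   vl[1].name, vl[1].units = 'sea_surface_temperature', 'degC'
#   len(vl)   # -> 2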
def M2M_URLs(platform_name,node,instrument_class,method):
var_list = structtype()
#MOPAK
if platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/SBD17/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD11/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD11/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/SBD17/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD11/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD11/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSPM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSPM/SBS01/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#METBK
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m'
var_list[10].units = 'W/m'
var_list[11].units = 'mm'
var_list[12].units = 'W/m'
var_list[13].units = 'W/m'
var_list[14].units = 'W/m'
var_list[15].units = 'W/m'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m'
var_list[10].units = 'W/m'
var_list[11].units = 'mm'
var_list[12].units = 'W/m'
var_list[13].units = 'W/m'
var_list[14].units = 'W/m'
var_list[15].units = 'W/m'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m'
var_list[10].units = 'W/m'
var_list[11].units = 'mm'
var_list[12].units = 'W/m'
var_list[13].units = 'W/m'
var_list[14].units = 'W/m'
var_list[15].units = 'W/m'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m'
var_list[10].units = 'W/m'
var_list[11].units = 'mm'
var_list[12].units = 'W/m'
var_list[13].units = 'W/m'
var_list[14].units = 'W/m'
var_list[15].units = 'W/m'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
#FLORT
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/SBD17/06-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/SBD17/06-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID27/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID27/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID27/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID27/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSPM/WFP01/04-FLORTK000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
#FDCHP
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'FDCHP' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD12/08-FDCHPA000/telemetered/fdchp_a_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#DOSTA
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID27/04-DOSTAD000/telemetered/dosta_abcdjm_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID27/04-DOSTAD000/telemetered/dosta_abcdjm_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID27/04-DOSTAD000/telemetered/dosta_abcdjm_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID27/04-DOSTAD000/telemetered/dosta_abcdjm_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD37/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD37/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD37/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD37/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSPM/WFP01/02-DOFSTK000/telemetered/dofst_k_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dofst_k_oxygen_l2'
var_list[2].name = 'dofst_k_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'Hz'
var_list[3].units = 'dbar'
#ADCP
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID26/01-ADCPTA000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID26/01-ADCPTC000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID26/01-ADCPTA000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID26/01-ADCPTC000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD35/04-ADCPTM000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD35/04-ADCPTM000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD35/04-ADCPTC000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD35/04-ADCPSJ000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
#ZPLSC
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD37/07-ZPLSCC000/telemetered/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD37/07-ZPLSCC000/telemetered/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD37/07-ZPLSCC000/telemetered/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD37/07-ZPLSCC000/telemetered/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD37/07-ZPLSCC000/recovered_host/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD37/07-ZPLSCC000/recovered_host/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD37/07-ZPLSCC000/recovered_host/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD37/07-ZPLSCC000/recovered_host/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#WAVSS
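# Surface wave spectra (WAVSS) bulk statistics from the surface buoys: wave heights,
# periods, and directional parameters (15 variables per stream).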
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_statistics'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_statistics'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_statistics'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_statistics'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
#VELPT
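# Single-point velocity meter (VELPT): east/north/up velocity components, attitude
# (heading/pitch/roll in deci-degrees), temperature, and pressure.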
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/SBD17/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD11/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD11/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/SBD17/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD11/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD11/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID26/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID26/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID26/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID26/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
#PCO2W
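# Seawater pCO2 (PCO2W): thermistor temperature and pCO2 of seawater, on NSIF and MFN nodes.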
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD35/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD35/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD35/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD35/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
#PHSEN
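# Seawater pH (PHSEN): thermistor temperature and pH, on NSIF and MFN nodes.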
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID26/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID26/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID26/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID26/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD35/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD35/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD35/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD35/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
#SPKIR
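# Multispectral downwelling irradiance (SPKIR): downwelling vector in uW cm-2 nm-1.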
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID26/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID26/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID26/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID26/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
#PRESF
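# Seafloor pressure (PRESF) on the MFN frames: absolute seafloor pressure and seawater temperature.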
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD35/02-PRESFA000/telemetered/presf_abc_dcl_tide_measurement'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD35/02-PRESFA000/telemetered/presf_abc_dcl_tide_measurement'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD35/02-PRESFB000/telemetered/presf_abc_dcl_tide_measurement'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD35/02-PRESFC000/telemetered/presf_abc_dcl_tide_measurement'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
#CTDBP
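# Moored CTD (CTDBP): temperature, practical salinity, density, pressure, and conductivity.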
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD37/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/SBD17/06-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD37/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/SBD17/06-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID27/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID27/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID27/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID27/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD37/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD37/03-CTDBPE000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
#VEL3D
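# 3-D point velocity meter (VEL3D) on the MFN frames: turbulent velocity components
# plus seawater pressure.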
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD35/01-VEL3DD000/telemetered/vel3d_cd_dcl_velocity_data'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD35/01-VEL3DD000/telemetered/vel3d_cd_dcl_velocity_data'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD35/01-VEL3DD000/telemetered/vel3d_cd_dcl_velocity_data'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD35/01-VEL3DD000/telemetered/vel3d_cd_dcl_velocity_data'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
#VEL3DK
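# VEL3D-K on the CE09OSPM wire-following profiler: velocity components, attitude, and
# co-located CTD pressure; followed by the profiler CTD (CTDPF) stream.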
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSPM/WFP01/01-VEL3DK000/telemetered/vel3d_k_wfp_stc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_k_eastward_velocity'
var_list[2].name = 'vel3d_k_northward_velocity'
var_list[3].name = 'vel3d_k_upward_velocity'
var_list[4].name = 'vel3d_k_heading'
var_list[5].name = 'vel3d_k_pitch'
var_list[6].name = 'vel3d_k_roll'
var_list[7].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'ddegrees'
var_list[5].units = 'ddegrees'
var_list[6].units = 'ddegrees'
var_list[7].units = 'dbar'
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSPM/WFP01/03-CTDPFK000/telemetered/ctdpf_ckl_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'ctdpf_ckl_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdpf_ckl_seawater_pressure'
var_list[5].name = 'ctdpf_ckl_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
#PCO2A
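# Air-sea pCO2 (PCO2A) on the surface buoys: surface-seawater and atmospheric pCO2
# plus the derived CO2 flux.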
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD12/04-PCO2AA000/telemetered/pco2a_a_dcl_instrument_water'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD12/04-PCO2AA000/telemetered/pco2a_a_dcl_instrument_water'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD12/04-PCO2AA000/telemetered/pco2a_a_dcl_instrument_water'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD12/04-PCO2AA000/telemetered/pco2a_a_dcl_instrument_water'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
#PARAD
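# Photosynthetically available radiation (PARAD) on the CE09OSPM profiler, with
# co-located CTD pressure.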
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSPM/WFP01/05-PARADK000/telemetered/parad_k__stc_imodem_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_k_par'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
#OPTAA
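# Optical absorption/attenuation (OPTAA); only the time coordinate is defined for these streams.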
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID27/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID27/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID27/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID27/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD37/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD37/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD37/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD37/01-OPTAAC000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#NUTNR
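# Nitrate (NUTNR, SUNA sensor): nitrate concentration and salinity-corrected nitrate in umol/L.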
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/07-NUTNRB000/telemetered/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID26/07-NUTNRB000/telemetered/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID26/07-NUTNRB000/telemetered/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/07-NUTNRB000/telemetered/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID26/07-NUTNRB000/telemetered/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID26/07-NUTNRB000/telemetered/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
##
#MOPAK
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/SBD17/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD11/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD11/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/SBD17/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD11/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD11/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSPM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSPM/SBS01/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#METBK
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD11/06-METBKA000/recovered_host/metbk_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m'
var_list[10].units = 'W/m'
var_list[11].units = 'mm'
var_list[12].units = 'W/m'
var_list[13].units = 'W/m'
var_list[14].units = 'W/m'
var_list[15].units = 'W/m'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD11/06-METBKA000/recovered_host/metbk_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m'
var_list[10].units = 'W/m'
var_list[11].units = 'mm'
var_list[12].units = 'W/m'
var_list[13].units = 'W/m'
var_list[14].units = 'W/m'
var_list[15].units = 'W/m'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD11/06-METBKA000/recovered_host/metbk_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m'
var_list[10].units = 'W/m'
var_list[11].units = 'mm'
var_list[12].units = 'W/m'
var_list[13].units = 'W/m'
var_list[14].units = 'W/m'
var_list[15].units = 'W/m'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD11/06-METBKA000/recovered_host/metbk_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m'
var_list[10].units = 'W/m'
var_list[11].units = 'mm'
var_list[12].units = 'W/m'
var_list[13].units = 'W/m'
var_list[14].units = 'W/m'
var_list[15].units = 'W/m'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
#FLORT
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/SBD17/06-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/SBD17/06-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID27/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID27/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID27/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID27/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
#FDCHP
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'FDCHP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD12/08-FDCHPA000/recovered_host/fdchp_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#DOSTA
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/03-DOSTAD000/recovered_host/dosta_abcdjm_ctdbp_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID27/04-DOSTAD000/recovered_host/dosta_abcdjm_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID27/04-DOSTAD000/recovered_host/dosta_abcdjm_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/03-DOSTAD000/recovered_host/dosta_abcdjm_ctdbp_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID27/04-DOSTAD000/recovered_host/dosta_abcdjm_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID27/04-DOSTAD000/recovered_host/dosta_abcdjm_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD37/03-DOSTAD000/recovered_host/dosta_abcdjm_ctdbp_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD37/03-DOSTAD000/recovered_host/dosta_abcdjm_ctdbp_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD37/03-DOSTAD000/recovered_host/dosta_abcdjm_ctdbp_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD37/03-DOSTAD000/recovered_host/dosta_abcdjm_ctdbp_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
#ADCP
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID26/01-ADCPTA000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID26/01-ADCPTC000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID26/01-ADCPTA000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID26/01-ADCPTC000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD35/04-ADCPTM000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD35/04-ADCPTM000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD35/04-ADCPTC000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD35/04-ADCPSJ000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
#WAVSS
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_statistics_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_statistics_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_statistics_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_statistics_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
#VELPT
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/SBD17/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
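#
#Illustrative sketch (not part of the original script): every branch above
#fills entries of a pre-built var_list, where each entry carries a .name, a
#.data numpy array and a .units string. Assuming the enclosing function hands
#var_list back to the caller (an assumption; the return statement sits outside
#this excerpt), the requested variables can be inspected with a small
#hypothetical helper like this one.
def print_requested_variables(var_list):
    #Print name, units and sample count for every populated entry
    for var in var_list:
        if var.name:
            print("%s [%s]: %d samples" % (var.name, var.units, len(var.data)))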
# Copyright (c) 2015, <NAME> and <NAME> All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation and/or
# other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Author's note:
# This file was distributed as part of the Nature Biotechnology
# supplementary software release for DeepBind. Users of DeepBind
# are encouraged to instead use the latest source code and binaries
# for scoring sequences at
# http://tools.genes.toronto.edu/deepbind/
#
import re
import os
import csv
import copy
import time
import logging
import tempfile
import itertools
import smat as sm
import numpy as np
import numpy.random as npr
import deepity
import scipy
import scipy.stats
import gzip
import cPickle
from .util import acgt2ord,acgtcomplement,ord2acgt
from . import globals
from deepity.util import tic,toc
from os.path import join,basename,splitext,exists
from math import tanh
_dinucs = ["".join(dinuc) for dinuc in itertools.product(['A','C','G','T'],['A','C','G','T'])]
def dinuc_enrichment_features(s):
# Assumption: all kmers have same length
n = len(s)
k = len(_dinucs[0])
expected = float(n-k+1) / (4.**k)
feats = []
for dinuc in _dinucs:
count = sum(1 for _ in re.finditer('(?=%s)'%dinuc, s)) # count all occurrences of the kmer; str.count doesn't count overlapping kmers
#feats.append(count/expected-1.0)
feats.append(0)
return feats
#########################################################################
class datasource(deepity.resident_datasource):
"""
A kangaroo datasource that serves input attributes:
- X_Sequence0...X_Sequencek: a list of "sequence columns",
where each column has the same size, and
is provided under the name X_SequenceName (where SequenceName
was taken from the column header in the sequencefile)
- F: a single table of features F, taken from the featuresfile.
and output attributes:
- Y: the targets, with one column per target
- Ymask: the mask of non-NaN elements in Y
"""
@staticmethod
def fromtxt(sequencefile, featurefile=None, targetfile=None, foldfilter=None, maxrows=None, targetcols=None, sequencenames=None, featurenames=None, targetnames=None, dinucfeatures=True, **kwargs):
# Load each text file, possible from cache
sequencenames, sequences = loadtxt(sequencefile, maxrows=maxrows, colnames=sequencenames)
featurenames, features = loadtxt(featurefile, maxrows=maxrows, colnames=featurenames)
targetnames, targets = loadtxt(targetfile, maxrows=maxrows, colnames=targetnames, usecols=targetcols)
# If the sequence file contained the targets, then split off that extra column
if targets is None and sequencenames[-1].lower() == "bound":
targetnames = [sequencenames.pop()]
targets = [row[-1] for row in sequences]
sequences = [row[:-1] for row in sequences]
rowidx = np.arange(len(sequences)).astype(np.uint32).reshape((-1,1))
# Keep only the rows whose fold ID is listed in foldfilter
if foldfilter:
idx = [i for i in range(len(sequences)) if sequences[i][0] in foldfilter]
sequences = [sequences[i] for i in idx]
rowidx = rowidx[idx]
if features is not None:
features = [features[i] for i in idx]
if targets is not None:
targets = [targets[i] for i in idx]
# Strip out the Fold ID and Event ID columns of the sequence array.
if sequencenames and sequencenames[0].lower() in ("fold","foldid","fold id"):
sequencenames = sequencenames[2:]
foldids = [row[0] for row in sequences]
sequences = [row[2:] for row in sequences]
else:
foldids = ["A" for i in range(len(sequences))]
# Automatically add dinucleotide frequency features for each input sequence
if dinucfeatures:
if not featurenames:
featurenames = []
features = [[] for row in sequences]
for seqname in sequencenames:
featurenames += [seqname+"."+dinuc for dinuc in _dinucs]
for rowfeats, rowseqs in zip(features, sequences):
for s in rowseqs:
rowfeats += dinuc_enrichment_features(s)
return datasource(sequencenames, sequences, featurenames, features, targetnames, targets, foldids, rowidx, **kwargs)
@staticmethod
def _generate_dinuc_featurevec(X):
return dinuc_enrichment_features(ord2acgt(X))
def __init__(self, sequencenames, sequences, featurenames, features, targetnames, targets, foldids, rowidx):
self.sequencenames = sequencenames
self.featurenames = featurenames if features is not None else []
self.targetnames = targetnames if targets is not None else []
nsequence = len(self.sequencenames)
seqattrs = sum([self._seqattrnames(i) for i in range(nsequence)],())
featattr = [("F",),("features",)] if features is not None else [(),()]
targattrs = [("Y","Ymask"),("targets",)] if targets is not None else [(),()]
foldattr = ("foldids",) if foldids is not None else ()
# Initialize the datasource superclass by telling it which input, output,
# and extra attributes to expect, based on the sequence, feature, and target
# columns that are present.
super(datasource,self).__init__(input_attrs = seqattrs + featattr[0],
output_attrs = targattrs[0],
extra_attrs = ("rowidx","sequences") + featattr[1] + targattrs[1] + foldattr, # Attributes not batched or sent to the GPU
)
nrow = len(sequences)
self.rowidx = rowidx
self.sequences = sequences
self.features = np.asarray(features, dtype=np.float32).reshape((nrow,-1)) if features is not None else None
self.targets = np.asarray(targets, dtype=np.float32).reshape((nrow,-1)) if targets is not None else None
self.foldids = foldids
self._task_ids = sorted(self.targetnames)
self.preprocessors = {"features" : [], "targets" : []}
self.requirements = {}
self._create_attributes()
def extract_fold(self, foldid):
idx = np.asarray([i for i in range(len(self)) if self.foldids[i] == foldid])
return self[idx]
def add_requirements(self, reqs):
self.requirements.update(reqs)
def clamp_extremes(self, lo, hi):
self.Y = self.Y.copy() # Make a copy in case we're looking at a row-slice of a larger datasource
self.Ymask = self.Ymask.copy()
self.preprocessors = copy.deepcopy(self.preprocessors)
pp = _clamp_extremes_preprocessor(self.Y, lo, hi)
self.targets = self.Y.copy()
self.Ymask = ~np.isnan(self.Y)
self.preprocessors["targets"].append(pp)
def logtransform_targets(self):
self.Y = self.Y.copy() # Make a copy in case we're looking at a row-slice of a larger datasource
self.Ymask = self.Ymask.copy()
self.preprocessors = copy.deepcopy(self.preprocessors)
pp = _logtransform_preprocessor(self.Y)
self.preprocessors["targets"].append(pp)
def arcsinhtransform_targets(self):
self.Y = self.Y.copy() # Make a copy in case we're looking at a row-slice of a larger datasource
self.Ymask = self.Ymask.copy()
self.preprocessors = copy.deepcopy(self.preprocessors)
pp = _arcsinhtransform_preprocessor(self.Y)
self.preprocessors["targets"].append(pp)
def normalize_targets(self, **requirements):
requirements.update(self.requirements)
if any([value == 'logistic' for value in requirements.values()]):
intercept_mode = "min"
else:
intercept_mode = "mean"
self.Y = self.Y.copy() # Make a copy in case we're looking at a row-slice of a larger datasource
self.Ymask = self.Ymask.copy()
self.preprocessors = copy.deepcopy(self.preprocessors)
pp = _normalize_preprocessor(self.Y, intercept_mode)
self.preprocessors["targets"].append(pp)
def normalize_features(self):
if hasattr(self,"F"):
self.F = self.F.copy() # Make a copy in case we're looking at a row-slice of a larger datasource
self.preprocessors = copy.deepcopy(self.preprocessors)
pp = _normalize_preprocessor(self.F, "mean")
self.preprocessors["features"].append(pp)
def _create_attributes(self):
# Adds public attributes with names matching the sequence, feature, and target columns
nrow = len(self)
nseq = len(self.sequencenames)
for i in range(nseq):
Xname,Rname = self._seqattrnames(i)
self.__dict__[Xname] = [row[i] for row in self.sequences]
self.__dict__[Rname] = np.zeros((nrow,1), np.uint32) # empty until set during asbatches()
if self.features is not None:
self.__dict__['F'] = self.features.copy()
if self.targets is not None:
self.__dict__['Y'] = self.targets.copy()
self.__dict__['Ymask'] = ~np.isnan(self.targets)
def _seqattrnames(self, index):
return ('X_%s'%self.sequencenames[index], 'R_%s'%self.sequencenames[index])
def __len__(self):
return len(self.rowidx)
def open(self):
return
def load_preprocessors(self, indir):
if not os.path.exists(join(indir, 'preprocessors.pkl')):
return
with open(join(indir, 'preprocessors.pkl'),'rb') as f:
assert not self.preprocessors['features'], "Cannot load preprocessors for a datasource with already-preprocessed features."
assert not self.preprocessors['targets'], "Cannot load preprocessors for a datasource with already-preprocessed targets."
self.preprocessors = cPickle.load(f)
for pp in self.preprocessors['features']:
self.F = self.F.copy()
pp.apply(self.F)
for pp in self.preprocessors['targets']:
self.Y = self.Y.copy()
self.Ymask = self.Ymask.copy()
pp.apply(self.Y)
def dump_preprocessors(self, outdir, cols=None):
if cols is None:
cols = slice(None)
preprocessors_sliced = { 'features' : self.preprocessors['features'],
'targets' : [pp.slice(cols) for pp in self.preprocessors['targets']] }
with open(join(outdir, 'preprocessors.pkl'), 'wb') as f:
cPickle.dump(preprocessors_sliced, f)
def _insert_reversecomplements(self):
if "reverse_complement" not in globals.flags:
return
nseq = len(self.sequencenames)
for i in range(nseq):
Xname,Rname = self._seqattrnames(i)
X = getattr(self, Xname)
rows = range(len(X))
Xrev = [acgtcomplement(x[::-1]) for x in X]
newX = [Xrev[i] if j else X[i] for i in rows for j in (0,1)]
setattr(self, Xname, newX)
# For all the other attributes, simply duplicate their rows.
duprows = np.repeat(np.arange(len(self)), 2)
if hasattr(self, "rowidx"):self.rowidx = self.rowidx[duprows,:]
if hasattr(self, "Y"): self.Y = self.Y[duprows,:]
if hasattr(self, "Ymask"): self.Ymask = self.Ymask[duprows,:]
if hasattr(self, "F"):
self.F = self.F[duprows,:]
# HACK: For dinuc statistic features, adjust columns.
fwdrows = np.arange(0,len(self.F),2)
revrows = np.arange(1,len(self.F),2)
for j in range(len(self.featurenames)):
fname = self.featurenames[j]
if "." in fname:
prefix, suffix = fname.rsplit(".",1)
if suffix in _dinucs:
rcsuffix = acgtcomplement(suffix[::-1])
k = self.featurenames.index(prefix+"."+rcsuffix)
self.F[revrows,k] = self.F[fwdrows,j]
return
def asbatches(self, batchsize=64, reshuffle=False):
n = len(self)
assert n > 0
nbatch = (n + batchsize - 1) // batchsize
nseq = len(self.sequencenames)
padding = self.requirements.get('padding',0)
batches = []
for i in range(nbatch):
# Slice our data attributes row-wise, according to batch index
batch = self[np.arange(i*batchsize,min(n,(i+1)*batchsize))]
batch._insert_reversecomplements()
# Convert each sequence attribute from a list of strings ("GATC") to a
# single contiguous numpy array X (0..3), along with a list of
# regions R that identify the batch-relative offsets to the start/end
# of each individual sequence
for i in range(nseq):
Xname,Rname = self._seqattrnames(i)
batchX = getattr(batch, Xname)
batchR = np.asarray(np.cumsum([0]+[padding+len(x) for x in batchX]),np.uint32).reshape((-1,1))
batchR = np.hstack([batchR[:-1],batchR[1:]])
# Convert list of strings to giant contiguous array of integers 0..3,
# with padding values of 255 put between the individual sequences
batchX = acgt2ord(("."*padding).join([""]+[x for x in batchX]+[""])).reshape((-1,1))
# Convert each batch from numpy array to sarray,
# and then quickly forget about the numpy batch
batchX = sm.asarray(batchX)
batchR = sm.asarray(batchR)
setattr(batch, Xname, batchX)
setattr(batch, Rname, batchR)
setattr(batch, "regions", batchR)
batch._data_attrs = batch._data_attrs + ("regions",)
if hasattr(batch,"F") and batch.F is not None:
batch.F = sm.asarray(batch.F,sm.get_default_dtype())
if hasattr(batch,"Y") and batch.Y is not None:
batch.Y = sm.asarray(batch.Y,sm.get_default_dtype())
if isinstance(batch.Ymask,np.ndarray):
batch.Ymask = sm.asarray(batch.Ymask)
batches.append(batch)
return deepity.shuffled_repeat_iter(batches, reshuffle)
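# --- Usage sketch, not part of the original DeepBind release ---
# A minimal, hypothetical example of how this datasource is typically driven;
# the file names are placeholders, and the calls mirror the methods defined
# above (fromtxt, clamp_extremes, normalize_targets, asbatches).
def _example_build_batches():
    ds = datasource.fromtxt("sequences.tsv", targetfile="targets.tsv", foldfilter="AB")
    ds.clamp_extremes(0.0, 99.99)   # clamp extreme targets (placeholder bounds)
    ds.normalize_targets()          # zero-mean / unit-variance targets
    return ds.asbatches(batchsize=64, reshuffle=False)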
###################################################################################
class _preprocessor(object):
def apply(self, data): raise NotImplementedError("Subclass should implement this.")
def undo(self, data): raise NotImplementedError("Subclass should implement this.")
def slice(self, cols): return self # Do nothing by default
class _normalize_preprocessor(_preprocessor):
def __init__(self, data, intercept_mode):
self.scales = []
self.biases = []
# Preprocess each column to have unit variance and zero mean
ncol = data.shape[1]
for i in range(ncol):
col = data[:,i:i+1]
mask = ~np.isnan(col)
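# The original file is cut off at this point; the lines below are only a
# sketch of how such a per-column normalization typically continues (shift by
# the mean, or by the minimum when intercept_mode is "min", then scale the
# non-NaN entries to unit variance). They are an assumption, not the released
# DeepBind code.
vals = col[mask]
bias = -np.min(vals) if intercept_mode == "min" else -np.mean(vals)
scale = 1.0 / max(float(np.std(vals)), 1e-8)
col[mask] = (vals + bias) * scale
self.biases.append(bias)
self.scales.append(scale)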
"""
In this example we use the pysid library to estimate a SIMO armax model
"""
#Import Libraries
from numpy import concatenate, dot, zeros, sqrt
from numpy.random import rand, randn #To generate the experiment
from scipy.signal import lfilter #To generate the data
from pysid import armax #To estimate an armax model
#True System
#Number of inputs
nu = 1
#Number of outputs
ny = 2
#Orders
na = [[2, 2], [2, 2]] #This variable must be (ny x ny)
nb = [1, 1] #This variable must be (ny x nu)
nk = [1, 1] #This variable must be (ny x nu)
nc = [2, 2] #This variable must be (ny x 1)
#with the following true parameters
A1o = [1, -1.2, 0.36]
A12o = [0, 0.09, -0.1]
A2o = [1, -1.6, 0.64]
A21o = [0, 0.2, -0.01]
B1o = [0, 0.5, 0.4]
B2o = [0, 0.2,-0.3]
C1o = [1, 0.8,-0.1]
C2o = [1, 0.9,-0.2]
#True parameter vector
thetao = [-1.2, 0.36, 0.5, 0.4, 0.2, -0.3, 0.8, -0.1]
#Generate the experiment
#The true system is generated by the following relation:
# S: y(t) = Go(q)*u(t) + Ho(q)*e(t),
#with u(t) the input and e white noise.
#Number of Samples
N = 400
#Take u as uniform
u = -sqrt(3) + 2*sqrt(3)*rand(N, nu)
#Generate gaussian white noise with standard deviation 0.01
e = 0.01*randn(N, ny)
#Calculate the y through S (ARMAX: G(q) = B(q)/A(q) and H(q) = C(q)/A(q))
y1 = zeros((N, 1))
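#A possible continuation, sketched for clarity rather than taken verbatim from
#the original example: each output is simulated as a decoupled ARMAX channel
#(the cross-polynomials A12o and A21o are dropped in this simplification), and
#the model is then estimated with pysid's armax routine, whose argument order
#(na, nb, nc, nk, u, y) is assumed from the library's other examples.
y2 = zeros((N, 1))
y1[:, 0] = lfilter(B1o, A1o, u[:, 0]) + lfilter(C1o, A1o, e[:, 0])
y2[:, 0] = lfilter(B2o, A2o, u[:, 0]) + lfilter(C2o, A2o, e[:, 1])
y = concatenate((y1, y2), axis=1)
#Estimate the ARMAX model from the experiment data
m = armax(na, nb, nc, nk, u, y)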
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np
from six.moves import xrange
import tensorflow as tf
import os, sys, pickle, argparse
sys.path.append('../utils/')
from model_eval import model_eval
from scipy.misc import logsumexp
import keras.backend
sys.path.append('load/')
from load_classifier import load_classifier
def comp_logp(logit, y, text, comp_logit_dist = False):
logpx = logsumexp(logit, axis=1)
logpx_mean = np.mean(logpx)
logpx_std = np.sqrt(np.var(logpx))
logpxy = np.sum(y * logit, axis=1)
logpxy_mean = []; logpxy_std = []
for i in xrange(y.shape[1]):
ind = np.where(y[:, i] == 1)[0]
logpxy_mean.append(np.mean(logpxy[ind]))
logpxy_std.append(np.sqrt(np.var(logpxy[ind])))
print('%s: logp(x) = %.3f +- %.3f, logp(x|y) = %.3f +- %.3f' \
% (text, logpx_mean, logpx_std, np.mean(logpxy_mean), np.mean(logpxy_std)))
results = [logpx, logpx_mean, logpx_std, logpxy, logpxy_mean, logpxy_std]
# compute distribution of the logits
if comp_logit_dist:
logit_mean = []
logit_std = []
logit_kl_mean = []
logit_kl_std = []
softmax_mean = []
for i in xrange(y.shape[1]):
ind = np.where(y[:, i] == 1)[0]
logit_mean.append(np.mean(logit[ind], 0))
logit_std.append(np.sqrt(np.var(logit[ind], 0)))
logit_tmp = logit[ind] - logsumexp(logit[ind], axis=1)[:, np.newaxis]
softmax_mean.append(np.mean(np.exp(logit_tmp), 0))
logit_kl = np.sum(softmax_mean[i] * (np.log(softmax_mean[i]) - logit_tmp), 1)
logit_kl_mean.append(np.mean(logit_kl))
logit_kl_std.append(np.sqrt(np.var(logit_kl)))
results.extend([logit_mean, logit_std, logit_kl_mean, logit_kl_std, softmax_mean])
return results
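# Illustrative sanity check (added comment, not part of the original script): the
# code above appears to treat the classifier logits as log p(x, y), so logsumexp
# over the class axis gives log p(x) and sum(y * logit, axis=1) picks out
# log p(x, y) for the labelled class. With hypothetical all-zero logits over 4 classes:
#   logit = np.zeros((4, 4)); y = np.eye(4)
#   comp_logp(logit, y, 'sanity')   # prints logp(x) = log(4) ~ 1.386 +- 0.000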
def comp_detect(x, x_mean, x_std, alpha, plus):
if plus:
detect_rate = np.mean(x > x_mean + alpha * x_std)
else:
detect_rate = np.mean(x < x_mean - alpha * x_std)
return detect_rate * 100
def search_alpha(x, x_mean, x_std, target_rate = 5.0, plus = False):
alpha_min = 0.0
alpha_max = 3.0
alpha_now = 1.5
detect_rate = comp_detect(x, x_mean, x_std, alpha_now, plus)
T = 0
while np.abs(detect_rate - target_rate) > 0.01 and T < 20:
if detect_rate > target_rate:
alpha_min = alpha_now
else:
alpha_max = alpha_now
alpha_now = 0.5 * (alpha_min + alpha_max)
detect_rate = comp_detect(x, x_mean, x_std, alpha_now, plus)
T += 1
return alpha_now, detect_rate
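# Hedged usage sketch (added, not in the original): search_alpha bisects the
# threshold multiplier so that roughly `target_rate` percent of *clean* statistics
# fall below mean - alpha*std (or above mean + alpha*std when plus=True). The
# array below is a hypothetical stand-in for clean-data log p(x) values from comp_logp.
#   logpx_clean = np.random.randn(1000)
#   alpha, rate = search_alpha(logpx_clean, logpx_clean.mean(),
#                              logpx_clean.std(), target_rate=5.0, plus=False)
#   # rate should now be close to 5.0 (percent of clean inputs flagged)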
def test_attacks(batch_size, conv, guard_name, targeted, attack_method, victim_name, data_name, save):
# Set TF random seed to improve reproducibility
tf.set_random_seed(1234)
# Create TF session
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
sess = tf.Session(config=config)
print("Created TensorFlow session.")
# Get MNIST test data
use_data = True
if use_data:
if data_name == 'mnist':
img_rows, img_cols, channels = 28, 28, 1
from cleverhans.utils_mnist import data_mnist
x_train, y_train, x_clean, y_clean = data_mnist(train_start=0,
train_end=60000,
test_start=0,
test_end=10000)
if data_name in ['cifar10', 'plane_frog']:
img_rows, img_cols, channels = 32, 32, 3
from import_data_cifar10 import load_data_cifar10
labels = None
if data_name == 'plane_frog':
labels = [0, 6]
datapath = '../cifar_data/'
x_train, x_clean, y_train, y_clean = load_data_cifar10(datapath, labels=labels)
nb_classes = y_train.shape[1]
# Define input TF placeholder
x = tf.placeholder(tf.float32, shape=(batch_size, img_rows, img_cols, channels))
y = tf.placeholder(tf.float32, shape=(batch_size, nb_classes))
# Define TF model graph
gen = load_classifier(sess, guard_name, data_name)
if 'bayes' in guard_name and 'distill' not in guard_name and 'query' not in guard_name:
vae_type = guard_name[-1]
guard_name += '_cnn'
# now perform detection
path = 'raw_attack_results/' + victim_name + '/'
print(path)
assert os.path.isdir(path)
filename = data_name + '_' + attack_method
if targeted:
filename = filename + '_targeted'
else:
filename = filename + '_untargeted'
filename = path + filename + '.pkl'
x_adv, _, y_clean, adv_logits = pickle.load(open(filename, 'rb'))
# for cifar-binary, need to extract test data that all the classifiers agree on
if data_name == 'plane_frog':
load_path = 'data_ind/'
ind = range(x_clean.shape[0])
classifiers = ['bayes_K10_A_cnn', 'bayes_K10_B_cnn', 'bayes_K10_C_cnn',
'bayes_K10_D_cnn', 'bayes_K10_E_cnn', 'bayes_K10_F_cnn',
'bayes_K10_G_cnn']#, 'bnn_K10']
for c in classifiers:
filename = load_path + data_name + '_' + c + '.pkl'
tmp = pickle.load(open(filename, 'rb'))
ind = list(set(ind) & set(tmp))
        print('crafting adversarial examples only on correctly predicted images...')
print('%d / %d in total' % (len(ind), x_clean.shape[0]))
x_clean = x_clean[ind]; y_clean = y_clean[ind]
print(len(ind), x_adv.shape, adv_logits.shape)
x_adv = x_adv[ind]; adv_logits = adv_logits[ind]
print("data loaded from %s, %d samples in total" % (filename, x_adv.shape[0]))
print(x_clean.shape, x_adv.shape)
if 'bnn' not in guard_name:
keras.backend.set_learning_phase(0)
else:
keras.backend.set_learning_phase(1)
y_logit_op = gen.predict(x, softmax=False)
# compute classification
y_logit_adv = []
for i in xrange(int(x_adv.shape[0] / batch_size)):
X_batch = x_adv[i*batch_size:(i+1)*batch_size]
y_logit_adv.append(sess.run(y_logit_op, feed_dict={x: X_batch}))
y_logit_adv = np.concatenate(y_logit_adv, 0)
N_adv_total = y_logit_adv.shape[0]
x_clean = x_clean[:N_adv_total]; y_clean = y_clean[:N_adv_total]
x_adv = x_adv[:N_adv_total]; adv_logits = adv_logits[:N_adv_total]
test_attack = False
if guard_name != victim_name:
if guard_name + '_cnn' != victim_name:
print('test transfer attack: attack crafted on victim model')
test_attack = True
if 'distill' in victim_name:
print('test gray-box attack: attack crafted on a distilled model')
test_attack = True
if test_attack:
# test adversarial example transfer, compute the classification again
print('test adversarial example transfer from %s to %s' % (victim_name, guard_name))
y_adv = np.zeros((y_logit_adv.shape[0], nb_classes), dtype=np.float32)
y_adv[np.arange(y_logit_adv.shape[0]), | np.argmax(y_logit_adv, 1) | numpy.argmax |
import math
import numpy as np
import pybullet as p
from gym_pybullet_drones.control.BaseControl import BaseControl
from gym_pybullet_drones.envs.BaseAviary import DroneModel, BaseAviary
from gym_pybullet_drones.utils.utils import nnlsRPM
class SimplePIDControl(BaseControl):
"""Generic PID control class without yaw control.
Based on https://github.com/prfraanje/quadcopter_sim.
"""
################################################################################
def __init__(self,
drone_model: DroneModel,
g: float=9.8
):
"""Common control classes __init__ method.
Parameters
----------
drone_model : DroneModel
The type of drone to control (detailed in an .urdf file in folder `assets`).
g : float, optional
The gravitational acceleration in m/s^2.
"""
super().__init__(drone_model=drone_model, g=g)
if self.DRONE_MODEL not in [DroneModel.HB, DroneModel.ARDRONE2]:
print("[ERROR] in SimplePIDControl.__init__(), SimplePIDControl requires DroneModel.HB")
exit()
self.P_COEFF_FOR = np.array([1, 1, 2])
self.I_COEFF_FOR = np.array([.001, .001, .001])
self.D_COEFF_FOR = np.array([3, 3, 4])
self.P_COEFF_TOR = np.array([3, 3, .5])
self.I_COEFF_TOR = | np.array([.001, .001, .001]) | numpy.array |
# Copyright 2019 AUI, Inc. Washington DC, USA
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
def mask(xds, name='MASK1', ra=None, dec=None, pixels=None, pol=-1, channels=-1):
"""
Create a new mask Data variable in the Dataset \n
.. note:: This function currently only supports rectangles and integer pixel boundaries
Parameters
----------
xds : xarray.core.dataset.Dataset
input Image
name : str
dataset variable name for mask, overwrites if already present
ra : list
right ascension coordinate range in the form of [min, max]. Default None means all
dec : list
declination coordinate range in the form of [min, max]. Default None means all
pixels : numpy.ndarray
array of shape (N,2) containing pixel box. AND'd with ra/dec
pol : int or list
polarization dimension(s) to include in mask. Default of -1 means all
channels : int or list
channel dimension(s) to include in mask. Default of -1 means all
Returns
-------
xarray.core.dataset.Dataset
output Image
"""
import numpy as np
import xarray as xr
# type checking/conversion
if not name.strip(): name = 'maskX'
if ra is None: ra = [0.0, 0.0]
if dec is None: dec = [0.0, 0.0]
if pixels is None: pixels = np.zeros((1,2), dtype=int)-1
pixels = np.array(pixels, dtype=int)
if (pixels.ndim != 2) or (pixels.shape[1] != 2):
print('ERROR: pixels parameter not a (N,2) array')
return None
pol = np.array(np.atleast_1d(pol), dtype=int)
if pol[0] == -1: pol = [-1]
channels = np.array(np.atleast_1d(channels), dtype=int)
if channels[0] == -1: channels = [-1]
# define mask within ra/dec range
mask = xr.zeros_like(xds.IMAGE, dtype=bool).where((xds.right_ascension > np.min(ra)) &
(xds.right_ascension < np.max(ra)) &
(xds.declination > np.min(dec)) &
(xds.declination < np.max(dec)), True)
# AND pixel values with ra/dec values
mask = mask & xr.zeros_like(xds.IMAGE, dtype=bool).where((xds.d0 > np.min(pixels[:, 0])) &
(xds.d0 < np.max(pixels[:, 0])) &
(xds.d1 > | np.min(pixels[:, 1]) | numpy.min |
import cv2
import numpy as np
import math
from PIL import Image
import random
class DIP:
def __init__(self):
pass
def read(self, file):
return np.array(Image.open(file))
def save(self, file, image):
return cv2.imwrite(file, image )
def resize(self, image, size):
return cv2.resize(image, (size[0], size[1]))
def cvtGreyscale(self, image):
grey = np.dot(image[...,:3], [0.2989, 0.5870, 0.114])
grey /= np.max(grey)
return grey
def gaussianKernel(self, kernelSize, sigma, flag=True, BilSpatial=None):
normal = 1 / (2.0 * np.pi * sigma * sigma)
if flag:
center = kernelSize // 2
x, y = np.mgrid[-center:center + 1, -center:center + 1]
kernel = np.exp(-((x * x + y * y) / (2.0 * sigma * sigma))) * normal
else:
kernel = np.exp(-(kernelSize*kernelSize / (2.0 * sigma * sigma)))
kernel = np.multiply(kernel, BilSpatial)
return kernel
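    # Added note (illustrative, not in the original): with flag=True this builds a
    # spatial Gaussian window; `normal` is the continuous-density constant, so the
    # discrete weights do not sum exactly to one. A typical extra normalisation step is:
    #   k = DIP().gaussianKernel(kernelSize=5, sigma=1.0)
    #   k = k / k.sum()   # weights now sum to 1, keeping filtered intensities in range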
def gaussianFilter(self, image, kernelSize, sigma):
gKernel = self.gaussianKernel(kernelSize, sigma)
output = np.zeros(image.shape, np.float)
padded_image = np.pad(image, int((kernelSize - 1) / 2), 'edge')
for row in range(image.shape[1]):
for col in range(image.shape[0]):
output[col, row] = np.sum(gKernel * padded_image[col:col + kernelSize, row:row + kernelSize])
output /= np.max(output)
return output
def gabf(self, image, kernelSize, sigmaS, sigmaR):
spatialKernel = self.gaussianKernel(kernelSize, sigmaS)
LP_guide = np.zeros(image.shape, np.float)
output = np.zeros(image.shape, np.float)
padded_image = np.pad(image, int((kernelSize - 1) / 2), 'edge')
for row in range(image.shape[1]):
for col in range(image.shape[0]):
LP_guide[col, row] = np.sum(spatialKernel * padded_image[col:col + kernelSize, row:row + kernelSize])
LP_guide /= np.max(LP_guide)
padded_image = np.pad(LP_guide, int((kernelSize - 1) / 2), 'edge')
for row in range(image.shape[1]):
for col in range(image.shape[0]):
neighb_win = padded_image[col:col + kernelSize, row:row + kernelSize]
intensity_diff = np.absolute(image[col, row] - neighb_win)
weights = self.gaussianKernel(intensity_diff, sigmaR, flag=False, BilSpatial=spatialKernel)
vals = np.sum(np.multiply(weights, neighb_win))
norm = np.sum(weights)
output[col, row] = np.divide(vals, norm, out=np.zeros_like(vals), where=norm != 0)
output /= np.max(output)
return output
def median(self, image, kernelSize):
output = np.zeros(image.shape, np.float)
padded_image = np.pad(image, int((kernelSize - 1) / 2), 'edge')
for row in range(image.shape[1]):
for col in range(image.shape[0]):
neighb_win = padded_image[col:col + kernelSize, row:row + kernelSize]
output[col, row] = np.median(neighb_win)
output /= np.max(output)
return output
def gradient2x2(self, image):
kernelSize = 2
gX = np.array([
[-1, 1],
[-1, 1]
])
gY = np.array([
[1, 1],
[-1, -1]
])
G_x = np.zeros(image.shape, np.float)
G_y = np.zeros(image.shape, np.float)
padded_image = np.pad(image, int((kernelSize - 1) / 2), 'edge')
for row in range(image.shape[1]): # loop through row
for col in range(image.shape[0]): # loop through col
pix = padded_image[col:col + kernelSize, row:row + kernelSize] # get pixel value
G_x[col, row] = np.sum(np.multiply(gX, pix))
G_y[col, row] = np.sum(np.multiply(gY, pix))
filtered_image = np.hypot(G_x, G_y)
angle_image = | np.arctan2(G_y, G_x) | numpy.arctan2 |
import numpy as np
import time
import scipy.sparse as sp
import networkx as nx
from revop import *
import os
import sys
from joblib import Parallel, delayed
from multiprocessing import Process, Manager
DATA_PATH = '/media/chundi/3b6b0f74-0ac7-42c7-b76b-00c65f5b3673/revisitop/cnnimageretrieval-pytorch/data/test/matlab_data'
if os.path.exists(DATA_PATH)==False:
DATA_PATH = '/d2/lmk_code/revisitop/data'
if os.path.exists(DATA_PATH)==False:
DATA_PATH = '/media/gcn-gae/data'
assert os.path.exists(DATA_PATH),'out of data path to search, add your path to preprocess_graph!'
def sparse_to_tuple(sparse_mx):
if not sp.isspmatrix_coo(sparse_mx):
sparse_mx = sparse_mx.tocoo()
coords = | np.vstack((sparse_mx.row, sparse_mx.col)) | numpy.vstack |
# Parallel implementation template from: https://gitlab.com/lucasrthompson/Sonic-Bot-In-OpenAI-and-NEAT
import os
import pickle
import numpy as np
import neat
import visualize
from IceGame import IceGame
DRAW_NETS = True
NUM_WORKERS = 4 # number of workers for parallel genome score evaluation
NUM_RUNS = 1 # game runs per genome
NUM_GEN = 5000 # max number of generations
MUL_LEVELS = True # train on multiple levels?
NUM_ACTIONS = 4
MAX_STEPS = 57 # max steps for fastest completion
filename = "ezrmazeLevels.txt" # file with training levels
fn_results = "feedforward-" + filename.replace(".txt", "")
class Worker():
def __init__(self, genome, config):
self.genome = genome
self.config = config
def doWork(self):
self.env = IceGame(filename, max_steps=MAX_STEPS, multiple=MUL_LEVELS)
net = neat.nn.feed_forward.FeedForwardNetwork.create(self.genome, self.config)
total_reward = 0.0
for _ in range(NUM_RUNS):
ob = self.env.reset()
done = False
while True:
# Input is simple state of the game in a grid
inputs = np.ndarray.flatten(ob) / 4 # normalize between 0-1
nn_output = net.activate(inputs)
action = | np.argmax(nn_output) | numpy.argmax |
import pytest
import numpy as np
import tensorflow as tf
import jax
import torch
from tensornetwork.backends import backend_factory
#pylint: disable=line-too-long
from tensornetwork.matrixproductstates.mpo import (FiniteMPO,
BaseMPO,
InfiniteMPO,
FiniteFreeFermion2D)
from tensornetwork.matrixproductstates.finite_mps import FiniteMPS
from tensornetwork.matrixproductstates.dmrg import FiniteDMRG
@pytest.fixture(
name="backend_dtype_values",
params=[('numpy', np.float64), ('numpy', np.complex128),
('tensorflow', tf.float64), ('tensorflow', tf.complex128),
('pytorch', torch.float64), ('jax', np.float64),
('jax', np.complex128)])
def backend_dtype(request):
return request.param
def test_base_mpo_init(backend_dtype_values):
backend = backend_factory.get_backend(backend_dtype_values[0])
dtype = backend_dtype_values[1]
tensors = [
backend.randn((1, 5, 2, 2), dtype=dtype),
backend.randn((5, 5, 2, 2), dtype=dtype),
backend.randn((5, 1, 2, 2), dtype=dtype)
]
mpo = BaseMPO(tensors=tensors, backend=backend, name='test')
assert mpo.backend is backend
assert mpo.dtype == dtype
np.testing.assert_allclose(mpo.bond_dimensions, [1, 5, 5, 1])
def test_base_mpo_raises():
backend = backend_factory.get_backend('numpy')
tensors = [
backend.randn((1, 5, 2, 2), dtype=np.float64),
backend.randn((5, 5, 2, 2), dtype=np.float64),
backend.randn((5, 1, 2, 2), dtype=np.float32)
]
with pytest.raises(TypeError):
BaseMPO(tensors=tensors, backend=backend)
mpo = BaseMPO(tensors=[], backend=backend)
mpo.tensors = tensors
with pytest.raises(TypeError):
mpo.dtype
def test_finite_mpo_raises(backend):
tensors = [np.random.rand(2, 5, 2, 2), np.random.rand(5, 1, 2, 2)]
with pytest.raises(ValueError):
FiniteMPO(tensors=tensors, backend=backend)
tensors = [np.random.rand(1, 5, 2, 2), np.random.rand(5, 2, 2, 2)]
with pytest.raises(ValueError):
FiniteMPO(tensors=tensors, backend=backend)
def test_infinite_mpo_raises(backend):
tensors = [np.random.rand(2, 5, 2, 2), | np.random.rand(5, 3, 2, 2) | numpy.random.rand |
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
import nipype.interfaces.fsl as fsl # fsl
from nipype.algorithms.misc import TSNR
import nipype.interfaces.utility as util # utility
import nipype.pipeline.engine as pe # pypeline engine
from nipype.algorithms.rapidart import ArtifactDetect
from nipype.interfaces.fsl.utils import ImageMaths
# For unwarping
from dti_wf import my_robex, invert_contrast, my_ants_registration_syn
from nipype.workflows.dmri.fsl.artifacts import remove_bias
from nipype.interfaces.freesurfer.preprocess import MRIConvert
from nipype.interfaces.ants.resampling import WarpTimeSeriesImageMultiTransform
from nipype.interfaces.fsl.preprocess import FAST
def morph_open_close(vol_in, sphere_radius=7, suffix='smooth'):
'''
    Performs an open and then a close morphological operation on the input mask,
    using FSL's fslmaths. Added by <NAME>.
'''
import os
def splitext2(path):
for ext in ['.nii.gz']:
if path.endswith(ext):
path, ext = path[:-len(ext)], path[-len(ext):]
break
else:
path, ext = os.path.splitext(path)
return path, ext
vol_p, vol_e = splitext2(vol_in)
vol_out = ''.join([vol_p, '_' + suffix, vol_e])
cmd = ('fslmaths -dt input {vol_in} -kernel sphere {sphere_radius} '
'-ero -dilD -dilD -ero {vol_out}')
op = os.system(cmd.format(vol_in=vol_in, sphere_radius=sphere_radius,
vol_out=vol_out))
print(op)
return vol_out
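# Example of the naming convention (added for illustration; the file name is
# hypothetical): splitext2 keeps the double extension together, so
# vol_in='brainmask.nii.gz' with the default suffix='smooth' yields
# 'brainmask_smooth.nii.gz', and the fslmaths call erodes, dilates twice, then
# erodes again, i.e. a morphological open followed by a close.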
def concatetante_reg_files(file1, file2):
'''
For merging regressors
'''
import numpy as np
import os.path as op
in_mats = {f: np.loadtxt(f) for f in [file1, file2]}
out_mat = np.hstack(tuple(in_mats.values()))
out_dir = op.dirname(file1)
out_fn = op.join(out_dir, 'merged_regressors.txt')
np.savetxt(out_fn, out_mat)
return out_fn
def remove_first_n_frames(in_vol_fn, n_frames=5):
import nibabel as nib
import os.path as op
im_d = nib.load(in_vol_fn)
in_4d = im_d.get_data()
out_4d = in_4d[:, :, :, n_frames:]
out_dir = op.dirname(in_vol_fn)
in_fn = op.basename(in_vol_fn).split('.')[0]
out_fn = op.join(out_dir, in_fn + '_start_excised.nii.gz')
out_header = im_d.header
out_affine = im_d.affine
out_img = nib.Nifti1Image(out_4d, out_affine, header=out_header)
nib.save(out_img, out_fn)
return out_fn
def extract_noise_components(realigned_file, noise_mask_file, num_components):
"""Derive components most reflective of physiological noise
num_components: the number of components to use. If <1, it means
    fraction of the variance that needs to be explained.
"""
import os
from nibabel import load
import numpy as np
import scipy as sp
imgseries = load(realigned_file)
components = None
mask = load(noise_mask_file).get_data()
voxel_timecourses = imgseries.get_data()[mask > 0]
voxel_timecourses[np.isnan( | np.sum(voxel_timecourses, axis=1) | numpy.sum |
from pytpt.validation import is_stochastic_matrix, is_irreducible_matrix
import numpy as np
import pytest
from pytpt import periodic
import random
import functools
class TestPeriodic:
@pytest.fixture
def P_random(self, S, M, seed):
''' Random periodic stationary transition matrix
Args:
S: int
dimension of the state space
seed: int
seed
'''
# set seed
np.random.seed(seed)
# create random matrix uniformly distributed over [0, 1) and normalize
# at time point mod 0
P0 = np.random.rand(S, S)
P0 = np.divide(P0, np.sum(P0, axis=1).reshape(S, 1))
# at last time point
P1 = np.random.rand(S, S)
P1 = np.divide(P1, np.sum(P1, axis=1).reshape(S, 1))
# transition matrix interpolates between P0 and P1 during period
def P_M(k, M):
gamma = np.mod(k, M) / (M-1) # ranges from 0 to 1 during each period
return (1-gamma)*P0 + gamma*P1
return functools.partial(P_M, M=M)
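        # Added note (illustrative): gamma rises from 0 at k=0 to 1 at k=M-1, so
        # P_M(0, M) returns P0, P_M(M-1, M) returns P1, and np.mod(k, M) makes the
        # returned transition matrix M-periodic in k, e.g. P_M(M, M) == P0 again.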
@pytest.fixture
def states_random(self, S, seed):
''' States classification
Args:
S: int
dimension of the state space
seed: int
seed
'''
# set seed
random.seed(seed)
states = np.empty(S, dtype='str')
# sorted list of two elements chosen from the set of integers
# between 0 and S-1 without replacement
i, j = sorted(random.sample(range(1, S), 2))
states[:i] = 'A'
states[i:j] = 'B'
states[j:] = 'C'
return states
@pytest.fixture
def P_small_network(self, shared_datadir, M):
''' Periodic transition matrix of the small network example
'''
small_network_construction = np.load(
shared_datadir / 'small_network_construction.npz',
allow_pickle=True,
)
T = small_network_construction['T']
L = small_network_construction['L']
def P_p(k, M):
return T + np.cos(k * 2. * np.pi / M) * L
return functools.partial(P_p, M=M)
@pytest.fixture
def states_small_network(self, shared_datadir):
''' States classification of the small network example
'''
small_network_construction = np.load(
shared_datadir / 'small_network_construction.npz',
allow_pickle=True,
)
states = small_network_construction['states'].item()
return states
@pytest.fixture
def tpt_periodic(self, M, states_random, P_random, states_small_network,
P_small_network, small_network):
''' initialize the tpt object
'''
if small_network:
states = states_small_network
P = P_small_network
ind_A = np.array([key for key in states if states[key] == 'A'])
ind_B = np.array([key for key in states if states[key] == 'B'])
ind_C = np.array([key for key in states if states[key] == 'C'])
else:
states = states_random
P = P_random
ind_A = np.where(states == 'A')[0]
ind_B = np.where(states == 'B')[0]
ind_C = np.where(states == 'C')[0]
tpt_periodic = periodic.tpt(P, M, ind_A, ind_B, ind_C)
return tpt_periodic
def test_transition_matrix(self, tpt_periodic):
S = tpt_periodic.S
M = tpt_periodic.M
P = tpt_periodic.P
for m in range(M):
assert P(m).shape == (S, S)
assert np.isclose(P(m), P(M + m)).all()
assert np.isnan(P(m)).any() == False
assert is_stochastic_matrix(P(m))
assert is_irreducible_matrix(P(m))
def test_stationary_density(self, tpt_periodic):
S = tpt_periodic.S
M = tpt_periodic.M
stationary_density = tpt_periodic.stationary_density()
assert stationary_density.shape == (M, S)
assert np.isnan(stationary_density).any() == False
assert np.greater_equal(stationary_density, 0).all()
assert np.less_equal(stationary_density, 1).all()
def test_backward_transition_matrix(self, tpt_periodic):
S = tpt_periodic.S
M = tpt_periodic.M
stationary_density = tpt_periodic.stationary_density()
P = tpt_periodic.P
P_back = tpt_periodic.backward_transitions()
for m in range(M):
assert P_back(m).shape == (S, S)
assert np.isnan(P_back(m)).any() == False
assert is_stochastic_matrix(P_back(m))
for i in np.arange(S):
for j in np.arange(S):
assert np.isclose(
stationary_density[m, i] * P_back(m)[i, j],
stationary_density[m-1, j] * P(m-1)[j, i],
)
def test_committors(self, tpt_periodic):
S = tpt_periodic.S
M = tpt_periodic.M
q_f = tpt_periodic.forward_committor()
q_b = tpt_periodic.backward_committor()
assert q_f.shape == (M, S)
assert np.isnan(q_f).any() == False
assert | np.greater_equal(q_f, 0) | numpy.greater_equal |
import numpy as np
from itertools import count
import matplotlib
from matplotlib import pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from easy21 import Easy21
EPSILON = 0.05
GAMMA = 1
def feature_id(s, a):
sf = []
for i, (l, r) in enumerate(zip(range(1, 8, 3), range(4, 11, 3))):
if s[1] >= l and s[1] <= r:
sf.append(i)
break
for i, (l, r) in enumerate(zip(range(1, 17, 3), range(6, 22, 3))):
if s[0] >= l and s[0] <= r:
sf.append(i)
break
for i, j in enumerate(range(2)):
if a == j:
sf.append(i)
break
return tuple(sf)
def feature(s, a):
sf = feature_id(s, a)
m = np.zeros((3, 6, 2))
m[tuple(sf)] = 1
return m
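# Added illustration (hypothetical state, not in the original): `feature` one-hot
# encodes (dealer bin, player-sum bin, action) into a 3x6x2 array, e.g.
#   s = (10, 5)          # (player sum, dealer card)
#   phi = feature(s, 1)  # phi.sum() == 1, active index == feature_id(s, 1)
# so Qvalue(s, a, theta) reduces to the single theta entry selected by that index.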
def Qvalue(s, a, theta):
return np.sum(feature(s, a) * theta)
def choose_action(s, theta):
# epsilon greedy exploration
if np.random.random_sample() < EPSILON:
if np.random.random_sample() < 0.5:
return 1
else:
return 0
lst = list(map(lambda a: Qvalue(s, a, theta), [0, 1]))
return np.argmax(lst)
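# Added note (illustrative): this is plain epsilon-greedy action selection -- a
# random hit/stick with probability EPSILON (0.05), otherwise the greedy action.
# When it does not explore and theta is all zeros, both Q-values tie at 0 and
# np.argmax returns action 0, e.g. choose_action((10, 5), np.zeros((3, 6, 2))).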
def TD_learning(env):
LAMBDAS = list( | np.arange(0, 1.1, 0.1) | numpy.arange |
# ----------------------------------------------------------------------------
# Copyright (c) 2017 Massachusetts Institute of Technology (MIT)
# All rights reserved.
#
# Distributed under the terms of the BSD 3-clause license.
#
# The full license is in the LICENSE file, distributed with this software.
# ----------------------------------------------------------------------------
"""Module defining a Digital RF Source block."""
from __future__ import absolute_import, division, print_function
import os
import sys
import traceback
import warnings
from collections import defaultdict
from itertools import chain, tee
import numpy as np
import pmt
import six
from gnuradio import gr
from six.moves import zip
from digital_rf import DigitalMetadataWriter, DigitalRFWriter, _py_rf_write_hdf5, util
def parse_time_pmt(val, samples_per_second):
"""Get (sec, frac, idx) from an rx_time pmt value."""
tsec = np.uint64(pmt.to_uint64(pmt.tuple_ref(val, 0)))
tfrac = pmt.to_double(pmt.tuple_ref(val, 1))
# calculate sample index of time and floor to uint64
tidx = np.uint64(tsec * samples_per_second + tfrac * samples_per_second)
return int(tsec), tfrac, int(tidx)
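# Worked example (added; the numbers are hypothetical): with samples_per_second
# equal to 1e6, an 'rx_time' value of (1500000000, 0.25) maps to
#   tidx = int(1500000000 * 1e6 + 0.25 * 1e6) = 1500000000250000
# i.e. the absolute sample index (samples since the Unix epoch) used for writing.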
def translate_rx_freq(tag):
"""Translate 'rx_freq' tag to 'center_frequencies' metadata sample."""
offset = tag.offset
key = "center_frequencies"
val = np.array(pmt.to_python(tag.value), ndmin=1)
yield offset, key, val
def translate_metadata(tag):
"""Translate 'metadata' dictionary tag to metadata samples."""
offset = tag.offset
md = pmt.to_python(tag.value)
try:
for key, val in md.items():
yield offset, key, val
except AttributeError:
wrnstr = "Received 'metadata' stream tag that isn't a dictionary. Ignoring."
warnings.warn(wrnstr)
def collect_tags_in_dict(tags, translator, tag_dict=None):
"""Add the stream tags to `tag_dict` by their offset."""
if tag_dict is None:
tag_dict = {}
for tag in tags:
for offset, key, val in translator(tag):
# add tag as its own dictionary to tag_dict[offset]
tag_dict.setdefault(offset, {}).update(((key, val),))
def recursive_dict_update(d, u):
"""Update d with values from u, recursing into sub-dictionaries."""
for k, v in u.items():
if isinstance(v, dict):
try:
# copy because we don't want to modify the sub-dictionary
# just use its values to create an updated sub-dictionary
subdict = d[k].copy()
except KeyError:
subdict = {}
d[k] = recursive_dict_update(subdict, v)
else:
d[k] = v
return d
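# Brief usage sketch (added; values are hypothetical):
#   d = {'receiver': {'gain': 10}, 'note': 'a'}
#   u = {'receiver': {'freq': 1e6}, 'note': 'b'}
#   recursive_dict_update(d, u)
#   # -> {'receiver': {'gain': 10, 'freq': 1e6}, 'note': 'b'}
# Sub-dictionaries are merged key-by-key instead of being replaced wholesale.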
def pairwise(iterable):
"""Return iterable elements in pairs, e.g. range(3) -> (0, 1), (1, 2)."""
a, b = tee(iterable)
next(b, None)
return zip(a, b)
class digital_rf_channel_sink(gr.sync_block):
"""Sink block for writing a channel of Digital RF data."""
def __init__(
self,
channel_dir,
dtype,
subdir_cadence_secs,
file_cadence_millisecs,
sample_rate_numerator,
sample_rate_denominator,
start=None,
ignore_tags=False,
is_complex=True,
num_subchannels=1,
uuid_str=None,
center_frequencies=None,
metadata=None,
is_continuous=True,
compression_level=0,
checksum=False,
marching_periods=True,
stop_on_skipped=False,
stop_on_time_tag=False,
debug=False,
min_chunksize=None,
):
"""Write a channel of data in Digital RF format.
In addition to storing the input samples in Digital RF format, this
block also populates the channel's accompanying Digital Metadata
at the sample indices when the metadata changes or a data skip occurs.
See the Notes section for details on what metadata is stored.
Parameters
----------
channel_dir : string
The directory where this channel is to be written. It will be
created if it does not exist. The basename (last component) of the
path is considered the channel's name for reading purposes.
dtype : np.dtype | object to be cast by np.dtype()
Object that gives the numpy dtype of the data to be written. This
value is passed into ``np.dtype`` to get the actual dtype
(e.g. ``np.dtype('>i4')``). Scalar types, complex types, and
structured complex types with 'r' and 'i' fields of scalar types
are valid.
subdir_cadence_secs : int
The number of seconds of data to store in one subdirectory. The
timestamp of any subdirectory will be an integer multiple of this
value.
file_cadence_millisecs : int
The number of milliseconds of data to store in each file. Note that
an integer number of files must exactly span a subdirectory,
implying::
(subdir_cadence_secs*1000 % file_cadence_millisecs) == 0
sample_rate_numerator : int
Numerator of sample rate in Hz.
sample_rate_denominator : int
Denominator of sample rate in Hz.
Other Parameters
----------------
start : None | int | float | string, optional
A value giving the time/index of the channel's first sample. When
`ignore_tags` is False, 'rx_time' tags will be used to identify
data gaps and skip the sample index forward appropriately (tags
that refer to an earlier time will be ignored).
If None or '' and `ignore_tags` is False, a default value of 0
is used (a ValueError is raised if `ignore_tags` is True).
If an integer, it is interpreted as a sample index given in the
number of samples since the epoch (time_since_epoch*sample_rate).
If a float, it is interpreted as a UTC timestamp (seconds since
epoch).
If a string, three forms are permitted:
1) a string which can be evaluated to an integer/float and
interpreted as above,
2) a time in ISO8601 format, e.g. '2016-01-01T16:24:00Z'
3) 'now' ('nowish'), indicating the current time (rounded up)
ignore_tags : bool, optional
If True, do not use 'rx_time' tags to set the sample index and do
not write other tags as Digital Metadata.
is_complex : bool, optional
This parameter is only used when `dtype` is not complex.
If True (the default), interpret supplied data as interleaved
complex I/Q samples. If False, each sample has a single value.
num_subchannels : int, optional
Number of subchannels to write simultaneously. Default is 1.
uuid_str : None | string, optional
UUID string that will act as a unique identifier for the data and
can be used to tie the data files to metadata. If None, a random
UUID will be generated.
center_frequencies : None | array_like of floats, optional
List of subchannel center frequencies to include in initial
metadata. If None, ``[0.0]*num_subchannels`` will be used.
Subsequent center frequency metadata samples can be written using
'rx_freq' stream tags.
metadata : dict, optional
Dictionary of additional metadata to include in initial Digital
Metadata sample. Subsequent metadata samples can be written
using 'metadata' stream tags, but all keys intended to be included
should be set here first even if their values are empty.
is_continuous : bool, optional
If True, data will be written in continuous blocks. If False data
will be written with gapped blocks. Fastest write/read speed is
achieved with `is_continuous` True, `checksum` False, and
`compression_level` 0 (all defaults).
compression_level : int, optional
0 for no compression (default), 1-9 for varying levels of gzip
compression (1 == least compression, least CPU; 9 == most
compression, most CPU).
checksum : bool, optional
If True, use HDF5 checksum capability. If False (default), no
checksum.
marching_periods : bool, optional
If True, write a period to stdout for every subdirectory when
writing.
stop_on_skipped : bool, optional
If True, stop writing when a sample would be skipped (such as from
a dropped packet).
stop_on_time_tag : bool, optional
If True, stop writing when any but an initial 'rx_time' tag is received.
debug : bool, optional
If True, print debugging information.
min_chunksize : None | int, optional
Minimum number of samples to consume at once. This value can be
used to adjust the sink's performance to reduce processing time.
If None, a sensible default will be used.
Notes
-----
By convention, this block sets the following Digital Metadata fields:
uuid_str : string
Value provided by the `uuid_str` argument.
sample_rate_numerator : int
Value provided by the `sample_rate_numerator` argument.
sample_rate_denominator : int
Value provided by the `sample_rate_denominator` argument.
center_frequencies : list of floats with length `num_subchannels`
Subchannel center frequencies as specified by
`center_frequencies` argument and 'rx_freq' stream tags.
Additional metadata fields can be set using the `metadata` argument and
stream tags. Nested dictionaries are permitted and are helpful for
grouping properties. For example, receiver-specific metadata is
typically specified with a sub-dictionary using the 'receiver' field.
This block acts on the following stream tags when `ignore_tags` is
False:
rx_time : (int secs, float frac) tuple
Used to set the sample index from the given time since epoch.
rx_freq : float
Used to set the 'center_frequencies' value in the channel's
Digital Metadata as described above.
metadata : dict
Used to populate additional (key, value) pairs in the channel's
Digital Metadata. Any keys passed in 'metadata' tags should be
included in the `metadata` argument at initialization to ensure
that they always exist in the Digital Metadata.
"""
dtype = np.dtype(dtype)
# create structured dtype for interleaved samples if necessary
if is_complex and (
not np.issubdtype(dtype, np.complexfloating) and not dtype.names
):
realdtype = dtype
dtype = np.dtype([("r", realdtype), ("i", realdtype)])
if num_subchannels == 1:
in_sig = [dtype]
else:
in_sig = [(dtype, num_subchannels)]
gr.sync_block.__init__(
self, name="digital_rf_channel_sink", in_sig=in_sig, out_sig=None
)
self._channel_dir = channel_dir
self._channel_name = os.path.basename(channel_dir)
self._dtype = dtype
self._subdir_cadence_secs = subdir_cadence_secs
self._file_cadence_millisecs = file_cadence_millisecs
self._sample_rate_numerator = sample_rate_numerator
self._sample_rate_denominator = sample_rate_denominator
self._uuid_str = uuid_str
self._ignore_tags = ignore_tags
self._is_complex = is_complex
self._num_subchannels = num_subchannels
self._is_continuous = is_continuous
self._compression_level = compression_level
self._checksum = checksum
self._marching_periods = marching_periods
self._stop_on_skipped = stop_on_skipped
self._stop_on_time_tag = stop_on_time_tag
self._debug = debug
self._work_done = False
self._samples_per_second = np.longdouble(
np.uint64(sample_rate_numerator)
) / np.longdouble(np.uint64(sample_rate_denominator))
if min_chunksize is None:
self._min_chunksize = max(int(self._samples_per_second // 1000), 1)
else:
self._min_chunksize = min_chunksize
# reduce CPU usage by setting a minimum number of samples to handle
# at once
# (really want to set_min_noutput_items, but no way to do that from
# Python)
try:
self.set_output_multiple(self._min_chunksize)
except RuntimeError:
traceback.print_exc()
errstr = "Failed to set sink block min_chunksize to {min_chunksize}."
if min_chunksize is None:
errstr += (
" This value was calculated automatically based on the sample rate."
" You may have to specify min_chunksize manually."
)
raise ValueError(errstr.format(min_chunksize=self._min_chunksize))
# will be None if start is None or ''
self._start_sample = util.parse_identifier_to_sample(
start, self._samples_per_second, None
)
if self._start_sample is None:
if self._ignore_tags:
raise ValueError("Must specify start if ignore_tags is True.")
# data without a time tag will be written starting at global index
# of 0, i.e. the Unix epoch
# we don't want to guess the start time because the user would
# know better and it could obscure bugs by setting approximately
# the correct time (samples in 1970 are immediately obvious)
self._start_sample = 0
self._next_rel_sample = 0
if self._debug:
tidx = self._start_sample
timedelta = util.samples_to_timedelta(tidx, self._samples_per_second)
tsec = int(timedelta.total_seconds() // 1)
tfrac = timedelta.microseconds / 1e6
tagstr = ("|{0}|start @ sample 0: {1}+{2} ({3})\n").format(
self._channel_name, tsec, tfrac, tidx
)
sys.stdout.write(tagstr)
sys.stdout.flush()
# stream tags to read (in addition to rx_time, handled specially)
self._stream_tag_translators = {
# disable rx_freq until we figure out what to do with polyphase
# also, USRP source in gr < 3.7.12 has bad rx_freq tags
# pmt.intern('rx_freq'): translate_rx_freq,
pmt.intern("metadata"): translate_metadata
}
# create metadata dictionary that will be updated and written whenever
# new metadata is received in stream tags
if metadata is None:
metadata = {}
self._metadata = metadata.copy()
if not center_frequencies:
center_frequencies = np.array([0.0] * self._num_subchannels)
else:
center_frequencies = np.ascontiguousarray(center_frequencies)
self._metadata.update(
# standard metadata by convention
uuid_str="",
sample_rate_numerator=self._sample_rate_numerator,
sample_rate_denominator=self._sample_rate_denominator,
center_frequencies=center_frequencies,
)
# create directories for RF data channel and metadata
self._metadata_dir = os.path.join(self._channel_dir, "metadata")
if not os.path.exists(self._metadata_dir):
os.makedirs(self._metadata_dir)
# sets self._Writer, self._DMDWriter, and adds to self._metadata
self._create_writer()
# dict of metadata samples to be written, add for first sample
# keys: absolute sample index for metadata
# values: metadata dictionary to update self._metadata and then write
self._md_queue = defaultdict(dict)
self._md_queue[self._start_sample] = {}
def _create_writer(self):
# Digital RF writer
self._Writer = DigitalRFWriter(
self._channel_dir,
self._dtype,
self._subdir_cadence_secs,
self._file_cadence_millisecs,
self._start_sample,
self._sample_rate_numerator,
self._sample_rate_denominator,
uuid_str=self._uuid_str,
compression_level=self._compression_level,
checksum=self._checksum,
is_complex=self._is_complex,
num_subchannels=self._num_subchannels,
is_continuous=self._is_continuous,
marching_periods=self._marching_periods,
)
# update UUID in metadata after parsing by DigitalRFWriter
self._metadata.update(uuid_str=self._Writer.uuid)
# Digital Metadata writer
self._DMDWriter = DigitalMetadataWriter(
metadata_dir=self._metadata_dir,
subdir_cadence_secs=self._subdir_cadence_secs,
file_cadence_secs=1,
sample_rate_numerator=self._sample_rate_numerator,
sample_rate_denominator=self._sample_rate_denominator,
file_name="metadata",
)
def _read_tags(self, nsamples):
"""Read stream tags and set data blocks and metadata to write.
Metadata from tags is added to the queue at ``self._md_queue``.
"""
nread = self.nitems_read(0)
md_queue = self._md_queue
# continue writing at next continuous sample with start of block
# unless overridden by a time tag
data_blk_idxs = [0]
data_rel_samples = [self._next_rel_sample]
# read time tags
# get_tags_in_window convenience function is broken, so use get_tags_in_range
time_tags = self.get_tags_in_range(
0, nread, nread + nsamples, pmt.intern("rx_time")
)
if time_tags and self._stop_on_time_tag and self._next_rel_sample != 0:
self._work_done = True
# separate data into blocks to be written
for tag in time_tags:
offset = tag.offset
tsec, tfrac, tidx = parse_time_pmt(tag.value, self._samples_per_second)
# index into data block for this tag
bidx = offset - nread
# get sample index relative to start
sidx = tidx - self._start_sample
# add new data block if valid and it indicates a gap
prev_bidx = data_blk_idxs[-1]
prev_sidx = data_rel_samples[-1]
next_continuous_sample = prev_sidx + (bidx - prev_bidx)
if sidx < next_continuous_sample:
if self._debug:
errstr = (
"\n|{0}|rx_time tag @ sample {1}: {2}+{3} ({4})"
"\n INVALID: time cannot go backwards from index {5}."
" Skipping."
).format(
self._channel_name,
offset,
tsec,
tfrac,
tidx,
self._start_sample + next_continuous_sample,
)
sys.stdout.write(errstr)
sys.stdout.flush()
continue
elif sidx == next_continuous_sample:
# don't create a new block because it's continuous
if self._debug:
tagstr = ("\n|{0}|rx_time tag @ sample {1}: {2}+{3} ({4})").format(
self._channel_name, offset, tsec, tfrac, tidx
)
sys.stdout.write(tagstr)
sys.stdout.flush()
continue
else:
# add new block to write based on time tag
if self._debug:
tagstr = (
"\n|{0}|rx_time tag @ sample {1}: {2}+{3} ({4})"
"\n {5} dropped samples."
).format(
self._channel_name,
offset,
tsec,
tfrac,
tidx,
sidx - next_continuous_sample,
)
sys.stdout.write(tagstr)
sys.stdout.flush()
# set flag to stop work when stop_on_skipped is set
if self._stop_on_skipped and self._next_rel_sample != 0:
self._work_done = True
if bidx == 0:
# override assumed continuous write
# data_blk_idxs[0] is already 0
data_rel_samples[0] = sidx
else:
data_blk_idxs.append(bidx)
data_rel_samples.append(sidx)
# reset metadata queue with only valid values
for md_idx in list(md_queue.keys()):
md_sidx = md_idx - self._start_sample
if next_continuous_sample <= md_sidx and md_sidx < sidx:
del md_queue[md_idx]
# new metadata sample to help flag data skip
md_queue.setdefault(sidx + self._start_sample, {})
# read other tags by data block (so we know the sample index)
for (bidx, bend), sidx in zip(
pairwise(chain(data_blk_idxs, (nsamples,))), data_rel_samples
):
tags_by_offset = {}
# read tags, translate to metadata dict, add to tag dict
for tag_name, translator in self._stream_tag_translators.items():
tags = self.get_tags_in_range(0, nread + bidx, nread + bend, tag_name)
collect_tags_in_dict(tags, translator, tags_by_offset)
# add tags to metadata sample dictionary
for offset, tag_dict in tags_by_offset.items():
mbidx = offset - nread
# get the absolute sample index for the metadata sample
msidx = (sidx + (mbidx - bidx)) + self._start_sample
md_queue[msidx].update(tag_dict)
return data_blk_idxs, data_rel_samples
def work(self, input_items, output_items):
in_data = input_items[0]
nsamples = len(in_data)
if not self._ignore_tags:
# break data into blocks from time tags
# get metadata from other tags and add to self._md_queue
data_blk_idxs, data_rel_samples = self._read_tags(nsamples)
else:
# continue writing at next continuous sample with start of block
data_blk_idxs = [0]
data_rel_samples = [self._next_rel_sample]
# make index lists into uint64 arrays
data_rel_samples = np.array(data_rel_samples, dtype=np.uint64, ndmin=1)
data_blk_idxs = np.array(data_blk_idxs, dtype=np.uint64, ndmin=1)
# get any metadata samples to be written from queue
if self._md_queue:
md_samples, md_dict_updates = zip(
*sorted(self._md_queue.items(), key=lambda x: x[0])
)
md_samples = | np.array(md_samples, dtype=np.uint64, ndmin=1) | numpy.array |
import csv
import matplotlib.pyplot as plt
import numpy as np
from pylab import *
import math
#### Data Reading ####
with open("cookies_report.csv") as csv_file:
csv_reader = csv.reader(csv_file, delimiter=',')
#### Pie chart clicked vs not ####
cookiesClicked = 0
cookiesNotClicked = 0
totalDomains = -1
# REMEMBER! Column 6 is clicked, if we add more columns remember to change this value
for row in csv_reader:
totalDomains += 1
if row[6] == "True":
cookiesClicked += 1
elif row[6] == "False":
cookiesNotClicked += 1
print("cookiesClicked: ", cookiesClicked)
print("cookiesNotClicked: ", cookiesNotClicked)
print("totalDomains: ", totalDomains)
cookies = [cookiesClicked, cookiesNotClicked]
nombres = ["Accepted", "Not asked/Not accepted"]
plt.pie(cookies, labels=nombres, autopct="%0.1f %%")
plt.axis("equal")
plt.title("Pages able to accept cookies")
plt.show()
with open("cookies_report.csv") as csv_file:
csv_reader = csv.reader(csv_file, delimiter=',')
#### Pie chart same vs more cookies ####
moreCookies = 0
sameCookies = 0
# Column 2 is cookies_default, column 5 is cookies_accepted
for row in csv_reader:
if row[6] == "True":
            # compare cookie counts numerically (CSV fields are read as strings)
            if int(row[5]) <= int(row[2]):
                sameCookies += 1
            elif int(row[5]) > int(row[2]):
                moreCookies += 1
print("moreCookies: ", moreCookies)
print("sameCookies: ", sameCookies)
cookies = [moreCookies, sameCookies]
nombres = ["More cookies", "Same Cookies"]
plt.pie(cookies, labels=nombres, autopct="%0.1f %%")
plt.axis("equal")
plt.title("Domains with the same cookies after accept")
plt.show()
with open("cookies_report.csv") as csv_file:
csv_reader = csv.reader(csv_file, delimiter=',')
#### How many cookies by saying yes ####
accepting = 0
notInteract = 0
firstLine = 0
# Column 2 is cookies_default, column 5 is cookies_accepted
for row in csv_reader:
if firstLine == 0:
firstLine += 1
else:
accepting = accepting + int(row[5])
notInteract = notInteract + int(row[2])
print("Cookies accepting: ", accepting)
print("Cookies not interacting: ", notInteract)
objects = ('Accepting','Not interacting')
y_pos = np.arange(len(objects))
performance = [accepting,notInteract]
plt.bar(y_pos, performance, align='center', alpha=0.5)
plt.xticks(y_pos, objects)
plt.ylabel('Number of cookies')
plt.xlabel('Action performed')
plt.title('Total of cookies accepting or not interacting')
plt.show()
with open("cookies_report.csv") as csv_file:
csv_reader = csv.reader(csv_file, delimiter=',')
firstLine = 0
mydict = {}
allCountries = []
myDictPercentage = {}
#Colum 7 is location (countries) and column 6 is clicked
for row in csv_reader:
if firstLine == 0:
firstLine += 1
else:
newStr = row[7].replace('[','')
newStr2 = newStr.replace(']','')
arrayCountry = eval('[' + newStr2 + ']')
#print(arrayCountry)
for country in arrayCountry:
allCountries.append(country)
numCountry = allCountries.count(country)
#print("total numCountry: ", numCountry)
if row[6] == "False":
mydict[country] = mydict.get(country,0) + 1
else:
mydict[country] = mydict.get(country,0)
actualValDic = mydict.get(country)
#print("actualValDic: ", actualValDic)
percentFalse = (actualValDic/numCountry) * 100
#print("percentatge: ", percentFalse)
myDictPercentage.update({country:percentFalse})
print(mydict)
print(myDictPercentage)
plt.bar(range(len(myDictPercentage)), list(myDictPercentage.values()), align='center')
plt.xticks(range(len(myDictPercentage)), list(myDictPercentage.keys()))
plt.ylabel('Percentatge of NOT accepted')
plt.xlabel('Country')
plt.title('Country vs acceptance')
plt.show()
csv_header = ["country","perNotAccept"]
with open("countryCookiesNot.csv", 'w', newline='') as csv_file:
csv_writer = csv.writer(csv_file)
csv_writer.writerow(csv_header)
for key in myDictPercentage.keys():
csv_file.write("%s,%s\n"%(key,myDictPercentage[key]))
with open("cookies_report.csv") as csv_file:
csv_reader = csv.reader(csv_file, delimiter=',')
#### Pie chart same vs more cookies ####
moreCookies = 0
sameCookies = 0
lessCookies = 0
# Column 2 is cookies_default, column 4 is cookies_ninja
    next(csv_reader)  # skip the header row before comparing counts
    for row in csv_reader:
        # compare cookie counts numerically (CSV fields are read as strings)
        if int(row[4]) == int(row[2]):
            sameCookies += 1
        elif int(row[4]) > int(row[2]):
            moreCookies += 1
        elif int(row[4]) < int(row[2]):
            lessCookies += 1
print("moreCookies: ", moreCookies)
print("sameCookies: ", sameCookies)
print("lessCookies: ", lessCookies)
cookies = [moreCookies, sameCookies, lessCookies]
nombres = ["More cookies", "Same Cookies", "Less Cookies"]
plt.pie(cookies, labels=nombres, autopct="%0.1f %%")
plt.axis("equal")
plt.title("Domains with the same cookies case Ninja Cookie plugin")
plt.show()
with open("cookies_report.csv") as csv_file:
csv_reader = csv.reader(csv_file, delimiter=',')
    #### Total cookies with the Ninja Cookie plugin vs not interacting ####
ninja = 0
notInteract = 0
firstLine = 0
# Column 2 is cookies_default, column 4 is cookies_ninja
for row in csv_reader:
if firstLine == 0:
firstLine += 1
else:
ninja = ninja + int(row[4])
notInteract = notInteract + int(row[2])
print("Cookies ninja plugin: ", ninja)
print("Cookies not interacting: ", notInteract)
objects = ('Ninja Plugin','Not interacting')
y_pos = np.arange(len(objects))
performance = [ninja,notInteract]
plt.bar(y_pos, performance, align='center', alpha=0.5)
plt.xticks(y_pos, objects)
plt.ylabel('Number of cookies')
plt.xlabel('Action performed')
plt.title('Total of cookies with ninja plugin or not interacting')
plt.show()
#CDF accept cookies vs default
def cdf(data, data2):
data_size=len(data)
data_size2=len(data2)
# Set bins edges
data_set=sorted(set(data))
data_set2=sorted(set(data2))
bins=np.append(data_set, data_set[-1]+1)
bins2=np.append(data_set2, data_set2[-1]+1)
# Use the histogram function to bin the data
counts, bin_edges = np.histogram(data, bins=bins, density=False)
counts2, bin_edges2 = np.histogram(data2, bins=bins2, density=False)
counts=counts.astype(float)/data_size
counts2=counts2.astype(float)/data_size2
# Find the cdf
cdf = np.cumsum(counts)
cdf2 = np.cumsum(counts2)
# Plot the cdf
blueLine, = plt.plot(bin_edges[0:-1], cdf,linestyle='--', marker="o", color='b')
redLine, = plt.plot(bin_edges2[0:-1], cdf2,linestyle='--', marker="o", color='r')
plt.legend([blueLine,redLine],['cookies Accepted', 'cookies Default'])
plt.ylim((0,1))
plt.xlabel('Number of cookies')
plt.title("CDF cookies accepted vs default")
plt.grid(True)
plt.show()
print("resource_id,num_fonts")
data = []
data2 = []
with open('cookies_report.csv', newline='') as csvfile:
spamreader = csv.reader(csvfile, delimiter=',')
first = True
for row in spamreader:
if not first:
data.append(row[5]) #Cookies accepted column
data2.append(row[2]) #Default cookies column
first = False
data_array = np.asarray(data).astype(int)
data_array2 = np.asarray(data2).astype(int)
cdf(data_array, data_array2)
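# Side note (added for illustration): the histogram/cumsum construction above is
# one way to draw the empirical CDF; an equivalent sketch without explicit binning:
#   xs = np.sort(data_array)
#   ys = np.arange(1, len(xs) + 1) / len(xs)
#   plt.step(xs, ys)   # same curve as the 'cookies Accepted' line above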
#CDF ninja cookies vs default
def cdf(data, data2):
data_size=len(data)
data_size2=len(data2)
# Set bins edges
data_set=sorted(set(data))
data_set2=sorted(set(data2))
bins=np.append(data_set, data_set[-1]+1)
bins2=np.append(data_set2, data_set2[-1]+1)
# Use the histogram function to bin the data
counts, bin_edges = np.histogram(data, bins=bins, density=False)
counts2, bin_edges2 = np.histogram(data2, bins=bins2, density=False)
counts=counts.astype(float)/data_size
counts2=counts2.astype(float)/data_size2
# Find the cdf
cdf = np.cumsum(counts)
cdf2 = np.cumsum(counts2)
# Plot the cdf
blueLine, = plt.plot(bin_edges[0:-1], cdf,linestyle='--', marker="o", color='b')
redLine, = plt.plot(bin_edges2[0:-1], cdf2,linestyle='--', marker="o", color='r')
plt.legend([blueLine,redLine],['cookies Ninja', 'cookies Default'])
plt.ylim((0,1))
plt.xlabel('Number of cookies')
plt.title("CDF cookies Ninja Plugin vs Default")
plt.grid(True)
plt.show()
print("resource_id,num_fonts")
data = []
data2 = []
with open('cookies_report.csv', newline='') as csvfile:
spamreader = csv.reader(csvfile, delimiter=',')
first = True
for row in spamreader:
if not first:
data.append(row[4]) #Ninja cookies column
data2.append(row[2]) #Default cookies column
first = False
data_array = np.asarray(data).astype(int)
data_array2 = np.asarray(data2).astype(int)
cdf(data_array, data_array2)
#Plot known 10 domains vs number of cookies
with open("cookies_report.csv") as csv_file:
csv_reader = csv.reader(csv_file, delimiter=',')
    #### Number of default cookies for 10 well-known domains ####
firstLine = 0
dictDomainCookies = {}
# Column 2 is cookies_default, column 1 is domain names
for row in csv_reader:
if firstLine == 0:
firstLine += 1
else:
if row[1] == "ups.com":
dictDomainCookies["ups.com"] = int(row[2])
elif row[1] == "bing.com":
dictDomainCookies["bing.com"] = int(row[2])
elif row[1] == "oracle.com":
dictDomainCookies["oracle.com"] = int(row[2])
elif row[1] == "cnn.com":
dictDomainCookies["cnn.com"] = int(row[2])
elif row[1] == "orange.fr":
dictDomainCookies["orange.fr"] = int(row[2])
elif row[1] == "dell.com":
dictDomainCookies["dell.com"] = int(row[2])
elif row[1] == "booking.com":
dictDomainCookies["booking.com"] = int(row[2])
elif row[1] == "grammarly.com":
dictDomainCookies["grammarly.com"] = int(row[2])
elif row[1] == "google.com":
dictDomainCookies["google.com"] = int(row[2])
elif row[1] == "amazon.com":
dictDomainCookies["amazon.com"] = int(row[2])
print("Dictionari cookies domains: ", dictDomainCookies)
plt.bar(range(len(dictDomainCookies)),dictDomainCookies.values(), align='center')
plt.xticks(range(len(dictDomainCookies)), list(dictDomainCookies.keys()))
plt.ylabel('Number of cookies')
plt.xlabel('Domains names')
plt.title('Number cookies vs domains')
plt.show()
with open("cookies_report.csv") as csv_file:
csv_reader = csv.reader(csv_file, delimiter=',')
firstLine = 0
mydict = {}
myDictPercentage = {}
myDictPercentageThird = {}
mydictThird = {}
mydictTotal = {}
isEU = ['AT','BE','BG','HR','CY','CZ','DK','EE','FI','FR','DE','GR','HU','IE','IT','LV','LT','LU','MT','NL','PL','PT','RO','SK','SI','ES','SE']
#Colum 7 is location (countries) and column 6 is clicked and column 8 is first parties
for row in csv_reader:
if firstLine == 0:
firstLine += 1
else:
newStr = row[7].replace('[','')
newStr2 = newStr.replace(']','')
arrayCountry = eval('[' + newStr2 + ']')
newStrPart = row[8].replace('[','')
newStr2Part = newStrPart.replace(']','')
arrayPart = eval('[' + newStr2Part + ']')
#print(arrayCountry)
#print(row[8])
for i in range(len(arrayCountry)):
if row[6] == "False" and arrayPart[i] == True:
mydict[arrayCountry[i]] = mydict.get(arrayCountry[i],0) + 1
mydictTotal[arrayCountry[i]] = mydictTotal.get(arrayCountry[i],0) + 1
elif row[6] == "True" and arrayPart[i] == True:
mydict[arrayCountry[i]] = mydict.get(arrayCountry[i],0)
mydictTotal[arrayCountry[i]] = mydictTotal.get(arrayCountry[i],0) + 1
elif row[6] == "False" and arrayPart[i] == False:
mydictThird[arrayCountry[i]] = mydictThird.get(arrayCountry[i],0) + 1
mydictTotal[arrayCountry[i]] = mydictTotal.get(arrayCountry[i],0) + 1
elif row[6] == "True" and arrayPart[i] == False:
mydictThird[arrayCountry[i]] = mydictThird.get(arrayCountry[i],0)
mydictTotal[arrayCountry[i]] = mydictTotal.get(arrayCountry[i],0) + 1
print(mydict)
print(mydictTotal)
print(mydictThird)
for x in mydictTotal.keys():
myDictPercentage.update({x:0})
myDictPercentageThird.update({x:0})
print(myDictPercentage)
print(myDictPercentageThird)
for x, y in mydict.items():
yTotal = mydictTotal[x]
percentFalse = (y/yTotal) * 100
myDictPercentage.update({x:percentFalse})
print(myDictPercentage)
for x, y in mydictThird.items():
yTotal = mydictTotal[x]
percentFalse = (y/yTotal) * 100
myDictPercentageThird.update({x:percentFalse})
print(myDictPercentageThird)
X_axis = np.arange(len(mydictTotal))
plt.bar(X_axis - 0.2, myDictPercentage.values(), 0.4, label = 'First parties')
plt.bar(X_axis + 0.2, myDictPercentageThird.values(), 0.4, label = 'Third parties')
plt.xticks(X_axis, mydictTotal.keys())
plt.xlabel("Country")
plt.ylabel("Percentatge of NOT accepted")
plt.title("Country vs acceptance")
plt.legend()
plt.show()
csv_header = ["country","perNotAccept"]
with open("countryCookiesNotFirst.csv", 'w', newline='') as csv_file:
csv_writer = csv.writer(csv_file)
csv_writer.writerow(csv_header)
for key in myDictPercentage.keys():
csv_file.write("%s,%s\n"%(key,myDictPercentage[key]))
csv_header = ["country","perNotAccept"]
with open("countryCookiesNotThird.csv", 'w', newline='') as csv_file:
csv_writer = csv.writer(csv_file)
csv_writer.writerow(csv_header)
for key in myDictPercentageThird.keys():
csv_file.write("%s,%s\n"%(key,myDictPercentageThird[key]))
with open("cookies_report.csv") as csv_file:
csv_reader = csv.reader(csv_file, delimiter=',')
#### Pie chart clicked vs not ####
cookiesClicked = 0
cookiesNotClicked = 0
totalDomains = -1
# REMEMBER! Column 10 is accepted_pablo, if we add more columns remember to change this value
for row in csv_reader:
totalDomains += 1
if row[10] == "True":
cookiesClicked += 1
elif row[10] == "False":
cookiesNotClicked += 1
print("cookiesClicked: ", cookiesClicked)
print("cookiesNotClicked: ", cookiesNotClicked)
print("totalDomains: ", totalDomains)
cookies = [cookiesClicked, cookiesNotClicked]
nombres = ["Accepted", "Not asked/Not accepted"]
plt.pie(cookies, labels=nombres, autopct="%0.1f %%")
plt.axis("equal")
plt.title("Pages able to accept cookies Computer Vision")
plt.show()
with open("cookies_report.csv") as csv_file:
csv_reader = csv.reader(csv_file, delimiter=',')
#### Pie chart same vs more cookies ####
moreCookies = 0
sameCookies = 0
# Column 2 is cookies_default, column 9 is cookies_pablo
for row in csv_reader:
if row[10] == "True":
            # compare cookie counts numerically (CSV fields are read as strings)
            if int(row[9]) <= int(row[2]):
                sameCookies += 1
            elif int(row[9]) > int(row[2]):
                moreCookies += 1
print("moreCookies: ", moreCookies)
print("sameCookies: ", sameCookies)
cookies = [moreCookies, sameCookies]
nombres = ["More cookies", "Same Cookies"]
plt.pie(cookies, labels=nombres, autopct="%0.1f %%")
plt.axis("equal")
plt.title("Domains with the same cookies after accept Computer Vision")
plt.show()
#CDF Computer Vision accept cookies vs default
def cdf(data, data2):
data_size=len(data)
data_size2=len(data2)
# Set bins edges
data_set=sorted(set(data))
data_set2=sorted(set(data2))
bins=np.append(data_set, data_set[-1]+1)
bins2=np.append(data_set2, data_set2[-1]+1)
# Use the histogram function to bin the data
counts, bin_edges = np.histogram(data, bins=bins, density=False)
counts2, bin_edges2 = np.histogram(data2, bins=bins2, density=False)
counts=counts.astype(float)/data_size
counts2=counts2.astype(float)/data_size2
# Find the cdf
cdf = np.cumsum(counts)
cdf2 = np.cumsum(counts2)
# Plot the cdf
blueLine, = plt.plot(bin_edges[0:-1], cdf,linestyle='--', marker="o", color='b')
redLine, = plt.plot(bin_edges2[0:-1], cdf2,linestyle='--', marker="o", color='r')
plt.legend([blueLine,redLine],['cookies Computer Vision', 'cookies Default'])
plt.ylim((0,1))
plt.xlabel('Number of cookies')
plt.title("CDF cookies Computer Vision vs Default")
plt.grid(True)
plt.show()
print("resource_id,num_fonts")
data = []
data2 = []
with open('cookies_report.csv', newline='') as csvfile:
spamreader = csv.reader(csvfile, delimiter=',')
first = True
for row in spamreader:
if not first:
data.append(row[9]) #Cookies Pablo column
data2.append(row[2]) #Default cookies column
first = False
data_array = np.asarray(data).astype(int)
data_array2 = np.asarray(data2).astype(int)
cdf(data_array, data_array2)
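# An equivalent, more direct empirical CDF (no histogram needed): sort the
# samples and step from 1/n up to 1. Shown only as a sketch for comparison with
# the cdf() helper above.
def ecdf(samples):
    xs = np.sort(np.asarray(samples))
    ys = np.arange(1, len(xs) + 1) / len(xs)
    return xs, ys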
#CDF all
def cdf(data, data2, data3, data4):
data_size=len(data)
data_size2=len(data2)
data_size3=len(data3)
data_size4=len(data4)
# Set bins edges
data_set=sorted(set(data))
data_set2=sorted(set(data2))
data_set3=sorted(set(data3))
data_set4=sorted(set(data4))
bins=np.append(data_set, data_set[-1]+1)
bins2=np.append(data_set2, data_set2[-1]+1)
bins3=np.append(data_set3, data_set3[-1]+1)
bins4=np.append(data_set4, data_set4[-1]+1)
# Use the histogram function to bin the data
counts, bin_edges = np.histogram(data, bins=bins, density=False)
counts2, bin_edges2 = np.histogram(data2, bins=bins2, density=False)
counts3, bin_edges3 = np.histogram(data3, bins=bins3, density=False)
counts4, bin_edges4 = np.histogram(data4, bins=bins4, density=False)
counts=counts.astype(float)/data_size
counts2=counts2.astype(float)/data_size2
counts3=counts3.astype(float)/data_size3
counts4=counts4.astype(float)/data_size4
# Find the cdf
cdf = np.cumsum(counts)
cdf2 = np.cumsum(counts2)
cdf3 = np.cumsum(counts3)
cdf4 = np.cumsum(counts4)
# Plot the cdf
blueLine, = plt.plot(bin_edges[0:-1], cdf,linestyle='--', marker="o", color='b')
redLine, = plt.plot(bin_edges2[0:-1], cdf2,linestyle='--', marker="o", color='r')
greenLine, = plt.plot(bin_edges3[0:-1], cdf3,linestyle='--', marker="o", color='g')
orangeLine, = plt.plot(bin_edges4[0:-1], cdf4,linestyle='--', marker="o", color='y')
plt.legend([blueLine,redLine,greenLine,orangeLine],['cookies Ninja', 'cookies Default','cookies Computer Vision','Cookies Selenium'])
plt.ylim((0,1))
plt.xlabel('Number of cookies')
plt.title("CDF cookies of all different methods")
plt.grid(True)
plt.show()
print("resource_id,num_fonts")
data = []
data2 = []
data3 = []
data4 = []
with open('cookies_report.csv', newline='') as csvfile:
spamreader = csv.reader(csvfile, delimiter=',')
first = True
for row in spamreader:
if not first:
data.append(row[4]) #Ninja cookies column
data2.append(row[2]) #Default cookies column
data3.append(row[9]) #Cookies Pablo column
data4.append(row[5]) #Cookies accepted column
first = False
data_array = np.asarray(data).astype(int)
data_array2 = np.asarray(data2).astype(int)
data_array3 = np.asarray(data3)
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
# pylint: disable=no-member
"""
TridentNet Training Script.
This script is a simplified version of the training script in detectron2/tools.
"""
import math
from typing import List, Tuple, Union, Set, DefaultDict
from typing_extensions import Literal, TypedDict
import pickle
from pathlib import Path
from collections import defaultdict
import json
import os
import sys
from tqdm import tqdm
import torch
from torch.utils.data import Dataset, DataLoader
import torch.multiprocessing as mp
from torch.multiprocessing import set_start_method
import argtyped
import cv2
import lmdb
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.patches as patches
import matplotlib.colors as mcolors
from PIL import Image
from apex import amp
from torch.nn import functional as F
sys.path.append("detectron2")
from detectron2.checkpoint import DetectionCheckpointer
from detectron2.config import get_cfg
from detectron2.engine import DefaultTrainer, default_setup
from detectron2.structures import Instances
from utils.extract_utils import get_image_blob
from models import add_config
from models.bua.box_regression import BUABoxes
from models.bua.layers.nms import batched_nms
# Only valid with fp32 inputs - give AMP the hint
Mode = Literal["bbox_feats", "roi_feats", "bboxes"]
class MatterportFeature(TypedDict):
image_feat: List[Image.Image]
view_ids: List[int]
reverie_ids: List[int]
image_h: int
image_w: int
fov: float
boxes: List[List[int]]
cls_probs: List[np.ndarray]
class Arguments(argtyped.Arguments):
config_file: Path = Path("configs/bua-caffe/extract-bua-caffe-r101.yaml")
part_ids: List[int] = [0]
num_parts: int = 1
extract_mode: Mode = "roi_feats"
mode: str = 'caffe'
overwrite: bool = False
min_max_boxes: str = "min_max_default"
output: Path = Path("output")
bbox_dir: Path = Path("bbox_dir")
connectivity: Path = Path("connectivity")
visualize: bool = False
preload: bool = False
nms_thresh: float = 0.3
conf_thresh: float = 0.4
min_local_boxes: int = 5
max_local_boxes: int = 20
max_total_boxes: int = 100
num_sweeps: int = 3
views_per_sweep: int = 12
# number of total views from one pano
viewpoint_size: int = 36
heading_inc: int = 30
    # margin of error for deciding if an object is closer to the centre of another view
    angle_margin: int = 5
    # elevation on first sweep
    elevation_start: int = -30
    # how much elevation increases each sweep
    elevation_inc: int = 30
# DataLoader
num_workers: int = 0
batch_size: int = 1
# Matterport LMDB containing view images (see pointer-reverie/scripts/matterport_images.py)
matterport: Path = Path("matterport.lmdb")
def set_min_max_boxes(min_max_boxes):
if min_max_boxes == "min_max_default":
return []
try:
min_boxes = int(min_max_boxes.split(",")[0])
max_boxes = int(min_max_boxes.split(",")[1])
    except (ValueError, IndexError):
        print("Illegal min-max boxes setting, using config default.")
        return []
cmd = [
"MODEL.BUA.EXTRACTOR.MIN_BOXES",
min_boxes,
"MODEL.BUA.EXTRACTOR.MAX_BOXES",
max_boxes,
]
return cmd
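# Example: set_min_max_boxes("10,100") returns
# ["MODEL.BUA.EXTRACTOR.MIN_BOXES", 10, "MODEL.BUA.EXTRACTOR.MAX_BOXES", 100],
# which setup() below merges into the detectron2 config via merge_from_list.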
def switch_extract_mode(mode: Mode):
if mode == "roi_feats":
switch_cmd = ["MODEL.BUA.EXTRACTOR.MODE", 1]
elif mode == "bboxes":
switch_cmd = ["MODEL.BUA.EXTRACTOR.MODE", 2]
elif mode == "bbox_feats":
switch_cmd = [
"MODEL.BUA.EXTRACTOR.MODE",
3,
"MODEL.PROPOSAL_GENERATOR.NAME",
"PrecomputedProposals",
]
    else:
        print("Wrong extract mode!")
        sys.exit(1)
return switch_cmd
def setup(args: Arguments):
"""
Create configs and perform basic setups.
"""
cfg = get_cfg()
add_config(args, cfg)
cfg.merge_from_file(str(args.config_file))
# cfg.merge_from_list(args.opts)
cfg.merge_from_list(switch_extract_mode(args.extract_mode))
cfg.merge_from_list(set_min_max_boxes(args.min_max_boxes))
cfg.freeze()
default_setup(cfg, args)
return cfg
def get_detections_from_im(
    im: np.ndarray, model, cfg, fov: float, boxes=None
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
im = np.array(im)
dataset_dict = get_image_blob(im, cfg.MODEL.PIXEL_MEAN)
# extract bbox only
if boxes is not None:
boxes = boxes * dataset_dict["im_scale"]
proposals = Instances(dataset_dict["image"].shape[-2:])
proposals.proposal_boxes = BUABoxes(boxes)
dataset_dict["proposals"] = proposals
with torch.no_grad():
if cfg.MODEL.BUA.ATTRIBUTE_ON:
boxes, cls_scores, features_pooled, attr_scores = model(
[dataset_dict]
)
else:
boxes, cls_scores, features_pooled = model([dataset_dict])
return boxes[0].tensor, cls_scores[0], features_pooled[0]
def filter_bbox(boxes, prob, view_id: int, width: int, height: int, fov: float, args: Arguments):
num_boxes, num_cls = boxes.shape
# Keep only the best detections
max_conf = torch.zeros((num_boxes)).to(prob.device)
for cls_ind in range(1, num_cls):
scores = prob[:, cls_ind]
keep = batched_nms(boxes, scores, torch.arange(num_boxes), args.nms_thresh)
mask = scores[keep] > max_conf[keep]
        # index with keep[mask] directly; chained indexing would assign into a copy
        max_conf[keep[mask]] = scores[keep[mask]]
keep_boxes = max_conf >= args.conf_thresh
    # keep_boxes is a boolean mask, so count the True entries rather than its length
    if int(keep_boxes.sum()) < args.min_local_boxes:
top_boxes = torch.topk(max_conf, args.min_local_boxes).indices
keep_boxes = torch.zeros(num_boxes).bool().to(prob.device)
keep_boxes[top_boxes] = True
    elif int(keep_boxes.sum()) > args.max_local_boxes:
top_boxes = torch.topk(max_conf, args.max_local_boxes).indices
keep_boxes = torch.zeros(num_boxes).bool().to(prob.device)
keep_boxes[top_boxes] = True
# Discard any box that would be better centered in another image
# threshold for pixel distance from center of image
focal_length = (height/2)/math.tan(math.radians(fov/2)) # focal length
hor_thresh = focal_length * math.tan(math.radians(args.heading_inc/2 + args.angle_margin))
vert_thresh = focal_length * math.tan(math.radians(args.elevation_inc/2 + args.angle_margin))
center_x = 0.5*(boxes[:,0]+boxes[:,2])
center_y = 0.5*(boxes[:,1]+boxes[:,3])
reject = (center_x < width/2-hor_thresh) | (center_x > width/2+hor_thresh)
if view_id >= args.views_per_sweep: # not lowest sweep
reject |= (center_y > height/2+vert_thresh)
if view_id < args.viewpoint_size - args.views_per_sweep: # not highest sweep
reject |= (center_y < height/2-vert_thresh)
keep_boxes = keep_boxes & ~reject
return keep_boxes
def get_ft_head_elev(bbox: torch.Tensor, view_id: torch.Tensor, width: int, height: int, fov: float):
# Calculate the heading and elevation of the center of each observation
center_x = 0.5 * (bbox[:, 0] + bbox[:, 2])
center_y = 0.5 * (bbox[:, 1] + bbox[:, 3])
focal_length = torch.tensor((height / 2) / math.tan(math.radians(fov / 2))).to(bbox.device)
heading = torch.deg2rad((view_id % 12) * 30)
ft_heading = heading + torch.atan2(center_x - width / 2, focal_length)
# normalize featureHeading
ft_heading = ft_heading % (math.pi * 2)
assert (0 <= ft_heading).all() and (ft_heading <= math.pi * 2).all()
    # wrap headings above pi into (-pi, pi]
more_than_pi = ft_heading > math.pi
ft_heading[more_than_pi] = (ft_heading - math.pi * 2)[more_than_pi]
assert (-math.pi <= ft_heading).all() and (ft_heading <= math.pi).all()
elevation = torch.deg2rad((view_id // 12) * 30)
ft_elevation = elevation + torch.atan2(
-center_y + height / 2, focal_length
)
return ft_heading, ft_elevation
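# A minimal usage sketch for get_ft_head_elev (illustrative values only, not part
# of the extraction pipeline; call it manually if useful). A box centred in its
# view gives a heading equal to the view's own heading and an elevation of
# (view_id // 12) * 30 degrees, both returned in radians.
def _example_ft_head_elev(width: int = 640, height: int = 480, fov: float = 60.0):
    bbox = torch.tensor([[width / 2 - 10.0, height / 2 - 10.0,
                          width / 2 + 10.0, height / 2 + 10.0]])
    view_id = torch.tensor([12.0])  # first view of the second sweep
    return get_ft_head_elev(bbox, view_id, width, height, fov)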
def filter_panorama(boxes: torch.Tensor, probs: torch.Tensor, features: torch.Tensor, view_ids: torch.Tensor, max_boxes: int, width: int, height: int, fov: float) -> torch.Tensor:
ft_heading, ft_elevation = get_ft_head_elev(boxes, view_ids, width, height, fov)
# Remove the most redundant features (that have similar heading, elevation and
# are close together to an existing feature in cosine distance)
ext_features = features.unsqueeze(2)
feat_dist = F.cosine_similarity(ext_features, ext_features.transpose(0, 2), dim=1)
indices = torch.triu_indices(*feat_dist.shape, 1)
heading_diff_tri = F.pdist(ft_heading.unsqueeze(1), 2)
heading_diff_tri = torch.minimum(heading_diff_tri, 2*math.pi - heading_diff_tri)
heading_diff = torch.zeros_like(feat_dist)
heading_diff[indices[0], indices[1]] = heading_diff_tri
elevation_diff_tri = F.pdist(ft_elevation.unsqueeze(1), 2)
elevation_diff = torch.zeros_like(feat_dist)
elevation_diff[indices[0], indices[1]] = elevation_diff_tri
total_dist = feat_dist + heading_diff + elevation_diff # Could add weights
    # Consider each unordered pair once via the strict upper triangle
dist = total_dist[indices[0], indices[1]]
arg_ind = torch.argsort(dist)
# Remove indices of the most similar features (in appearance and orientation)
keep = set(range(feat_dist.shape[0]))
ix = 0
while len(keep) > max_boxes:
min_ind = arg_ind[ix]
i, j = indices[:, min_ind].tolist()
if i not in keep or j not in keep:
ix += 1
continue
if probs[i,1:].max() > probs[j,1:].max():
keep.remove(j)
else:
keep.remove(i)
ix += 1
return torch.Tensor(list(keep)).long().to(boxes.device)
def extract_feat(worker_id, viewpoint_lists, args: Arguments):
part_id = args.part_ids[worker_id]
viewpoint_list = viewpoint_lists[part_id]
num_viewpoints = len(viewpoint_list)
cfg = setup(args)
print("Number of viewpoints on split{}: {}.".format(part_id, num_viewpoints))
num_gpus = torch.cuda.device_count()
assert num_gpus != 0
device_id = part_id % num_gpus
torch.cuda.set_device(device_id)
if args.num_parts == 1:
lmdb_path = args.output
else:
lmdb_path = args.output.parent / f"{args.output.stem}-{part_id}.lmdb"
writer = LMDBWriter(str(lmdb_path), map_size=int(1e12), buffer_size=300, overwrite=args.overwrite)
model = DefaultTrainer.build_model(cfg)
DetectionCheckpointer(model, save_dir=cfg.OUTPUT_DIR).resume_or_load(
cfg.MODEL.WEIGHTS, resume=False
)
model.eval()
done = set(tuple(bkey.decode().split("_")) for bkey in writer._keys)
print('Done', len(done))
viewpoint_list = [v for v in viewpoint_list if v not in done]
dataset = MatterportDataset(viewpoint_list, args)
dataloader = DataLoader(
dataset,
num_workers=args.num_workers,
batch_size=10,
collate_fn=lambda x: x,
)
batch: List[Tuple[MatterportFeature, DefaultDict, str, str]]
for batch in tqdm(dataloader):
for feats, bboxes_per_view_id, scan, viewpoint in batch:
all_boxes = []
all_features = []
all_probs = []
all_view_ids = []
all_reverie_ids = []
for view_id, im in zip(feats['view_ids'], feats['image_feat']):
if args.extract_mode == "bbox_feats":
boxes = torch.Tensor([b["bbox"] for b in bboxes_per_view_id[view_id]])
all_reverie_ids += [b["obj_id"] for b in bboxes_per_view_id[view_id]]
else:
boxes = None
                # remaining arguments inferred from the signature of get_detections_from_im above
                detected_boxes, probs, features = get_detections_from_im(
                    np.array(im), model, cfg, feats["fov"], boxes
                )
"""Using the FSWT to process anistropic images.
In this demo, an anisotropic piecewise-constant image is transformed by the
standard DWT and the fully-separable DWT. The 'Haar' wavelet gives a sparse
representation for such piecewise constant signals (detail coefficients are
only non-zero near edges).
For such anisotropic signals, the number of non-zero coefficients will be lower
for the fully separable DWT than for the isotropic one.
This example is inspired by the following publication where it is proven that
the FSWT gives a sparser representation than the DWT for this class of
anisotropic images:
.. <NAME>, <NAME>, <NAME> and <NAME>.
Directionlets: Anisotropic Multidirectional Representation With
Separable Filtering. IEEE Transactions on Image Processing, Vol. 15,
No. 7, July 2006.
"""
import numpy as np
import pywt
from matplotlib import pyplot as plt
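# A sketch of the sparsity comparison described in the module docstring: count
# coefficients above a small threshold for the standard 2D DWT versus the fully
# separable transform. The helper and its threshold are illustrative and are not
# part of the original demo.
def count_large_coeffs(img, wavelet="haar", level=2, threshold=1e-6):
    dwt_arr, _ = pywt.coeffs_to_array(pywt.wavedec2(img, wavelet, level=level))
    fswt_arr = pywt.fswavedecn(img, wavelet, levels=level).coeffs
    return np.sum(np.abs(dwt_arr) > threshold), np.sum(np.abs(fswt_arr) > threshold)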
def mondrian(shape=(256, 256), nx=5, ny=8, seed=4):
""" Piecewise-constant image (reminiscent of Dutch painter <NAME>rian's
geometrical period).
"""
rstate = np.random.RandomState(seed)
min_dx = 0
while(min_dx < 3):
xp = np.sort(np.round(rstate.rand(nx-1)*shape[0]).astype(np.int64))
xp = np.concatenate(((0, ), xp, (shape[0], )))
min_dx = np.min(np.diff(xp))
min_dy = 0
while(min_dy < 3):
yp = np.sort(np.round(rstate.rand(ny-1)*shape[1]).astype(np.int64))
yp = np.concatenate(((0, ), yp, (shape[1], )))
min_dy = np.min(np.diff(yp))
    img = np.zeros(shape)
# -*- coding: utf-8 -*-
import re
import warnings
from datetime import timedelta
from itertools import product
import pytest
import numpy as np
import pandas as pd
from pandas import (CategoricalIndex, DataFrame, Index, MultiIndex,
compat, date_range, period_range)
from pandas.compat import PY3, long, lrange, lzip, range, u, PYPY
from pandas.errors import PerformanceWarning, UnsortedIndexError
from pandas.core.dtypes.dtypes import CategoricalDtype
from pandas.core.indexes.base import InvalidIndexError
from pandas.core.dtypes.cast import construct_1d_object_array_from_listlike
from pandas._libs.tslib import Timestamp
import pandas.util.testing as tm
from pandas.util.testing import assert_almost_equal, assert_copy
from .common import Base
class TestMultiIndex(Base):
_holder = MultiIndex
_compat_props = ['shape', 'ndim', 'size']
def setup_method(self, method):
major_axis = Index(['foo', 'bar', 'baz', 'qux'])
minor_axis = Index(['one', 'two'])
major_labels = np.array([0, 0, 1, 2, 3, 3])
minor_labels = np.array([0, 1, 0, 1, 0, 1])
self.index_names = ['first', 'second']
self.indices = dict(index=MultiIndex(levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels
], names=self.index_names,
verify_integrity=False))
self.setup_indices()
def create_index(self):
return self.index
def test_can_hold_identifiers(self):
idx = self.create_index()
key = idx[0]
assert idx._can_hold_identifiers_and_holds_name(key) is True
def test_boolean_context_compat2(self):
# boolean context compat
# GH7897
i1 = MultiIndex.from_tuples([('A', 1), ('A', 2)])
i2 = MultiIndex.from_tuples([('A', 1), ('A', 3)])
common = i1.intersection(i2)
def f():
if common:
pass
tm.assert_raises_regex(ValueError, 'The truth value of a', f)
def test_labels_dtypes(self):
# GH 8456
i = MultiIndex.from_tuples([('A', 1), ('A', 2)])
assert i.labels[0].dtype == 'int8'
assert i.labels[1].dtype == 'int8'
i = MultiIndex.from_product([['a'], range(40)])
assert i.labels[1].dtype == 'int8'
i = MultiIndex.from_product([['a'], range(400)])
assert i.labels[1].dtype == 'int16'
i = MultiIndex.from_product([['a'], range(40000)])
assert i.labels[1].dtype == 'int32'
i = pd.MultiIndex.from_product([['a'], range(1000)])
assert (i.labels[0] >= 0).all()
assert (i.labels[1] >= 0).all()
def test_where(self):
i = MultiIndex.from_tuples([('A', 1), ('A', 2)])
def f():
i.where(True)
pytest.raises(NotImplementedError, f)
def test_where_array_like(self):
i = MultiIndex.from_tuples([('A', 1), ('A', 2)])
klasses = [list, tuple, np.array, pd.Series]
cond = [False, True]
for klass in klasses:
def f():
return i.where(klass(cond))
pytest.raises(NotImplementedError, f)
def test_repeat(self):
reps = 2
numbers = [1, 2, 3]
names = np.array(['foo', 'bar'])
m = MultiIndex.from_product([
numbers, names], names=names)
expected = MultiIndex.from_product([
numbers, names.repeat(reps)], names=names)
tm.assert_index_equal(m.repeat(reps), expected)
with tm.assert_produces_warning(FutureWarning):
result = m.repeat(n=reps)
tm.assert_index_equal(result, expected)
def test_numpy_repeat(self):
reps = 2
numbers = [1, 2, 3]
names = np.array(['foo', 'bar'])
m = MultiIndex.from_product([
numbers, names], names=names)
expected = MultiIndex.from_product([
numbers, names.repeat(reps)], names=names)
tm.assert_index_equal(np.repeat(m, reps), expected)
msg = "the 'axis' parameter is not supported"
tm.assert_raises_regex(
ValueError, msg, np.repeat, m, reps, axis=1)
def test_set_name_methods(self):
# so long as these are synonyms, we don't need to test set_names
assert self.index.rename == self.index.set_names
new_names = [name + "SUFFIX" for name in self.index_names]
ind = self.index.set_names(new_names)
assert self.index.names == self.index_names
assert ind.names == new_names
with tm.assert_raises_regex(ValueError, "^Length"):
ind.set_names(new_names + new_names)
new_names2 = [name + "SUFFIX2" for name in new_names]
res = ind.set_names(new_names2, inplace=True)
assert res is None
assert ind.names == new_names2
# set names for specific level (# GH7792)
ind = self.index.set_names(new_names[0], level=0)
assert self.index.names == self.index_names
assert ind.names == [new_names[0], self.index_names[1]]
res = ind.set_names(new_names2[0], level=0, inplace=True)
assert res is None
assert ind.names == [new_names2[0], self.index_names[1]]
# set names for multiple levels
ind = self.index.set_names(new_names, level=[0, 1])
assert self.index.names == self.index_names
assert ind.names == new_names
res = ind.set_names(new_names2, level=[0, 1], inplace=True)
assert res is None
assert ind.names == new_names2
@pytest.mark.parametrize('inplace', [True, False])
def test_set_names_with_nlevel_1(self, inplace):
# GH 21149
# Ensure that .set_names for MultiIndex with
# nlevels == 1 does not raise any errors
expected = pd.MultiIndex(levels=[[0, 1]],
labels=[[0, 1]],
names=['first'])
m = pd.MultiIndex.from_product([[0, 1]])
result = m.set_names('first', level=0, inplace=inplace)
if inplace:
result = m
tm.assert_index_equal(result, expected)
def test_set_levels_labels_directly(self):
# setting levels/labels directly raises AttributeError
levels = self.index.levels
new_levels = [[lev + 'a' for lev in level] for level in levels]
labels = self.index.labels
major_labels, minor_labels = labels
major_labels = [(x + 1) % 3 for x in major_labels]
minor_labels = [(x + 1) % 1 for x in minor_labels]
new_labels = [major_labels, minor_labels]
with pytest.raises(AttributeError):
self.index.levels = new_levels
with pytest.raises(AttributeError):
self.index.labels = new_labels
def test_set_levels(self):
# side note - you probably wouldn't want to use levels and labels
# directly like this - but it is possible.
levels = self.index.levels
new_levels = [[lev + 'a' for lev in level] for level in levels]
def assert_matching(actual, expected, check_dtype=False):
# avoid specifying internal representation
# as much as possible
assert len(actual) == len(expected)
for act, exp in zip(actual, expected):
act = np.asarray(act)
exp = np.asarray(exp)
tm.assert_numpy_array_equal(act, exp, check_dtype=check_dtype)
# level changing [w/o mutation]
ind2 = self.index.set_levels(new_levels)
assert_matching(ind2.levels, new_levels)
assert_matching(self.index.levels, levels)
# level changing [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_levels(new_levels, inplace=True)
assert inplace_return is None
assert_matching(ind2.levels, new_levels)
# level changing specific level [w/o mutation]
ind2 = self.index.set_levels(new_levels[0], level=0)
assert_matching(ind2.levels, [new_levels[0], levels[1]])
assert_matching(self.index.levels, levels)
ind2 = self.index.set_levels(new_levels[1], level=1)
assert_matching(ind2.levels, [levels[0], new_levels[1]])
assert_matching(self.index.levels, levels)
# level changing multiple levels [w/o mutation]
ind2 = self.index.set_levels(new_levels, level=[0, 1])
assert_matching(ind2.levels, new_levels)
assert_matching(self.index.levels, levels)
# level changing specific level [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_levels(new_levels[0], level=0, inplace=True)
assert inplace_return is None
assert_matching(ind2.levels, [new_levels[0], levels[1]])
assert_matching(self.index.levels, levels)
ind2 = self.index.copy()
inplace_return = ind2.set_levels(new_levels[1], level=1, inplace=True)
assert inplace_return is None
assert_matching(ind2.levels, [levels[0], new_levels[1]])
assert_matching(self.index.levels, levels)
# level changing multiple levels [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_levels(new_levels, level=[0, 1],
inplace=True)
assert inplace_return is None
assert_matching(ind2.levels, new_levels)
assert_matching(self.index.levels, levels)
# illegal level changing should not change levels
# GH 13754
original_index = self.index.copy()
for inplace in [True, False]:
with tm.assert_raises_regex(ValueError, "^On"):
self.index.set_levels(['c'], level=0, inplace=inplace)
assert_matching(self.index.levels, original_index.levels,
check_dtype=True)
with tm.assert_raises_regex(ValueError, "^On"):
self.index.set_labels([0, 1, 2, 3, 4, 5], level=0,
inplace=inplace)
assert_matching(self.index.labels, original_index.labels,
check_dtype=True)
with tm.assert_raises_regex(TypeError, "^Levels"):
self.index.set_levels('c', level=0, inplace=inplace)
assert_matching(self.index.levels, original_index.levels,
check_dtype=True)
with tm.assert_raises_regex(TypeError, "^Labels"):
self.index.set_labels(1, level=0, inplace=inplace)
assert_matching(self.index.labels, original_index.labels,
check_dtype=True)
def test_set_labels(self):
# side note - you probably wouldn't want to use levels and labels
# directly like this - but it is possible.
labels = self.index.labels
major_labels, minor_labels = labels
major_labels = [(x + 1) % 3 for x in major_labels]
minor_labels = [(x + 1) % 1 for x in minor_labels]
new_labels = [major_labels, minor_labels]
def assert_matching(actual, expected):
# avoid specifying internal representation
# as much as possible
assert len(actual) == len(expected)
for act, exp in zip(actual, expected):
act = np.asarray(act)
exp = np.asarray(exp, dtype=np.int8)
tm.assert_numpy_array_equal(act, exp)
# label changing [w/o mutation]
ind2 = self.index.set_labels(new_labels)
assert_matching(ind2.labels, new_labels)
assert_matching(self.index.labels, labels)
# label changing [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_labels(new_labels, inplace=True)
assert inplace_return is None
assert_matching(ind2.labels, new_labels)
# label changing specific level [w/o mutation]
ind2 = self.index.set_labels(new_labels[0], level=0)
assert_matching(ind2.labels, [new_labels[0], labels[1]])
assert_matching(self.index.labels, labels)
ind2 = self.index.set_labels(new_labels[1], level=1)
assert_matching(ind2.labels, [labels[0], new_labels[1]])
assert_matching(self.index.labels, labels)
# label changing multiple levels [w/o mutation]
ind2 = self.index.set_labels(new_labels, level=[0, 1])
assert_matching(ind2.labels, new_labels)
assert_matching(self.index.labels, labels)
# label changing specific level [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_labels(new_labels[0], level=0, inplace=True)
assert inplace_return is None
assert_matching(ind2.labels, [new_labels[0], labels[1]])
assert_matching(self.index.labels, labels)
ind2 = self.index.copy()
inplace_return = ind2.set_labels(new_labels[1], level=1, inplace=True)
assert inplace_return is None
assert_matching(ind2.labels, [labels[0], new_labels[1]])
assert_matching(self.index.labels, labels)
# label changing multiple levels [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_labels(new_labels, level=[0, 1],
inplace=True)
assert inplace_return is None
assert_matching(ind2.labels, new_labels)
assert_matching(self.index.labels, labels)
# label changing for levels of different magnitude of categories
ind = pd.MultiIndex.from_tuples([(0, i) for i in range(130)])
new_labels = range(129, -1, -1)
expected = pd.MultiIndex.from_tuples(
[(0, i) for i in new_labels])
# [w/o mutation]
result = ind.set_labels(labels=new_labels, level=1)
assert result.equals(expected)
# [w/ mutation]
result = ind.copy()
result.set_labels(labels=new_labels, level=1, inplace=True)
assert result.equals(expected)
def test_set_levels_labels_names_bad_input(self):
levels, labels = self.index.levels, self.index.labels
names = self.index.names
with tm.assert_raises_regex(ValueError, 'Length of levels'):
self.index.set_levels([levels[0]])
with tm.assert_raises_regex(ValueError, 'Length of labels'):
self.index.set_labels([labels[0]])
with tm.assert_raises_regex(ValueError, 'Length of names'):
self.index.set_names([names[0]])
# shouldn't scalar data error, instead should demand list-like
with tm.assert_raises_regex(TypeError, 'list of lists-like'):
self.index.set_levels(levels[0])
# shouldn't scalar data error, instead should demand list-like
with tm.assert_raises_regex(TypeError, 'list of lists-like'):
self.index.set_labels(labels[0])
# shouldn't scalar data error, instead should demand list-like
with tm.assert_raises_regex(TypeError, 'list-like'):
self.index.set_names(names[0])
# should have equal lengths
with tm.assert_raises_regex(TypeError, 'list of lists-like'):
self.index.set_levels(levels[0], level=[0, 1])
with tm.assert_raises_regex(TypeError, 'list-like'):
self.index.set_levels(levels, level=0)
# should have equal lengths
with tm.assert_raises_regex(TypeError, 'list of lists-like'):
self.index.set_labels(labels[0], level=[0, 1])
with tm.assert_raises_regex(TypeError, 'list-like'):
self.index.set_labels(labels, level=0)
# should have equal lengths
with tm.assert_raises_regex(ValueError, 'Length of names'):
self.index.set_names(names[0], level=[0, 1])
with tm.assert_raises_regex(TypeError, 'string'):
self.index.set_names(names, level=0)
def test_set_levels_categorical(self):
# GH13854
index = MultiIndex.from_arrays([list("xyzx"), [0, 1, 2, 3]])
for ordered in [False, True]:
cidx = CategoricalIndex(list("bac"), ordered=ordered)
result = index.set_levels(cidx, 0)
expected = MultiIndex(levels=[cidx, [0, 1, 2, 3]],
labels=index.labels)
tm.assert_index_equal(result, expected)
result_lvl = result.get_level_values(0)
expected_lvl = CategoricalIndex(list("bacb"),
categories=cidx.categories,
ordered=cidx.ordered)
tm.assert_index_equal(result_lvl, expected_lvl)
def test_metadata_immutable(self):
levels, labels = self.index.levels, self.index.labels
# shouldn't be able to set at either the top level or base level
mutable_regex = re.compile('does not support mutable operations')
with tm.assert_raises_regex(TypeError, mutable_regex):
levels[0] = levels[0]
with tm.assert_raises_regex(TypeError, mutable_regex):
levels[0][0] = levels[0][0]
# ditto for labels
with tm.assert_raises_regex(TypeError, mutable_regex):
labels[0] = labels[0]
with tm.assert_raises_regex(TypeError, mutable_regex):
labels[0][0] = labels[0][0]
# and for names
names = self.index.names
with tm.assert_raises_regex(TypeError, mutable_regex):
names[0] = names[0]
def test_inplace_mutation_resets_values(self):
levels = [['a', 'b', 'c'], [4]]
levels2 = [[1, 2, 3], ['a']]
labels = [[0, 1, 0, 2, 2, 0], [0, 0, 0, 0, 0, 0]]
mi1 = MultiIndex(levels=levels, labels=labels)
mi2 = MultiIndex(levels=levels2, labels=labels)
vals = mi1.values.copy()
vals2 = mi2.values.copy()
assert mi1._tuples is not None
# Make sure level setting works
new_vals = mi1.set_levels(levels2).values
tm.assert_almost_equal(vals2, new_vals)
# Non-inplace doesn't kill _tuples [implementation detail]
tm.assert_almost_equal(mi1._tuples, vals)
# ...and values is still same too
tm.assert_almost_equal(mi1.values, vals)
# Inplace should kill _tuples
mi1.set_levels(levels2, inplace=True)
tm.assert_almost_equal(mi1.values, vals2)
# Make sure label setting works too
labels2 = [[0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0]]
exp_values = np.empty((6,), dtype=object)
exp_values[:] = [(long(1), 'a')] * 6
# Must be 1d array of tuples
assert exp_values.shape == (6,)
new_values = mi2.set_labels(labels2).values
# Not inplace shouldn't change
tm.assert_almost_equal(mi2._tuples, vals2)
# Should have correct values
tm.assert_almost_equal(exp_values, new_values)
# ...and again setting inplace should kill _tuples, etc
mi2.set_labels(labels2, inplace=True)
tm.assert_almost_equal(mi2.values, new_values)
def test_copy_in_constructor(self):
levels = np.array(["a", "b", "c"])
labels = np.array([1, 1, 2, 0, 0, 1, 1])
val = labels[0]
mi = MultiIndex(levels=[levels, levels], labels=[labels, labels],
copy=True)
assert mi.labels[0][0] == val
labels[0] = 15
assert mi.labels[0][0] == val
val = levels[0]
levels[0] = "PANDA"
assert mi.levels[0][0] == val
def test_set_value_keeps_names(self):
# motivating example from #3742
lev1 = ['hans', 'hans', 'hans', 'grethe', 'grethe', 'grethe']
lev2 = ['1', '2', '3'] * 2
idx = pd.MultiIndex.from_arrays([lev1, lev2], names=['Name', 'Number'])
df = pd.DataFrame(
np.random.randn(6, 4),
columns=['one', 'two', 'three', 'four'],
index=idx)
df = df.sort_index()
assert df._is_copy is None
assert df.index.names == ('Name', 'Number')
df.at[('grethe', '4'), 'one'] = 99.34
assert df._is_copy is None
assert df.index.names == ('Name', 'Number')
def test_copy_names(self):
# Check that adding a "names" parameter to the copy is honored
# GH14302
multi_idx = pd.Index([(1, 2), (3, 4)], names=['MyName1', 'MyName2'])
multi_idx1 = multi_idx.copy()
assert multi_idx.equals(multi_idx1)
assert multi_idx.names == ['MyName1', 'MyName2']
assert multi_idx1.names == ['MyName1', 'MyName2']
multi_idx2 = multi_idx.copy(names=['NewName1', 'NewName2'])
assert multi_idx.equals(multi_idx2)
assert multi_idx.names == ['MyName1', 'MyName2']
assert multi_idx2.names == ['NewName1', 'NewName2']
multi_idx3 = multi_idx.copy(name=['NewName1', 'NewName2'])
assert multi_idx.equals(multi_idx3)
assert multi_idx.names == ['MyName1', 'MyName2']
assert multi_idx3.names == ['NewName1', 'NewName2']
def test_names(self):
# names are assigned in setup
names = self.index_names
level_names = [level.name for level in self.index.levels]
assert names == level_names
# setting bad names on existing
index = self.index
tm.assert_raises_regex(ValueError, "^Length of names",
setattr, index, "names",
list(index.names) + ["third"])
tm.assert_raises_regex(ValueError, "^Length of names",
setattr, index, "names", [])
# initializing with bad names (should always be equivalent)
major_axis, minor_axis = self.index.levels
major_labels, minor_labels = self.index.labels
tm.assert_raises_regex(ValueError, "^Length of names", MultiIndex,
levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels],
names=['first'])
tm.assert_raises_regex(ValueError, "^Length of names", MultiIndex,
levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels],
names=['first', 'second', 'third'])
# names are assigned
index.names = ["a", "b"]
ind_names = list(index.names)
level_names = [level.name for level in index.levels]
assert ind_names == level_names
def test_astype(self):
expected = self.index.copy()
actual = self.index.astype('O')
assert_copy(actual.levels, expected.levels)
assert_copy(actual.labels, expected.labels)
self.check_level_names(actual, expected.names)
with tm.assert_raises_regex(TypeError, "^Setting.*dtype.*object"):
self.index.astype(np.dtype(int))
@pytest.mark.parametrize('ordered', [True, False])
def test_astype_category(self, ordered):
# GH 18630
msg = '> 1 ndim Categorical are not supported at this time'
with tm.assert_raises_regex(NotImplementedError, msg):
self.index.astype(CategoricalDtype(ordered=ordered))
if ordered is False:
# dtype='category' defaults to ordered=False, so only test once
with tm.assert_raises_regex(NotImplementedError, msg):
self.index.astype('category')
def test_constructor_single_level(self):
result = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux']],
labels=[[0, 1, 2, 3]], names=['first'])
assert isinstance(result, MultiIndex)
expected = Index(['foo', 'bar', 'baz', 'qux'], name='first')
tm.assert_index_equal(result.levels[0], expected)
assert result.names == ['first']
def test_constructor_no_levels(self):
tm.assert_raises_regex(ValueError, "non-zero number "
"of levels/labels",
MultiIndex, levels=[], labels=[])
both_re = re.compile('Must pass both levels and labels')
with tm.assert_raises_regex(TypeError, both_re):
MultiIndex(levels=[])
with tm.assert_raises_regex(TypeError, both_re):
MultiIndex(labels=[])
def test_constructor_mismatched_label_levels(self):
labels = [np.array([1]), np.array([2]), np.array([3])]
levels = ["a"]
tm.assert_raises_regex(ValueError, "Length of levels and labels "
"must be the same", MultiIndex,
levels=levels, labels=labels)
length_error = re.compile('>= length of level')
label_error = re.compile(r'Unequal label lengths: \[4, 2\]')
# important to check that it's looking at the right thing.
with tm.assert_raises_regex(ValueError, length_error):
MultiIndex(levels=[['a'], ['b']],
labels=[[0, 1, 2, 3], [0, 3, 4, 1]])
with tm.assert_raises_regex(ValueError, label_error):
MultiIndex(levels=[['a'], ['b']], labels=[[0, 0, 0, 0], [0, 0]])
# external API
with tm.assert_raises_regex(ValueError, length_error):
self.index.copy().set_levels([['a'], ['b']])
with tm.assert_raises_regex(ValueError, label_error):
self.index.copy().set_labels([[0, 0, 0, 0], [0, 0]])
def test_constructor_nonhashable_names(self):
# GH 20527
levels = [[1, 2], [u'one', u'two']]
labels = [[0, 0, 1, 1], [0, 1, 0, 1]]
names = ((['foo'], ['bar']))
message = "MultiIndex.name must be a hashable type"
tm.assert_raises_regex(TypeError, message,
MultiIndex, levels=levels,
labels=labels, names=names)
# With .rename()
mi = MultiIndex(levels=[[1, 2], [u'one', u'two']],
labels=[[0, 0, 1, 1], [0, 1, 0, 1]],
names=('foo', 'bar'))
renamed = [['foor'], ['barr']]
tm.assert_raises_regex(TypeError, message, mi.rename, names=renamed)
# With .set_names()
tm.assert_raises_regex(TypeError, message, mi.set_names, names=renamed)
@pytest.mark.parametrize('names', [['a', 'b', 'a'], ['1', '1', '2'],
['1', 'a', '1']])
def test_duplicate_level_names(self, names):
# GH18872
pytest.raises(ValueError, pd.MultiIndex.from_product,
[[0, 1]] * 3, names=names)
# With .rename()
mi = pd.MultiIndex.from_product([[0, 1]] * 3)
tm.assert_raises_regex(ValueError, "Duplicated level name:",
mi.rename, names)
# With .rename(., level=)
mi.rename(names[0], level=1, inplace=True)
tm.assert_raises_regex(ValueError, "Duplicated level name:",
mi.rename, names[:2], level=[0, 2])
def assert_multiindex_copied(self, copy, original):
# Levels should be (at least, shallow copied)
tm.assert_copy(copy.levels, original.levels)
tm.assert_almost_equal(copy.labels, original.labels)
# Labels doesn't matter which way copied
tm.assert_almost_equal(copy.labels, original.labels)
assert copy.labels is not original.labels
# Names doesn't matter which way copied
assert copy.names == original.names
assert copy.names is not original.names
# Sort order should be copied
assert copy.sortorder == original.sortorder
def test_copy(self):
i_copy = self.index.copy()
self.assert_multiindex_copied(i_copy, self.index)
def test_shallow_copy(self):
i_copy = self.index._shallow_copy()
self.assert_multiindex_copied(i_copy, self.index)
def test_view(self):
i_view = self.index.view()
self.assert_multiindex_copied(i_view, self.index)
def check_level_names(self, index, names):
assert [level.name for level in index.levels] == list(names)
def test_changing_names(self):
# names should be applied to levels
level_names = [level.name for level in self.index.levels]
self.check_level_names(self.index, self.index.names)
view = self.index.view()
copy = self.index.copy()
shallow_copy = self.index._shallow_copy()
# changing names should change level names on object
new_names = [name + "a" for name in self.index.names]
self.index.names = new_names
self.check_level_names(self.index, new_names)
# but not on copies
self.check_level_names(view, level_names)
self.check_level_names(copy, level_names)
self.check_level_names(shallow_copy, level_names)
# and copies shouldn't change original
shallow_copy.names = [name + "c" for name in shallow_copy.names]
self.check_level_names(self.index, new_names)
def test_get_level_number_integer(self):
self.index.names = [1, 0]
assert self.index._get_level_number(1) == 0
assert self.index._get_level_number(0) == 1
pytest.raises(IndexError, self.index._get_level_number, 2)
tm.assert_raises_regex(KeyError, 'Level fourth not found',
self.index._get_level_number, 'fourth')
def test_from_arrays(self):
arrays = []
for lev, lab in zip(self.index.levels, self.index.labels):
arrays.append(np.asarray(lev).take(lab))
# list of arrays as input
result = MultiIndex.from_arrays(arrays, names=self.index.names)
tm.assert_index_equal(result, self.index)
# infer correctly
result = MultiIndex.from_arrays([[pd.NaT, Timestamp('20130101')],
['a', 'b']])
assert result.levels[0].equals(Index([Timestamp('20130101')]))
assert result.levels[1].equals(Index(['a', 'b']))
def test_from_arrays_iterator(self):
# GH 18434
arrays = []
for lev, lab in zip(self.index.levels, self.index.labels):
arrays.append(np.asarray(lev).take(lab))
# iterator as input
result = MultiIndex.from_arrays(iter(arrays), names=self.index.names)
tm.assert_index_equal(result, self.index)
# invalid iterator input
with tm.assert_raises_regex(
TypeError, "Input must be a list / sequence of array-likes."):
MultiIndex.from_arrays(0)
def test_from_arrays_index_series_datetimetz(self):
idx1 = pd.date_range('2015-01-01 10:00', freq='D', periods=3,
tz='US/Eastern')
idx2 = pd.date_range('2015-01-01 10:00', freq='H', periods=3,
tz='Asia/Tokyo')
result = pd.MultiIndex.from_arrays([idx1, idx2])
tm.assert_index_equal(result.get_level_values(0), idx1)
tm.assert_index_equal(result.get_level_values(1), idx2)
result2 = pd.MultiIndex.from_arrays([pd.Series(idx1), pd.Series(idx2)])
tm.assert_index_equal(result2.get_level_values(0), idx1)
tm.assert_index_equal(result2.get_level_values(1), idx2)
tm.assert_index_equal(result, result2)
def test_from_arrays_index_series_timedelta(self):
idx1 = pd.timedelta_range('1 days', freq='D', periods=3)
idx2 = pd.timedelta_range('2 hours', freq='H', periods=3)
result = pd.MultiIndex.from_arrays([idx1, idx2])
tm.assert_index_equal(result.get_level_values(0), idx1)
tm.assert_index_equal(result.get_level_values(1), idx2)
result2 = pd.MultiIndex.from_arrays([pd.Series(idx1), pd.Series(idx2)])
tm.assert_index_equal(result2.get_level_values(0), idx1)
tm.assert_index_equal(result2.get_level_values(1), idx2)
tm.assert_index_equal(result, result2)
def test_from_arrays_index_series_period(self):
idx1 = pd.period_range('2011-01-01', freq='D', periods=3)
idx2 = pd.period_range('2015-01-01', freq='H', periods=3)
result = pd.MultiIndex.from_arrays([idx1, idx2])
tm.assert_index_equal(result.get_level_values(0), idx1)
tm.assert_index_equal(result.get_level_values(1), idx2)
result2 = pd.MultiIndex.from_arrays([pd.Series(idx1), pd.Series(idx2)])
tm.assert_index_equal(result2.get_level_values(0), idx1)
tm.assert_index_equal(result2.get_level_values(1), idx2)
tm.assert_index_equal(result, result2)
def test_from_arrays_index_datetimelike_mixed(self):
idx1 = pd.date_range('2015-01-01 10:00', freq='D', periods=3,
tz='US/Eastern')
idx2 = pd.date_range('2015-01-01 10:00', freq='H', periods=3)
idx3 = pd.timedelta_range('1 days', freq='D', periods=3)
idx4 = pd.period_range('2011-01-01', freq='D', periods=3)
result = pd.MultiIndex.from_arrays([idx1, idx2, idx3, idx4])
tm.assert_index_equal(result.get_level_values(0), idx1)
tm.assert_index_equal(result.get_level_values(1), idx2)
tm.assert_index_equal(result.get_level_values(2), idx3)
tm.assert_index_equal(result.get_level_values(3), idx4)
result2 = pd.MultiIndex.from_arrays([pd.Series(idx1),
pd.Series(idx2),
pd.Series(idx3),
pd.Series(idx4)])
tm.assert_index_equal(result2.get_level_values(0), idx1)
tm.assert_index_equal(result2.get_level_values(1), idx2)
tm.assert_index_equal(result2.get_level_values(2), idx3)
tm.assert_index_equal(result2.get_level_values(3), idx4)
tm.assert_index_equal(result, result2)
def test_from_arrays_index_series_categorical(self):
# GH13743
idx1 = pd.CategoricalIndex(list("abcaab"), categories=list("bac"),
ordered=False)
idx2 = pd.CategoricalIndex(list("abcaab"), categories=list("bac"),
ordered=True)
result = pd.MultiIndex.from_arrays([idx1, idx2])
tm.assert_index_equal(result.get_level_values(0), idx1)
tm.assert_index_equal(result.get_level_values(1), idx2)
result2 = pd.MultiIndex.from_arrays([pd.Series(idx1), pd.Series(idx2)])
tm.assert_index_equal(result2.get_level_values(0), idx1)
tm.assert_index_equal(result2.get_level_values(1), idx2)
result3 = pd.MultiIndex.from_arrays([idx1.values, idx2.values])
tm.assert_index_equal(result3.get_level_values(0), idx1)
tm.assert_index_equal(result3.get_level_values(1), idx2)
def test_from_arrays_empty(self):
# 0 levels
with tm.assert_raises_regex(
ValueError, "Must pass non-zero number of levels/labels"):
MultiIndex.from_arrays(arrays=[])
# 1 level
result = MultiIndex.from_arrays(arrays=[[]], names=['A'])
assert isinstance(result, MultiIndex)
expected = Index([], name='A')
tm.assert_index_equal(result.levels[0], expected)
# N levels
for N in [2, 3]:
arrays = [[]] * N
names = list('ABC')[:N]
result = MultiIndex.from_arrays(arrays=arrays, names=names)
expected = MultiIndex(levels=[[]] * N, labels=[[]] * N,
names=names)
tm.assert_index_equal(result, expected)
def test_from_arrays_invalid_input(self):
invalid_inputs = [1, [1], [1, 2], [[1], 2],
'a', ['a'], ['a', 'b'], [['a'], 'b']]
for i in invalid_inputs:
pytest.raises(TypeError, MultiIndex.from_arrays, arrays=i)
def test_from_arrays_different_lengths(self):
# see gh-13599
idx1 = [1, 2, 3]
idx2 = ['a', 'b']
tm.assert_raises_regex(ValueError, '^all arrays must '
'be same length$',
MultiIndex.from_arrays, [idx1, idx2])
idx1 = []
idx2 = ['a', 'b']
tm.assert_raises_regex(ValueError, '^all arrays must '
'be same length$',
MultiIndex.from_arrays, [idx1, idx2])
idx1 = [1, 2, 3]
idx2 = []
tm.assert_raises_regex(ValueError, '^all arrays must '
'be same length$',
MultiIndex.from_arrays, [idx1, idx2])
def test_from_product(self):
first = ['foo', 'bar', 'buz']
second = ['a', 'b', 'c']
names = ['first', 'second']
result = MultiIndex.from_product([first, second], names=names)
tuples = [('foo', 'a'), ('foo', 'b'), ('foo', 'c'), ('bar', 'a'),
('bar', 'b'), ('bar', 'c'), ('buz', 'a'), ('buz', 'b'),
('buz', 'c')]
expected = MultiIndex.from_tuples(tuples, names=names)
tm.assert_index_equal(result, expected)
def test_from_product_iterator(self):
# GH 18434
first = ['foo', 'bar', 'buz']
second = ['a', 'b', 'c']
names = ['first', 'second']
tuples = [('foo', 'a'), ('foo', 'b'), ('foo', 'c'), ('bar', 'a'),
('bar', 'b'), ('bar', 'c'), ('buz', 'a'), ('buz', 'b'),
('buz', 'c')]
expected = MultiIndex.from_tuples(tuples, names=names)
# iterator as input
result = MultiIndex.from_product(iter([first, second]), names=names)
tm.assert_index_equal(result, expected)
# Invalid non-iterable input
with tm.assert_raises_regex(
TypeError, "Input must be a list / sequence of iterables."):
MultiIndex.from_product(0)
def test_from_product_empty(self):
# 0 levels
with tm.assert_raises_regex(
ValueError, "Must pass non-zero number of levels/labels"):
MultiIndex.from_product([])
# 1 level
result = MultiIndex.from_product([[]], names=['A'])
expected = pd.Index([], name='A')
tm.assert_index_equal(result.levels[0], expected)
# 2 levels
l1 = [[], ['foo', 'bar', 'baz'], []]
l2 = [[], [], ['a', 'b', 'c']]
names = ['A', 'B']
for first, second in zip(l1, l2):
result = MultiIndex.from_product([first, second], names=names)
expected = MultiIndex(levels=[first, second],
labels=[[], []], names=names)
tm.assert_index_equal(result, expected)
# GH12258
names = ['A', 'B', 'C']
for N in range(4):
lvl2 = lrange(N)
result = MultiIndex.from_product([[], lvl2, []], names=names)
expected = MultiIndex(levels=[[], lvl2, []],
labels=[[], [], []], names=names)
tm.assert_index_equal(result, expected)
def test_from_product_invalid_input(self):
invalid_inputs = [1, [1], [1, 2], [[1], 2],
'a', ['a'], ['a', 'b'], [['a'], 'b']]
for i in invalid_inputs:
pytest.raises(TypeError, MultiIndex.from_product, iterables=i)
def test_from_product_datetimeindex(self):
dt_index = date_range('2000-01-01', periods=2)
mi = pd.MultiIndex.from_product([[1, 2], dt_index])
etalon = construct_1d_object_array_from_listlike([(1, pd.Timestamp(
'2000-01-01')), (1, pd.Timestamp('2000-01-02')), (2, pd.Timestamp(
'2000-01-01')), (2, pd.Timestamp('2000-01-02'))])
tm.assert_numpy_array_equal(mi.values, etalon)
def test_from_product_index_series_categorical(self):
# GH13743
first = ['foo', 'bar']
for ordered in [False, True]:
idx = pd.CategoricalIndex(list("abcaab"), categories=list("bac"),
ordered=ordered)
expected = pd.CategoricalIndex(list("abcaab") + list("abcaab"),
categories=list("bac"),
ordered=ordered)
for arr in [idx, pd.Series(idx), idx.values]:
result = pd.MultiIndex.from_product([first, arr])
tm.assert_index_equal(result.get_level_values(1), expected)
def test_values_boxed(self):
tuples = [(1, pd.Timestamp('2000-01-01')), (2, pd.NaT),
(3, pd.Timestamp('2000-01-03')),
(1, pd.Timestamp('2000-01-04')),
(2, pd.Timestamp('2000-01-02')),
(3, pd.Timestamp('2000-01-03'))]
result = pd.MultiIndex.from_tuples(tuples)
expected = construct_1d_object_array_from_listlike(tuples)
tm.assert_numpy_array_equal(result.values, expected)
# Check that code branches for boxed values produce identical results
tm.assert_numpy_array_equal(result.values[:4], result[:4].values)
def test_values_multiindex_datetimeindex(self):
# Test to ensure we hit the boxing / nobox part of MI.values
ints = np.arange(10 ** 18, 10 ** 18 + 5)
naive = pd.DatetimeIndex(ints)
aware = pd.DatetimeIndex(ints, tz='US/Central')
idx = pd.MultiIndex.from_arrays([naive, aware])
result = idx.values
outer = pd.DatetimeIndex([x[0] for x in result])
tm.assert_index_equal(outer, naive)
inner = pd.DatetimeIndex([x[1] for x in result])
tm.assert_index_equal(inner, aware)
# n_lev > n_lab
result = idx[:2].values
outer = pd.DatetimeIndex([x[0] for x in result])
tm.assert_index_equal(outer, naive[:2])
inner = pd.DatetimeIndex([x[1] for x in result])
tm.assert_index_equal(inner, aware[:2])
def test_values_multiindex_periodindex(self):
# Test to ensure we hit the boxing / nobox part of MI.values
ints = np.arange(2007, 2012)
pidx = pd.PeriodIndex(ints, freq='D')
idx = pd.MultiIndex.from_arrays([ints, pidx])
result = idx.values
outer = pd.Int64Index([x[0] for x in result])
tm.assert_index_equal(outer, pd.Int64Index(ints))
inner = pd.PeriodIndex([x[1] for x in result])
tm.assert_index_equal(inner, pidx)
# n_lev > n_lab
result = idx[:2].values
outer = pd.Int64Index([x[0] for x in result])
tm.assert_index_equal(outer, pd.Int64Index(ints[:2]))
inner = pd.PeriodIndex([x[1] for x in result])
tm.assert_index_equal(inner, pidx[:2])
def test_append(self):
result = self.index[:3].append(self.index[3:])
assert result.equals(self.index)
foos = [self.index[:1], self.index[1:3], self.index[3:]]
result = foos[0].append(foos[1:])
assert result.equals(self.index)
# empty
result = self.index.append([])
assert result.equals(self.index)
def test_append_mixed_dtypes(self):
# GH 13660
dti = date_range('2011-01-01', freq='M', periods=3, )
dti_tz = date_range('2011-01-01', freq='M', periods=3, tz='US/Eastern')
pi = period_range('2011-01', freq='M', periods=3)
mi = MultiIndex.from_arrays([[1, 2, 3],
[1.1, np.nan, 3.3],
['a', 'b', 'c'],
dti, dti_tz, pi])
assert mi.nlevels == 6
res = mi.append(mi)
exp = MultiIndex.from_arrays([[1, 2, 3, 1, 2, 3],
[1.1, np.nan, 3.3, 1.1, np.nan, 3.3],
['a', 'b', 'c', 'a', 'b', 'c'],
dti.append(dti),
dti_tz.append(dti_tz),
pi.append(pi)])
tm.assert_index_equal(res, exp)
other = MultiIndex.from_arrays([['x', 'y', 'z'], ['x', 'y', 'z'],
['x', 'y', 'z'], ['x', 'y', 'z'],
['x', 'y', 'z'], ['x', 'y', 'z']])
res = mi.append(other)
exp = MultiIndex.from_arrays([[1, 2, 3, 'x', 'y', 'z'],
[1.1, np.nan, 3.3, 'x', 'y', 'z'],
['a', 'b', 'c', 'x', 'y', 'z'],
dti.append(pd.Index(['x', 'y', 'z'])),
dti_tz.append(pd.Index(['x', 'y', 'z'])),
pi.append(pd.Index(['x', 'y', 'z']))])
tm.assert_index_equal(res, exp)
def test_get_level_values(self):
result = self.index.get_level_values(0)
expected = Index(['foo', 'foo', 'bar', 'baz', 'qux', 'qux'],
name='first')
tm.assert_index_equal(result, expected)
assert result.name == 'first'
result = self.index.get_level_values('first')
expected = self.index.get_level_values(0)
tm.assert_index_equal(result, expected)
# GH 10460
index = MultiIndex(
levels=[CategoricalIndex(['A', 'B']),
CategoricalIndex([1, 2, 3])],
labels=[np.array([0, 0, 0, 1, 1, 1]),
np.array([0, 1, 2, 0, 1, 2])])
exp = CategoricalIndex(['A', 'A', 'A', 'B', 'B', 'B'])
tm.assert_index_equal(index.get_level_values(0), exp)
exp = CategoricalIndex([1, 2, 3, 1, 2, 3])
tm.assert_index_equal(index.get_level_values(1), exp)
def test_get_level_values_int_with_na(self):
# GH 17924
arrays = [['a', 'b', 'b'], [1, np.nan, 2]]
index = pd.MultiIndex.from_arrays(arrays)
result = index.get_level_values(1)
expected = Index([1, np.nan, 2])
tm.assert_index_equal(result, expected)
arrays = [['a', 'b', 'b'], [np.nan, np.nan, 2]]
index = pd.MultiIndex.from_arrays(arrays)
result = index.get_level_values(1)
expected = Index([np.nan, np.nan, 2])
tm.assert_index_equal(result, expected)
def test_get_level_values_na(self):
arrays = [[np.nan, np.nan, np.nan], ['a', np.nan, 1]]
index = pd.MultiIndex.from_arrays(arrays)
result = index.get_level_values(0)
expected = pd.Index([np.nan, np.nan, np.nan])
tm.assert_index_equal(result, expected)
result = index.get_level_values(1)
expected = pd.Index(['a', np.nan, 1])
tm.assert_index_equal(result, expected)
arrays = [['a', 'b', 'b'], pd.DatetimeIndex([0, 1, pd.NaT])]
index = pd.MultiIndex.from_arrays(arrays)
result = index.get_level_values(1)
expected = pd.DatetimeIndex([0, 1, pd.NaT])
tm.assert_index_equal(result, expected)
arrays = [[], []]
index = pd.MultiIndex.from_arrays(arrays)
result = index.get_level_values(0)
expected = pd.Index([], dtype=object)
tm.assert_index_equal(result, expected)
def test_get_level_values_all_na(self):
# GH 17924 when level entirely consists of nan
arrays = [[np.nan, np.nan, np.nan], ['a', np.nan, 1]]
index = pd.MultiIndex.from_arrays(arrays)
result = index.get_level_values(0)
expected = pd.Index([np.nan, np.nan, np.nan], dtype=np.float64)
tm.assert_index_equal(result, expected)
result = index.get_level_values(1)
expected = pd.Index(['a', np.nan, 1], dtype=object)
tm.assert_index_equal(result, expected)
def test_reorder_levels(self):
# this blows up
tm.assert_raises_regex(IndexError, '^Too many levels',
self.index.reorder_levels, [2, 1, 0])
def test_nlevels(self):
assert self.index.nlevels == 2
def test_iter(self):
result = list(self.index)
expected = [('foo', 'one'), ('foo', 'two'), ('bar', 'one'),
('baz', 'two'), ('qux', 'one'), ('qux', 'two')]
assert result == expected
def test_legacy_pickle(self):
if PY3:
pytest.skip("testing for legacy pickles not "
"support on py3")
path = tm.get_data_path('multiindex_v1.pickle')
obj = pd.read_pickle(path)
obj2 = MultiIndex.from_tuples(obj.values)
assert obj.equals(obj2)
res = obj.get_indexer(obj)
exp = np.arange(len(obj), dtype=np.intp)
assert_almost_equal(res, exp)
res = obj.get_indexer(obj2[::-1])
exp = obj.get_indexer(obj[::-1])
exp2 = obj2.get_indexer(obj2[::-1])
assert_almost_equal(res, exp)
assert_almost_equal(exp, exp2)
def test_legacy_v2_unpickle(self):
# 0.7.3 -> 0.8.0 format manage
path = tm.get_data_path('mindex_073.pickle')
obj = pd.read_pickle(path)
obj2 = MultiIndex.from_tuples(obj.values)
assert obj.equals(obj2)
res = obj.get_indexer(obj)
exp = np.arange(len(obj), dtype=np.intp)
assert_almost_equal(res, exp)
res = obj.get_indexer(obj2[::-1])
exp = obj.get_indexer(obj[::-1])
exp2 = obj2.get_indexer(obj2[::-1])
assert_almost_equal(res, exp)
assert_almost_equal(exp, exp2)
def test_roundtrip_pickle_with_tz(self):
# GH 8367
# round-trip of timezone
index = MultiIndex.from_product(
[[1, 2], ['a', 'b'], date_range('20130101', periods=3,
tz='US/Eastern')
], names=['one', 'two', 'three'])
unpickled = tm.round_trip_pickle(index)
assert index.equal_levels(unpickled)
def test_from_tuples_index_values(self):
result = MultiIndex.from_tuples(self.index)
assert (result.values == self.index.values).all()
def test_contains(self):
assert ('foo', 'two') in self.index
assert ('bar', 'two') not in self.index
assert None not in self.index
def test_contains_top_level(self):
midx = MultiIndex.from_product([['A', 'B'], [1, 2]])
assert 'A' in midx
assert 'A' not in midx._engine
def test_contains_with_nat(self):
# MI with a NaT
mi = MultiIndex(levels=[['C'],
pd.date_range('2012-01-01', periods=5)],
labels=[[0, 0, 0, 0, 0, 0], [-1, 0, 1, 2, 3, 4]],
names=[None, 'B'])
assert ('C', pd.Timestamp('2012-01-01')) in mi
for val in mi.values:
assert val in mi
def test_is_all_dates(self):
assert not self.index.is_all_dates
def test_is_numeric(self):
# MultiIndex is never numeric
assert not self.index.is_numeric()
def test_getitem(self):
# scalar
assert self.index[2] == ('bar', 'one')
# slice
result = self.index[2:5]
expected = self.index[[2, 3, 4]]
assert result.equals(expected)
# boolean
result = self.index[[True, False, True, False, True, True]]
result2 = self.index[np.array([True, False, True, False, True, True])]
expected = self.index[[0, 2, 4, 5]]
assert result.equals(expected)
assert result2.equals(expected)
def test_getitem_group_select(self):
sorted_idx, _ = self.index.sortlevel(0)
assert sorted_idx.get_loc('baz') == slice(3, 4)
assert sorted_idx.get_loc('foo') == slice(0, 2)
def test_get_loc(self):
assert self.index.get_loc(('foo', 'two')) == 1
assert self.index.get_loc(('baz', 'two')) == 3
pytest.raises(KeyError, self.index.get_loc, ('bar', 'two'))
pytest.raises(KeyError, self.index.get_loc, 'quux')
pytest.raises(NotImplementedError, self.index.get_loc, 'foo',
method='nearest')
# 3 levels
index = MultiIndex(levels=[Index(lrange(4)), Index(lrange(4)), Index(
lrange(4))], labels=[np.array([0, 0, 1, 2, 2, 2, 3, 3]), np.array(
[0, 1, 0, 0, 0, 1, 0, 1]), np.array([1, 0, 1, 1, 0, 0, 1, 0])])
pytest.raises(KeyError, index.get_loc, (1, 1))
assert index.get_loc((2, 0)) == slice(3, 5)
def test_get_loc_duplicates(self):
index = Index([2, 2, 2, 2])
result = index.get_loc(2)
expected = slice(0, 4)
assert result == expected
# pytest.raises(Exception, index.get_loc, 2)
index = Index(['c', 'a', 'a', 'b', 'b'])
rs = index.get_loc('c')
xp = 0
assert rs == xp
def test_get_value_duplicates(self):
index = MultiIndex(levels=[['D', 'B', 'C'],
[0, 26, 27, 37, 57, 67, 75, 82]],
labels=[[0, 0, 0, 1, 2, 2, 2, 2, 2, 2],
[1, 3, 4, 6, 0, 2, 2, 3, 5, 7]],
names=['tag', 'day'])
assert index.get_loc('D') == slice(0, 3)
with pytest.raises(KeyError):
index._engine.get_value(np.array([]), 'D')
def test_get_loc_level(self):
index = MultiIndex(levels=[Index(lrange(4)), Index(lrange(4)), Index(
lrange(4))], labels=[np.array([0, 0, 1, 2, 2, 2, 3, 3]), np.array(
[0, 1, 0, 0, 0, 1, 0, 1]), np.array([1, 0, 1, 1, 0, 0, 1, 0])])
loc, new_index = index.get_loc_level((0, 1))
expected = slice(1, 2)
exp_index = index[expected].droplevel(0).droplevel(0)
assert loc == expected
assert new_index.equals(exp_index)
loc, new_index = index.get_loc_level((0, 1, 0))
expected = 1
assert loc == expected
assert new_index is None
pytest.raises(KeyError, index.get_loc_level, (2, 2))
index = MultiIndex(levels=[[2000], lrange(4)], labels=[np.array(
[0, 0, 0, 0]), np.array([0, 1, 2, 3])])
result, new_index = index.get_loc_level((2000, slice(None, None)))
expected = slice(None, None)
assert result == expected
assert new_index.equals(index.droplevel(0))
@pytest.mark.parametrize('level', [0, 1])
@pytest.mark.parametrize('null_val', [np.nan, pd.NaT, None])
def test_get_loc_nan(self, level, null_val):
# GH 18485 : NaN in MultiIndex
levels = [['a', 'b'], ['c', 'd']]
key = ['b', 'd']
levels[level] = np.array([0, null_val], dtype=type(null_val))
key[level] = null_val
idx = MultiIndex.from_product(levels)
assert idx.get_loc(tuple(key)) == 3
def test_get_loc_missing_nan(self):
# GH 8569
idx = MultiIndex.from_arrays([[1.0, 2.0], [3.0, 4.0]])
assert isinstance(idx.get_loc(1), slice)
pytest.raises(KeyError, idx.get_loc, 3)
pytest.raises(KeyError, idx.get_loc, np.nan)
pytest.raises(KeyError, idx.get_loc, [np.nan])
@pytest.mark.parametrize('dtype1', [int, float, bool, str])
@pytest.mark.parametrize('dtype2', [int, float, bool, str])
def test_get_loc_multiple_dtypes(self, dtype1, dtype2):
# GH 18520
levels = [np.array([0, 1]).astype(dtype1),
np.array([0, 1]).astype(dtype2)]
idx = pd.MultiIndex.from_product(levels)
assert idx.get_loc(idx[2]) == 2
@pytest.mark.parametrize('level', [0, 1])
@pytest.mark.parametrize('dtypes', [[int, float], [float, int]])
def test_get_loc_implicit_cast(self, level, dtypes):
# GH 18818, GH 15994 : as flat index, cast int to float and vice-versa
levels = [['a', 'b'], ['c', 'd']]
key = ['b', 'd']
lev_dtype, key_dtype = dtypes
levels[level] = np.array([0, 1], dtype=lev_dtype)
key[level] = key_dtype(1)
idx = MultiIndex.from_product(levels)
assert idx.get_loc(tuple(key)) == 3
def test_get_loc_cast_bool(self):
# GH 19086 : int is cast to bool, but not vice-versa
levels = [[False, True], np.arange(2, dtype='int64')]
idx = MultiIndex.from_product(levels)
assert idx.get_loc((0, 1)) == 1
assert idx.get_loc((1, 0)) == 2
pytest.raises(KeyError, idx.get_loc, (False, True))
pytest.raises(KeyError, idx.get_loc, (True, False))
def test_slice_locs(self):
df = tm.makeTimeDataFrame()
stacked = df.stack()
idx = stacked.index
slob = slice(*idx.slice_locs(df.index[5], df.index[15]))
sliced = stacked[slob]
expected = df[5:16].stack()
tm.assert_almost_equal(sliced.values, expected.values)
slob = slice(*idx.slice_locs(df.index[5] + timedelta(seconds=30),
df.index[15] - timedelta(seconds=30)))
sliced = stacked[slob]
expected = df[6:15].stack()
tm.assert_almost_equal(sliced.values, expected.values)
def test_slice_locs_with_type_mismatch(self):
df = tm.makeTimeDataFrame()
stacked = df.stack()
idx = stacked.index
tm.assert_raises_regex(TypeError, '^Level type mismatch',
idx.slice_locs, (1, 3))
tm.assert_raises_regex(TypeError, '^Level type mismatch',
idx.slice_locs,
df.index[5] + timedelta(
seconds=30), (5, 2))
df = tm.makeCustomDataframe(5, 5)
stacked = df.stack()
idx = stacked.index
with tm.assert_raises_regex(TypeError, '^Level type mismatch'):
idx.slice_locs(timedelta(seconds=30))
# TODO: Try creating a UnicodeDecodeError in exception message
with tm.assert_raises_regex(TypeError, '^Level type mismatch'):
idx.slice_locs(df.index[1], (16, "a"))
def test_slice_locs_not_sorted(self):
index = MultiIndex(levels=[Index(lrange(4)), Index(lrange(4)), Index(
lrange(4))], labels=[np.array([0, 0, 1, 2, 2, 2, 3, 3]), np.array(
[0, 1, 0, 0, 0, 1, 0, 1]), np.array([1, 0, 1, 1, 0, 0, 1, 0])])
tm.assert_raises_regex(KeyError, "[Kk]ey length.*greater than "
"MultiIndex lexsort depth",
index.slice_locs, (1, 0, 1), (2, 1, 0))
# works once the index is sorted; smoke test that slice_locs no longer raises
sorted_index, _ = index.sortlevel(0)
sorted_index.slice_locs((1, 0, 1), (2, 1, 0))
def test_slice_locs_partial(self):
sorted_idx, _ = self.index.sortlevel(0)
result = sorted_idx.slice_locs(('foo', 'two'), ('qux', 'one'))
assert result == (1, 5)
result = sorted_idx.slice_locs(None, ('qux', 'one'))
assert result == (0, 5)
result = sorted_idx.slice_locs(('foo', 'two'), None)
assert result == (1, len(sorted_idx))
result = sorted_idx.slice_locs('bar', 'baz')
assert result == (2, 4)
def test_slice_locs_not_contained(self):
# keys that are absent from the index fall back to their searchsorted positions
index = MultiIndex(levels=[[0, 2, 4, 6], [0, 2, 4]],
labels=[[0, 0, 0, 1, 1, 2, 3, 3, 3],
[0, 1, 2, 1, 2, 2, 0, 1, 2]], sortorder=0)
result = index.slice_locs((1, 0), (5, 2))
assert result == (3, 6)
result = index.slice_locs(1, 5)
assert result == (3, 6)
result = index.slice_locs((2, 2), (5, 2))
assert result == (3, 6)
result = index.slice_locs(2, 5)
assert result == (3, 6)
result = index.slice_locs((1, 0), (6, 3))
assert result == (3, 8)
result = index.slice_locs(-1, 10)
assert result == (0, len(index))
def test_consistency(self):
# need to construct an overflow
major_axis = lrange(70000)
minor_axis = lrange(10)
major_labels = np.arange(70000)
minor_labels = np.repeat(lrange(10), 7000)
# the fact that it works means it's consistent
index = MultiIndex(levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels])
# inconsistent
major_labels = np.array([0, 0, 1, 1, 1, 2, 2, 3, 3])
minor_labels = np.array([0, 1, 0, 1, 1, 0, 1, 0, 1])
index = MultiIndex(levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels])
assert not index.is_unique
def test_truncate(self):
major_axis = Index(lrange(4))
minor_axis = Index(lrange(2))
major_labels = np.array([0, 0, 1, 2, 3, 3])
minor_labels = np.array([0, 1, 0, 1, 0, 1])
index = MultiIndex(levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels])
result = index.truncate(before=1)
assert 'foo' not in result.levels[0]
assert 1 in result.levels[0]
result = index.truncate(after=1)
assert 2 not in result.levels[0]
assert 1 in result.levels[0]
result = index.truncate(before=1, after=2)
assert len(result.levels[0]) == 2
# after < before
pytest.raises(ValueError, index.truncate, 3, 1)
def test_get_indexer(self):
major_axis = Index(lrange(4))
minor_axis = Index(lrange(2))
major_labels = np.array([0, 0, 1, 2, 2, 3, 3], dtype=np.intp)
minor_labels = np.array([0, 1, 0, 0, 1, 0, 1], dtype=np.intp)
index = MultiIndex(levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels])
idx1 = index[:5]
idx2 = index[[1, 3, 5]]
r1 = idx1.get_indexer(idx2)
assert_almost_equal(r1, np.array([1, 3, -1], dtype=np.intp))
r1 = idx2.get_indexer(idx1, method='pad')
e1 = np.array([-1, 0, 0, 1, 1], dtype=np.intp)
assert_almost_equal(r1, e1)
r2 = idx2.get_indexer(idx1[::-1], method='pad')
assert_almost_equal(r2, e1[::-1])
rffill1 = idx2.get_indexer(idx1, method='ffill')
assert_almost_equal(r1, rffill1)
r1 = idx2.get_indexer(idx1, method='backfill')
e1 = np.array([0, 0, 1, 1, 2], dtype=np.intp)
assert_almost_equal(r1, e1)
r2 = idx2.get_indexer(idx1[::-1], method='backfill')
assert_almost_equal(r2, e1[::-1])
rbfill1 = idx2.get_indexer(idx1, method='bfill')
assert_almost_equal(r1, rbfill1)
# pass non-MultiIndex
r1 = idx1.get_indexer(idx2.values)
rexp1 = idx1.get_indexer(idx2)
assert_almost_equal(r1, rexp1)
r1 = idx1.get_indexer([1, 2, 3])
assert (r1 == [-1, -1, -1]).all()
# create index with duplicates
idx1 = Index(lrange(10) + lrange(10))
idx2 = Index(lrange(20))
msg = "Reindexing only valid with uniquely valued Index objects"
with tm.assert_raises_regex(InvalidIndexError, msg):
idx1.get_indexer(idx2)
def test_get_indexer_nearest(self):
midx = MultiIndex.from_tuples([('a', 1), ('b', 2)])
with pytest.raises(NotImplementedError):
midx.get_indexer(['a'], method='nearest')
with pytest.raises(NotImplementedError):
midx.get_indexer(['a'], method='pad', tolerance=2)
def test_hash_collisions(self):
# non-smoke test that we don't get hash collisions
index = MultiIndex.from_product([np.arange(1000), np.arange(1000)],
names=['one', 'two'])
result = index.get_indexer(index.values)
tm.assert_numpy_array_equal(result, np.arange(
len(index), dtype='intp'))
for i in [0, 1, len(index) - 2, len(index) - 1]:
result = index.get_loc(index[i])
assert result == i
def test_format(self):
self.index.format()
self.index[:0].format()
def test_format_integer_names(self):
index = MultiIndex(levels=[[0, 1], [0, 1]],
labels=[[0, 0, 1, 1], [0, 1, 0, 1]], names=[0, 1])
index.format(names=True)
def test_format_sparse_display(self):
index = MultiIndex(levels=[[0, 1], [0, 1], [0, 1], [0]],
labels=[[0, 0, 0, 1, 1, 1], [0, 0, 1, 0, 0, 1],
[0, 1, 0, 0, 1, 0], [0, 0, 0, 0, 0, 0]])
result = index.format()
assert result[3] == '1 0 0 0'
def test_format_sparse_config(self):
warn_filters = warnings.filters
warnings.filterwarnings('ignore', category=FutureWarning,
module=".*format")
# GH1538
pd.set_option('display.multi_sparse', False)
result = self.index.format()
assert result[1] == 'foo two'
tm.reset_display_options()
warnings.filters = warn_filters
def test_to_frame(self):
tuples = [(1, 'one'), (1, 'two'), (2, 'one'), (2, 'two')]
index = MultiIndex.from_tuples(tuples)
result = index.to_frame(index=False)
expected = DataFrame(tuples)
tm.assert_frame_equal(result, expected)
result = index.to_frame()
expected.index = index
tm.assert_frame_equal(result, expected)
tuples = [(1, 'one'), (1, 'two'), (2, 'one'), (2, 'two')]
index = MultiIndex.from_tuples(tuples, names=['first', 'second'])
result = index.to_frame(index=False)
expected = DataFrame(tuples)
expected.columns = ['first', 'second']
tm.assert_frame_equal(result, expected)
result = index.to_frame()
expected.index = index
tm.assert_frame_equal(result, expected)
index = MultiIndex.from_product([range(5),
pd.date_range('20130101', periods=3)])
result = index.to_frame(index=False)
expected = DataFrame(
{0: np.repeat(np.arange(5, dtype='int64'), 3),
1: np.tile(pd.date_range('20130101', periods=3), 5)})
tm.assert_frame_equal(result, expected)
index = MultiIndex.from_product([range(5),
pd.date_range('20130101', periods=3)])
result = index.to_frame()
expected.index = index
tm.assert_frame_equal(result, expected)
def test_to_hierarchical(self):
index = MultiIndex.from_tuples([(1, 'one'), (1, 'two'), (2, 'one'), (
2, 'two')])
result = index.to_hierarchical(3)
expected = MultiIndex(levels=[[1, 2], ['one', 'two']],
labels=[[0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1],
[0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1]])
tm.assert_index_equal(result, expected)
assert result.names == index.names
# K > 1
result = index.to_hierarchical(3, 2)
expected = MultiIndex(levels=[[1, 2], ['one', 'two']],
labels=[[0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1],
[0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]])
tm.assert_index_equal(result, expected)
assert result.names == index.names
# non-sorted
index = MultiIndex.from_tuples([(2, 'c'), (1, 'b'),
(2, 'a'), (2, 'b')],
names=['N1', 'N2'])
result = index.to_hierarchical(2)
expected = MultiIndex.from_tuples([(2, 'c'), (2, 'c'), (1, 'b'),
(1, 'b'),
(2, 'a'), (2, 'a'),
(2, 'b'), (2, 'b')],
names=['N1', 'N2'])
tm.assert_index_equal(result, expected)
assert result.names == index.names
def test_bounds(self):
self.index._bounds
def test_equals_multi(self):
assert self.index.equals(self.index)
assert not self.index.equals(self.index.values)
assert self.index.equals(Index(self.index.values))
assert self.index.equal_levels(self.index)
assert not self.index.equals(self.index[:-1])
assert not self.index.equals(self.index[-1])
# different number of levels
index = MultiIndex(levels=[Index(lrange(4)), Index(lrange(4)), Index(
lrange(4))], labels=[np.array([0, 0, 1, 2, 2, 2, 3, 3]), np.array(
[0, 1, 0, 0, 0, 1, 0, 1]), np.array([1, 0, 1, 1, 0, 0, 1, 0])])
index2 = MultiIndex(levels=index.levels[:-1], labels=index.labels[:-1])
assert not index.equals(index2)
assert not index.equal_levels(index2)
# levels are different
major_axis = Index(lrange(4))
minor_axis = Index(lrange(2))
major_labels = np.array([0, 0, 1, 2, 2, 3])
minor_labels = np.array([0, 1, 0, 0, 1, 0])
index = MultiIndex(levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels])
assert not self.index.equals(index)
assert not self.index.equal_levels(index)
# some of the labels are different
major_axis = Index(['foo', 'bar', 'baz', 'qux'])
minor_axis = Index(['one', 'two'])
major_labels = np.array([0, 0, 2, 2, 3, 3])
minor_labels = np.array([0, 1, 0, 1, 0, 1])
index = MultiIndex(levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels])
assert not self.index.equals(index)
def test_equals_missing_values(self):
# make sure take is not using -1
i = pd.MultiIndex.from_tuples([(0, pd.NaT),
(0, pd.Timestamp('20130101'))])
result = i[0:1].equals(i[0])
assert not result
result = i[1:2].equals(i[1])
assert not result
def test_identical(self):
mi = self.index.copy()
mi2 = self.index.copy()
assert mi.identical(mi2)
mi = mi.set_names(['new1', 'new2'])
assert mi.equals(mi2)
assert not mi.identical(mi2)
mi2 = mi2.set_names(['new1', 'new2'])
assert mi.identical(mi2)
mi3 = Index(mi.tolist(), names=mi.names)
mi4 = Index(mi.tolist(), names=mi.names, tupleize_cols=False)
assert mi.identical(mi3)
assert not mi.identical(mi4)
assert mi.equals(mi4)
def test_is_(self):
mi = MultiIndex.from_tuples(lzip(range(10), range(10)))
assert mi.is_(mi)
assert mi.is_(mi.view())
assert mi.is_(mi.view().view().view().view())
mi2 = mi.view()
# names are metadata, they don't change id
mi2.names = ["A", "B"]
assert mi2.is_(mi)
assert mi.is_(mi2)
assert mi.is_(mi.set_names(["C", "D"]))
mi2 = mi.view()
mi2.set_names(["E", "F"], inplace=True)
assert mi.is_(mi2)
# levels are inherent properties, they change identity
mi3 = mi2.set_levels([lrange(10), lrange(10)])
assert not mi3.is_(mi2)
# shouldn't change
assert mi2.is_(mi)
mi4 = mi3.view()
# GH 17464 - Remove duplicate MultiIndex levels
mi4.set_levels([lrange(10), lrange(10)], inplace=True)
assert not mi4.is_(mi3)
mi5 = mi.view()
mi5.set_levels(mi5.levels, inplace=True)
assert not mi5.is_(mi)
def test_union(self):
piece1 = self.index[:5][::-1]
piece2 = self.index[3:]
the_union = piece1 | piece2
tups = sorted(self.index.values)
expected = MultiIndex.from_tuples(tups)
assert the_union.equals(expected)
# corner case, pass self or empty thing:
the_union = self.index.union(self.index)
assert the_union is self.index
the_union = self.index.union(self.index[:0])
assert the_union is self.index
# won't work in python 3
# tuples = self.index.values
# result = self.index[:4] | tuples[4:]
# assert result.equals(tuples)
# not valid for python 3
# def test_union_with_regular_index(self):
# other = Index(['A', 'B', 'C'])
# result = other.union(self.index)
# assert ('foo', 'one') in result
# assert 'B' in result
# result2 = self.index.union(other)
# assert result.equals(result2)
def test_intersection(self):
piece1 = self.index[:5][::-1]
piece2 = self.index[3:]
the_int = piece1 & piece2
tups = sorted(self.index[3:5].values)
expected = MultiIndex.from_tuples(tups)
assert the_int.equals(expected)
# corner case, pass self
the_int = self.index.intersection(self.index)
assert the_int is self.index
# empty intersection: disjoint
empty = self.index[:2] & self.index[2:]
expected = self.index[:0]
assert empty.equals(expected)
# can't do in python 3
# tuples = self.index.values
# result = self.index & tuples
# assert result.equals(tuples)
def test_sub(self):
first = self.index
# - now raises (previously was set op difference)
with pytest.raises(TypeError):
first - self.index[-3:]
with pytest.raises(TypeError):
self.index[-3:] - first
with pytest.raises(TypeError):
self.index[-3:] - first.tolist()
with pytest.raises(TypeError):
first.tolist() - self.index[-3:]
def test_difference(self):
first = self.index
result = first.difference(self.index[-3:])
expected = MultiIndex.from_tuples(sorted(self.index[:-3].values),
sortorder=0,
names=self.index.names)
assert isinstance(result, MultiIndex)
assert result.equals(expected)
assert result.names == self.index.names
# empty difference: reflexive
result = self.index.difference(self.index)
expected = self.index[:0]
assert result.equals(expected)
assert result.names == self.index.names
# empty difference: superset
result = self.index[-3:].difference(self.index)
expected = self.index[:0]
assert result.equals(expected)
assert result.names == self.index.names
# empty difference: degenerate
result = self.index[:0].difference(self.index)
expected = self.index[:0]
assert result.equals(expected)
assert result.names == self.index.names
# names not the same
chunklet = self.index[-3:]
chunklet.names = ['foo', 'baz']
result = first.difference(chunklet)
assert result.names == (None, None)
# empty, but non-equal
result = self.index.difference(self.index.sortlevel(1)[0])
assert len(result) == 0
# calling with a non-MultiIndex array of tuples is allowed and treated like a MultiIndex
result = first.difference(first.values)
assert result.equals(first[:0])
# name from empty array
result = first.difference([])
assert first.equals(result)
assert first.names == result.names
# name from non-empty array
result = first.difference([('foo', 'one')])
expected = pd.MultiIndex.from_tuples([('bar', 'one'), ('baz', 'two'), (
'foo', 'two'), ('qux', 'one'), ('qux', 'two')])
expected.names = first.names
assert first.names == result.names
tm.assert_raises_regex(TypeError, "other must be a MultiIndex "
"or a list of tuples",
first.difference, [1, 2, 3, 4, 5])
def test_from_tuples(self):
tm.assert_raises_regex(TypeError, 'Cannot infer number of levels '
'from empty list',
MultiIndex.from_tuples, [])
expected = MultiIndex(levels=[[1, 3], [2, 4]],
labels=[[0, 1], [0, 1]],
names=['a', 'b'])
# input tuples
result = MultiIndex.from_tuples(((1, 2), (3, 4)), names=['a', 'b'])
tm.assert_index_equal(result, expected)
def test_from_tuples_iterator(self):
# GH 18434
# input iterator for tuples
expected = MultiIndex(levels=[[1, 3], [2, 4]],
labels=[[0, 1], [0, 1]],
names=['a', 'b'])
result = MultiIndex.from_tuples(zip([1, 3], [2, 4]), names=['a', 'b'])
tm.assert_index_equal(result, expected)
# input non-iterables
with tm.assert_raises_regex(
TypeError, 'Input must be a list / sequence of tuple-likes.'):
MultiIndex.from_tuples(0)
def test_from_tuples_empty(self):
# GH 16777
result = MultiIndex.from_tuples([], names=['a', 'b'])
expected = MultiIndex.from_arrays(arrays=[[], []],
names=['a', 'b'])
tm.assert_index_equal(result, expected)
def test_argsort(self):
result = self.index.argsort()
expected = self.index.values.argsort()
tm.assert_numpy_array_equal(result, expected)
def test_sortlevel(self):
import random
tuples = list(self.index)
random.shuffle(tuples)
index = MultiIndex.from_tuples(tuples)
sorted_idx, _ = index.sortlevel(0)
expected = MultiIndex.from_tuples(sorted(tuples))
assert sorted_idx.equals(expected)
sorted_idx, _ = index.sortlevel(0, ascending=False)
assert sorted_idx.equals(expected[::-1])
sorted_idx, _ = index.sortlevel(1)
by1 = sorted(tuples, key=lambda x: (x[1], x[0]))
expected = MultiIndex.from_tuples(by1)
assert sorted_idx.equals(expected)
sorted_idx, _ = index.sortlevel(1, ascending=False)
assert sorted_idx.equals(expected[::-1])
def test_sortlevel_not_sort_remaining(self):
mi = MultiIndex.from_tuples([[1, 1, 3], [1, 1, 1]], names=list('ABC'))
sorted_idx, _ = mi.sortlevel('A', sort_remaining=False)
assert sorted_idx.equals(mi)
def test_sortlevel_deterministic(self):
tuples = [('bar', 'one'), ('foo', 'two'), ('qux', 'two'),
('foo', 'one'), ('baz', 'two'), ('qux', 'one')]
index = MultiIndex.from_tuples(tuples)
sorted_idx, _ = index.sortlevel(0)
expected = MultiIndex.from_tuples(sorted(tuples))
assert sorted_idx.equals(expected)
sorted_idx, _ = index.sortlevel(0, ascending=False)
assert sorted_idx.equals(expected[::-1])
sorted_idx, _ = index.sortlevel(1)
by1 = sorted(tuples, key=lambda x: (x[1], x[0]))
expected = MultiIndex.from_tuples(by1)
assert sorted_idx.equals(expected)
sorted_idx, _ = index.sortlevel(1, ascending=False)
assert sorted_idx.equals(expected[::-1])
def test_dims(self):
pass
def test_drop(self):
dropped = self.index.drop([('foo', 'two'), ('qux', 'one')])
index = MultiIndex.from_tuples([('foo', 'two'), ('qux', 'one')])
dropped2 = self.index.drop(index)
expected = self.index[[0, 2, 3, 5]]
tm.assert_index_equal(dropped, expected)
tm.assert_index_equal(dropped2, expected)
dropped = self.index.drop(['bar'])
expected = self.index[[0, 1, 3, 4, 5]]
tm.assert_index_equal(dropped, expected)
dropped = self.index.drop('foo')
expected = self.index[[2, 3, 4, 5]]
tm.assert_index_equal(dropped, expected)
index = MultiIndex.from_tuples([('bar', 'two')])
pytest.raises(KeyError, self.index.drop, [('bar', 'two')])
pytest.raises(KeyError, self.index.drop, index)
pytest.raises(KeyError, self.index.drop, ['foo', 'two'])
# partially correct argument
mixed_index = MultiIndex.from_tuples([('qux', 'one'), ('bar', 'two')])
pytest.raises(KeyError, self.index.drop, mixed_index)
# error='ignore'
dropped = self.index.drop(index, errors='ignore')
expected = self.index[[0, 1, 2, 3, 4, 5]]
tm.assert_index_equal(dropped, expected)
dropped = self.index.drop(mixed_index, errors='ignore')
expected = self.index[[0, 1, 2, 3, 5]]
tm.assert_index_equal(dropped, expected)
dropped = self.index.drop(['foo', 'two'], errors='ignore')
expected = self.index[[2, 3, 4, 5]]
tm.assert_index_equal(dropped, expected)
# mixed partial / full drop
dropped = self.index.drop(['foo', ('qux', 'one')])
expected = self.index[[2, 3, 5]]
tm.assert_index_equal(dropped, expected)
# mixed partial / full drop / error='ignore'
mixed_index = ['foo', ('qux', 'one'), 'two']
pytest.raises(KeyError, self.index.drop, mixed_index)
dropped = self.index.drop(mixed_index, errors='ignore')
expected = self.index[[2, 3, 5]]
tm.assert_index_equal(dropped, expected)
def test_droplevel_with_names(self):
index = self.index[self.index.get_loc('foo')]
dropped = index.droplevel(0)
assert dropped.name == 'second'
index = MultiIndex(
levels=[Index(lrange(4)), Index(lrange(4)), Index(lrange(4))],
labels=[np.array([0, 0, 1, 2, 2, 2, 3, 3]), np.array(
[0, 1, 0, 0, 0, 1, 0, 1]), np.array([1, 0, 1, 1, 0, 0, 1, 0])],
names=['one', 'two', 'three'])
dropped = index.droplevel(0)
assert dropped.names == ('two', 'three')
dropped = index.droplevel('two')
expected = index.droplevel(1)
assert dropped.equals(expected)
def test_droplevel_list(self):
index = MultiIndex(
levels=[Index(lrange(4)), Index(lrange(4)), Index(lrange(4))],
labels=[np.array([0, 0, 1, 2, 2, 2, 3, 3]), np.array(
[0, 1, 0, 0, 0, 1, 0, 1]), np.array([1, 0, 1, 1, 0, 0, 1, 0])],
names=['one', 'two', 'three'])
dropped = index[:2].droplevel(['three', 'one'])
expected = index[:2].droplevel(2).droplevel(0)
assert dropped.equals(expected)
dropped = index[:2].droplevel([])
expected = index[:2]
assert dropped.equals(expected)
with pytest.raises(ValueError):
index[:2].droplevel(['one', 'two', 'three'])
with pytest.raises(KeyError):
index[:2].droplevel(['one', 'four'])
def test_drop_not_lexsorted(self):
# GH 12078
# define the lexsorted version of the multi-index
tuples = [('a', ''), ('b1', 'c1'), ('b2', 'c2')]
lexsorted_mi = MultiIndex.from_tuples(tuples, names=['b', 'c'])
assert lexsorted_mi.is_lexsorted()
# and the not-lexsorted version
df = pd.DataFrame(columns=['a', 'b', 'c', 'd'],
data=[[1, 'b1', 'c1', 3], [1, 'b2', 'c2', 4]])
df = df.pivot_table(index='a', columns=['b', 'c'], values='d')
df = df.reset_index()
not_lexsorted_mi = df.columns
assert not not_lexsorted_mi.is_lexsorted()
# compare the results
tm.assert_index_equal(lexsorted_mi, not_lexsorted_mi)
with tm.assert_produces_warning(PerformanceWarning):
tm.assert_index_equal(lexsorted_mi.drop('a'),
not_lexsorted_mi.drop('a'))
def test_insert(self):
# key contained in all levels
new_index = self.index.insert(0, ('bar', 'two'))
assert new_index.equal_levels(self.index)
assert new_index[0] == ('bar', 'two')
# key not contained in all levels
new_index = self.index.insert(0, ('abc', 'three'))
exp0 = Index(list(self.index.levels[0]) + ['abc'], name='first')
tm.assert_index_equal(new_index.levels[0], exp0)
exp1 = Index(list(self.index.levels[1]) + ['three'], name='second')
tm.assert_index_equal(new_index.levels[1], exp1)
assert new_index[0] == ('abc', 'three')
# key wrong length
msg = "Item must have length equal to number of levels"
with tm.assert_raises_regex(ValueError, msg):
self.index.insert(0, ('foo2',))
left = pd.DataFrame([['a', 'b', 0], ['b', 'd', 1]],
columns=['1st', '2nd', '3rd'])
left.set_index(['1st', '2nd'], inplace=True)
ts = left['3rd'].copy(deep=True)
left.loc[('b', 'x'), '3rd'] = 2
left.loc[('b', 'a'), '3rd'] = -1
left.loc[('b', 'b'), '3rd'] = 3
left.loc[('a', 'x'), '3rd'] = 4
left.loc[('a', 'w'), '3rd'] = 5
left.loc[('a', 'a'), '3rd'] = 6
ts.loc[('b', 'x')] = 2
ts.loc['b', 'a'] = -1
ts.loc[('b', 'b')] = 3
ts.loc['a', 'x'] = 4
ts.loc[('a', 'w')] = 5
ts.loc['a', 'a'] = 6
right = pd.DataFrame([['a', 'b', 0], ['b', 'd', 1], ['b', 'x', 2],
['b', 'a', -1], ['b', 'b', 3], ['a', 'x', 4],
['a', 'w', 5], ['a', 'a', 6]],
columns=['1st', '2nd', '3rd'])
right.set_index(['1st', '2nd'], inplace=True)
# FIXME: data types change to float because
# of intermediate nan insertion;
tm.assert_frame_equal(left, right, check_dtype=False)
tm.assert_series_equal(ts, right['3rd'])
# GH9250
idx = [('test1', i) for i in range(5)] + \
[('test2', i) for i in range(6)] + \
[('test', 17), ('test', 18)]
left = pd.Series(np.linspace(0, 10, 11),
pd.MultiIndex.from_tuples(idx[:-2]))
left.loc[('test', 17)] = 11
left.loc[('test', 18)] = 12
right = pd.Series(np.linspace(0, 12, 13),
pd.MultiIndex.from_tuples(idx))
tm.assert_series_equal(left, right)
def test_take_preserve_name(self):
taken = self.index.take([3, 0, 1])
assert taken.names == self.index.names
def test_take_fill_value(self):
# GH 12631
vals = [['A', 'B'],
[pd.Timestamp('2011-01-01'), pd.Timestamp('2011-01-02')]]
idx = pd.MultiIndex.from_product(vals, names=['str', 'dt'])
result = idx.take(np.array([1, 0, -1]))
exp_vals = [('A', pd.Timestamp('2011-01-02')),
('A', pd.Timestamp('2011-01-01')),
('B', pd.Timestamp('2011-01-02'))]
expected = pd.MultiIndex.from_tuples(exp_vals, names=['str', 'dt'])
tm.assert_index_equal(result, expected)
# fill_value
result = idx.take(np.array([1, 0, -1]), fill_value=True)
exp_vals = [('A', pd.Timestamp('2011-01-02')),
('A', pd.Timestamp('2011-01-01')),
(np.nan, pd.NaT)]
expected = pd.MultiIndex.from_tuples(exp_vals, names=['str', 'dt'])
tm.assert_index_equal(result, expected)
# allow_fill=False
result = idx.take(np.array([1, 0, -1]), allow_fill=False, fill_value=True)
'''
Created: 2022/02/11
Maintainer: <NAME> <<EMAIL>>
adapted from https://github.com/ethz-asl/robotcar_tools/blob/master/python/interpolate_poses.py
LICENSE: MIT
'''
import argparse
import bisect
import numpy as np
import transformations as tfs
def main(kf_pose_fname, timestamps_fname):
kf_pose = np.genfromtxt(kf_pose_fname)
timestamps = np.genfromtxt(timestamps_fname)
timestamps = np.squeeze(timestamps[:, 1]) * 1e6
pose_timestamps = kf_pose[:, 0] * 1e6
abs_poses = kf_pose[:, 1:]
interpolated_pose = interpolate_poses(pose_timestamps.astype(np.int64).tolist(), abs_poses,
timestamps.astype(np.int64).tolist(), int(timestamps[0]))
np.savetxt(kf_pose_fname.replace(".txt", "_interp.txt"), np.array(interpolated_pose), fmt="%1.6f")
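# Assumed input format (inferred from the loading code in main, not stated in
# the original script): each row of kf_pose_fname is
# [timestamp_s, x, y, z, q0, q1, q2, q3] and the second column of
# timestamps_fname holds the query times in seconds.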
def interpolate_poses(pose_timestamps, abs_poses, requested_timestamps, origin_timestamp):
"""Interpolate between absolute poses.
Args:
pose_timestamps (list[int]): Timestamps of supplied poses. Must be in ascending order.
abs_poses (list[numpy.matrixlib.defmatrix.matrix]): SE3 matrices representing poses at the timestamps specified.
requested_timestamps (list[int]): Timestamps for which interpolated timestamps are required.
origin_timestamp (int): UNIX timestamp of origin frame. Poses will be reported relative to this frame.
Returns:
list[numpy.matrixlib.defmatrix.matrix]: SE3 matrix representing interpolated pose for each requested timestamp.
Raises:
ValueError: if pose_timestamps and abs_poses are not the same length
ValueError: if pose_timestamps is not in ascending order
"""
requested_timestamps.insert(0, origin_timestamp)
requested_timestamps = np.array(requested_timestamps)
pose_timestamps = np.array(pose_timestamps)
if len(pose_timestamps) != len(abs_poses):
raise ValueError('Must supply same number of timestamps as poses')
abs_quaternions = np.zeros((4, len(abs_poses)))
abs_positions = np.zeros((3, len(abs_poses)))
for i, pose in enumerate(abs_poses):
if i > 0 and pose_timestamps[i - 1] >= pose_timestamps[i]:
raise ValueError('Pose timestamps must be in ascending order')
abs_quaternions[:, i] = pose[
3:] # np.roll(pose[3:], -1) uncomment this if the quaternion is saved as [w, x, y, z]
abs_positions[:, i] = pose[:3]
upper_indices = [bisect.bisect(pose_timestamps, pt) for pt in requested_timestamps]
lower_indices = [u - 1 for u in upper_indices]
if max(upper_indices) >= len(pose_timestamps):
upper_indices = [min(i, len(pose_timestamps) - 1) for i in upper_indices]
fractions = (requested_timestamps - pose_timestamps[lower_indices]) / \
            (pose_timestamps[upper_indices] - pose_timestamps[lower_indices])
quaternions_lower = abs_quaternions[:, lower_indices]
quaternions_upper = abs_quaternions[:, upper_indices]
d_array = (quaternions_lower * quaternions_upper).sum(0)
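# d_array holds the per-pair quaternion dot products, i.e. cos(theta). Pairs
# that are (nearly) parallel get plain linear weights below; the rest get the
# slerp weights sin((1 - f) * theta) / sin(theta) and
# sin(f * theta) / sin(theta), with the sign flipped for negative dot products
# so the interpolation takes the shorter arc.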
linear_interp_indices = np.nonzero(d_array >= 1)
sin_interp_indices = np.nonzero(d_array < 1)
scale0_array = np.zeros(d_array.shape)
scale1_array = np.zeros(d_array.shape)
scale0_array[linear_interp_indices] = 1 - fractions[linear_interp_indices]
scale1_array[linear_interp_indices] = fractions[linear_interp_indices]
theta_array = np.arccos(np.abs(d_array[sin_interp_indices]))
scale0_array[sin_interp_indices] = \
np.sin((1 - fractions[sin_interp_indices]) * theta_array) / np.sin(theta_array)
scale1_array[sin_interp_indices] = \
np.sin(fractions[sin_interp_indices] * theta_array) / np.sin(theta_array)
negative_d_indices = np.nonzero(d_array < 0)
scale1_array[negative_d_indices] = -scale1_array[negative_d_indices]
quaternions_interp = np.tile(scale0_array, (4, 1)) * quaternions_lower \
+ np.tile(scale1_array, (4, 1)) * quaternions_upper
positions_lower = abs_positions[:, lower_indices]
positions_upper = abs_positions[:, upper_indices]
positions_interp = np.multiply(np.tile((1 - fractions), (3, 1)), positions_lower) \
+ np.multiply(np.tile(fractions, (3, 1)), positions_upper)
poses_mat = np.zeros((4, 4 * len(requested_timestamps)))
poses_mat[0, 0::4] = 1 - 2 * np.square(quaternions_interp[2, :]) - \
                     2 * np.square(quaternions_interp[3, :])
import codecs
import json
import pickle
import os
import string
import sys
import numpy as np
cwd = os.getcwd()
sys.path.append(cwd)
import tqdm
def get_desc(path):
table = str.maketrans('', '', string.punctuation)
with open(path, 'r', encoding='utf8') as f:
desc = f.read()
# Tokenize
desc = desc.split()
# Convert to lower case
desc = [word.lower() for word in desc]
# Remove punctuation
desc = [word.translate(table) for word in desc]
# Removing hanging letters
desc = [word for word in desc if len(word) > 1]
# Remove tokens with numbers in them
desc = [word for word in desc if word.isalpha()]
desc_final = 'startseq ' + ' '.join(desc) + ' endseq'
return desc_final
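# Illustrative example (hypothetical input, not part of the original script):
# get_desc on a file containing "A red, red dress!" yields
# "startseq red red dress endseq" after lower-casing, stripping punctuation
# and dropping single-letter tokens.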
def add_to_vocab(vocabulary, desc):
for word in desc.split(' '):
vocabulary[word] = vocabulary.get(word, 0) + 1
return vocabulary
def create_unique_vocab(vocabulary, threshold):
unique_vocab = {word: count for word, count in vocabulary.items()
                if count >= threshold}
return unique_vocab
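# Illustrative example: with vocabulary = {'red': 5, 'dress': 2, 'xqzt': 1}
# and threshold 2, create_unique_vocab returns {'red': 5, 'dress': 2}.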
# for folder in ["Urban_Outfitters", "RALPH_LAUREN"]:
# var_path = os.path.join(cwd, "variables", folder)
# with open(os.path.join(var_path, "dataset_class.p"), 'rb') as f:
# dc = pickle.load(f)
# dc.vocab_options = {"threshold": 10}
# print("building_vocab")
# # Rebuild vocabulary
# dc.build_vocabulary()
#
# # Build embedding
# dc.get_embeddings()
# Redo the vocabulary and embeddings for each webshop below.
for webshop_name in ["RALPH_LAUREN", "Urban_Outfitters"]:
var_path = os.path.join(cwd, "variables", webshop_name)
desc_path = os.path.join(cwd, "Datasets", webshop_name, "Descriptions")
train_imgs = []
main_img_path = os.path.join(cwd, "Datasets", webshop_name, "resized_imgs")
for cat in os.listdir(os.path.join(main_img_path, "TRAIN")):
for img in os.listdir(os.path.join(main_img_path, "TRAIN", cat)):
train_imgs.append(img[:-4] + ".p")
unique_descs = []
for i in train_imgs:
if i[:-4] not in unique_descs:
unique_descs.append(i[:-4] + ".txt")
train_descs = [os.path.join(desc_path, x) for x in unique_descs]
for threshold_value in [1, 3]:
vocabulary = {}
for path in tqdm.tqdm(train_descs):
try:
desc_final = get_desc(path)
except FileNotFoundError:
print(path)
continue
vocabulary = add_to_vocab(vocabulary, desc_final)
# Save the vocabulary as a variable in case we want to reuse it
with open(os.path.join(cwd, "variables", webshop_name, f"full_vocab_{threshold_value}.json"), 'w') as f:
json.dump(vocabulary, f)
# Remove words which occur infrequently
unique_vocab = create_unique_vocab(vocabulary, threshold_value)
# remove normal vocab, it is no longer needed
# del self.vocabulary
# Get the ixtword and wordtoix variables
ix = 1
wordtoix = {}
ixtoword = {}
for word in unique_vocab:
wordtoix[word] = ix
ixtoword[ix] = word
ix += 1
vocab_size = len(wordtoix) + 1
# save these dictionaries
with open(os.path.join(cwd, "variables", webshop_name,
"wordtoix_thr{}.json".format(threshold_value)), 'w') as f:
json.dump(wordtoix, f)
with open(os.path.join(cwd, "variables", webshop_name,
"ixtoword_thr{}.json".format(threshold_value)), 'w') as f:
json.dump(ixtoword, f)
# Get embeddings:
embedding_dict_glove = {
"glove_300d_wiki": os.path.join(cwd, "variables", "glove.6B.300d.txt"),
}
embedding_dict_fasttext = {
"fasttext_300d_wiki": os.path.join(cwd, "variables", "fasttext-wiki-news-300d-1M.vec")
}
# Get glove embeddings
for key, value in embedding_dict_glove.items():
print(key, value)
# Initialize the embedding_matrix
if "300d" in value:
embedding_dim = 300
elif "200d" in value:
embedding_dim = 200
else:
raise ValueError("This embedding dimension size is unsupported..")
print("Starting the embedding of new glove vectors: {}".format(key))
embeddings_index = {}
# build the index based on the known words
f = open(value, encoding='UTF-8')
for line in f:
try:
values = line.split()
word = values[0]
coefs = np.asarray(values[1:], dtype='float32')
embeddings_index[word] = coefs
except ValueError as e:
print(str(e), " LINE IS: ", line)
f.close()
embedding_matrix = np.zeros((vocab_size, embedding_dim))
print(embedding_matrix.shape)
# Add the known words to create an embedding matrix which can be used in models
for word, i in wordtoix.items():
embedding_vector = embeddings_index.get(word)
if embedding_vector is not None:
embedding_matrix[i] = embedding_vector
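# Words without a pretrained vector keep their all-zero row, and row 0 stays
# all zeros as well because wordtoix indices start at 1.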
# Save the resulting embeddings:
with open(os.path.join(cwd, "variables", webshop_name,
key + "_thr{}_emb.p".format(threshold_value)), 'wb') as f:
pickle.dump(embedding_matrix, f)
# Get fasttext embeddings
for key, value in embedding_dict_fasttext.items():
print(key, value)
if "300d" in value:
embedding_dim = 300
elif "200d" in value:
embedding_dim = 200
else:
raise ValueError("This embedding dimension size is unsupported..")
print("Starting the embedding of a new fasttext matrix: {}".format(key))
# Build embedding index
embeddings_index = {}
f = codecs.open(value)
for line in f:
try:
values = line.rstrip().rsplit(' ')
word = values[0]
coefs = np.asarray(values[1:], dtype='float32')
"""
Numerical python functions written for compatibility with MATLAB
commands with the same names. Most numerical python functions can be found in
the `numpy` and `scipy` libraries. What remains here is code for performing
spectral computations.
Spectral functions
------------------
`cohere`
Coherence (normalized cross spectral density)
`csd`
Cross spectral density using Welch's average periodogram
`detrend`
Remove the mean or best fit line from an array
`psd`
Power spectral density using Welch's average periodogram
`specgram`
Spectrogram (spectrum over segments of time)
`complex_spectrum`
Return the complex-valued frequency spectrum of a signal
`magnitude_spectrum`
Return the magnitude of the frequency spectrum of a signal
`angle_spectrum`
Return the angle (wrapped phase) of the frequency spectrum of a signal
`phase_spectrum`
Return the phase (unwrapped angle) of the frequency spectrum of a signal
`detrend_mean`
Remove the mean from a line.
`detrend_linear`
Remove the best fit line from a line.
`detrend_none`
Return the original line.
`stride_windows`
Get all windows in an array in a memory-efficient manner
"""
import functools
from numbers import Number
import numpy as np
from matplotlib import _api
import matplotlib.cbook as cbook
from matplotlib import docstring
def window_hanning(x):
"""
Return x times the hanning window of len(x).
See Also
--------
window_none : Another window algorithm.
"""
return np.hanning(len(x))*x
def window_none(x):
"""
No window function; simply return x.
See Also
--------
window_hanning : Another window algorithm.
"""
return x
def detrend(x, key=None, axis=None):
"""
Return x with its trend removed.
Parameters
----------
x : array or sequence
Array or sequence containing the data.
key : {'default', 'constant', 'mean', 'linear', 'none'} or function
The detrending algorithm to use. 'default', 'mean', and 'constant' are
the same as `detrend_mean`. 'linear' is the same as `detrend_linear`.
'none' is the same as `detrend_none`. The default is 'mean'. See the
corresponding functions for more details regarding the algorithms. Can
also be a function that carries out the detrend operation.
axis : int
The axis along which to do the detrending.
See Also
--------
detrend_mean : Implementation of the 'mean' algorithm.
detrend_linear : Implementation of the 'linear' algorithm.
detrend_none : Implementation of the 'none' algorithm.
"""
if key is None or key in ['constant', 'mean', 'default']:
return detrend(x, key=detrend_mean, axis=axis)
elif key == 'linear':
return detrend(x, key=detrend_linear, axis=axis)
elif key == 'none':
return detrend(x, key=detrend_none, axis=axis)
elif callable(key):
x = np.asarray(x)
if axis is not None and axis + 1 > x.ndim:
raise ValueError(f'axis(={axis}) out of bounds')
if (axis is None and x.ndim == 0) or (not axis and x.ndim == 1):
return key(x)
# try to use the 'axis' argument if the function supports it,
# otherwise use apply_along_axis to do it
try:
return key(x, axis=axis)
except TypeError:
return np.apply_along_axis(key, axis=axis, arr=x)
else:
raise ValueError(
f"Unknown value for key: {key!r}, must be one of: 'default', "
f"'constant', 'mean', 'linear', or a function")
def detrend_mean(x, axis=None):
"""
Return x minus the mean(x).
Parameters
----------
x : array or sequence
Array or sequence containing the data
Can have any dimensionality
axis : int
The axis along which to take the mean. See numpy.mean for a
description of this argument.
See Also
--------
detrend_linear : Another detrend algorithm.
detrend_none : Another detrend algorithm.
detrend : A wrapper around all the detrend algorithms.
"""
x = np.asarray(x)
if axis is not None and axis+1 > x.ndim:
raise ValueError('axis(=%s) out of bounds' % axis)
return x - x.mean(axis, keepdims=True)
def detrend_none(x, axis=None):
"""
Return x: no detrending.
Parameters
----------
x : any object
An object containing the data
axis : int
This parameter is ignored.
It is included for compatibility with detrend_mean
See Also
--------
detrend_mean : Another detrend algorithm.
detrend_linear : Another detrend algorithm.
detrend : A wrapper around all the detrend algorithms.
"""
return x
def detrend_linear(y):
"""
Return x minus best fit line; 'linear' detrending.
Parameters
----------
y : 0-D or 1-D array or sequence
Array or sequence containing the data
axis : int
The axis along which to take the mean. See numpy.mean for a
description of this argument.
See Also
--------
detrend_mean : Another detrend algorithm.
detrend_none : Another detrend algorithm.
detrend : A wrapper around all the detrend algorithms.
"""
# This is faster than an algorithm based on linalg.lstsq.
y = np.asarray(y)
if y.ndim > 1:
raise ValueError('y cannot have ndim > 1')
# short-circuit 0-D array.
if not y.ndim:
return np.array(0., dtype=y.dtype)
x = np.arange(y.size, dtype=float)
C = np.cov(x, y, bias=1)
b = C[0, 1]/C[0, 0]
a = y.mean() - b*x.mean()
return y - (b*x + a)
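# Note: b = cov(x, y) / var(x) and a = mean(y) - b * mean(x) are the ordinary
# least-squares slope and intercept, so this matches np.linalg.lstsq on
# [x, ones_like(x)] while avoiding the heavier solver.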
def stride_windows(x, n, noverlap=None, axis=0):
"""
Get all windows of x with length n as a single array,
using strides to avoid data duplication.
.. warning::
It is not safe to write to the output array. Multiple
elements may point to the same piece of memory,
so modifying one value may change others.
Parameters
----------
x : 1D array or sequence
Array or sequence containing the data.
n : int
The number of data points in each window.
noverlap : int, default: 0 (no overlap)
The overlap between adjacent windows.
axis : int
The axis along which the windows will run.
References
----------
`stackoverflow: Rolling window for 1D arrays in Numpy?
<http://stackoverflow.com/a/6811241>`_
`stackoverflow: Using strides for an efficient moving average filter
<http://stackoverflow.com/a/4947453>`_
"""
if noverlap is None:
noverlap = 0
if noverlap >= n:
raise ValueError('noverlap must be less than n')
if n < 1:
raise ValueError('n cannot be less than 1')
x = np.asarray(x)
if x.ndim != 1:
raise ValueError('only 1-dimensional arrays can be used')
if n == 1 and noverlap == 0:
if axis == 0:
return x[np.newaxis]
else:
return x[np.newaxis].transpose()
if n > x.size:
raise ValueError('n cannot be greater than the length of x')
# np.lib.stride_tricks.as_strided easily leads to memory corruption for
# non integer shape and strides, i.e. noverlap or n. See #3845.
noverlap = int(noverlap)
n = int(n)
step = n - noverlap
if axis == 0:
shape = (n, (x.shape[-1]-noverlap)//step)
strides = (x.strides[0], step*x.strides[0])
else:
shape = ((x.shape[-1]-noverlap)//step, n)
strides = (step*x.strides[0], x.strides[0])
return np.lib.stride_tricks.as_strided(x, shape=shape, strides=strides)
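# Worked example (illustrative): for x = np.arange(6), n = 3, noverlap = 1 the
# step is 2, so stride_windows(x, 3, 1) returns a (3, 2) strided view whose
# columns are [0, 1, 2] and [2, 3, 4]; adjacent windows share exactly
# `noverlap` samples and no data is copied.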
def _spectral_helper(x, y=None, NFFT=None, Fs=None, detrend_func=None,
window=None, noverlap=None, pad_to=None,
sides=None, scale_by_freq=None, mode=None):
"""
Private helper implementing the common parts between the psd, csd,
spectrogram and complex, magnitude, angle, and phase spectrums.
"""
if y is None:
# if y is None use x for y
same_data = True
else:
# The checks for if y is x are so that we can use the same function to
# implement the core of psd(), csd(), and spectrogram() without doing
# extra calculations. We return the unaveraged Pxy, freqs, and t.
same_data = y is x
if Fs is None:
Fs = 2
if noverlap is None:
noverlap = 0
if detrend_func is None:
detrend_func = detrend_none
if window is None:
window = window_hanning
# if NFFT is set to None use the whole signal
if NFFT is None:
NFFT = 256
if mode is None or mode == 'default':
mode = 'psd'
_api.check_in_list(
['default', 'psd', 'complex', 'magnitude', 'angle', 'phase'],
mode=mode)
if not same_data and mode != 'psd':
raise ValueError("x and y must be equal if mode is not 'psd'")
# Make sure we're dealing with a numpy array. If y and x were the same
# object to start with, keep them that way
x = np.asarray(x)
if not same_data:
y = np.asarray(y)
if sides is None or sides == 'default':
if np.iscomplexobj(x):
sides = 'twosided'
else:
sides = 'onesided'
_api.check_in_list(['default', 'onesided', 'twosided'], sides=sides)
# zero pad x and y up to NFFT if they are shorter than NFFT
if len(x) < NFFT:
n = len(x)
x = np.resize(x, NFFT)
x[n:] = 0
if not same_data and len(y) < NFFT:
n = len(y)
y = np.resize(y, NFFT)
y[n:] = 0
if pad_to is None:
pad_to = NFFT
if mode != 'psd':
scale_by_freq = False
elif scale_by_freq is None:
scale_by_freq = True
# For real x, ignore the negative frequencies unless told otherwise
if sides == 'twosided':
numFreqs = pad_to
if pad_to % 2:
freqcenter = (pad_to - 1)//2 + 1
else:
freqcenter = pad_to//2
scaling_factor = 1.
elif sides == 'onesided':
if pad_to % 2:
numFreqs = (pad_to + 1)//2
else:
numFreqs = pad_to//2 + 1
scaling_factor = 2.
if not np.iterable(window):
window = window(np.ones(NFFT, x.dtype))
if len(window) != NFFT:
raise ValueError(
"The window length must match the data's first dimension")
result = stride_windows(x, NFFT, noverlap, axis=0)
result = detrend(result, detrend_func, axis=0)
result = result * window.reshape((-1, 1))
result = np.fft.fft(result, n=pad_to, axis=0)[:numFreqs, :]
freqs = np.fft.fftfreq(pad_to, 1/Fs)[:numFreqs]
if not same_data:
# if same_data is False, mode must be 'psd'
resultY = stride_windows(y, NFFT, noverlap)
resultY = detrend(resultY, detrend_func, axis=0)
resultY = resultY * window.reshape((-1, 1))
resultY = np.fft.fft(resultY, n=pad_to, axis=0)[:numFreqs, :]
result = np.conj(result) * resultY
elif mode == 'psd':
result = np.conj(result) * result
elif mode == 'magnitude':
result = np.abs(result) / np.abs(window).sum()
elif mode == 'angle' or mode == 'phase':
# we unwrap the phase later to handle the onesided vs. twosided case
result = np.angle(result)
elif mode == 'complex':
result /= np.abs(window).sum()
if mode == 'psd':
# Also include scaling factors for one-sided densities and dividing by
# the sampling frequency, if desired. Scale everything, except the DC
# component and the NFFT/2 component:
# if we have a even number of frequencies, don't scale NFFT/2
if not NFFT % 2:
slc = slice(1, -1, None)
# if we have an odd number, just don't scale DC
else:
slc = slice(1, None, None)
result[slc] *= scaling_factor
# MATLAB divides by the sampling frequency so that density function
# has units of dB/Hz and can be integrated by the plotted frequency
# values. Perform the same scaling here.
if scale_by_freq:
result /= Fs
# Scale the spectrum by the norm of the window to compensate for
# windowing loss; see Bendat & Piersol Sec 11.5.2.
result /= (np.abs(window)**2).sum()
else:
# In this case, preserve power in the segment, not amplitude
result /= np.abs(window).sum()**2
t = np.arange(NFFT/2, len(x) - NFFT/2 + 1, NFFT - noverlap)/Fs
if sides == 'twosided':
# center the frequency range at zero
freqs = np.roll(freqs, -freqcenter, axis=0)
result = np.roll(result, -freqcenter, axis=0)
elif not pad_to % 2:
# get the last value correctly, it is negative otherwise
freqs[-1] *= -1
# we unwrap the phase here to handle the onesided vs. twosided case
if mode == 'phase':
result = np.unwrap(result, axis=0)
return result, freqs, t
def _single_spectrum_helper(
mode, x, Fs=None, window=None, pad_to=None, sides=None):
"""
Private helper implementing the commonality between the complex, magnitude,
angle, and phase spectrums.
"""
_api.check_in_list(['complex', 'magnitude', 'angle', 'phase'], mode=mode)
if pad_to is None:
pad_to = len(x)
spec, freqs, _ = _spectral_helper(x=x, y=None, NFFT=len(x), Fs=Fs,
detrend_func=detrend_none, window=window,
noverlap=0, pad_to=pad_to,
sides=sides,
scale_by_freq=False,
mode=mode)
if mode != 'complex':
spec = spec.real
if spec.ndim == 2 and spec.shape[1] == 1:
spec = spec[:, 0]
return spec, freqs
# Split out these keyword docs so that they can be used elsewhere
docstring.interpd.update(
Spectral="""\
Fs : float, default: 2
The sampling frequency (samples per time unit). It is used to calculate
the Fourier frequencies, *freqs*, in cycles per time unit.
window : callable or ndarray, default: `.window_hanning`
A function or a vector of length *NFFT*. To create window vectors see
`.window_hanning`, `.window_none`, `numpy.blackman`, `numpy.hamming`,
`numpy.bartlett`, `scipy.signal`, `scipy.signal.get_window`, etc. If a
function is passed as the argument, it must take a data segment as an
argument and return the windowed version of the segment.
sides : {'default', 'onesided', 'twosided'}, optional
Which sides of the spectrum to return. 'default' is one-sided for real
data and two-sided for complex data. 'onesided' forces the return of a
one-sided spectrum, while 'twosided' forces two-sided.""",
Single_Spectrum="""\
pad_to : int, optional
The number of points to which the data segment is padded when performing
the FFT. While not increasing the actual resolution of the spectrum (the
minimum distance between resolvable peaks), this can give more points in
the plot, allowing for more detail. This corresponds to the *n* parameter
in the call to fft(). The default is None, which sets *pad_to* equal to
the length of the input signal (i.e. no padding).""",
PSD="""\
pad_to : int, optional
The number of points to which the data segment is padded when performing
the FFT. This can be different from *NFFT*, which specifies the number
of data points used. While not increasing the actual resolution of the
spectrum (the minimum distance between resolvable peaks), this can give
more points in the plot, allowing for more detail. This corresponds to
the *n* parameter in the call to fft(). The default is None, which sets
*pad_to* equal to *NFFT*
NFFT : int, default: 256
    The number of data points used in each block for the FFT. A power of 2 is
most efficient. This should *NOT* be used to get zero padding, or the
scaling of the result will be incorrect; use *pad_to* for this instead.
detrend : {'none', 'mean', 'linear'} or callable, default: 'none'
The function applied to each segment before fft-ing, designed to remove
the mean or linear trend. Unlike in MATLAB, where the *detrend* parameter
    is a vector, in Matplotlib it is a function. The :mod:`~matplotlib.mlab`
module defines `.detrend_none`, `.detrend_mean`, and `.detrend_linear`,
but you can use a custom function as well. You can also use a string to
choose one of the functions: 'none' calls `.detrend_none`. 'mean' calls
`.detrend_mean`. 'linear' calls `.detrend_linear`.
scale_by_freq : bool, default: True
Whether the resulting density values should be scaled by the scaling
frequency, which gives density in units of Hz^-1. This allows for
integration over the returned frequency values. The default is True for
MATLAB compatibility.""")
@docstring.dedent_interpd
def psd(x, NFFT=None, Fs=None, detrend=None, window=None,
noverlap=None, pad_to=None, sides=None, scale_by_freq=None):
r"""
Compute the power spectral density.
The power spectral density :math:`P_{xx}` by Welch's average
periodogram method. The vector *x* is divided into *NFFT* length
segments. Each segment is detrended by function *detrend* and
windowed by function *window*. *noverlap* gives the length of
the overlap between segments. The :math:`|\mathrm{fft}(i)|^2`
of each segment :math:`i` are averaged to compute :math:`P_{xx}`.
If len(*x*) < *NFFT*, it will be zero padded to *NFFT*.
Parameters
----------
x : 1-D array or sequence
Array or sequence containing the data
%(Spectral)s
%(PSD)s
noverlap : int, default: 0 (no overlap)
The number of points of overlap between segments.
Returns
-------
Pxx : 1-D array
The values for the power spectrum :math:`P_{xx}` (real valued)
freqs : 1-D array
The frequencies corresponding to the elements in *Pxx*
References
----------
    Bendat & Piersol -- Random Data: Analysis and Measurement Procedures, John
Wiley & Sons (1986)
See Also
--------
specgram
`specgram` differs in the default overlap; in not returning the mean of
the segment periodograms; and in returning the times of the segments.
magnitude_spectrum : returns the magnitude spectrum.
csd : returns the spectral density between two signals.
"""
Pxx, freqs = csd(x=x, y=None, NFFT=NFFT, Fs=Fs, detrend=detrend,
window=window, noverlap=noverlap, pad_to=pad_to,
sides=sides, scale_by_freq=scale_by_freq)
return Pxx.real, freqs
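# Usage sketch (illustrative, not part of the library): estimate the spectrum
# of a noisy 100 Hz tone sampled at 1 kHz --
#     t = np.arange(0, 1, 1e-3)
#     x = np.sin(2 * np.pi * 100 * t) + 0.1 * np.random.randn(t.size)
#     Pxx, freqs = psd(x, NFFT=256, Fs=1000, noverlap=128)
# freqs[np.argmax(Pxx)] should land within one frequency bin of 100 Hz.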
@docstring.dedent_interpd
def csd(x, y, NFFT=None, Fs=None, detrend=None, window=None,
noverlap=None, pad_to=None, sides=None, scale_by_freq=None):
"""
Compute the cross-spectral density.
The cross spectral density :math:`P_{xy}` by Welch's average
periodogram method. The vectors *x* and *y* are divided into
*NFFT* length segments. Each segment is detrended by function
*detrend* and windowed by function *window*. *noverlap* gives
the length of the overlap between segments. The product of
the direct FFTs of *x* and *y* are averaged over each segment
to compute :math:`P_{xy}`, with a scaling to correct for power
loss due to windowing.
If len(*x*) < *NFFT* or len(*y*) < *NFFT*, they will be zero
padded to *NFFT*.
Parameters
----------
x, y : 1-D arrays or sequences
Arrays or sequences containing the data
%(Spectral)s
%(PSD)s
noverlap : int, default: 0 (no overlap)
The number of points of overlap between segments.
Returns
-------
Pxy : 1-D array
The values for the cross spectrum :math:`P_{xy}` before scaling (real
valued)
freqs : 1-D array
The frequencies corresponding to the elements in *Pxy*
References
----------
Bendat & Piersol -- Random Data: Analysis and Measurement Procedures, John
Wiley & Sons (1986)
See Also
--------
psd : equivalent to setting ``y = x``.
"""
if NFFT is None:
NFFT = 256
Pxy, freqs, _ = _spectral_helper(x=x, y=y, NFFT=NFFT, Fs=Fs,
detrend_func=detrend, window=window,
noverlap=noverlap, pad_to=pad_to,
sides=sides, scale_by_freq=scale_by_freq,
mode='psd')
if Pxy.ndim == 2:
if Pxy.shape[1] > 1:
Pxy = Pxy.mean(axis=1)
else:
Pxy = Pxy[:, 0]
return Pxy, freqs
_single_spectrum_docs = """\
Compute the {quantity} of *x*.
Data is padded to a length of *pad_to* and the windowing function *window* is
applied to the signal.
Parameters
----------
x : 1-D array or sequence
Array or sequence containing the data
{Spectral}
{Single_Spectrum}
Returns
-------
spectrum : 1-D array
The {quantity}.
freqs : 1-D array
The frequencies corresponding to the elements in *spectrum*.
See Also
--------
psd
Returns the power spectral density.
complex_spectrum
Returns the complex-valued frequency spectrum.
magnitude_spectrum
Returns the absolute value of the `complex_spectrum`.
angle_spectrum
Returns the angle of the `complex_spectrum`.
phase_spectrum
Returns the phase (unwrapped angle) of the `complex_spectrum`.
specgram
Can return the complex spectrum of segments within the signal.
"""
complex_spectrum = functools.partial(_single_spectrum_helper, "complex")
complex_spectrum.__doc__ = _single_spectrum_docs.format(
quantity="complex-valued frequency spectrum",
**docstring.interpd.params)
magnitude_spectrum = functools.partial(_single_spectrum_helper, "magnitude")
magnitude_spectrum.__doc__ = _single_spectrum_docs.format(
quantity="magnitude (absolute value) of the frequency spectrum",
**docstring.interpd.params)
angle_spectrum = functools.partial(_single_spectrum_helper, "angle")
angle_spectrum.__doc__ = _single_spectrum_docs.format(
quantity="angle of the frequency spectrum (wrapped phase spectrum)",
**docstring.interpd.params)
phase_spectrum = functools.partial(_single_spectrum_helper, "phase")
phase_spectrum.__doc__ = _single_spectrum_docs.format(
quantity="phase of the frequency spectrum (unwrapped phase spectrum)",
**docstring.interpd.params)
@docstring.dedent_interpd
def specgram(x, NFFT=None, Fs=None, detrend=None, window=None,
noverlap=None, pad_to=None, sides=None, scale_by_freq=None,
mode=None):
"""
Compute a spectrogram.
Compute and plot a spectrogram of data in x. Data are split into
NFFT length segments and the spectrum of each section is
computed. The windowing function window is applied to each
segment, and the amount of overlap of each segment is
specified with noverlap.
Parameters
----------
x : array-like
1-D array or sequence.
%(Spectral)s
%(PSD)s
noverlap : int, default: 128
The number of points of overlap between blocks.
mode : str, default: 'psd'
What sort of spectrum to use:
'psd'
Returns the power spectral density.
'complex'
Returns the complex-valued frequency spectrum.
'magnitude'
Returns the magnitude spectrum.
'angle'
Returns the phase spectrum without unwrapping.
'phase'
Returns the phase spectrum with unwrapping.
Returns
-------
spectrum : array-like
2D array, columns are the periodograms of successive segments.
freqs : array-like
1-D array, frequencies corresponding to the rows in *spectrum*.
t : array-like
1-D array, the times corresponding to midpoints of segments
(i.e the columns in *spectrum*).
See Also
--------
psd : differs in the overlap and in the return values.
complex_spectrum : similar, but with complex valued frequencies.
magnitude_spectrum : similar single segment when mode is 'magnitude'.
angle_spectrum : similar to single segment when mode is 'angle'.
phase_spectrum : similar to single segment when mode is 'phase'.
Notes
-----
detrend and scale_by_freq only apply when *mode* is set to 'psd'.
"""
if noverlap is None:
noverlap = 128 # default in _spectral_helper() is noverlap = 0
if NFFT is None:
NFFT = 256 # same default as in _spectral_helper()
if len(x) <= NFFT:
_api.warn_external("Only one segment is calculated since parameter "
f"NFFT (={NFFT}) >= signal length (={len(x)}).")
spec, freqs, t = _spectral_helper(x=x, y=None, NFFT=NFFT, Fs=Fs,
detrend_func=detrend, window=window,
noverlap=noverlap, pad_to=pad_to,
sides=sides,
scale_by_freq=scale_by_freq,
mode=mode)
if mode != 'complex':
spec = spec.real # Needed since helper implements generically
return spec, freqs, t
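# Hedged example for specgram() (illustrative only): a slowly sweeping tone
# split into 256-sample segments, half overlapping, returned as per-segment
# power spectral densities.
#
#     fs = 1024
#     t = np.arange(0, 4, 1 / fs)
#     x = np.sin(2 * np.pi * (50 + 25 * t) * t)
#     spec, freqs, times = specgram(x, NFFT=256, Fs=fs, noverlap=128)
#     # spec has shape (len(freqs), len(times)); columns are successive segments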
@docstring.dedent_interpd
def cohere(x, y, NFFT=256, Fs=2, detrend=detrend_none, window=window_hanning,
noverlap=0, pad_to=None, sides='default', scale_by_freq=None):
r"""
The coherence between *x* and *y*. Coherence is the normalized
cross spectral density:
.. math::
C_{xy} = \frac{|P_{xy}|^2}{P_{xx}P_{yy}}
Parameters
----------
x, y
Array or sequence containing the data
%(Spectral)s
%(PSD)s
noverlap : int, default: 0 (no overlap)
The number of points of overlap between segments.
Returns
-------
Cxy : 1-D array
The coherence vector.
freqs : 1-D array
The frequencies for the elements in *Cxy*.
See Also
--------
:func:`psd`, :func:`csd` :
For information about the methods used to compute :math:`P_{xy}`,
:math:`P_{xx}` and :math:`P_{yy}`.
"""
if len(x) < 2 * NFFT:
raise ValueError(
"Coherence is calculated by averaging over *NFFT* length "
"segments. Your signal is too short for your choice of *NFFT*.")
Pxx, f = psd(x, NFFT, Fs, detrend, window, noverlap, pad_to, sides,
scale_by_freq)
Pyy, f = psd(y, NFFT, Fs, detrend, window, noverlap, pad_to, sides,
scale_by_freq)
Pxy, f = csd(x, y, NFFT, Fs, detrend, window, noverlap, pad_to, sides,
scale_by_freq)
Cxy = np.abs(Pxy) ** 2 / (Pxx * Pyy)
return Cxy, f
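# Hedged example for cohere() (illustrative only): two noisy signals sharing a
# 120 Hz component should give Cxy close to 1 near 120 Hz and lower elsewhere.
#
#     fs = 1000
#     t = np.arange(0, 10, 1 / fs)
#     common = np.sin(2 * np.pi * 120 * t)
#     x = common + 0.5 * np.random.randn(t.size)
#     y = common + 0.5 * np.random.randn(t.size)
#     Cxy, f = cohere(x, y, NFFT=256, Fs=fs)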
class GaussianKDE:
"""
Representation of a kernel-density estimate using Gaussian kernels.
Parameters
----------
dataset : array-like
Datapoints to estimate from. In case of univariate data this is a 1-D
array, otherwise a 2D array with shape (# of dims, # of data).
bw_method : str, scalar or callable, optional
The method used to calculate the estimator bandwidth. This can be
'scott', 'silverman', a scalar constant or a callable. If a
scalar, this will be used directly as `kde.factor`. If a
callable, it should take a `GaussianKDE` instance as only
parameter and return a scalar. If None (default), 'scott' is used.
Attributes
----------
dataset : ndarray
The dataset with which `gaussian_kde` was initialized.
dim : int
Number of dimensions.
num_dp : int
Number of datapoints.
factor : float
The bandwidth factor, obtained from `kde.covariance_factor`, with which
the covariance matrix is multiplied.
covariance : ndarray
The covariance matrix of *dataset*, scaled by the calculated bandwidth
(`kde.factor`).
inv_cov : ndarray
The inverse of *covariance*.
Methods
-------
kde.evaluate(points) : ndarray
Evaluate the estimated pdf on a provided set of points.
kde(points) : ndarray
Same as kde.evaluate(points)
"""
# This implementation with minor modification was too good to pass up.
# from scipy: https://github.com/scipy/scipy/blob/master/scipy/stats/kde.py
def __init__(self, dataset, bw_method=None):
self.dataset = np.atleast_2d(dataset)
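# Hedged usage sketch for GaussianKDE (the remainder of __init__ follows the
# scipy implementation referenced above; the call below relies only on the
# interface described in the class docstring):
#
#     data = np.random.randn(2, 500)                 # (# of dims, # of data)
#     kde = GaussianKDE(data, bw_method='silverman')
#     grid = np.vstack([np.linspace(-3, 3, 50), np.zeros(50)])
#     density = kde.evaluate(grid)                   # same as kde(grid)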
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Create a Lithology object with different properties."""
import numpy as np
import xarray as xr
from scipy.interpolate import interp1d
from landlab import Component
from landlab.layers import EventLayers, MaterialLayers
from landlab.utils.return_array import return_array_at_node
class Lithology(Component):
"""Create a Lithology object.
A Lithology is a three dimensional representation of material operated on
by landlab components. Material can be removed through erosion or added to
through deposition. Rock types can have multiple attributes (e.g. age,
erodability or other parameter values, etc).
If the tracked properties are model grid fields, they will be updated to
the surface values of the Lithology. If the properties are not grid fields
then at-node grid fields will be created with their names. Lithology and
its derived versions will make an at-node grid field called `rock_type__id`
to store the rock type id.
Lithology was designed to be used on its own and to be inherited from and
improved. Currently one other Lithology variant exists: LithoLayers
which makes it easy to specify parallel layers of rock with generic layer
geometries.
It is constructed by specifying a series of thicknesses and a series of
rock type IDs. Thicknesses and IDs are both specified in order of closest
to the surface to furthest from the surface. Thicknesses can either be a
single value (corresponding to a layer of uniform thickness) or a
number-of-nodes length array (corresponding to a non-uniform layer).
Additionally, an attribute dictionary specifies the properties of each
rock type. This dictionary is expected to have the form of:
.. code-block:: python
attrs = {'K_sp': {1: 0.001,
2: 0.0001},
'D': {1: 0.01,
2: 0.001}}
Where ``'K_sp'`` and ``'D'`` are properties to track, and ``1`` and ``2``
are rock type IDs. The rock type IDs can be any type that is valid as a
python dictionary key.
References
----------
**Required Software Citation(s) Specific to this Component**
<NAME>., <NAME>., <NAME>., <NAME>. (2018). Lithology: A
Landlab submodule for spatially variable rock properties. Journal of Open
Source Software 3(30), 979. https://dx.doi.org/10.21105/joss.00979
**Additional References**
None Listed
"""
_name = "Lithology"
_cite_as = """@article{barnhart2018lithology,
title = "Lithology: A Landlab submodule for spatially variable rock properties",
journal = "Journal of Open Source Software",
volume = "",
pages = "",
year = "2018",
doi = "10.21105/joss.00979",
author = "<NAME> and <NAME> and <NAME> and <NAME>",
}"""
_info = {}
def __init__(
self,
grid,
thicknesses,
ids,
attrs,
layer_type="MaterialLayers",
dz_advection=0,
rock_id=None,
):
"""Create a new instance of Lithology.
Parameters
----------
grid : Landlab ModelGrid
thicknesses : ndarray of shape `(n_layers, )` or `(n_layers, n_nodes)`
Values of layer thicknesses from surface to depth. Layers do not
have to have constant thickness. Layer thickness can be zero,
though the entirety of Lithology must have non-zero thickness.
ids : ndarray of shape `(n_layers, )` or `(n_layers, n_nodes)`
Values of rock type IDs corresponding to each layer specified in
**thicknesses**. A single layer may have multiple rock types if
specified by the user.
attrs : dict
Rock type property dictionary. See class docstring for example of
required format.
layer_type : str, optional
Type of Landlab layers object used to store the layers. If
MaterialLayers (default) is specified, then erosion removes material
and does not create a layer of thickness zero. If EventLayers is
used, then erosion removes material and creates layers of thickness
zero. Thus, EventLayers may be appropriate if the user is interested
in chronostratigraphy.
dz_advection : float, `(n_nodes, )` shape array, or at-node field array optional
Change in rock elevation due to advection by some external process.
This can be changed using the property setter. Dimensions are in
length, not length per time.
rock_id : value or `(n_nodes, )` shape array, optional
Rock type id for new material if deposited.
This can be changed using the property setter.
Examples
--------
>>> from landlab import RasterModelGrid
>>> from landlab.components import Lithology
>>> mg = RasterModelGrid((3, 3))
>>> z = mg.add_zeros("topographic__elevation", at="node")
Create a Lithology with uniform thicknesses that alternates between
layers of type 1 and type 2 rock.
>>> thicknesses = [1, 2, 4, 1]
>>> ids = [1, 2, 1, 2]
>>> attrs = {'K_sp': {1: 0.001,
... 2: 0.0001}}
>>> lith = Lithology(mg, thicknesses, ids, attrs)
After creating a Lithology, the model grid will have an at-node grid
field set to the surface values of 'K_sp'.
>>> mg.at_node['K_sp']
array([ 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001,
0.001])
The surface values are also properties of the Lithology.
>>> lith['K_sp']
array([ 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001,
0.001])
We can access information about the Lithology like the total thickness
or layer thicknesses.
>>> lith.thickness
array([ 8., 8., 8., 8., 8., 8., 8., 8., 8.])
>>> lith.dz
array([[ 1., 1., 1., 1., 1., 1., 1., 1., 1.],
[ 4., 4., 4., 4., 4., 4., 4., 4., 4.],
[ 2., 2., 2., 2., 2., 2., 2., 2., 2.],
[ 1., 1., 1., 1., 1., 1., 1., 1., 1.]])
This might look confusing -- that the layers are in reverse order, but
it is OK. The last layers in the Lithology are those that are closest
to the surface.
The layers don't all have to have the same thickness as in the prior
example. If the layers have non-uniform thickness, then they must be
specified in an array of shape `(n_layer, n_nodes)`. In this case, the
layer IDs must be specified in either an array of `(n_layer)` or
`(n_layer, n_nodes)`.
Here we make a layer that gets thicker as a function of the x value of
the model grid.
>>> layer_pattern = (0.5 * mg.x_of_node) + 1.0
>>> thicknesses = [1*layer_pattern, 2*layer_pattern, 4*layer_pattern]
>>> ids = [1, 2, 1]
>>> lith = Lithology(mg, thicknesses, ids, attrs)
>>> lith.thickness
array([ 7. , 10.5, 14. , 7. , 10.5, 14. , 7. , 10.5, 14. ])
>>> lith.dz
array([[ 4. , 6. , 8. , 4. , 6. , 8. , 4. , 6. , 8. ],
[ 2. , 3. , 4. , 2. , 3. , 4. , 2. , 3. , 4. ],
[ 1. , 1.5, 2. , 1. , 1.5, 2. , 1. , 1.5, 2. ]])
"""
super(Lithology, self).__init__(grid)
try:
self._last_elevation = self._grid["node"]["topographic__elevation"][
:
].copy()
except KeyError:
msg = (
"Lithology requires that topographic__elevation already "
"exists as an at-node field."
)
raise ValueError(msg)
# save initial information about thicknesses, layers, attributes, and ids.
self._init_thicknesses = np.asarray(thicknesses)
self._attrs = attrs
self._number_of_init_layers = self._init_thicknesses.shape[0]
self._properties = list(attrs.keys())
self._rock_id_name = "rock_type__id"
# assert that thicknesses and ids are correct and consistent shapes
# if thickness is a 2d array.
if self._init_thicknesses.ndim == 2:
# assert that the 2nd dimension is the same as the number of nodes.
if self._init_thicknesses.shape[1] != self._grid.number_of_nodes:
msg = (
"Thicknesses provided to Lithology are ",
"inconsistent with the ModelGrid.",
)
raise ValueError(msg)
# if IDs is a 2d array assert that it is the same size as thicknesses
if np.asarray(ids).ndim == 2:
if self._init_thicknesses.shape != np.asarray(ids).shape:
import numpy as np
from scipy import ndimage, signal
import skimage.draw, skimage.transform
import math
from matplotlib import pyplot as plt
class Segmentation:
"""
An implementation of "A new robust method for blood vessel segmentation in
retinal fundus images based on weighted line detector and hidden Markov
model" by <NAME>, <NAME>, <NAME>
https://doi.org/10.1016/j.cmpb.2019.105231
Attributes
----------
w : int
size of line detection window
angle : int
line detection angle step (degrees)
k1 : float
weight of line detection in combined image
k2 : float
weight of inverted green channel image in combined image
t : float
threshold
block : int
block size for searching seeds (tracing start points)
L : int
tracing segment length
eps : float
tracing segment length range
t1 : int
tracing direction range (degrees)
e1 : int
tracing end window size
e2 : int
tracing end window size
a : int
denoising window size
p1 : float
denoising parameter
p2 : float
denoising parameter
img
original image
igc
inverted green channel image
basicimg
image resulting from basic line detector
weightedimg
image resulting from weighted line detector
combinedimg
weightedimg combined with igc
binthresholdimg
binary thresholded combinedimg image
lowthresholdimg
thresholded combinedimg image (values less than t are set to 0)
tracingimg
image resulting from tracing vessel certerlines
unionimg
binthresholdimg combined with tracingimg
Methods
-------
basicLineDetector()
Returns an image of detected lines. Shows the major vessel structure.
The resulting image is stored in basicimg
weightedLineDetector()
Returns an image of detected lines. Shows the major vessel structure.
The detector assigns a different weight to each pixel of detected
lines based on distance. The resulting image is stored in weightedimg
combined(img1=None, img2=None)
By default combines weightedimg and igc (weighted sum). The resulting
image is stored in combinedimg
binThreshold(img)
Return a boolean image where each pixel from combinedimg (by default) is
returned as True if greater than t and False otherwise. The resulting
image is stored in binthresholdimg
lowThreshold()
Return an image where each pixel from combinedimg (by default) is
returned as 0 if less than t. The resulting image is stored in
lowthresholdimg
tracing()
Returns a binary image of detected lines. Shows the major vessel
centerlines. The resulting image is stored in tracingimg
denoising(img)
Sets to 0 pixels detected as noise due to the optic disk or dark
regions. The resulting image is also returned
union(img1=None, img2=None)
Given two boolean images (binthresholdimg and tracingimg by default)
returns their union (OR). The resulting image is stored in unionimg
"""
def __init__(self, imgPath:str, maskPath:str, w=15, angle=15, k1=.67, k2=.33, t=.31, block=11, L=7, eps=.5, t1=90, e1=7, e2=5, a=9, p1=.3, p2=.14):
self.w = w # size of line detection window
self.angle = angle # line detection angle step
self.k1 = k1 # weight of line detection in combined image
self.k2 = k2 # weight of inverted green channel image in combined image
self.t = t # threshold
self.block = block # block size for searching seeds (tracing start points)
self.L = L # tracing segment lenght
self.eps = eps # tracing segment lenght range
self.t1 = t1 # tracing direction range
self.e1 = e1 # tracing end window size
self.e2 = e2 # tracing end window size
self.a = a # denoising window size
self.p1 = p1 # denoising parameter
self.p2 = p2 # denoising parameter
self.basicimg = None
self.weightedimg = None
self.combinedimg = None
self.binthresholdimg = None
self.lowthresholdimg = None
self.tracingimg = None
self.unionimg = None
# get image
self.img = plt.imread(imgPath)
# generate inverted green channel image
self.igc=1-self.img[:,:,1]
self.Y,self.X = self.igc.shape
# create mask
self.mask = plt.imread(maskPath)
self.mask = ~np.array(self.mask,dtype=bool)
# apply mask
# mean = self.igc.mean()
# for x in range(self.X):
# for y in range(self.Y):
# if self.mask[y,x]:
# self.igc[y,x]=mean
self.igc = self.__normalizeData(self.igc)
def __lineMask(self,a):
def line(m, x, o, q):
return m*(x-o)+q
matrix = np.zeros((self.w, self.w), dtype=bool)
if abs(a) <= 45:
m = math.tan(math.radians(a))
for x in range(self.w):
y = int(round(line(m,x,int((self.w-1)/2),int((self.w-1)/2))))
if y in range(self.w):
matrix[self.w-1-y,x] = True
else:
a = (90-abs(a))*np.sign(a)
m = math.tan(math.radians(a))
for x in range(self.w):
y = int(round(line(m,x,int((self.w-1)/2),int((self.w-1)/2))))
if y in range(self.w):
matrix[self.w-1-y,x] = True
matrix = np.rot90(np.flip(matrix,0))
return matrix
def __normalizeData(self,data):
return (data - np.min(data)) / (np.max(data) - np.min(data))
def basicLineDetector(self):
limit = int(np.ceil(self.w/2))
lineMasks = np.array([self.__lineMask(a) for a in range(-90,90,self.angle)])
def R_b(y,x):
# creating window
yIndex, xIndex = np.ogrid[y-int((self.w-1)/2):y+int((self.w+1)/2),x-int((self.w-1)/2):x+int((self.w+1)/2)]
# calculating all lines averages
lineAvg = np.array([self.igc[yIndex,xIndex][m].mean() for m in lineMasks])
Iw_avg = self.igc[yIndex,xIndex].mean() # window average
Iw_max = lineAvg.max() # max line average
return Iw_max-Iw_avg
# generate filtered image
filtered = np.zeros(self.igc.shape) # filtered image
for x in range(limit,self.X-limit):
for y in range(limit,self.Y-limit):
if not self.mask[y,x]:
value = R_b(y,x)
if value > 0:
filtered[y,x]=value
# cut off borders
self.mask = ndimage.binary_dilation(self.mask, iterations=limit)
for x in range(self.X):
for y in range(self.Y):
if self.mask[y,x]:
filtered[y,x]=0
filtered = self.__normalizeData(filtered)
self.basicimg = filtered
return self.basicimg
def weightedLineDetector(self):
limit = int(np.ceil(self.w/2))
lineMasks = np.array([self.__lineMask(a) for a in range(-90,90,self.angle)])
def R_w(y,x):
# creating window
yIndex, xIndex = np.ogrid[y-int((self.w-1)/2):y+int((self.w+1)/2),x-int((self.w-1)/2):x+int((self.w+1)/2)]
# calculating weights
weights = np.concatenate((np.arange(1,limit),np.arange(limit,0,-1)))
# calculating all lines averages
lineAvg = np.array([np.average(self.igc[yIndex,xIndex][m],weights=weights) for m in lineMasks])
Iw_avg = self.igc[yIndex,xIndex].mean() # window average
Iw_max = lineAvg.max() # max line average
return Iw_max-Iw_avg
# generate filtered image
filtered = np.zeros(self.igc.shape) # filtered image
for x in range(limit,self.X-limit):
for y in range(limit,self.Y-limit):
if not self.mask[y,x]:
value = R_w(y,x)
if value > 0:
filtered[y,x]=value
# cut off borders
self.mask = ndimage.binary_dilation(self.mask, iterations=1)
for x in range(self.X):
for y in range(self.Y):
if self.mask[y,x]:
filtered[y,x]=0
filtered = self.__normalizeData(filtered)
self.weightedimg = filtered
return self.weightedimg
def combined(self,img1=None, img2=None):
if img1 is None:
img1 = self.weightedimg
if img1 is None:
img1 = self.weightedLineDetector()
if img2 is None:
img2 = self.igc
combined = self.__normalizeData(self.k1*img1 + self.k2*img2)
#combined = self.k1*img1 + self.k2*img2
# cut off borders
for x in range(self.X):
for y in range(self.Y):
if self.mask[y,x]:
combined[y,x]=0
self.combinedimg = combined
return self.combinedimg
def binThreshold(self, img=None):
if img is None:
img = self.combinedimg
if img is None:
img = self.combined()
self.binthresholdimg = img>self.t
return self.binthresholdimg
def lowThreshold(self, img=None):
if img is None:
img = self.combinedimg
if img is None:
img = self.combined()
self.lowthresholdimg = self.__normalizeData(np.clip(img,self.t,None))
return self.lowthresholdimg
def __seeds(self):
self.seedx = []
self.seedy = []
if self.lowthresholdimg is None:
self.lowThreshold()
img = self.lowthresholdimg
# img = self.igc
low = img.min()
for x in np.arange(0,self.X-self.block,self.block):
for y in np.arange(0,self.Y-self.block,self.block):
i = img[y:y+self.block,x:x+self.block].argmax()
sy, sx = np.unravel_index(i,(self.block,self.block))
sx+=x
sy+=y
if not self.mask[sy,sx] and img[sy,sx] != low:
self.seedx.append(sx)
self.seedy.append(sy)
self.seedx = np.array(self.seedx)
self.seedy = np.array(self.seedy)
return (self.seedx,self.seedy)
def tracing(self):
if self.lowthresholdimg is None:
self.lowThreshold()
img = self.lowthresholdimg
# img = self.igc
def circleMask():
size = int(self.L*2+self.eps*2)
center = int(self.L+self.eps-.5)
mask = np.zeros((size,size))
mask[skimage.draw.circle(center, center, self.L+self.eps)]=1
mask[skimage.draw.circle(center, center, self.L-self.eps)]=0
return np.array(mask,dtype=bool)
def angleMask(a):
ar = math.radians(a)
t1r = math.radians(self.t1)
size = int(self.L*2+self.eps*2)
mask = np.zeros((size,size))
center = int(self.L+self.eps-.5)
x=np.array(center)
y=np.array(center)
for r in np.linspace(ar-t1r,ar+t1r,int(self.t1/10)):
x = np.append(x,np.cos(r)*(self.L+self.eps)+center)
y = np.append(y,-np.sin(r)*(self.L+self.eps)+center)
mask[skimage.draw.polygon(y, x)]=1
return np.array(mask,dtype=bool)
def endMask(a):
mask = np.ones((self.e2,self.e1))
mask = skimage.transform.rotate(mask,a,resize=True)
return np.array(mask,dtype=bool)
#!/usr/bin/env python
import general_robotics_toolbox as rox
import numpy as np
import pytest
import sys
if (sys.version_info > (3, 0)):
xrange = range
#Numeric precision reduced for literals
eps = 1e-6 #np.finfo(np.float64).eps
#inches to meters conversion factor
#(use Pint package for any real programs)
in_2_m = 0.0254
def test_hat():
k=[1,2,3]
k_hat=rox.hat(k)
k_hat_t=np.array([[0, -3, 2], [3, 0, -1], [-2, 1,0]])
np.testing.assert_allclose(k_hat, k_hat_t)
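# (For reference: hat(k) builds the skew-symmetric cross-product matrix, so
#  hat(k) @ v == np.cross(k, v); the expected matrix k_hat_t above is exactly
#  that matrix for k = [1, 2, 3].)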
def _rot_test(k, theta, rot_t):
rot=rox.rot(k, theta)
np.testing.assert_allclose(rot, rot_t, atol=1e-5)
def test_rot():
rot_1_t=np.array([[1,0,0], [0,0,1], [0,-1,0]]).T
_rot_test(np.array([1,0,0]), np.pi/2.0, rot_1_t)
rot_2_t=np.array([[0,0,-1], [0,1,0], [1,0,0]]).T
_rot_test(np.array([0,1,0]), np.pi/2.0, rot_2_t)
rot_3_t=np.array([[0,1,0], [-1,0,0], [0,0,1]]).T
_rot_test(np.array([0,0,1]), np.pi/2.0, rot_3_t)
#Random rotation
rot_4=np.array([[-0.5057639, -0.1340537,0.8521928], \
[0.6456962,-0.7139224,0.2709081], \
[0.5720833,0.6872731,0.4476342]])
_rot_test(np.array([0.4490221,0.30207945,0.84090853]), 2.65949884, rot_4)
def _R2rot_test(k1, theta1):
R = rox.rot(k1,theta1)
k2, theta2 = rox.R2rot(R)
if abs(theta1-theta2) > (theta1 + theta2):
k2 = -k2
theta2 = -theta2
np.testing.assert_allclose(theta1,theta2, atol=1e-6)
if (abs(theta1) < 1e-9):
return
if ((np.abs(theta1) - np.pi) < 1e-9):
if np.linalg.norm(k1+k2) < 1e-6:
np.testing.assert_allclose(k1,-k2, atol=1e-6)
return
np.testing.assert_allclose(k1,k2, atol=1e-6)
return
np.testing.assert_allclose(k1,k2, atol=1e-6)
def test_R2rot():
_R2rot_test(np.array([1,0,0]), np.pi/2.0)
_R2rot_test(np.array([0,1,0]), np.pi/2.0)
_R2rot_test(np.array([0,0,1]), np.pi/2.0)
_R2rot_test(np.array([0.4490221,0.30207945,0.84090853]), 2.65949884)
#Singularities
_R2rot_test([1,2,3]/np.linalg.norm([1,2,3]), 1e-10)
_R2rot_test([2,-1,3]/np.linalg.norm([2,-1,3]), np.pi + 1e-10)
_R2rot_test([-2,-1,3]/np.linalg.norm([-2,-1,3]), np.pi + 1e-10)
_R2rot_test([2,-1,-3]/np.linalg.norm([2,-1,-3]), np.pi + 1e-10)
#! /usr/bin/env python
"""
This node publishes the joint states to make a square trajectory with the SCARA's end-effector
@author: <NAME> (<EMAIL>)
"""
import rospy
import rospkg
rospack = rospkg.RosPack()
import sys
sys.path.insert(0, rospack.get_path('first_assignment')+"/scripts")
from sensor_msgs.msg import JointState
from geometry_msgs.msg import Pose, PoseArray
from IK_function import scara_IK
import numpy as np
class SquareTrajectoryPublisher():
"""docstring for SquareTrajectoryPublisher"""
def __init__(self):
self._publisher = None
self._path_publisher = rospy.Publisher('desired_path', PoseArray, queue_size=10)
self._path = PoseArray()
self._path.header.frame_id = 'base'
self._dt = 0.1
self._v = 0.05
#the 4 vertices of the square
self._vertices = [ [0.27, -0.15, 0],
[0.57, -0.15, 0.1],
[0.57, 0.15, 0.1],
[0.27, 0.15, 0] ]
self._joint_names = ['rotational1', 'rotational2', 'translation']
self._current_segment = 0
self._current_idx = 0
self._waypoints = None
self.compute_waypoints()
def set_topic_name(self, name):
self._publisher = rospy.Publisher(name, JointState, queue_size=10)
def next_segment(self):
if self._current_segment == 3:
self._current_segment = 0
else:
self._current_segment += 1
self._current_idx = 0
def return_list_of_waypoints(self, p1, p2, dp, it):
waypoints = list()
current_p = p1
waypoints.append(current_p)
p = Pose()
p.position.x = current_p[0]
p.position.y = current_p[1]
p.position.z = current_p[2]
p.orientation.w = 0.707
p.orientation.y = 0.707
self._path.poses.append(p)
for i in range(1, int(abs(it))):
current_p = current_p + dp
waypoints.append(current_p)
p = Pose()
p.position.x = current_p[0]
p.position.y = current_p[1]
p.position.z = current_p[2]
p.orientation.w = 0.707
p.orientation.y = 0.707
self._path.poses.append(p)
waypoints.append(p2)
return waypoints
def compute_waypoints(self):
ds = self._v*self._dt
v1 = np.array(self._vertices[0])
v2 = np.array(self._vertices[1])
v3 = np.array(self._vertices[2])
import numpy as np, torch, torch.nn as nn, torch.optim as optim
import argparse, time, pandas as pd, os
from tqdm import tqdm
from torch.utils.tensorboard import SummaryWriter
from torch.utils.data import DataLoader
from torch.utils.data.sampler import SubsetRandomSampler
from sklearn.metrics import mean_absolute_error
from scipy.stats import pearsonr
from model import RegressionModel, MaskedMSELoss, BiModalAttention
from dataloader import MOSEIRegression
np.random.seed(393)
torch.cuda.device([0])
def get_train_valid_sampler(trainset, valid=0.1):
size = len(trainset)
idx = range(size)
split = int(valid*size)
return SubsetRandomSampler(idx[split:]), SubsetRandomSampler(idx[:split])
def get_MOSEI_loaders(path, batch_size=128, valid=0.1, num_workers=0, pin_memory=False):
trainset = MOSEIRegression(path=path)
train_sampler, valid_sampler = get_train_valid_sampler(trainset, valid)
train_loader = DataLoader(trainset, batch_size=batch_size, sampler=train_sampler, collate_fn=trainset.collate_fn, num_workers=num_workers, pin_memory=pin_memory)
valid_loader = DataLoader(trainset, batch_size=batch_size, sampler=valid_sampler, collate_fn=trainset.collate_fn, num_workers=num_workers, pin_memory=pin_memory)
testset = MOSEIRegression(path=path, train=False)
test_loader = DataLoader(testset, batch_size=batch_size, collate_fn=testset.collate_fn, num_workers=num_workers, pin_memory=pin_memory)
return train_loader, valid_loader, test_loader
def train_or_eval_model(model, loss_function, dataloader, epoch, optimizer=None, train=False):
losses, preds, labels, masks = [], [], [], []
assert not train or optimizer!=None
if train:
model.train()
else:
model.eval()
for data in dataloader:
if train:
optimizer.zero_grad()
textf, visuf, acouf, qmask, umask, label = [d.cuda() for d in data] if cuda else data
pred = model(textf, acouf, visuf, textf, qmask, umask)
labels_ = label.view(-1)
umask_ = umask.view(-1)
loss = loss_function(pred, labels_, umask_)
preds.append(pred.data.cpu().numpy())
labels.append(labels_.data.cpu().numpy())
masks.append(umask_.cpu().numpy())
losses.append(loss.item()*masks[-1].sum())
if train:
loss.backward()
optimizer.step()
if preds!=[]:
preds = np.concatenate(preds)
labels = np.concatenate(labels)
masks = np.concatenate(masks)
"""
Simplicial Complexes for manim
"""
from manimlib.imports import *
import numpy as np
class SimplicialFiltration(VGroup):
def __init__(
self,
points,
simplices,
times,
tri_opacity=0.2,
**kwargs
):
super().__init__(**kwargs)
self.current_time = -np.inf
self.pts = points
self.tri_opacity=tri_opacity
self.time = []
self.dims = []
self.simplices = simplices
for (s, t) in zip(simplices, times):
self.add_simplex(s, t, **kwargs)
def time_steps(self):
return np.unique(self.time)
import open3d as o3d
import numpy as np
import random
from scipy.spatial.distance import cdist
import time
try:
import pymesh
except Exception as e:
print('pymesh cannot be imported')
ENGINE = 'corefinement'#'igl'
PRINT = False
def myprint(*args, **kwargs):
if PRINT:
print(*args, **kwargs)
def gaussian_dist(mean, sigma, clip_r=None, attempt=100):
# clip_r None : unclipped Gaussian; roughly 68% of samples fall in [mean - sigma, mean + sigma]
# clip_r float: samples truncated to [mean - clip_r * sigma, mean + clip_r * sigma]
if clip_r is None:
return random.gauss(0, 1) * sigma + mean
else:
ns_list = np.random.randn(attempt)
samples = ns_list * sigma + mean
truncated_locs = np.argwhere((samples >= mean - sigma * clip_r) &
(samples <= mean + sigma * clip_r))
assert len(truncated_locs) > 0, 'No samples within {} attempts'.format(attempt)
return samples[np.min(truncated_locs)]
def uniform_dist(high, low):
return random.uniform(low, high)
def face_mapping_pymesh(m1, m2, outputm):
tic = time.time()
v1 = m1.vertices
v2 = m2.vertices
f1 = m1.faces
f2 = m2.faces + v1.shape[0]
m = pymesh.meshio.form_mesh(np.vstack([v1, v2]), np.vstack([f1, f2]))
m.add_attribute("source_face")
m.set_attribute("source_face", np.arange(f1.shape[0]+f2.shape[0]))
pymesh.map_face_attribute(m, outputm, 'source_face')
source_face = outputm.get_attribute('source_face')
return source_face
def mesh_boolean(mesh1, mesh2, operation, engine):
if engine == 'igl':
mesh = pymesh.boolean(mesh1, mesh2, operation=operation, engine=engine)
else:
tic = time.time()
mesh = pymesh.boolean(mesh1, mesh2, operation=operation, engine=engine)
myprint(time.time() - tic, 'pure_boolean time')
source_face = face_mapping_pymesh(mesh1, mesh2, mesh)
mesh.add_attribute("source_face")
mesh.set_attribute("source_face", source_face)
return mesh
class Primitive(object):
#instance label: face_source
#semantic label: face_label
DEFAULT_ATTRIBUTES = ['face_area', 'face_centroid', 'face_normal', 'vertex_normal', 'face_index','vertex_area','face_index','vertex_index' ]
EXTRA_ATTRIBUTES = [ 'face_source','face_color', 'vertex_color', 'vertex_source', 'face_label', 'vertex_label']
ATTRIBUTES_NAME = DEFAULT_ATTRIBUTES + EXTRA_ATTRIBUTES
LABEL = 0
RDOF = -1e5
DOF = -1e5
def __init__(self, vertex=None, face=None, *args, **kwargs):
self.mesh = None
if (vertex is None) or (face is None): # duplicate from an existing mesh
if len(args)>0 and isinstance(args[0], pymesh.Mesh):
mesh = args[0]
elif "mesh" in kwargs.keys() and isinstance(kwargs["mesh"], pymesh.Mesh):
mesh = kwargs["mesh"]
else:
raise TypeError("missing required positional argument")
self.mesh = mesh
else:# create one from elements
self.mesh = pymesh.meshio.form_mesh(vertex, face)
for attr in self.ATTRIBUTES_NAME:
if attr in kwargs.keys():self.__init_attributes(attr, kwargs[attr])
self.__init_attributes('face_label', np.ones(self.mesh.num_faces, ) * self.LABEL)
self.__init_attributes('face_source', np.zeros(self.mesh.num_faces, ))
def __get_mesh_attr(self, name):
if name.startswith('face'):
return self.mesh.get_face_attribute(name)
elif name.startswith('vertex'):
return self.mesh.get_vertex_attribute(name)
def __getattr__(self, name): # after get attribute
if self.mesh.has_attribute(name):
return self.__get_mesh_attr(name)
elif name in ["faces", "vertices", "bbox"]:
return getattr(self.mesh, name)
elif name in self.DEFAULT_ATTRIBUTES:
self.mesh.add_attribute(name)
return self.__get_mesh_attr(name)
else:
raise AttributeError("pymesh.Mesh does not have attribute \"{}\"".format(name))
def __init_attributes(self, attr, vals):
mesh_attr = getattr(self, attr, None)
if mesh_attr is None:
self.mesh.add_attribute(attr)
self.mesh.set_attribute(attr, vals)
@staticmethod
def get_rotation_matrix(*args):
if len(args) >= 2:
return Primitive._get_rotation_matrix_from_axis_angle(args[0],args[1])
elif len(args) == 1:
return Primitive._get_rotation_matrix_from_euler_angles(args[0])
else:
assert False
@staticmethod
def _get_rotation_matrix_from_axis_angle(axis,angle):
rot = pymesh.Quaternion.fromAxisAngle(axis, angle)
return rot.to_matrix()
@staticmethod
def _get_rotation_matrix_from_euler_angles(angles):
rot = o3d.geometry.get_rotation_matrix_from_xyz(angles)
return rot
def get_attributes_dict(self):
d = {}
for attr in self.ATTRIBUTES_NAME:
if self.mesh.has_attribute(attr):
d.update({attr:self.mesh.get_attribute(attr)})
return d
def rigid_transform(self, axis=[1.,0.,0.],angle=0., offset=[0.,0.,0.]):
rot = self.get_rotation_matrix(axis, angle)
center = self.get_center()
vertices = self.mesh.vertices
vertices = np.dot(rot, (vertices - center).T).T + center + offset
d = self.get_attributes_dict()
self.__init__(vertex=np.squeeze(vertices), face=self.mesh.faces, **d)
def get_center(self, center_type="mean_vertex"):#"mean_vertex"
assert center_type in ['bbox', "mean_vertex" ], '{} is not a valid center type'.format(center_type)
if center_type == "bbox":
bbox = self.mesh.bbox
return 0.5 * (bbox[0] + bbox[1])
elif center_type == "mean_vertex":
return np.mean(self.mesh.vertices, axis=0)
def scale(self, scale_factor=1.0):
vertices = self.mesh.vertices * scale_factor
d = self.get_attributes_dict()
self.__init__(vertices, self.mesh.faces, **d)
def normalize(self, norm_center=None, center_type="bbox"): #np.array([0.0, 0.0, 0.0]), ):
# norm_center-0.5 ~ norm_center+0.5 coordinate,
# the point located on norm center depends on the center_type
center = self.get_center(center_type)
bbox = self.mesh.bbox
center_min_bound = np.max(center - bbox[0])
center_max_bound = np.max(bbox[1] - center)
half_bound = np.max([center_min_bound, center_max_bound])
self.scale(0.5 /(half_bound))
if not(norm_center is None):
self.rigid_transform(offset=norm_center - self.get_center())
def __add__(self, other):
assert isinstance(other, Primitive), 'Should use {} to perform this operation'.format('Primitive')
output_mesh = mesh_boolean(self.mesh, other.mesh, operation="union", engine=ENGINE)
m = self.__assign_label_and_create(output_mesh, other)
return m
def __mul__(self, other):
assert isinstance(other, Primitive), 'Should use {} to perform this operation'.format('Primitive')
output_mesh = mesh_boolean(self.mesh, other.mesh, operation="intersection",engine=ENGINE)
return self.__assign_label_and_create(output_mesh, other)
def __sub__(self, other):
assert isinstance(other, Primitive), 'Should use {} to perform this operation'.format('Primitive')
output_mesh = mesh_boolean(self.mesh, other.mesh, operation="difference",engine=ENGINE)
return self.__assign_label_and_create(output_mesh, other)
def __assign_label_and_create(self, output_mesh, other):
if True: #ENGINE == 'igl':
face_indices = output_mesh.get_attribute("source_face").astype(int)
attr_dict = {}
for attr_name in self.ATTRIBUTES_NAME:
if attr_name.startswith('face'):
if self.mesh.has_attribute(attr_name) and other.mesh.has_attribute(attr_name):
self_attr = self.mesh.get_attribute(attr_name)
other_attr = other.mesh.get_attribute(attr_name)
if attr_name == 'face_source':
other_attr = other_attr + np.max(self_attr) + 1
output_attr = np.concatenate([self_attr, other_attr])[face_indices]
attr_dict.update({attr_name:output_attr})
return Primitive(output_mesh.vertices, output_mesh.faces, **attr_dict)
else:
return Primitive(output_mesh.vertices, output_mesh.faces, )
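# Hedged usage sketch of the CSG operators defined above (illustrative only;
# assumes pymesh is importable, and uses the Sphere/Cylinder subclasses
# defined later in this module):
#
#     ball = Sphere.random()
#     rod = Cylinder.random()
#     carved = ball - rod                       # boolean difference
#     pts, labels = carved.sample_points(2048, return_attributes=['label'])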
def save(self, filename):
pymesh.meshio.save_mesh(filename, self.mesh, *list(self.get_attributes_dict().keys()))
def save_raw(self, filename):
pymesh.meshio.save_mesh_raw(filename, self.mesh.vertices, self.mesh.faces)
@staticmethod
def load(file_name):
mesh = pymesh.meshio.load_mesh(file_name)
print(mesh)
return Primitive(mesh=mesh)
@staticmethod
def cal_faces_area(vertices, faces):
x = vertices[faces]
a = x[:, 0, :] - x[:, 1, :]
b = x[:, 0, :] - x[:, 2, :]
cross = np.cross(a, b)
return 0.5 * np.linalg.norm(cross, axis=1)
def sample_points(self, num_points, return_attributes=[]):
assert isinstance(return_attributes, list), 'input argument \'return_attributes\' should be list not other type.'
attr_values = []
for attr in return_attributes:
attr_values.append(getattr(self, 'face_' + attr, None))
assert not (attr_values[-1] is None), '\'{}\' is not a valid face-based attribute.'.format(attr)
face_vs = self.mesh.vertices[self.mesh.faces]
area_p = self.cal_faces_area(self.mesh.vertices, self.mesh.faces)
area_p = area_p / np.sum(area_p)
face_indices = np.random.choice(np.arange(self.mesh.num_faces), num_points, p=area_p)
r1, r2 = np.random.rand(int(num_points)),np.random.rand(int(num_points))
rd_pos = np.asarray([1-np.sqrt(r1),np.sqrt(r1)*(1-r2), np.sqrt(r1)*r2 ]).T
pts = np.tile(rd_pos[:,0],(3,1)).T * face_vs[face_indices, 0] + \
np.tile(rd_pos[:,1],(3,1)).T * face_vs[face_indices, 1] + \
np.tile(rd_pos[:,2],(3,1)).T * face_vs[face_indices, 2]
results = [pts] + [a[face_indices] for a in attr_values]
return tuple(results)
@staticmethod
def random(*args, **kwargs):
return Primitive(*args, **kwargs)
def cal_dof(self):
return self.RDOF + self.DOF
class Sphere(Primitive):
RDOF = 0
DOF = 1
LABEL = 1
GEN_FUNC =o3d.geometry.TriangleMesh.create_sphere
def __init__(self, *args, **kwargs):
super(Sphere, self).__init__( *args, **kwargs)
@staticmethod
def random(res=20):
res = int(uniform_dist(low=res - 5, high=res + 5))
o3d_mesh = Sphere.GEN_FUNC(radius=0.5, resolution=res)
o3d_v = np.asarray(o3d_mesh.vertices)
o3d_f = np.asarray(o3d_mesh.triangles)
return Sphere(vertex=o3d_v,face=o3d_f)
class Cylinder(Primitive):
RDOF = 2
DOF = 2
GEN_FUNC = o3d.geometry.TriangleMesh.create_cylinder
RATIO_MAX= 5
LABEL = 2
def __init__(self,*args, **kwargs):
super(Cylinder, self).__init__(*args, **kwargs)
@staticmethod
def random(resolution=25, split=4):
low, high = np.arctan(1.0 / Cylinder.RATIO_MAX), np.arctan(Cylinder.RATIO_MAX)
theta = uniform_dist(low, high)
h, d = np.sin(theta), np.cos(theta)
##############################################
# #
# Ferdinand 0.40, <NAME>, LLNL #
# #
# gnd,endf,fresco,azure,hyrma #
# #
##############################################
from fudge import reactionSuite as reactionSuiteModule
from fudge import styles as stylesModule
from fudge import physicalQuantity as physicalQuantityModule
from fudge.reactions import reaction as reactionModule
from fudge.reactionData import crossSection as crossSectionModule
from fudge.processing.resonances.getCoulombWavefunctions import *
from xData.Documentation import documentation as documentationModule
from xData.Documentation import computerCode as computerCodeModule
import fudge.resonances.resonances as resonancesModule
import fudge.resonances.scatteringRadius as scatteringRadiusModule
import fudge.resonances.resolved as resolvedResonanceModule
import fudge.resonances.common as commonResonanceModule
import fudge.resonances.scatteringRadius as scatteringRadiusModule
import fudge.covariances.modelParameters as covarianceModelParametersModule
import fudge.covariances.covarianceSection as covarianceSectionModule
import fudge.covariances.covarianceSuite as covarianceSuiteModule
from PoPs import database as databaseModule
from PoPs import misc as miscModule
from PoPs.families import gaugeBoson as gaugeBosonModule
from PoPs.families import baryon as baryonModule
from PoPs.families import nucleus as nucleusModule
from PoPs.families import nuclide as nuclideModule
from PoPs.quantities import spin as spinModule
from PoPs.groups.misc import *
from numpy import zeros
from pqu import PQU as PQUModule
from xData import table as tableModule
import xData.constant as constantModule
import xData.link as linkModule
import xData.xDataArray as arrayModule
import xData.axes as axesModule
#from f90nml import read
import f90nml.parser
from zeroReaction import *
from getCoulomb import *
# import masses
import masses
import os,pwd,time,sys
import fractions
import numpy
fmscal = 0.0478450
etacns = 0.1574855
amu = 931.494013
oneHalf = fractions.Fraction( '1/2' )
one = fractions.Fraction( '1' )
zero = fractions.Fraction( '0' )
spinUnit = spinModule.baseUnit
############################################## read_sfresco
def read_fresco(inFile, amplitudes,Lvals,CNspec,nonzero,noCov, verbose,debug):
# print('amplitudes:',amplitudes,' and nonzero:',nonzero)
srch = open(inFile,'r')
els = srch.readline().split("'")
#print 'els=',els
name_frin = els[1]
name_frout= els[3]
if verbose: print('in,out =',name_frin,',',name_frout)
np,nd = [int(x) for x in srch.readline().split()][0:2]
print(np,'parameters and',nd,'fitted data sets')
srch.readline() # blank line
header = srch.readline()
srch.readline() # NAMELIST line
srch.seek(0)
namelists = f90nml.read(inFile)
fresco = namelists['fresco']
partitions = namelists['partition']
states = namelists['states']
pots = namelists['pot']
variables = namelists['variable'][0:np] if np>0 else []
try: Covariances = namelists['Cov']
except: Covariances = None
if nd>0:
try:
datanml = namelists['data'] #[0:nd]
except:
nd = 0
datanml = None
rmatch = fresco['rmatch']
elab = fresco['elab']
emin,emax = elab[0],elab[1]
Rm_global = rmatch
Rm_radius = scatteringRadiusModule.scatteringRadius(
constantModule.constant1d(Rm_global, domainMin=emin, domainMax=emax,
axes=axesModule.axes(labelsUnits={1: ('energy_in', 'MeV'), 0: ('radius', 'fm')})) )
if verbose: print('np,nd,emin,emax=',np,nd,emin,emax)
try: s = states[0] # fail if only one 'states' namelist
except:
#print 'Rewrite states namelist from ',states
states = [states] # so put it in list of 1
if debug:
print('\nfresco =',fresco)
for i in range(0,len(partitions)-1): print('\npartitions ',i,' =',partitions[i])
try:
for i in range(0,len(states)): print('\nstates ',i,' =',states[i])
except:
print('\nstates only =',states)
states = [states]
for i in range(0,len(states)): print('\nstates ',i,' =',states[i])
#for i in range(0,len(pots)-1): print '\npots ',i,' =',pots[i]
#for i in range(0,len(variables)): print '\nvariables =',variables[i]
try: pel = fresco['pel']
except: pel=1
try: exl = fresco['exl']
except:exl=1
if debug: print('pel,exl =',pel,exl)
domain = stylesModule.projectileEnergyDomain(emin,emax,'MeV')
style = stylesModule.evaluated( 'eval', '', physicalQuantityModule.temperature( 300, 'K' ), domain, 'from '+inFile , '0.1.0' )
PoPs_data = databaseModule.database( 'fresco', '1.0.0' )
resonanceReactions = commonResonanceModule.resonanceReactions()
computePenetrability = True
MTchannels = []
approximation = 'Full R-Matrix'
#print ' Btype =',fresco['btype'],' bndx = ',fresco['bndx'][0]
BV = None
if fresco['btype']=='S':
BC = resolvedResonanceModule.BoundaryCondition.EliminateShiftFunction
elif fresco['btype']=='L':
BC = resolvedResonanceModule.BoundaryCondition.NegativeOrbitalMomentum
elif fresco['btype']=='A':
BC = resolvedResonanceModule.BoundaryCondition.Brune
elif fresco['btype']=='B': # and isinstance(fresco['bndx'][0],float):
BC = resolvedResonanceModule.BoundaryCondition.Given
BV = float(fresco['bndx'][0])
else:
print('Unrecognized btype =',fresco['btype'],'with bndx =',fresco['bndx'])
if debug: print("btype,bndx so BC=",fresco['btype'],fresco.get('bndx',None),' so BC=',BC,'BV=',BV)
rela = fresco.get('rela','')
relref = fresco.get('relref',1)
KRL = relref if len(rela)>0 else 0
#amplitudes = amplitudes # for RWA in Mev**(1/2) for gnd
LRP = 2 # do not reconstruct resonances pointwise (for now)
jtmax = fresco['jtmax']
itcm = len(states)-1+1 # = ITCM (the -1 is for the final empty namelist)
rrc=[[None for j in range(itcm)] for i in range(itcm)] # initialize array
ZAdict = {}; p1p2 = {}
ic=0
inc = partitions[ic]
itstart = 0
itfin = itstart + inc['nex']-1
cm2lab = 0
spinUnit = spinModule.baseUnit
gchannelName = None
CNspin, CNparity = CNspec
CNparity = 1 if CNparity in ['P','p'] else -1
Q_offset = 0.0
#if debug: print('itstart,itfin=',itstart,itfin)
for it in range(len(states)): # ignore final empty namelist)
if it>itfin:
ic+=1 # next partition
inc = partitions[ic]
itstart = itfin+1
if debug: print(ic,' inc[]=',inc)
itfin = itstart + inc['nex']-1
#if debug: print('itstart,itfin=',itstart,itfin)
p,pMass,pZ = inc['namep'],inc['massp'],inc['zp']
t,tMass,tZ = inc['namet'],inc['masst'],inc['zt']
# Use standard GND names:
pA = int(pMass+0.5)
tA = int(tMass+0.5)
p = idFromZAndA(pZ,pA) if p not in ['photon','gamma'] else 'photon'
t = idFromZAndA(tZ,tA)
Qvalue = inc['qval']
try: prmax = partitions[ic]['prmax']
except: prmax = rmatch
ia=it-itstart
tex = nuclideIDFromIsotopeSymbolAndIndex(t,ia)
rr = '%s + %s' % (p,tex) # label for resonanceReaction
rrc[ic][ia] = rr
if debug: print("rrc [",ic,',',ia,'] =',rrc[ic][ia])
state = states[it]
try: jp,ptyp,ep = state['jp'],state['ptyp'],state['ep']
except: pass
try:
jt,ptyt,et = state['jt'],state['ptyt'],state['et']
except:
print('Cannot find target info in ',state,'\n Stop')
raise SystemExit
pp = ptyp
pt = ptyt
Jp = int(jp) if abs(jp - int(jp))<0.1 else '%i/2' % int(2*jp)
Jt = int(jt) if abs(jt - int(jt))<0.1 else '%i/2' % int(2*jt)
# convert parities in namelist from fresco (-1,1) to gnd (-+) strings:
state['ptyp'] = pp
state['ptyt'] = pt
QI = Qvalue - ep - et
channelName = '%s + %s' % (p,tex)
ZAdict[rr] = (float(pMass),float(tMass),float(pZ),float(tZ),float(QI),float(prmax))
p1 = (1-pp)//2; p2 = (1-pt)//2 # 0 or 1 for + or - parities
p1p2[(ic+1,ia+1)] = (p1,p2) # save for later access from ic, and ia
if debug: print('p1p2[(',ic+1,ia+1,')] = (',p1,p2,')')
MT = 5
if p=='photon': MT = 102
elif p=='n' : MT = 50+ia
elif p=='H1' : MT = 600+ia
elif p=='H2' : MT = 650+ia
elif p=='H3' : MT = 700+ia
elif p=='He3' : MT = 750+ia
elif p=='He4' : MT = 800+ia
if verbose: print(ic,ia,' projectile=',p,pMass,pZ,Jp,ptyp, ', target=',tex,tMass,tZ,Jt,ptyt, ' Q=',Qvalue,' MT=',MT,' prmax =',prmax)
if pZ==0 and pMass == 0 : # g
projectile = miscModule.buildParticleFromRawData( gaugeBosonModule.particle, p, mass = ( 0, 'amu' ), spin = (Jp,spinUnit ), parity = (ptyp,'' ), charge = (0,'e') )
CNspin = Jt
CNparity = ptyt
elif pZ<1 and pMass < 1.5 and p != 'H1' : # n or p
projectile = miscModule.buildParticleFromRawData( baryonModule.particle, p, mass = (pMass,'amu' ), spin = (Jp,spinUnit ), parity = (ptyp,'' ), charge = (pZ,'e') )
else: # nucleus in its gs
nucleus = miscModule.buildParticleFromRawData( nucleusModule.particle, p, index = 0, energy = ( 0.0, 'MeV' ) , spin=(Jp,spinUnit), parity=(ptyp,''), charge=(pZ,'e'))
projectile = miscModule.buildParticleFromRawData( nuclideModule.particle, p, nucleus = nucleus, mass=(pMass,'amu'))
PoPs_data.add( projectile )
# Some state of target at energy 'et':
if verbose: print("Build PoPs for target ",tex,Jt,ptyt,tZ,tMass,ia,et)
nucleus = miscModule.buildParticleFromRawData( nucleusModule.particle, tex, index = ia, energy = (et,'MeV' ) , spin=(Jt,spinUnit), parity=(ptyt,''), charge=(tZ,'e') )
target = miscModule.buildParticleFromRawData( nuclideModule.particle, tex, nucleus = nucleus, mass=(tMass,'amu'))
#print target.toXML()
PoPs_data.add( target )
if ic==pel-1 and ia==exl-1:
if verbose: print(' -- that was the incident channel of',p,tex)
elastics = (p,tex)
Q_offset = QI
MT = 2
cm2lab = (tMass + pMass)/tMass
try: prmax = partitions[ic]['prmax']
except: prmax = rmatch
# Create zero background cross section
# MTchannels.append((rr,zeroReaction(rr,MT, QI, [projectile,target], None, emin,emax,'MeV', debug), channelName,prmax,p))
MTchannels.append((rr,(rr,MT, QI, projectile,target, emin,emax), channelName,prmax,p))
compoundA = pA + tA
compoundZ = pZ + tZ
cMass = masses.getMassFromZA( compoundZ*1000 + compoundA )
compoundName = idFromZAndA(compoundZ,compoundA)
if CNspin is None or CNparity is None:
ptyt = 1; Jt=0
print('\n WARNING: %s spin and parity set to %s,%s by default. If needed, please fix PoPs data in final file by hand!!!\n' % (compoundName,Jt,ptyt))
else:
Jt = CNspin
ptyt = CNparity
print('\n%s spin and parity set to %s,%s. If needed, please fix PoPs data in final file by hand!!!\n' % (compoundName,Jt,ptyt))
CNnucleus = miscModule.buildParticleFromRawData( nucleusModule.particle, compoundName, index = 0, energy = (0,'MeV' ), spin=(Jt,spinUnit), parity=(ptyt,''))
CNtarget = miscModule.buildParticleFromRawData( nuclideModule.particle, compoundName, nucleus = CNnucleus, mass=(cMass,'amu'))
PoPs_data.add( CNtarget )
# Check if any damping and hence need for Reich-Moore channel
damped = False
changeAmp = False; changed=0
for v in variables:
if v['kind']==3:
damped = damped or v.get('damp',0.0) > 1e-20
if v['kind']==4:
is_rwa = v.get('rwa',True) # same default as frescox
changeAmp = changeAmp or (is_rwa != amplitudes)
if is_rwa != amplitudes: changed += 1
if v['kind']==7:
damped = damped or v.get('damp',0.0) > 1e-20
if changeAmp: print(" Need to change ",changed," reduced width amplitudes with formal widths ")
if damped:
approximation = 'Reich_Moore'
print(" Create Reich-Moore channel 'photon' from damping")
level = 0
compoundNameIndex = nuclideIDFromIsotopeSymbolAndIndex(compoundName,level)
gchannelName = '%s + photon' % compoundName
Q = (pMass + tMass - cMass)*amu
rrcap = gchannelName
print("Reich-Moore particle pair: ",gchannelName,' with CN mass %.5f so Q=%.3f, label=%s' % (cMass,Q,rrcap))
# gData = { '0' : [ 0.0, .0, 1, None, 1, +1 ] }
gammaParticle = miscModule.buildParticleFromRawData( gaugeBosonModule.particle, 'photon',
mass = ( 0, 'amu' ), spin = ( zero, spinUnit ), parity = ( 1, '' ), charge = ( 0, 'e' ))
PoPs_data.add(gammaParticle)
nucleus = miscModule.buildParticleFromRawData( nucleusModule.particle, compoundNameIndex, index = level, energy = ( 0.0, 'MeV' ) ,
spin=(zero,spinUnit), parity=(1,''), charge=(compoundZ,'e') )
compoundParticle = miscModule.buildParticleFromRawData( nuclideModule.particle, compoundNameIndex, nucleus = nucleus, mass=(cMass,'amu') )
#print PoPs_data.toXML()
PoPs_data.add(compoundParticle)
# if verbose: print PoPs_data.toXML()
# Create background ReichMoore cross section (zero to start with)
MT_capture = 102
# label = 'capture'
label = rrcap
capture = zeroReaction(label,MT_capture, Q, [gammaParticle,compoundParticle], 'damping', emin,emax,'MeV', debug)
# MTchannels.append((rrcap, capture, gchannelName,None,'photon'))
MTchannels.append((rrcap, (label,MT_capture, Q, gammaParticle,compoundParticle, emin,emax), gchannelName,None,'photon'))
# After making all the channels, and gnd is generated for the elastic channel, now add them to gnd
p,tex = elastics
gnd = reactionSuiteModule.reactionSuite( p, tex, 'fresco R-matrix fit', PoPs = PoPs_data, style = style, interaction='nuclear')
for rr,reacInfo,channelName,prmax,p in MTchannels:
# Get zero background cross section and link to it
#reaction,channelName,prmax = MTchannels[rr]
rr,MT, QI, pn,tn, emi,ema = reacInfo
reaction = zeroReaction(rr, MT, QI - Q_offset, [pn,tn], None, emi,ema,'MeV', debug)
gnd.reactions.add(reaction)
eliminated = channelName == gchannelName
reactionLink = linkModule.link(reaction)
computeShiftFactor = BC != resolvedResonanceModule.BoundaryCondition.EliminateShiftFunction and not eliminated
computePenetrability = not eliminated # Should be False also for fission channels (but they are not specified yet) TEMPORARY
rreac = commonResonanceModule.resonanceReaction ( label=rr, reactionLink=reactionLink, ejectile=p,
computePenetrability=computePenetrability,
computeShiftFactor=computeShiftFactor, Q=None, eliminated=eliminated )
if prmax is not None and prmax != Rm_global:
rreac.scatteringRadius = scatteringRadiusModule.scatteringRadius(
constantModule.constant1d(prmax, domainMin=emin, domainMax=emax,
axes=axesModule.axes(labelsUnits={1: ('energy_in', 'MeV'), 0: ('radius', 'fm')})) )
resonanceReactions.add(rreac)
if debug: print("RR <"+rr+"> is "+channelName)
if cm2lab<1e-5:
print("Missed elastic channel for cm2lab factor!")
raise SystemExit
if Lvals is not None:
print("Remake channels in each pair for L values up to ",Lvals)
# Now read and collate the reduced channel partial waves and their reduced width amplitudes
# next we have NJS spin groups, each containing channels and resonances
spinGroups = resolvedResonanceModule.spinGroups()
JpiList = []
for i in range(0,len(variables)):
v = variables[i]
if v['kind']==3:
pi = v['par']
J = v['jtot']
Jpi = J,pi
if Jpi not in JpiList: JpiList.append(Jpi)
if debug: print(" List of Jpi",JpiList)
NJS = len(JpiList)
JpiMissing = []
frac = J-int(J) # to fix whether integer or half-integer spins!
NJ = int(jtmax-frac+0.1)+1
for i in range(NJ):
J = frac + i
for pi in [-1,1]:
if (J,pi) not in JpiList: JpiMissing.append( (J,pi) )
NJM = len(JpiMissing)
if NJM>0: print("Spin-parity groups with no poles:",JpiMissing)
kvar = 0 # ; ivar2G = {}; G2ivar = [];
kvarData = []
# if debug: print(resonanceReactions.toXML())
for spinGroupIndex in range(NJS+NJM):
J,piv = JpiList [ spinGroupIndex ] if spinGroupIndex < NJS else JpiMissing[spinGroupIndex-NJS]
JJ = resolvedResonanceModule.spin( J )
pi= resolvedResonanceModule.spin( piv)
x = (1-pi)//2
if verbose: print('\n',spinGroupIndex,': J,pi,x =',JJ,pi,x)
# Previously we got channel quantum numbers from looking at which combinations have poles.
# But this is not good from physics, as we have to be careful to cater for channels even without poles.
# So now (Oct 9, 2017) I re-organize how to make list of channels.
#
chans = set()
itc = 0
for rreac in resonanceReactions:
if not rreac.eliminated:
icch=0; iach=0
for ic in range(len(rrc)): # find icch,iach for this reaction pair
for ia in range(len(rrc[ic])):
if rreac.label==rrc[ic][ia]:
icch=ic+1; iach=ia+1
if debug: print(" pair:",rreac.label," at ic,ia",icch,iach)
p = rreac.ejectile
t = rreac.residual
projectile = PoPs_data[p];
target = PoPs_data[t];
if hasattr(projectile, 'nucleus'): projectile = projectile.nucleus
if hasattr(target, 'nucleus'): target = target.nucleus
jp,pt = projectile.spin[0].float('hbar'), projectile.parity[0].value
jt,tt = target.spin[0].float('hbar'), target.parity[0].value
smin = abs(jt-jp)
smax = jt+jp
s2min = int(2*smin+0.5)
s2max = int(2*smax+0.5)
for s2 in range(s2min,s2max+1,2):
sch = s2*0.5
lmin = int(abs(sch-JJ) +0.5)
lmax = int(sch+JJ +0.5)
if Lvals is not None: lmax = min(lmax,Lvals[itc])
for lch in range(lmin,lmax+1):
if pi != pt*tt*(-1)**lch: continue
chans.add((icch,iach,lch,sch))
if debug: print(' Partial wave channels IC,IA,L,S:',icch,iach,lch,sch)
itc += 1
channelList = sorted(chans)
NCH = len(channelList)
if debug: print(' channels =',chans,' (',NCH,')')
if debug: print(' channelList =',channelList,' (',NCH,')')
columnHeaders = [ tableModule.columnHeader(0, name="energy", unit="MeV") ]
width_units = 'MeV' ## 'MeV**{1/2}' if amplitudes else 'MeV' # wrong units given to GND: correct later if needed
channelNames = []
channels = resolvedResonanceModule.channels()
firstp =1
if damped:
columnHeaders.append( tableModule.columnHeader(1, name=gchannelName, unit= width_units) )
Sch = resolvedResonanceModule.spin( 0.0 )
channels.add( resolvedResonanceModule.channel('1', rrcap, columnIndex=1, L=0, channelSpin=Sch) )
firstp = 2
for chidx in range(NCH):
icch,iach,lch,sch = channelList[chidx]
rr = rrc[icch-1][iach-1]
if debug: print("From ic,ia =",icch,iach," find channel ",rr)
thisChannel = resonanceReactions[rr]
channelName = "%s width" % thisChannel.label
jdx = 2
while True:
if channelName not in channelNames:
channelNames.append( channelName ); break
channelName = '%s width_%d' % (thisChannel.label, jdx)
jdx += 1
columnHeaders.append( tableModule.columnHeader(chidx+firstp, name=channelName, unit= width_units) )
Sch = resolvedResonanceModule.spin( sch )
channels.add( resolvedResonanceModule.channel(str(chidx+firstp), rr, columnIndex=chidx+firstp, L=lch, channelSpin=Sch) )
if debug: print(str(chidx), str(chidx), int(lch), float(sch), chidx+firstp)
terms = set() # for this J,pi spin group
damping = {}
for i in range(0,len(variables)):
v = variables[i]
ivare = v.get('ivar',None)
if ivare is not None and ivare!=i+1 and Covariances is not None:
print("Variable namelists out of order. Expect %i but found %i" % (i+1,ivare))
if v['kind']==3:
Jv = v['jtot']
if Jv==J and v['par']==pi:
term = v['term']
terms.add((term,v['energy'],ivare))
if damping.get(term,None) is None: damping[term] = 0.0
try: d = v['damp']
except: d = 0.
damping[term] += d
if debug: print(i,':',v,'for term',term,' damping',d)
if v['kind']==7:
term = v['term']
if damping.get(term,None) is None: damping[term] = 0.0
try: d = v['damp']
except: d = 0.
damping[term] += d
if debug: print(i,':',v,'for term',term,' damping',d)
terms = sorted(terms)
if debug: print(' terms =',terms)
resonances = []
for term,energy,ivare in terms:
# G2ivar.append(ivare) # energies come before widths, in GNDS
kvar += 1 # ; ivar2G[ivare] = kvar
if debug: print('Jpi',JJ,pi,'kvar=',kvar,'for E=',energy)
kvarData.append([J,piv,'E',energy])
energy += Q_offset
row = [energy*cm2lab]
if damped:
damp = damping.get(term,0.0)*cm2lab
row.append(damp)
kvar += 1 # ; ivar2G[ivare] = kvar
kvarData.append([JJ,piv,'d',damp])
if debug: print('kvar=',kvar,'for damping=',damp)
else:
damp = 0.0
for ch in channelList:
found = False
ic,ia,lch,sch = ch
for i in range(0,len(variables)):
v = variables[i]
#print v['kind'],4 , v['term'],term , v['icch'],ch[0] , v['iach'],ch[1] , v['lch'],ch[2] , v['sch'],ch[3]
if v['kind']==4 and v['term']==term and v['icch']==ic and v['iach']==ia and v['lch']==lch and v['sch']==sch:
#print ' After ch',ch,' widths =',v['width']
p1,p2 = p1p2[(ic,ia)]
phaz = p1 + p2 + lch - x
if phaz % 2 == 1: # should be even. If not: stop
print('Error: ic,ia,p1,p2,lch,x,phaz=',ic,ia,p1,p2,lch,x,phaz)
sys.exit(1)
phase = (-1)**(phaz//2)
w = v['width'] * phase # convert to phase from Fresco, which has explicit i^L Huby phases.
if debug: print(' E= %.3f MeV, damp=%.2e width=%.4e l=%i S=%.1f p1,p2,phaz,s=%i,%i,%i: %i %i' % (energy, damp, w,lch,sch,p1,p2,x,phaz,phase))
try:
is_rwa = v['rwa']
except:
is_rwa = True # same as Frescox
if is_rwa != amplitudes: # fix to give correct output: rwa or formal width
rr = rr = rrc[ch[0]-1][ch[1]-1]
pMass,tMass,pZ,tZ,QI,prmax = ZAdict[ rr ]
e_ch = energy + QI - Q_offset
penetrability,shift,dSdE,W = getCoulomb_PSdSW(
abs(e_ch),lch, prmax, pMass,tMass,pZ,tZ, fmscal,etacns, False) # CWF at abs(e_ch)
if debug: print('p,t =',p,tex,'QI=',QI,': call coulombPenetrationFactor(L=',lch,'e_ch=',e_ch,') =',penetrability,dSdE,W)
# find gamma or Gamma_formal from the G_obs in the AZR input
# Gamma_formal = G_obs * shifty_denom
# gamma = sqrt(Gamma_formal/(2*P))
if amplitudes: # GND to have rwa from Gammaf
width = ( abs(w) /(2. * penetrability) ) **0.5
if w < 0: width = -width
if debug: print(" Converting Gammaf",w," to rwa",width)
else: # GND to have Gammaf from rwa
width = 2.0 * w*w * penetrability
if w < 0: width = -width
if debug: print(" Converting rwa",w," to Gammaf",width,'ampl:',amplitudes)
else:
width = w
width *= cm2lab**0.5 if amplitudes else cm2lab
if nonzero is not None and abs(width)<1e-20:
print('Change',width,'to',nonzero)
width = nonzero
# else:
# print('No change',width,'to',nonzero,'as',abs(width),1e-20,abs(width)<1e-20)
row.append(width)
found = True
# ivar = v.get('ivar',None)
# G2ivar.append(ivar)
kvar += 1; # ivar2G[ivar] = kvar
if debug: print('kvar=',kvar,'for width=',width,'from',v['width'])
kvarData.append([J,piv,energy,damp,width,ic-1,ia-1,lch,sch])
nfv = 0 if nonzero is None else nonzero
if not found: row.append(nfv)
resonances.append(row)
if debug:
print('Col headers:',len(columnHeaders))
print('Make table for',J,pi,[len(row) for row in resonances],'\n',resonances)
table = tableModule.table( columns=columnHeaders, data=resonances )
spinGroups.add( resolvedResonanceModule.spinGroup(str(spinGroupIndex), JJ, pi, channels,
resolvedResonanceModule.resonanceParameters(table)) )
# for J,pi in JpiMissing:
# JJ = resolvedResonanceModule.spin( J )
# spinGroupIndex += 1
# if verbose: print spinGroupIndex,': add empty J,pi =',JJ,pi
# table = tableModule.table( columns=None, data=None )
# channels = resolvedResonanceModule.channels()
# spinGroups.add( resolvedResonanceModule.spinGroup(str(spinGroupIndex), JJ, pi, channels,
# resolvedResonanceModule.resonanceParameters(table)) )
RMatrix = resolvedResonanceModule.RMatrix( 'eval', approximation, resonanceReactions, spinGroups,
boundaryCondition=BC, boundaryConditionValue=BV,
relativisticKinematics=KRL, reducedWidthAmplitudes=bool(amplitudes),
supportsAngularReconstruction=True, calculateChannelRadius=False )
docnew = RMatrix.documentation
docLines = [' ','Converted from SFRESCO search file %s' % inFile,time.ctime(),pwd.getpwuid(os.getuid())[4],' ',' ']
computerCode = computerCodeModule.ComputerCode( label = 'R-matrix fit', name = 'sfrescox', version = '7.1-6-gad5c8e') #, date = time.ctime() )
computerCode.note.body = '\n'.join( docLines )
listallvars = namelists.toString(['variable'])
dataLines = []
for i in range(len(variables)):
if variables[i]['kind'] in [5,6]:
vnew = listallvars[i].replace(' = ','=')
vnew = ' '.join(vnew.split()).replace(" ',","',")
dataLines += [vnew]
for datanml in namelists.toString(['data']):
dnew = datanml.replace(' = ','=').replace('.false.','F').replace('.true.','T')
dnew = ' '.join(dnew.split()).replace(" ',","',")
dataLines += [dnew]
fresco_text = '\n'.join(namelists.toString(['fresco']))
inputDataSpecs= computerCodeModule.InputDeck( 'Fresco input 1', inFile, ('\n %s\n' % time.ctime() ) + fresco_text +'\n' )
computerCode.inputDecks.add( inputDataSpecs )
inputDataSpecs= computerCodeModule.InputDeck( 'Fitted_data 1', inFile, ('\n %s\n' % time.ctime() ) + ('\n'.join( dataLines ))+'\n' )
computerCode.inputDecks.add( inputDataSpecs )
# print('computerCode:\n', computerCode.toXML( ) )
docnew.computerCodes.add( computerCode )
resolved = resolvedResonanceModule.resolved( emin,emax,'MeV' )
resolved.add( RMatrix )
scatteringRadius = Rm_radius
unresolved = None
resonances = resonancesModule.resonances( scatteringRadius, resolved, unresolved )
gnd.resonances = resonances
if Covariances is not None and not noCov:
nvar_covs = Covariances[0]['nvariables']
icor2ivar = Covariances[0]['variables']
nRvariables = kvar
if verbose: print('nvar_covs=',nvar_covs,' varying:',icor2ivar,' for nRvars:',nRvariables)
nvar_Rcovs = 0; R2icor = []; icor2R = []
for icor0 in range(nvar_covs):
icor = icor0+1 # from 1..nvar_covs
varnum = icor2ivar[icor0]
kind = variables[varnum-1]['kind']
if debug: print("Variable",varnum," has kind",kind,"at",icor)
if kind==3 or kind==4 or kind==7:
nvar_Rcovs += 1
R2icor += [icor]
icor2R += [nvar_Rcovs]
else:
icor2R += [None]
print("\nCovariance matrix varies",nvar_Rcovs,"R-matrix parameters within nRvars:",nRvariables)
if nvar_Rcovs > nRvariables: sys.exit(1)
if verbose:
print(' icor2ivar:',icor2ivar, len(icor2ivar))
print(' R to icor:',R2icor , len(R2icor))
print(' icor to R',icor2R, len(icor2R))
# print ' GND to ivar',G2ivar, len(G2ivar)
# print ' ivar to GND',ivar2G #, len(ivar2G)
matrix = zeros([nRvariables,nRvariables])
covar_matrix = zeros([nvar_Rcovs,nvar_Rcovs])
for covs in Covariances[1:]:
row = covs['row']-1
ivar = icor2ivar[row]
if verbose: print('Row:',covs['row'],'Fresco var#',ivar,'of kind',variables[ivar-1]['kind'])
if icor2R[row] is not None:
for i in range(nvar_Rcovs):
ivar_c = icor2ivar[R2icor[i]-1]
# print "matrix[",ivar,ivar_c,"] = covs[",R2icor[i]-1,"] from R=",R2icor[i],i
matrix[ivar-1,ivar_c-1] = covs['emat'][R2icor[i]-1]
covar_matrix[row,i] = matrix[ivar-1,ivar_c-1]
# data for computation
covData = open('fresco-covData.dat','w')
print('# covariance data from',inFile, file=covData)
print(nvar_Rcovs, '#, J,pi,<type>,<value>,partition,excitation,L,S, variable, source', file=covData)
print('# i Jtot,parity,L,S', file=covData)
for i in range(nvar_Rcovs):
ifv = icor2ivar[R2icor[i]-1]
v = variables[ifv]
# print(i,variables[ifv],' '.join([str(v) for v in kvarData[ifv]]), ifv , file=covData)
name = v['name']
kind = v['kind']
# print(ifv, v)
datas = []
if kind==3:
energy = v['energy']
datas = [energy]
if kind==4:
width = v['width']
ic = v['icch']-1; ia = v['iach']-1; L = v['lch']; S = v['sch']
datas = [width, ic, ia, L, S]
print(i,name,','.join([str(d) for d in datas]), ifv , file=covData)
for i in range(nvar_Rcovs):
line = ' '.join([str(covar_matrix[i,j]) for j in range(nvar_Rcovs)])
print(i,line, file=covData)
# store into GNDS (need links to each spinGroup)
parameters = covarianceModelParametersModule.parameters()
startIndex = 0
for spinGroup in resonances.resolved.evaluated:
nParams = spinGroup.resonanceParameters.table.nColumns * spinGroup.resonanceParameters.table.nRows
if nParams == 0: continue
parameters.add( covarianceModelParametersModule.parameterLink(
label = spinGroup.label, link = spinGroup.resonanceParameters.table, root="$reactions",
matrixStartIndex=startIndex, nParameters=nParams
))
startIndex += nParams
if debug:
print(parameters.toXML(),'\n')
print(type(matrix))
print('matrix:\n',matrix)
if True:
correlation = zeros([nRvariables,nRvariables])
if debug: print("Cov shape",matrix.shape,", Corr shape",correlation.shape)
# print "\nCov diagonals:",[matrix[i,i] for i in range(nRvariables)]
# print "\nCov diagonals:\n",numpy.array_repr(numpy.diagonal(matrix),max_line_width=100,precision=3)
print("Diagonal uncertainties:\n",numpy.array_repr(numpy.sqrt( | numpy.diagonal(matrix) | numpy.diagonal |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Oct 25 19:10:13 2019
@author: chiara
"""
import os
import numpy as np # scientific calculation
import pandas as pd # data analysis
import matplotlib.pyplot as plt # data plot
#import matplotlib
#from datetime import datetime,date # date objects
import seaborn as sns # data plot
import statsmodels.api as sm # used below for sm.GLM
import networkx as nx # used below to draw the correlation graph
from sklearn.ensemble import RandomForestRegressor
# Set working paths
mainPath="/home/chiara/kaggle/1C_PYproject/scripts/"
os.chdir(mainPath)
#from my_functions_1c import my_prepareTrain
import my_functions_1c as ct
########################
filePath="/home/chiara/kaggle/1C_PYproject/data/competitive-data-science-predict-future-sales/"+"sales_train_v2.csv"
data=pd.read_csv(filePath, index_col=False)
data.head(5)
data.shape
data.tail(5)
######################## LOAD DATA TRAIN
filePath="working_data/"+"1C_small_training.csv"
data=pd.read_csv(filePath, index_col=False)
data=data.drop("Unnamed: 0",axis=1)
data.keys()
data.head()
dataTrain=ct.my_prepareTrain(data) #921400 rows x 9 columns
dataTrain.keys()
#["date_block_num","item_id","shop_id","item_freq","shop_freq",
# "category_freq", "month","item_price","month_cnt"]
dataTrain.reset_index()
dataTrain.iloc[10:20,0:5]
dataTrain.plot(subplots=True)
##############################################################################
##############################################################################
############# CHECKS/SUMMARIES
## is the item price fixed among shops? over months?
# price is not fixed among shops
# price is not fixed among months
dataPriceXShop=dataTrain[{"date_block_num","item_id","shop_id","item_price"}]
dataPriceXShop.head()
dataPriceXShop.shape
dataItemXShop_price=pd.pivot_table(dataPriceXShop,
index=["date_block_num","item_id"],
values="item_price",columns=["shop_id"])
dataItemXShop_price #[135451 rows x 55 columns]
dataItemXShop_price.keys()
dataItemXShop_price.index
dataItemXShop_price.loc[(33,33)]
# all shops priced item 33 at 199, but shop 49 priced it at 159
dataItemXShop_price.loc[(12,33)]
# which items are consistent/present among shops? over months?
33-12+1 # 22 months
nan_indices=dataItemXShop_price.isnull()
#dataItemXShop_count=pd.pivot_table(nan_indices,
# index="item_id",columns=[""]
dataItemXShop_count=nan_indices.groupby("item_id").sum() #over months
dataItemXShop_count.max(axis=1).idxmax()
#item 30 occurs 22 times in at least 1 shop
dataItemXShop_count.max(axis=1).max()
dataItemXShop_count.max(axis=1).idxmin()
##item 0 occurs 1 times in at least 1 shop
dataItemXShop_count.max(axis=1).min()
itemPresence=dataItemXShop_count.sum(axis=1)/55
#stability of item presence on average
itemPresence.plot(kind="hist",bins=22,figsize=(10,5),
title="Number of item occurrences in 22 month period") #sort_values(ascending=False).
# most items appear only once
sns.set(rc={'figure.figsize':(10,12)})
fig, ax = plt.subplots(1, 1)
sns.heatmap(dataItemXShop_count,ax=ax)
ax.set_title("Monthly appeareances of items in shops")
fig
######
dataItemXMonth_price=pd.pivot_table(dataTrain[{"date_block_num","item_id","item_price"}],
index=["item_id"],values="item_price",
columns=["date_block_num"],aggfunc={np.min,np.max})
dataItemXMonth_price.keys()
# item 22167
dataItemXMonth_price.loc[(22167)]
# item 22167 varies its min price from 284 to 155
nan_indices2=dataItemXMonth_price.iloc[:,range(0,22)].isnull()
#sum(nan_indices2.values.tolist()==nan_indices.values.tolist())
nan_indices2.iloc[0:10,0:10] #itemXmonths
nan_indices.iloc[0:10,0:10] #itemXshops
####
# each month, in how many shops each item occurs?
dataItemXMonth_count=pd.pivot_table(dataTrain[{"date_block_num","item_id","shop_id"}],
index=["item_id"],values="shop_id",
columns=["date_block_num"],aggfunc=pd.value_counts)
dataItemXMonth_count.iloc[17000:17005,0:5]
dataItemXMonth_count=dataItemXMonth_count.applymap(lambda x: np.nansum(x))
dataItemXMonth_count.keys()
dataItemXMonth_count.iloc[0:40,].transpose().plot.line()
sns.set(rc={'figure.figsize':(10,12)})
fig, ax = plt.subplots(1, 1)
sns.heatmap(dataItemXMonth_count,ax=ax)
ax.set_title("Item appearences in each month")
fig
# most items appear only a few times.
# no item has a regularly high appearance
#
#dataItemXMonth_count=dataItemXMonth_count.reset_index()
#dataItemXMonth_count.columns=["item_id",12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32,33]
#dataItemXMonth_count.iloc[0:5,].transpose().plot.line()
#dataItemXMonth_count.keys()
####
# how many items each shop sell each month?
dataShopXMonth_count=pd.pivot_table(dataTrain[{"date_block_num","item_id","shop_id"}],
index=["shop_id"],values="item_id",
columns=["date_block_num"],aggfunc="count")
dataShopXMonth_perc=dataShopXMonth_count.applymap(lambda x: (x/17054)*100)
#dataShopXMonth_count.max().max()
sns.set(rc={'figure.figsize':(10,12)})
fig, ax = plt.subplots(1, 2)
sns.heatmap(dataShopXMonth_count,ax=ax[0])
sns.heatmap(dataShopXMonth_perc,ax=ax[1])
ax[0].set_title("Items sold in each shop for month")
ax[1].set_title("% of items sold in each shop for month")
fig
# shop 9,11,13,17,20,25,29,30,31..have more variety
# only 20% of items are sold in each shop,
# and none is continuously sold
###############################################################################
###############################################################################
############################### CREATE DF for prediction
dataTrain.plot(subplots=True)
# *keys
# date_block_num *
# item_id *
# shop_id *
# category_freq <-
# item_price
# item_freq <-
# shop_freq <-
# month
# month_cnt !!!!TARGET
dataTrain.keys()
dataTrain.set_index(["date_block_num","shop_id","item_id"])
dataTrain.iloc[20:30,2:8]
#sum(dataTrain["item_freq"]==dataTrain["shop_freq"])
## Calculate correlation between variables
# all variables are highly correlated with "month_cnt" except the price
CC=dataTrain[["item_price","month_cnt","month"]].corr()#"item_freq",
CC
# item_freq category_id item_price month_cnt
#item_freq 1.000000 -0.073820 0.067416 0.521578
#category_id -0.073820 1.000000 -0.228345 -0.010741
#item_price 0.067416 -0.228345 1.000000 0.022186
#month_cnt 0.521578 -0.010741 0.022186 1.000000
# Transform it in a links data frame (3 columns only):
links = CC.stack().reset_index()
links.columns =["var1","var2","corr_val"]
# remove self correlation (cor(A,A)=1)
links_filtered=links.loc[ (links['var1'] != links['var2']) ]
links_filtered
# Build your graph
#G = nx.Graph()
G = nx.path_graph(0)
graph = {"freq":["price","count"],"price":["freq","count"],
"count":["price","freq"]}
leng=1
#[('freq', 'price'), ('freq', 'count'), ('price', 'count')]
values=[0.067,0.522,0.022]
for vertex, edges in graph.items():
G.add_node("%s" % vertex)
# leng+=1
for edge in edges:
G.add_node("%s" % edge)
G.add_edge("%s" % vertex, "%s" % edge, weight = leng)
# print("'%s' connects with '%s'" % (vertex,edge))
# Create positions of all nodes and save them
#pos = nx.spring_layout(G)
pos={"price": [1.5,1.5],"freq": [0.5,1.5],"count": [1,1]}
labels ={('freq', 'price'): values[0], ('freq', 'count'): values[1],
('price', 'count'): values[2]}
# Draw the graph according to node positions
nx.draw(G, pos, with_labels=True,node_size=3000)
# Create edge labels
#labels = {edg: str(values[G.edges[edg]]) for edg in G.edges}
# Draw edge labels according to node positions
pos_lab={"price": [1.25,1.25],"freq": [0.75,1.25],"count": [1,1.5]}
nx.draw_networkx_edge_labels(G, pos,font_color='red',edge_labels=labels)
plt.axis('off')
plt.show()
################
#import statsmodels.formula.api as smf
# Instantiate a gamma family model with the default link function.
#poisson_model = sm.GLM(data.endog, data.exog, family=sm.families.Gamma())
#form="month_cnt ~ date_block_num + item_id + shop_id + item_freq + category_id + month + item_price"
#form="month_cnt ~ date_block_num + item_freq + month + item_price"
#poisson_model = smf.glm(formula=form, data=dataTrain, family=sm.families.Poisson())
#poisson_fit = poisson_model.fit()
#dir(poisson_fit.mle_settings)
#poisson_fit.use_t
#print(poisson_fit.summary())
#
# Generalized Linear Model Regression Results
#==============================================================================
#Dep. Variable: month_cnt No. Observations: 921400
#Model: GLM Df Residuals: 921392
#Model Family: Poisson Df Model: 7
#Link Function: log Scale: 1.0000
#Method: IRLS Log-Likelihood:* -inf
#Date: Fri, 15 Nov 2019 Deviance: * 8.7344e+05
#Time: 18:15:41 Pearson chi2: 3.83e+06
#No. Iterations: 7 *non-defined for Poisson family
#Covariance Type: nonrobust * non defined for scale=1
#==================================================================================
# coef std err z P>|z| [0.025 0.975]
#----------------------------------------------------------------------------------
#Intercept 0.5517 0.003 163.637 0.000 0.545 0.558
#date_block_num 0.0013 0.000 10.540 0.000 0.001 0.002
#item_id -9.174e-06 1.23e-07 -74.511 0.000 -9.41e-06 -8.93e-06
#shop_id -0.0012 4.26e-05 -27.026 0.000 -0.001 -0.001
#item_freq 0.1936 8.63e-05 2244.772 0.000 0.193 0.194
#category_id -0.0055 4.5e-05 -123.243 0.000 -0.006 -0.005
#month 0.0017 0.000 7.667 0.000 0.001 0.002
#item_price 1.289e-05 3.19e-07 40.347 0.000 1.23e-05 1.35e-05
#==================================================================================
# item_id, category_id have small weight
# Generalized Linear Model Regression Results
#==============================================================================
#Dep. Variable: month_cnt No. Observations: 921400
#Model: GLM Df Residuals: 921395
#Model Family: Poisson Df Model: 4
#Link Function: log Scale: 1.0000
#Method: IRLS Log-Likelihood: -inf
#Date: Fri, 15 Nov 2019 Deviance: 9.1019e+05
#Time: 18:40:30 Pearson chi2: 3.78e+06
#No. Iterations: 7
#Covariance Type: nonrobust
#==================================================================================
# coef std err z P>|z| [0.025 0.975]
#----------------------------------------------------------------------------------
#Intercept 0.2137 0.003 81.395 0.000 0.209 0.219
#date_block_num 0.0004 0.000 3.000 0.003 0.000 0.001
#item_freq 0.1881 8.02e-05 2346.055 0.000 0.188 0.188
#month 0.0024 0.000 11.216 0.000 0.002 0.003
#item_price 2.899e-05 2.82e-07 102.951 0.000 2.84e-05 2.95e-05
#==================================================================================
# item_freq is obviously the larger coeff
filePath="working_data/"+"1C_ctrl_training.csv"
data2=pd.read_csv(filePath, index_col=False)
data2=data2.drop(["Unnamed: 0",'Unnamed: 0.1', 'Unnamed: 0.1.1'],axis=1)
data2.keys()
data2.head()
dataCtrl=ct.my_prepareTrain(data2)
dataCtrl.keys()
dataCtrlHM=ct.my_summaryHistoricFunc(dataCtrl,f_mean=True,f_sum=False) #takes almost 10 minutes
#dataCtrl=pd.get_dummies(dataCtrl)
dataCtrl.reset_index()
dataCtrlHM.reset_index()
C=pd.merge(dataCtrl,dataCtrlHM,how="left",on=["date_block_num","item_id","shop_id"])
#target=dataCtrl["month_cnt"]
#dataCtrl=dataCtrl.drop("month_cnt",axis=1)
#predictions=poisson_fit.predict(exog=dataCtrl, transform=True)
#err=abs(target-predictions)
#err.plot()
#err.mean()
#err.max()
#err.min()
#
#rmse=my_rmse(target,predictions) #15.141159663472205
## not that bad...i should see the mean, std of the counts
#poisson_fit.params
#poisson_fit
dataTrainHM=ct.my_summaryHistoricFunc(dataTrain,f_mean=True,f_sum=False) #15:54-15:09
#dataTrainHM=ct.my_summaryHistoMean(dataTrain) #takes almost 10 minutes
#dataTrain=pd.get_dummies(dataTrain)
dataTrain.reset_index()
dataTrainHM.reset_index()
D=pd.merge(dataTrain,dataTrainHM,how="left",on=["date_block_num","item_id","shop_id"])
#D=D.drop("histo_f_cnt",axis=1)
CC=D.corr()
CC["month_cnt"]
sum(abs(CC.values)>0.4)
#models_param=[["month_cnt ~ date_block_num + item_freq + month + item_price","GLM","poisson"],
# ["month_cnt ~ date_block_num + item_id + shop_id + item_freq + category_id + month + item_price","GLM","poisson"]
# ]
models_param=[[D.keys(),"GLM","poisson"]]#,[D.keys(),"GLM","poisson"]
i=0
modelRes=pd.DataFrame(columns=["model","formula","family","aic",
"scale","log-likel","deviance","chi2",
"mean_err_perc","sign_pval_perc",
"rmse_in","rmse_out","acc_in","acc_out"])
for i in range(0,len(models_param)):
aux=ct.my_compareFitModels(D,models_param[i][0],models_param[i][1],models_param[i][2],C)
modelRes=modelRes.append(aux,sort=False).reset_index() #18:1018:13
modelRes.iloc[0:1,0:11]
[y,X]=ct.my_df2arry_endo_exog(D,"month_cnt")
model = sm.GLM(y,X, family=sm.families.Poisson())
fitModel=model.fit(method='nm', maxiter=100, maxfun=100)#18:15-18:16
predictions=fitModel.predict(exog=X, transform=True)
err=abs(y-predictions)
acc=100*(len([e for e in err if e<1])/len(err)) # <1:53,74% <2: 88,28%
acc
err.mean()
#rmse_in=ct.my_calculateAccuracy(dataTrain,"month_cnt",fitModel)
#rmse_out=ct.my_calculateAccuracy(dataTest,"month_cnt",fitModel)
import my_functions_1c as ct
fitModel.summary()
#Dep. Variable: y No. Observations: 921400
#Model: GLM Df Residuals: 921393
#Model Family: Poisson Df Model: 6
#Link Function: log Scale: 1.0000
#Method: nm Log-Likelihood: -inf
#Date: Sat, 30 Nov 2019 Deviance: 1.0246e+07
#Time: 18:24:07 Pearson chi2: 2.08e+07
#No. Iterations: 556
#Covariance Type: nonrobust
#==============================================================================
# coef std err z P>|z| [0.025 0.975]
#------------------------------------------------------------------------------
#x1 0.0211 8.18e-05 258.570 0.000 0.021 0.021
#x2 -1.245e-05 9.22e-08 -134.923 0.000 -1.26e-05 -1.23e-05
#x3 0.0075 3.68e-05 204.837 0.000 0.007 0.008
#x4 -0.0013 3.61e-05 -35.865 0.000 -0.001 -0.001
#x5 0.0181 0.000 97.250 0.000 0.018 0.018
#x6 4.68e-05 2.6e-07 180.244 0.000 4.63e-05 4.73e-05
#x7 0.0112 1.04e-06 1.07e+04 0.000 0.011 0.011
#==============================================================================
[y,X]=ct.my_df2arry_endo_exog(D,"month_cnt")
rfModel=RandomForestRegressor(n_estimators=500,max_depth=10,random_state=18)
rfFit=rfModel.fit(X,y) #17:09-17:26
pred=rfFit.predict(X) #17:26-17:27
err=abs(y-pred)
err2=y-pred
np.mean(err) #1.3330819427844776
np.max(err)
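# (Sketch, not in the original script: mirroring the GLM accuracy check above for the
# random forest; `err` and `err2` are the absolute and signed residuals computed above.)
# acc_rf = 100 * (len([e for e in err if e < 1]) / len(err)) # share of predictions within +/-1 count
# rmse_rf = np.sqrt(np.mean(err2 ** 2)) # root-mean-square error
# print(acc_rf, rmse_rf)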
import os
import time
import torch
import numpy as np
def env_factory(path, verbose=False, **kwargs):
from functools import partial
"""
Returns an *uninstantiated* environment constructor.
Since environments containing cpointers (e.g. Mujoco envs) can't be serialized,
this allows us to pass their constructors to Ray remote functions instead
(since the gym registry isn't shared across ray subprocesses we can't simply
pass gym.make() either)
Note: env.unwrapped.spec is never set, if that matters for some reason.
"""
if 'digit' in path.lower():
if not os.path.isdir('digit'):
print("You appear to be missing a './digit' directory.")
print("You can clone the cassie environment repository with:")
print("git clone https://github.com/siekmanj/digit")
exit(1)
from digit.digit import DigitEnv
path = path.lower()
if 'random_dynamics' in path or 'dynamics_random' in path or 'randomdynamics' in path or 'dynamicsrandom' in path:
dynamics_randomization = True
else:
dynamics_randomization = False
if 'impedance' in path:
impedance = True
else:
impedance = False
if 'standing' in path:
standing = True
else:
standing = False
if 'footpos' in path:
footpos = True
else:
footpos = False
if 'perception' in path:
perception = True
else:
perception = False
if 'stairs' in path:
stairs = True
else:
stairs = False
if 'hop_only' in path:
hop_only = True
else:
hop_only = False
if 'walk_only' in path:
walk_only = True
else:
walk_only = False
if 'height' in path:
height = True
else:
height = False
return partial(DigitEnv, dynamics_randomization=dynamics_randomization, impedance=impedance, standing=standing, footpos=footpos, perception=perception, stairs=stairs, hop_only=hop_only, walk_only=walk_only, height=height)
if 'cassie' in path.lower():
if not os.path.isdir('cassie'):
print("You appear to be missing a './cassie' directory.")
print("You can clone the cassie environment repository with:")
print("git clone https://github.com/siekmanj/cassie")
exit(1)
from cassie.cassie import CassieEnv_v2
path = path.lower()
if 'random_dynamics' in path or 'dynamics_random' in path or 'randomdynamics' in path or 'dynamicsrandom' in path:
dynamics_randomization = True
else:
dynamics_randomization = False
if 'nodelta' in path or 'no_delta' in path:
no_delta = True
else:
no_delta = False
no_delta = True
if 'stateest' in path or 'state_est' in path:
state_est = True
else:
state_est = False
state_est = True
if 'clock_based' in path or 'clockbased' in path:
clock = True
else:
clock = False
if 'statehistory' in path or 'state_history' in path:
history=1
else:
history=0
if 'legacy' in path:
legacy = True
else:
legacy = False
legacy = False
if 'impedance' in path:
impedance = True
else:
impedance = False
if 'height' in path:
height = True
else:
height = False
if verbose:
print("Created cassie env with arguments:")
print("\tdynamics randomization: {}".format(dynamics_randomization))
print("\tstate estimation: {}".format(state_est))
print("\tno delta: {}".format(no_delta))
print("\tclock based: {}".format(clock))
print("\timpedance control: {}".format(impedance))
print("\theight control: {}".format(height))
return partial(CassieEnv_v2, 'walking', clock=clock, state_est=state_est, no_delta=no_delta, dynamics_randomization=dynamics_randomization, history=history, legacy=legacy, impedance=impedance, height=height)
import gym
spec = gym.envs.registry.spec(path)
_kwargs = spec._kwargs.copy()
_kwargs.update(kwargs)
try:
if callable(spec._entry_point):
cls = spec._entry_point(**_kwargs)
else:
cls = gym.envs.registration.load(spec._entry_point)
except AttributeError:
if callable(spec.entry_point):
cls = spec.entry_point(**_kwargs)
else:
cls = gym.envs.registration.load(spec.entry_point)
return partial(cls, **_kwargs)
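# Example usage (illustrative environment string, not from the original file):
# make_env = env_factory('cassie-clock_based-state_est', verbose=True) # returns a constructor, not an instance
# env = make_env() # instantiate lazily, e.g. inside each Ray worker process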
def eval_policy(policy, min_timesteps=1000, max_traj_len=1000, visualize=True, env=None, verbose=True):
env_name = env
with torch.no_grad():
if env_name is None:
env = env_factory(policy.env_name)()
else:
env = env_factory(env_name)()
if verbose:
print("Policy is a: {}".format(policy.__class__.__name__))
reward_sum = 0
env.dynamics_randomization = False
total_t = 0
episodes = 0
obs_states = {}
mem_states = {}
while total_t < min_timesteps:
state = env.reset()
done = False
timesteps = 0
eval_reward = 0
episodes += 1
if hasattr(policy, 'init_hidden_state'):
policy.init_hidden_state()
#speeds = [(0, 0), (0.5, 0), (2.0, 0)]
speeds = list(zip(np.array(range(0, 350)) / 100, np.zeros(350)))
pelvis_vel = 0
while not done and timesteps < max_traj_len:
if (hasattr(env, 'simrate') or hasattr(env, 'dt')) and visualize:
start = time.time()
action = policy.forward(torch.Tensor(state)).detach().numpy()
state, reward, done, _ = env.step(action)
if visualize:
env.render()
eval_reward += reward
timesteps += 1
total_t += 1
if hasattr(policy, 'get_quantized_states'):
obs, mem = policy.get_quantized_states()
obs_states[obs] = True
mem_states[mem] = True
print(policy.get_quantized_states(), len(obs_states), len(mem_states))
if visualize:
if hasattr(env, 'simrate'):
# assume 30hz (hack)
end = time.time()
delaytime = max(0, 1000 / 30000 - (end-start))
time.sleep(delaytime)
if hasattr(env, 'dt'):
while time.time() - start < env.dt:
time.sleep(0.0005)
reward_sum += eval_reward
if verbose:
print("Eval reward: ", eval_reward)
return reward_sum / episodes
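# Example usage (hypothetical checkpoint path shown for illustration):
# policy = torch.load('trained_models/cassie_policy.pt')
# avg_reward = eval_policy(policy, min_timesteps=2000, visualize=False)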
def interactive_eval(policy_name, env=None):
from copy import deepcopy
import termios, sys
import tty
import select
with torch.no_grad():
policy = torch.load(policy_name)
m_policy = torch.load(policy_name)
#args, run_args = self.args, self.run_args
#run_args = run_args
print("GOT ENV", env)
if env is None:
env_name = policy.env_name
else:
env_name = env
print("env name: ", env_name)
env = env_factory(env_name)()
env.dynamics_randomization = False
env.evaluation_mode = True
#if self.run_args.pca:
# from util.pca import PCA_Plot
# pca_plot = PCA_Plot(policy, env)
#if self.run_args.pds:
# from util.pds import PD_Plot
# pd_plot = PD_Plot(policy, env)
# print("DOING PDS??")
if hasattr(policy, 'init_hidden_state'):
policy.init_hidden_state()
m_policy.init_hidden_state()
old_settings = termios.tcgetattr(sys.stdin)
env.render()
render_state = True
slowmo = True
try:
tty.setcbreak(sys.stdin.fileno())
state = env.reset()
env.speed = 0
env.side_speed = 0
env.phase_add = 50
env.period_shift = [0, 0.5]
#env.ratio = [0.4, 0.6]
env.eval_mode = True
done = False
timesteps = 0
eval_reward = 0
mirror = False
def isData():
return select.select([sys.stdin], [], [], 0) == ([sys.stdin], [], [])
while render_state:
if isData():
c = sys.stdin.read(1)
if c == 'w':
env.speed = np.clip(env.speed + 0.1, env.min_speed, env.max_speed)
if c == 's':
env.speed = np.clip(env.speed - 0.1, env.min_speed, env.max_speed)
if c == 'q':
env.orient_add -= 0.005 * np.pi
if c == 'e':
env.orient_add += 0.005 * np.pi
if c == 'a':
env.side_speed = np.clip(env.side_speed + 0.05, env.min_side_speed, env.max_side_speed)
if c == 'd':
env.side_speed = np.clip(env.side_speed - 0.05, env.min_side_speed, env.max_side_speed)
if c == 'r':
state = env.reset()
if hasattr(policy, 'init_hidden_state'):
policy.init_hidden_state()
m_policy.init_hidden_state()
print("Resetting environment via env.reset()")
env.speed = 0
env.side_speed = 0
env.phase_add = env.simrate
env.period_shift = [0, 0.5]
if c == 't':
env.phase_add = np.clip(env.phase_add + 1, int(env.simrate * env.min_step_freq), int(env.simrate * env.max_step_freq))
if c == 'g':
env.phase_add = np.clip(env.phase_add - 1, int(env.simrate * env.min_step_freq), int(env.simrate * env.max_step_freq))
if c == 'y':
env.height = np.clip(env.height + 0.01, env.min_height, env.max_height)
if c == 'h':
env.height = np.clip(env.height - 0.01, env.min_height, env.max_height)
if c == 'm':
mirror = not mirror
if c == 'o':
# increase ratio of phase 1
env.ratio[0] = np.clip(env.ratio[0] + 0.01, 0, env.max_swing_ratio)
env.ratio[1] = 1 - env.ratio[0]
if c == 'l':
env.ratio[0] = np.clip(env.ratio[0] - 0.01, 0, env.max_swing_ratio)
from pathlib import Path
import matplotlib.pyplot as plt
from scipy import ndimage
import yaml
import numpy as np
import os, sys
def rotation_axis_to_xyz(rotation_axis, invert=False, setting='xds'):
"""Convert rotation axis angle to XYZ vector compatible with 'xds', or 'dials'
Set invert to 'True' for anti-clockwise rotation
"""
if invert:
rotation_axis += np.pi
rot_x = np.cos(rotation_axis)
rot_y = np.sin(rotation_axis)
rot_z = 0
if setting == 'dials':
return rot_x, -rot_y, rot_z
elif setting == 'xds':
return rot_x, rot_y, rot_z
else:
raise ValueError("Must be one of {'dials', 'xds'}")
def rotation_matrix(axis, theta):
"""Calculates the rotation matrix around axis of angle theta (radians)"""
# axis = axis/np.sqrt(np.dot(axis,axis))
l = np.sqrt(np.dot(axis, axis))
axis = axis/l
a = np.cos(theta/2)
b, c, d = -1*axis*np.sin(theta/2)
return np.array([[a*a+b*b-c*c-d*d, 2*(b*c-a*d), 2*(b*d+a*c)],
[ 2*(b*c+a*d), a*a+c*c-b*b-d*d, 2*(c*d-a*b)],
[ 2*(b*d-a*c), 2*(c*d+a*b), a*a+d*d-b*b-c*c]])
def make_2d_rotmat(theta):
"""Take angle in radians, and return 2D rotation matrix"""
R = np.array([[np.cos(theta), -np.sin(theta)],
[np.sin(theta), np.cos(theta)]])
return R
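# Example: make_2d_rotmat(np.pi/2) is approximately [[0, -1], [1, 0]], so
# make_2d_rotmat(np.pi/2) @ np.array([1.0, 0.0]) rotates x onto y: ~[0, 1].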
def random_sample(arr, n):
"""Select random sample of `n` rows from array"""
indices = np.random.choice(arr.shape[0], n, replace=False)
return arr[indices]
def xyz2cyl(arr):
"""Take a set of reflections in XYZ and convert to polar (cylindrical) coordinates"""
sx, sy, sz = arr.T
out = np.empty((len(arr), 2))
np.hypot(sx, sy, out=out[:,0])
np.arctan2(sz, out[:,0], out=out[:,1])
np.arctan2(sy, sx, out=out[:,0])
import numpy as np
import scipy as sp
import pandas as pd
from sklearn.base import BaseEstimator, RegressorMixin
from modules.KernelRegWrapper import KernelRegWrapper
class DependentKernelReg(BaseEstimator, RegressorMixin):
"""
A sklearn-style NW kernel regression with dependent bandwidth matrix
and multi-variate normal kernel.
"""
def __init__(self, kernel="exp", bw_init="scott"):
self.outp = None
self.regressors = None
self.kernel = kernel
self.bw_init = bw_init
self.kernel_params = None
self.params = []
def fit(self, X, y):
self.regressors = X
self.outp = y
cov_mat = np.cov(X, rowvar=False)
n = X.shape[0]
d = X.shape[1]
H_init = 1
if self.bw_init == "scott":
H_init = 1.06 * n ** (-1. / (d + 4))
elif self.bw_init == "silverman":
H_init = (n * (d+2) / 4.) ** (-1. / (d + 4))
if self.regressors.shape[1] == 1:
H = H_init * np.sqrt(cov_mat)
else:
H = H_init * sp.linalg.sqrtm(cov_mat)
self.params = {"sample_size": n, "no_of_regressors": d, "bw_matrix": H}
if self.kernel == "exp":
if self.regressors.shape[1] == 1:
H_inv = H**(-1)
H_det = H
else:
H_inv = np.linalg.inv(H)
H_det = np.linalg.det(H)
H_const = 1. / ((np.sqrt(2 * np.pi) ** d) * H_det) #0.5))
self.kernel_params = {"H": H, "invH": H_inv, "detH": H_det,
"dim": d, "const": H_const}
return self
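# (Sketch, not part of the original class: a Nadaraya-Watson prediction for a single
# query row x0 would weight each training response by the kernel of the differences.)
# w = self.exp_kernel(self.regressors - x0) # one kernel weight per training sample
# y_hat = np.sum(w * self.outp) / np.sum(w) # NW estimate = kernel-weighted average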
def exp_kernel(self, X):
if isinstance(X, pd.DataFrame):
X = X.to_numpy()
if self.regressors.shape[1] == 1:
powa = -0.5 * (self.kernel_params["invH"] ** 2) * (X * X).sum(-1)
else:
xtimesH = np.matmul(X, self.kernel_params["invH"])
powa = -0.5 * (xtimesH * xtimesH).sum(-1) # equiv. to (invH * X)^T (invH *X)
# powa = np.expand_dims(powa, axis=0)
weight = self.kernel_params["const"] * np.exp(powa)
import numpy as np
import utils
class ssdu_masks():
"""
Parameters
----------
rho: split ratio for training and loss mask. \ rho = |\Lambda|/|\Omega|
small_acs_block: keeps a small acs region fully-sampled for training masks
if there is no acs region, the small acs block should be set to zero
input_data: input k-space, nrow x ncol x ncoil
input_mask: input mask, nrow x ncol
Gaussian_selection:
-divides acquired points into two disjoint sets based on Gaussian distribution
-Gaussian selection function has the parameter 'std_scale' for the standard deviation of the distribution. We recommend to keep it as 2<=std_scale<=4.
Uniform_selection: divides acquired points into two disjoint sets based on uniform distribution
Returns
----------
trn_mask: used in data consistency units of the unrolled network
loss_mask: used to define the loss in k-space
"""
def __init__(self, rho=0.4, small_acs_block=(4, 4)):
self.rho = rho
self.small_acs_block = small_acs_block
def Gaussian_selection(self, input_data, input_mask, std_scale=4, num_iter=1):
nrow, ncol = input_data.shape[0], input_data.shape[1]
center_kx = int(utils.find_center_ind(input_data, axes=(1, 2)))
center_ky = int(utils.find_center_ind(input_data, axes=(0, 2)))
if num_iter == 0:
print(f'\n Gaussian selection is processing, rho = {self.rho:.2f}, center of kspace: center-kx: {center_kx}, center-ky: {center_ky}')
temp_mask = np.copy(input_mask)
temp_mask[center_kx - self.small_acs_block[0] // 2:center_kx + self.small_acs_block[0] // 2,
center_ky - self.small_acs_block[1] // 2:center_ky + self.small_acs_block[1] // 2] = 0
loss_mask = np.zeros_like(input_mask)
count = 0
while count <= np.int(np.ceil(np.sum(input_mask[:]) * self.rho)):
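# (Sketch of the presumed loop body, not shown in this excerpt: draw candidate k-space
# locations from a Gaussian centred on the k-space centre and move acquired points,
# outside the small ACS block, into the loss mask until about rho*|Omega| are selected.)
# indx = int(np.round(np.random.normal(loc=center_kx, scale=(nrow - 1) / std_scale)))
# indy = int(np.round(np.random.normal(loc=center_ky, scale=(ncol - 1) / std_scale)))
# if 0 <= indx < nrow and 0 <= indy < ncol and temp_mask[indx, indy] == 1 and loss_mask[indx, indy] != 1:
#     loss_mask[indx, indy] = 1
#     count += 1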
import time
import numpy as np
from sklearn.metrics import f1_score, accuracy_score
import torch
import torch.nn as nn
from tqdm import tqdm
import segmentation_models_pytorch as smp
def train(
train_dataloader: torch.utils.data.dataloader,
model: nn.Module,
loss_function: nn.Module,
optimizer: torch.optim.Optimizer,
device: torch.device,
scheduler=None,
):
"""[Perfom one training epoch]
Args:
train_dataloader (torch.utils.data.dataloader): [Pytorch dataloader]
model (nn.Module): [Unet based model]
loss_function (nn.Module): []
optimizer (torch.optim.Optimizer): []
device (torch.device): [Training was done on multi GPU]
scheduler ([type], optional): [description]. Defaults to None.
"""
model.train()
total_loss = 0
for step, batch in tqdm(enumerate(train_dataloader)):
if step % 50 == 0 and not step == 0:
print(" Batch {:>5,} of {:>5,}.".format(step, len(train_dataloader)))
data = batch["data"].to(device)
label = batch["label"].to(device)
model.zero_grad()
preds = model(data)
loss = loss_function(preds, label)
total_loss = total_loss + loss.item()
loss.backward()
optimizer.step()
# scheduler.step()
def evaluate(
dataloader: torch.utils.data.dataloader,
model: nn.Module,
loss_function: nn.Module,
optimizer: torch.optim.Optimizer,
device: torch.device,
):
"""[Perform one evaluation step]
Args:
dataloader (torch.utils.data.dataloader): [Pytorch dataloader]
model (nn.Module): [Unet based model]
loss_function (nn.Module): []
optimizer (torch.optim.Optimizer): []
device (torch.device): []
Returns:
[tuple]: [Loss and metric over all the dataloader]
"""
iou = smp.utils.metrics.IoU(
threshold=0.5
) # Metric choosen is IOU, could be DICE or Logloss
print("\nEvaluating...")
model.eval()
total_loss = 0
total_metric = 0
total_preds = []
total_labels = []
for step, batch in enumerate(dataloader):
# Progress update every 50 batches.
if step % 10 == 0 and not step == 0:
# Report progress.
print(" Batch {:>5,} of {:>5,}.".format(step, len(dataloader)))
data = batch["data"].to(device)
label = batch["label"].to(device)
with torch.no_grad():
preds = model(data)
loss = loss_function(preds, label)
metric = iou(preds, label)
total_loss = total_loss + loss.item()
total_metric = total_metric + metric.item()
preds = preds.detach().cpu().numpy()
labels = label.detach().cpu().numpy()
total_preds.append(preds)
total_labels.append(labels)
# compute the validation loss of the epoch
avg_loss = total_loss / len(dataloader)
avg_metric = total_metric / len(dataloader)
total_preds = np.concatenate(total_preds, axis=0)
total_labels = np.concatenate(total_labels, axis=0)
import numpy as np
from numpy.testing import assert_array_almost_equal, assert_array_equal
import pytest
import scipy.sparse as sp
from lightfm import LightFM
import lightfm
import lightfm.model
import lightfm.inference as inference
mattypes = sp.coo_matrix, sp.lil_matrix, sp.csr_matrix, sp.csc_matrix
dtypes = np.int32, np.int64, np.float32, np.float64
def test_empty_matrix():
no_users, no_items = 10, 100
train = sp.coo_matrix((no_users, no_items), dtype=np.int32)
model = LightFM()
model.fit_partial(train)
def test_matrix_types():
no_users, no_items = 10, 100
no_features = 20
for mattype in mattypes:
for dtype in dtypes:
train = mattype((no_users, no_items), dtype=dtype)
user_features = mattype((no_users, no_features), dtype=dtype)
item_features = mattype((no_items, no_features), dtype=dtype)
model = LightFM()
model.fit_partial(train, user_features=user_features, item_features=item_features)
model.predict(
np.random.randint(0, no_users, 10).astype(np.int32),
np.random.randint(0, no_items, 10).astype(np.int32),
user_features=user_features,
item_features=item_features,
)
model.predict_rank(train, user_features=user_features, item_features=item_features)
def test_coo_with_duplicate_entries():
# Calling .tocsr on a COO matrix with duplicate entries
# changes its data arrays in-place, leading to out-of-bounds
# array accesses in the WARP code.
# Reported in https://github.com/lyst/lightfm/issues/117.
rows, cols = 1000, 100
mat = sp.random(rows, cols)
mat.data[:] = 1
# Duplicate entries in the COO matrix
mat.data = np.concatenate((mat.data, mat.data[:1000]))
mat.row = np.concatenate((mat.row, mat.row[:1000]))
mat.col = np.concatenate((mat.col, mat.col[:1000]))
for loss in ('warp', 'bpr', 'warp-kos'):
model = LightFM(loss=loss)
model.fit(mat)
def test_predict():
no_users, no_items = 10, 100
train = sp.coo_matrix((no_users, no_items), dtype=np.int32)
model = LightFM()
model.fit_partial(train)
for uid in range(no_users):
scores_arr = model.predict(np.repeat(uid, no_items), np.arange(no_items))
scores_int = model.predict(uid, np.arange(no_items))
assert np.allclose(scores_arr, scores_int)
def test_input_dtypes():
no_users, no_items = 10, 100
no_features = 20
for dtype in dtypes:
train = sp.coo_matrix((no_users, no_items), dtype=dtype)
user_features = sp.coo_matrix((no_users, no_features), dtype=dtype)
item_features = sp.coo_matrix((no_items, no_features), dtype=dtype)
model = LightFM()
model.fit_partial(train, user_features=user_features, item_features=item_features)
model.predict(
np.random.randint(0, no_users, 10).astype(np.int32),
np.random.randint(0, no_items, 10).astype(np.int32),
user_features=user_features,
item_features=item_features,
)
def test_not_enough_features_fails():
no_users, no_items = 10, 100
no_features = 20
train = sp.coo_matrix((no_users, no_items), dtype=np.int32)
user_features = sp.csr_matrix((no_users - 1, no_features), dtype=np.int32)
item_features = sp.csr_matrix((no_items - 1, no_features), dtype=np.int32)
model = LightFM()
with pytest.raises(Exception):
model.fit_partial(train, user_features=user_features, item_features=item_features)
def test_feature_inference_fails():
# On predict if we try to use feature inference and supply
# higher ids than the number of features that were supplied to fit
# we should complain
no_users, no_items = 10, 100
no_features = 20
train = sp.coo_matrix((no_users, no_items), dtype=np.int32)
user_features = sp.csr_matrix((no_users, no_features), dtype=np.int32)
item_features = sp.csr_matrix((no_items, no_features), dtype=np.int32)
model = LightFM()
model.fit_partial(train, user_features=user_features, item_features=item_features)
with pytest.raises(ValueError):
model.predict(np.array([no_features], dtype=np.int32), np.array([no_features], dtype=np.int32))
def test_return_self():
no_users, no_items = 10, 100
train = sp.coo_matrix((no_users, no_items), dtype=np.int32)
model = LightFM()
assert model.fit_partial(train) is model
assert model.fit(train) is model
def test_param_sanity():
with pytest.raises(AssertionError):
LightFM(no_components=-1)
with pytest.raises(AssertionError):
LightFM(user_alpha=-1.0)
with pytest.raises(AssertionError):
LightFM(item_alpha=-1.0)
with pytest.raises(ValueError):
LightFM(max_sampled=-1.0)
def test_sample_weight():
model = LightFM()
train = sp.coo_matrix(np.array([[0, 1], [0, 1]]))
with pytest.raises(ValueError):
# Wrong number of weights
sample_weight = sp.coo_matrix(np.zeros((2, 2)))
model.fit(train, sample_weight=sample_weight)
with pytest.raises(ValueError):
# Wrong shape
sample_weight = sp.coo_matrix(np.zeros(2))
model.fit(train, sample_weight=sample_weight)
with pytest.raises(ValueError):
# Wrong order of entries
model.fit(train, sample_weight=sample_weight)
sample_weight = sp.coo_matrix((train.data, (train.row, train.col)))
model.fit(train, sample_weight=sample_weight)
model = LightFM(loss='warp-kos')
with pytest.raises(NotImplementedError):
model.fit(train, sample_weight=np.ones(1))
def test_predict_ranks():
no_users, no_items = 10, 100
train = sp.rand(no_users, no_items, format='csr', random_state=42)
model = LightFM()
model.fit_partial(train)
# Compute ranks for all items
rank_input = sp.csr_matrix(np.ones((no_users, no_items)))
ranks = model.predict_rank(rank_input, num_threads=2).todense()
assert np.all(ranks.min(axis=1) == 0)
assert np.all(ranks.max(axis=1) == no_items - 1)
for row in range(no_users):
assert np.all(np.sort(ranks[row]) == np.arange(no_items))
# Train set exclusions. All ranks should be zero
# if train interactions is dense.
ranks = model.predict_rank(rank_input,
train_interactions=rank_input).todense()
assert np.all(ranks == 0)
# Max rank should be num_items - 1 - number of positives
# in train in that row
ranks = model.predict_rank(rank_input,
train_interactions=train).todense()
assert np.all(
np.squeeze(np.array(ranks.max(axis=1))) == no_items - 1 - np.squeeze(np.array(train.getnnz(axis=1)))
)
# Make sure ranks are computed pessimistically when
# there are ties (that is, equal predictions for every
# item will assign maximum rank to each).
model.user_embeddings = np.zeros_like(model.user_embeddings)
model.item_embeddings = np.zeros_like(model.item_embeddings)
model.user_biases = np.zeros_like(model.user_biases)
model.item_biases = np.zeros_like(model.item_biases)
ranks = model.predict_rank(rank_input, num_threads=2).todense()
assert np.all(ranks.min(axis=1) == 99)
assert np.all(ranks.max(axis=1) == 99)
# Wrong input dimensions
with pytest.raises(ValueError):
model.predict_rank(sp.csr_matrix((5, 5)), num_threads=2)
def test_exception_on_divergence():
no_users, no_items = 1000, 1000
train = sp.rand(no_users, no_items, format='csr', random_state=42)
model = LightFM(learning_rate=10000000.0, loss='warp')
with pytest.raises(ValueError):
model.fit(train, epochs=10)
def test_sklearn_api():
model = LightFM()
params = model.get_params()
model2 = LightFM(**params)
params2 = model2.get_params()
assert params == params2
model.set_params(**params)
params['invalid_param'] = 666
with pytest.raises(ValueError):
model.set_params(**params)
def test_predict_not_fitted():
model = LightFM()
with pytest.raises(ValueError):
model.predict(np.arange(10), np.arange(10))
with pytest.raises(ValueError):
model.predict_rank(1)
with pytest.raises(ValueError):
model.get_user_representations()
with pytest.raises(ValueError):
model.get_item_representations()
def test_nan_features():
no_users, no_items = 1000, 1000
train = sp.rand(no_users, no_items, format='csr', random_state=42)
features = sp.identity(no_items)
features.data *= np.nan
model = LightFM(loss='warp')
with pytest.raises(ValueError):
model.fit(train, epochs=10, user_features=features, item_features=features)
def test_nan_interactions():
no_users, no_items = 1000, 1000
train = sp.rand(no_users, no_items, format='csr', random_state=42)
train.data *= np.nan
model = LightFM(loss='warp')
with pytest.raises(ValueError):
model.fit(train)
def test_precompute_representation():
n_users = 10 ** 3
n_user_features = 100
no_component = 50
user_features = sp.random(n_users, n_user_features, density=.1)
feature_embeddings = np.random.uniform(size=(n_user_features, no_component))
feature_biases = np.random.uniform(size=n_user_features)
features = user_features
representation, representation_biases = inference._precompute_representation(
features,
feature_embeddings,
feature_biases,
)
assert representation.shape == (n_users, no_component)
assert representation_biases.shape == (n_users, )
def test_batch_predict():
no_components = 2
ds = RandomDataset(density=1.0)
model = LightFM(no_components=no_components)
model.fit_partial(ds.train, user_features=ds.user_features, item_features=ds.item_features)
model.batch_setup(
item_chunks={0: ds.item_ids},
user_features=ds.user_features,
item_features=ds.item_features,
)
user_repr = inference._user_repr
item_repr = inference._item_repr
assert np.sum(user_repr)
assert user_repr.shape == (ds.no_users, no_components)
assert np.sum(item_repr)
assert item_repr.shape == (no_components, ds.no_items)
zeros = 0
for uid in range(ds.no_users):
original_scores = model.predict(
np.repeat(uid, ds.no_items),
np.arange(ds.no_items),
user_features=ds.user_features,
item_features=ds.item_features,
)
# Check scores
_, batch_predicted_scores = model.predict_for_user(user_id=uid, top_k=0, item_ids=ds.item_ids)
assert_array_almost_equal(original_scores, batch_predicted_scores)
# Check ids
original_ids = np.argsort(-original_scores)
import matplotlib.pyplot as plt
import numpy as np
def plot_chains(chain, fileout=None, tracers=0, labels=None, delay=0, ymax=200000, thin=100, num_xticks=7, truths=None):
if chain.ndim < 3:
print("You must include a multiple chains")
return
n_chains, length, n_var = chain.shape
print(n_chains, length, n_var)
if (labels is not None) and (len(labels) != n_var):
print("You must provide the correct number of variable labels.")
return
if (truths is not None) and (len(truths) != n_var):
print("You must provide the correct number of truths.")
return
fig, ax = plt.subplots(int(n_var/2) + n_var%2, 2, figsize=(8, 0.8*n_var))
plt.subplots_adjust(left=0.09, bottom=0.07, right=0.96, top=0.96, hspace=0)
color = np.empty(n_chains, dtype=str)
color[:] = 'k'
alpha = 0.01 * np.ones(n_chains)
zorder = np.ones(n_chains)
if tracers > 0:
idx = np.random.choice(n_chains, tracers, replace=False)
color[idx] = 'r'
alpha[idx] = 1.0
zorder[idx] = 2.0
for i in range(n_var):
ix = int(i/2)
iy = i%2
for j in range(n_chains):
xvals = (np.arange(length)*thin - delay) / 1000.0
ax[ix,iy].plot(xvals, chain[j,:,i], color=color[j], alpha=alpha[j], rasterized=True, zorder=zorder[j])
if ymax is None: ymax = (length*thin-delay)
ax[ix,iy].set_xlim(-delay/1000.0, ymax/1000.0)
ax[ix,iy].set_xticks(np.linspace(-delay/1000.0,ymax/1000.0,num_xticks))
ax[ix,iy].set_xticklabels([])
# Add y-axis labels if provided by use
if labels is not None: ax[ix,iy].set_ylabel(labels[i])
if delay != 0: ax[ix,iy].axvline(0, color='k', linestyle='dashed', linewidth=2.0, zorder=9)
if truths is not None: ax[ix,iy].axhline(truths[i], color='C0', linestyle='dashed', linewidth=2.0, zorder=10)
# plt.tight_layout()
ax[-1,0].set_xticklabels(np.linspace(-delay/1000.0,ymax/1000.0,num_xticks).astype('i8').astype('U'))
ax[-1,1].set_xticklabels(np.linspace(-delay/1000.0,ymax/1000.0,num_xticks).astype('i8').astype('U'))
## @ingroup Methods-Noise-Certification
# flyover_noise.py
#
# Created: Oct 2020, <NAME>
# ----------------------------------------------------------------------
# Imports
# ----------------------------------------------------------------------
import numpy as np
from SUAVE.Methods.Noise.Fidelity_One.Noise_Tools.compute_noise import compute_noise
# ----------------------------------------------------------------------
# Flyover noise
# ----------------------------------------------------------------------
## @ingroup Methods-Noise-Certification
def flyover_noise(analyses,noise_configs):
"""This method calculates flyover noise of a turbofan aircraft
Assumptions:
N/A
Source:
N/A
Inputs:
analyses - data structure of SUAVE analyses [None]
noise_configs - data structure for SUAVE vehicle configurations [None]
Outputs:
SPL - sound pressure level [dB]
Properties Used:
N/A
"""
# Update number of control points for noise
mission = analyses.missions.takeoff
takeoff_initialization = mission.evaluate()
n_points = np.ceil(takeoff_initialization.segments.climb.conditions.frames.inertial.time[-1] /0.5 +1)
mission.npoints_takeoff_sign = np.sign(n_points)
mission.segments.climb.state.numerics.number_control_points = int(np.minimum(200, np.abs(n_points))[0])
# Set up analysis
noise_segment = mission.segments.climb
noise_segment.analyses.noise.settings.sideline = False
noise_segment.analyses.noise.settings.flyover = True
noise_settings = noise_segment.analyses.noise.settings
noise_config = noise_configs.takeoff
noise_analyses = noise_segment.analyses
noise_config.engine_flag = True
noise_config.print_output = 0
noise_config.output_file = 'Noise_Flyover_climb.dat'
noise_config.output_file_engine = 'Noise_Flyover_climb_Engine.dat'
noise_config.engine_flag = True
if mission.npoints_takeoff_sign == -1:
noise_result_takeoff_FL_clb = 500. + noise_segment.missions.sideline_takeoff.segments.climb.state.numerics.number_control_points
else:
noise_result_takeoff_FL_clb = compute_noise(noise_config,noise_analyses,noise_segment,noise_settings)
noise_segment = mission.segments.cutback
noise_config = noise_configs.cutback
noise_config.print_output = 0
noise_config.engine_flag = True
noise_config.output_file = 'Noise_Flyover_cutback.dat'
noise_config.output_file_engine = 'Noise_Flyover_cutback_Engine.dat'
if mission.npoints_takeoff_sign == -1:
noise_result_takeoff_FL_cutback = 500. + noise_segment.missions.sideline_takeoff.segments.climb.state.numerics.number_control_points
else:
noise_result_takeoff_FL_cutback = compute_noise(noise_config,noise_analyses,noise_segment,noise_settings)
noise_result_takeoff_FL = 10. * np.log10(10**(noise_result_takeoff_FL_clb/10)+10**(noise_result_takeoff_FL_cutback/10))
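# The climb and cutback levels are combined on an energy (mean-square pressure) basis:
# L_total = 10*log10(10**(L_clb/10) + 10**(L_cutback/10)).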
"""
CS131 - Computer Vision: Foundations and Applications
Assignment 2
Author: <NAME> (<EMAIL>)
Date created: 07/2017
Last modified: 10/18/2017
Python Version: 3.5+
"""
import numpy as np
def conv(image, kernel):
""" An implementation of convolution filter.
This function uses element-wise multiplication and np.sum()
to efficiently compute weighted sum of neighborhood at each
pixel.
Args:
image: numpy array of shape (Hi, Wi).
kernel: numpy array of shape (Hk, Wk).
Returns:
out: numpy array of shape (Hi, Wi).
"""
Hi, Wi = image.shape
Hk, Wk = kernel.shape
out = np.zeros((Hi, Wi))
# For this assignment, we will use edge values to pad the images.
# Zero padding will make derivatives at the image boundary very big,
# whereas we want to ignore the edges at the boundary.
pad_width0 = Hk // 2
pad_width1 = Wk // 2
pad_width = ((pad_width0,pad_width0),(pad_width1,pad_width1))
padded = np.pad(image, pad_width, mode='edge')
kernel_flipped = np.zeros(kernel.shape)
for x in range(Hk):
for y in range(Wk):
kernel_flipped[x,y] = kernel[Hk-x-1,Wk-y-1]
kernel_flipped_vec = kernel_flipped.reshape([1,Hk*Wk])
for i in range(Hi):
for j in range(Wi):
out[i,j] = np.dot(kernel_flipped_vec,padded[i:i+Hk,j:j+Wk].reshape([Hk*Wk,1]))
return out
def gaussian_kernel(size, sigma):
""" Implementation of Gaussian Kernel.
This function follows the gaussian kernel formula,
and creates a kernel matrix.
Hints:
- Use np.pi and np.exp to compute pi and exp.
Args:
size: int of the size of output matrix.
sigma: float of sigma to calculate kernel.
Returns:
kernel: numpy array of shape (size, size).
"""
kernel = np.zeros((size, size))
k = (size-1)/2
for i in range(size):
for j in range(size):
kernel[i,j] = np.exp(-( (i-k)**2 + (j-k)**2 )/(2*(sigma**2)) )
kernel = 1/(2*np.pi*(sigma**2))*kernel
return kernel
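# Example: gaussian_kernel(3, 1.0) has its centre value kernel[1, 1] equal to
# 1/(2*pi) ~= 0.159, and the kernel is symmetric about that centre.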
def partial_x(img):
""" Computes partial x-derivative of input img.
Hints:
- You may use the conv function in defined in this file.
Args:
img: numpy array of shape (H, W).
Returns:
out: x-derivative image.
"""
out = None
### YOUR CODE HERE
kernel = np.array([1.,0.,-1.],ndmin = 2)/2.
out = conv(img, kernel)
### END YOUR CODE
return out
def partial_y(img):
""" Computes partial y-derivative of input img.
Hints:
- You may use the conv function in defined in this file.
Args:
img: numpy array of shape (H, W).
Returns:
out: y-derivative image.
"""
out = None
### YOUR CODE HERE
kernel = np.array([[1.],[0.] ,[-1.]], ndmin = 2)/2.
out = conv(img, kernel)
### END YOUR CODE
return out
def gradient(img):
""" Returns gradient magnitude and direction of input img.
Args:
img: Grayscale image. Numpy array of shape (H, W).
Returns:
G: Magnitude of gradient at each pixel in img.
Numpy array of shape (H, W).
theta: Direction(in degrees, 0 <= theta < 360) of gradient
at each pixel in img. Numpy array of shape (H, W).
Hints:
- Use np.sqrt and np.arctan2 to calculate square root and arctan
"""
G = np.zeros(img.shape)
theta = np.zeros(img.shape)
### YOUR CODE HERE
Gx = partial_x(img)
Gy = partial_y(img)
#print(Gx)
#print(Gy)
G = np.sqrt(Gx**2+Gy**2)
theta = np.arctan2(Gy,Gx)*180/np.pi
#print(theta)
theta[(theta >= -180) & (theta < 0)] += 360
#print(theta)
### END YOUR CODE
return G, theta
def non_maximum_suppression(G, theta):
""" Performs non-maximum suppression.
This function performs non-maximum suppression along the direction
of gradient (theta) on the gradient magnitude image (G).
Args:
G: gradient magnitude image with shape of (H, W).
theta: direction of gradients with shape of (H, W).
Returns:
out: non-maxima suppressed image.
"""
H, W = G.shape
out = np.zeros((H, W))
# Round the gradient direction to the nearest 45 degrees
theta = np.floor((theta + 22.5) / 45) * 45
# edge padding
pad_width0 = 1
pad_width1 = 1
pad_width = ((pad_width0,pad_width0),(pad_width1,pad_width1))
G_padded = np.pad(G, pad_width, mode='constant',constant_values = (0,0))
theta_padded = np.pad(theta, pad_width, mode='constant',constant_values = (0,0))
### BEGIN YOUR CODE
for i in range(H):
for j in range(W):
G_patch = G_padded[i:i+3,j:j+3]
theta_patch = theta_padded[i:i+3,j:j+3]
theta_current = theta[i,j]
#indice = ((theta_patch == theta_current) | (theta_patch == theta_current + 180 - 2*(theta_current>=180)))
#print("indice",indice)
#pnDirection = G_patch[indice]
if theta_current == 0 or theta_current == 180 or theta_current == 360:
indice = ((1,2),(1,0))
elif theta_current == 45 or theta_current == 225:
indice = ((0,0),(2,2))
elif theta_current == 90 or theta_current == 270:
indice = ((0,1),(2,1))
else:
indice = ((0,2),(2,0))
G_pnDirection = np.array([
[ G_patch[indice[0]] ],
[ G_patch[indice[1]] ]
])
'''
if i == 244 and j == 411:
print("G_patch",G_patch)
print("theta_patch",theta_patch)
print("G_pnDirection",G_pnDirection)
print("G[i,j]",G[i,j]-G_pnDirection.max())
'''
'''
if i == 1 and j == 2:
print("G_patch",G_patch)
print("theta_patch",theta_patch)
print("G_pnDirection",G_pnDirection)
'''
if G[i,j] < G_pnDirection.max() or np.abs(G[i,j] - G_pnDirection.max()) < 1e-10:
out[i,j] = 0
else:
out[i,j] = G[i,j]
### END YOUR CODE
return out
def double_thresholding(img, high, low):
"""
Args:
img: numpy array of shape (H, W) representing NMS edge response.
high: high threshold(float) for strong edges.
low: low threshold(float) for weak edges.
Returns:
strong_edges: Boolean array representing strong edges.
Strong edeges are the pixels with the values greater than
the higher threshold.
weak_edges: Boolean array representing weak edges.
Weak edges are the pixels with the values smaller or equal to the
higher threshold and greater than the lower threshold.
"""
strong_edges = np.zeros(img.shape, dtype=np.bool)
weak_edges = np.zeros(img.shape, dtype=np.bool)
### YOUR CODE HERE
strong_edges = img>high
weak_edges = (img<=high) & (img>low)
### END YOUR CODE
return strong_edges, weak_edges
def get_neighbors(y, x, H, W):
""" Return indices of valid neighbors of (y, x).
Return indices of all the valid neighbors of (y, x) in an array of
shape (H, W). An index (i, j) of a valid neighbor should satisfy
the following:
1. i >= 0 and i < H
2. j >= 0 and j < W
3. (i, j) != (y, x)
Args:
y, x: location of the pixel.
H, W: size of the image.
Returns:
neighbors: list of indices of neighboring pixels [(i, j)].
"""
neighbors = []
for i in (y-1, y, y+1):
for j in (x-1, x, x+1):
if i >= 0 and i < H and j >= 0 and j < W:
if (i == y and j == x):
continue
neighbors.append((i, j))
return neighbors
def link_edges(strong_edges, weak_edges):
""" Find weak edges connected to strong edges and link them.
Iterate over each pixel in strong_edges and perform breadth first
search across the connected pixels in weak_edges to link them.
Here we consider a pixel (a, b) is connected to a pixel (c, d)
if (a, b) is one of the eight neighboring pixels of (c, d).
Args:
strong_edges: binary image of shape (H, W).
weak_edges: binary image of shape (H, W).
Returns:
edges: numpy boolean array of shape(H, W).
"""
H, W = strong_edges.shape
indices = np.stack(np.nonzero(strong_edges)).T
edges = np.zeros((H, W), dtype=np.bool)
# Make new instances of arguments to leave the original
# references intact
weak_edges = np.copy(weak_edges)
edges = np.copy(strong_edges)
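# (Sketch of the presumed continuation, not shown in this excerpt: breadth-first search
# from every strong-edge pixel through connected weak-edge pixels.)
# for (r, c) in indices: # seed the search at each strong-edge location
#     queue = [(r, c)]
#     while queue:
#         y, x = queue.pop(0)
#         for (a, b) in get_neighbors(y, x, H, W):
#             if weak_edges[a, b] and not edges[a, b]:
#                 edges[a, b] = True # promote the connected weak edge
#                 queue.append((a, b))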
def datacens_workflow_percent(SinkTag="func_preproc", wf_name="data_censoring"):
"""
Modified version of CPAC.scrubbing.scrubbing +
CPAC.generate_motion_statistics.generate_motion_statistics +
CPAC.func_preproc.func_preproc
`source: https://fcp-indi.github.io/docs/developer/_modules/CPAC/scrubbing/scrubbing.html`
`source: https://fcp-indi.github.io/docs/developer/_modules/CPAC/generate_motion_statistics/generate_motion_statistics.html`
`source: https://fcp-indi.github.io/docs/developer/_modules/CPAC/func_preproc/func_preproc.html`
Description:
Do the data censoring on the 4D functional data. First, it calculates the framewise displacement according to Power's method. Second, it
indexes the volumes whose FD falls in the upper percentile (determined by the threshold variable, 5% by default). Third, it excludes those volumes together with one volume
before and two volumes after each indexed volume. The workflow returns a 4D scrubbed functional dataset.
Workflow inputs:
:param func: The reoriented, motion corrected, nuisance removed and bandpass filtered functional file.
:param FD: the frame wise displacement calculated by the MotionCorrecter.py script
:param threshold: threshold of FD volumes which should be excluded
:param SinkDir:
:param SinkTag: The output directory in which the returned images (see workflow outputs) can be found, in a subdirectory specific to this workflow.
Workflow outputs:
:return: datacens_workflow - workflow
<NAME>
<EMAIL>
2018
References
----------
.. [1] <NAME>., <NAME>., <NAME>., <NAME>., & <NAME>. (2012). Spurious
but systematic correlations in functional connectivity MRI networks arise from subject motion. NeuroImage, 59(3),
2142-2154. doi:10.1016/j.neuroimage.2011.10.018
.. [2] <NAME>., <NAME>., <NAME>., <NAME>., & <NAME>. (2012). Steps
toward optimizing motion artifact removal in functional connectivity MRI; a reply to Carp.
NeuroImage. doi:10.1016/j.neuroimage.2012.03.017
.. [3] <NAME>., <NAME>., <NAME>., <NAME>., 2002. Improved optimization for the robust
and accurate linear registration and motion correction of brain images. Neuroimage 17, 825-841.
"""
import os
import nipype
import nipype.pipeline as pe
import nipype.interfaces.utility as utility
import nipype.interfaces.io as io
import PUMI.utils.globals as globals
import PUMI.utils.QC as qc
SinkDir = os.path.abspath(globals._SinkDir_ + "/" + SinkTag)
if not os.path.exists(SinkDir):
os.makedirs(SinkDir)
# Identity mapping for input variables
inputspec = pe.Node(utility.IdentityInterface(fields=['func',
'FD',
'threshold']),
name='inputspec')
inputspec.inputs.threshold = 5
#TODO_ready check CPAC.generate_motion_statistics.generate_motion_statistics script. It may use the FD of Jenkinson to index volumes which violate the upper threshold limit, no matter what we set.
# - we use the power method to calculate FD
# Determine the indices of the upper part (which is defined by the threshold, default 5%) of values based on their FD values
calc_upprperc = pe.MapNode(utility.Function(input_names=['in_file',
'threshold'],
output_names=['frames_in_idx', 'frames_out_idx', 'percentFD', 'out_file', 'nvol'],
function=calculate_upperpercent),
iterfield=['in_file'],
name='calculate_upperpercent')
# Generate the weird input for the scrubbing procedure which is done in afni
craft_scrub_input = pe.MapNode(utility.Function(input_names=['scrub_input', 'frames_in_1D_file'],
output_names=['scrub_input_string'],
function=get_indx),
iterfield=['scrub_input', 'frames_in_1D_file'],
name='scrubbing_craft_input_string')
# Scrub the image
scrubbed_preprocessed = pe.MapNode(utility.Function(input_names=['scrub_input'],
output_names=['scrubbed_image'],
function=scrub_image),
iterfield=['scrub_input'],
name='scrubbed_preprocessed')
myqc = qc.timecourse2png("timeseries", tag="040_censored")
outputspec = pe.Node(utility.IdentityInterface(fields=['scrubbed_image', 'FD']),
name='outputspec')
# save data out with Datasink
ds=pe.Node(interface=io.DataSink(),name='ds')
ds.inputs.base_directory=SinkDir
#TODO_ready: some plot for quality checking
# Create workflow
analysisflow = pe.Workflow(wf_name)
###Calculating mean Framewise Displacement (FD) as Power et al., 2012
# Calculating frames to exclude and include after scrubbing
analysisflow.connect(inputspec, 'FD', calc_upprperc, 'in_file')
analysisflow.connect(inputspec, 'threshold', calc_upprperc, 'threshold')
# Create the proper format for the scrubbing procedure
analysisflow.connect(calc_upprperc, 'frames_in_idx', craft_scrub_input, 'frames_in_1D_file')
analysisflow.connect(calc_upprperc, 'out_file', ds, 'percentFD') # TODO save this in a separate folder for QC
analysisflow.connect(inputspec, 'func', craft_scrub_input, 'scrub_input')
# Do the scrubbing
analysisflow.connect(craft_scrub_input, 'scrub_input_string', scrubbed_preprocessed, 'scrub_input')
# Output
analysisflow.connect(scrubbed_preprocessed, 'scrubbed_image', outputspec, 'scrubbed_image')
analysisflow.connect(inputspec, 'FD', outputspec, 'FD') #TODO: scrub FD file, as well
# Save a few files
#analysisflow.connect(scrubbed_preprocessed, 'scrubbed_image', ds, 'scrubbed_image')
#analysisflow.connect(calc_upprperc, 'percentFD', ds, 'scrubbed_image.@numberofvols')
analysisflow.connect(scrubbed_preprocessed, 'scrubbed_image', myqc, 'inputspec.func')
return analysisflow
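# Illustrative wiring sketch (hypothetical parent workflow and upstream node names,
# not part of the original module):
# parent = pe.Workflow(name='pipeline')
# censor = datacens_workflow_percent(wf_name='data_censoring')
# parent.connect(nuisance_removal, 'outputspec.out_file', censor, 'inputspec.func')
# parent.connect(motion_correction, 'outputspec.FD_file', censor, 'inputspec.FD')
# # the censored data is then available on censor, 'outputspec.scrubbed_image'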
def datacens_workflow_threshold(SinkTag="func_preproc", wf_name="data_censoring", ex_before=1, ex_after=2):
"""
Modified version of CPAC.scrubbing.scrubbing +
CPAC.generate_motion_statistics.generate_motion_statistics +
CPAC.func_preproc.func_preproc
`source: https://fcp-indi.github.io/docs/developer/_modules/CPAC/scrubbing/scrubbing.html`
`source: https://fcp-indi.github.io/docs/developer/_modules/CPAC/generate_motion_statistics/generate_motion_statistics.html`
`source: https://fcp-indi.github.io/docs/developer/_modules/CPAC/func_preproc/func_preproc.html`
Description:
Do the data censoring on the 4D functional data. First, it calculates the framewise displacement according to Power's method. Second, it
indexes the volumes whose FD exceeds the threshold (0.2 mm by default). Third, it excludes those volumes together with the specified number of volumes
before and after each indexed volume (one before and two after by default). The workflow returns a 4D scrubbed functional dataset.
Workflow inputs:
:param func: The reoriented, motion corrected, nuisance removed and bandpass filtered functional file.
:param FD: the frame wise displacement calculated by the MotionCorrecter.py script
:param threshold: threshold of FD volumes which should be excluded
:param SinkDir:
:param SinkTag: The output directory in which the returned images (see workflow outputs) can be found, in a subdirectory specific to this workflow.
Workflow outputs:
:return: datacens_workflow - workflow
<NAME>
<EMAIL>
2018
References
----------
.. [1] <NAME>., <NAME>., <NAME>., <NAME>., & <NAME>. (2012). Spurious
but systematic correlations in functional connectivity MRI networks arise from subject motion. NeuroImage, 59(3),
2142-2154. doi:10.1016/j.neuroimage.2011.10.018
.. [2] <NAME>., <NAME>., <NAME>., <NAME>., & <NAME>. (2012). Steps
toward optimizing motion artifact removal in functional connectivity MRI; a reply to Carp.
NeuroImage. doi:10.1016/j.neuroimage.2012.03.017
.. [3] <NAME>., <NAME>., <NAME>., <NAME>., 2002. Improved optimization for the robust
and accurate linear registration and motion correction of brain images. Neuroimage 17, 825-841.
"""
import os
import nipype
import nipype.pipeline as pe
import nipype.interfaces.utility as utility
import nipype.interfaces.io as io
import PUMI.utils.utils_convert as utils_convert
import PUMI.utils.globals as globals
import PUMI.utils.QC as qc
SinkDir = os.path.abspath(globals._SinkDir_ + "/" + SinkTag)
if not os.path.exists(SinkDir):
os.makedirs(SinkDir)
# Identity mapping for input variables
inputspec = pe.Node(utility.IdentityInterface(fields=['func',
'FD',
'threshold']),
name='inputspec')
inputspec.inputs.threshold = 0.2 #mm
#TODO_ready check CPAC.generate_motion_statistics.generate_motion_statistics script. It may use the FD of Jenkinson to index volumes which violate the upper threshold limit, no matter what we set.
# - we use the power method to calculate FD
above_thr = pe.MapNode(utility.Function(input_names=['in_file',
'threshold',
'frames_before',
'frames_after'],
output_names=['frames_in_idx', 'frames_out_idx', 'percentFD', 'percent_scrubbed_file', 'fd_scrubbed_file', 'nvol'],
function=above_threshold),
iterfield=['in_file'],
name='above_threshold')
above_thr.inputs.frames_before = ex_before
above_thr.inputs.frames_after = ex_after
# Save outputs which are important
ds_fd_scrub = pe.Node(interface=io.DataSink(),
name='ds_fd_scrub')
ds_fd_scrub.inputs.base_directory = SinkDir
ds_fd_scrub.inputs.regexp_substitutions = [("(\/)[^\/]*$", "FD_scrubbed.csv")]
pop_perc_scrub = pe.Node(interface=utils_convert.List2TxtFileOpen,
name='pop_perc_scrub')
# save data out with Datasink
ds_pop_perc_scrub = pe.Node(interface=io.DataSink(), name='ds_pop_perc_scrub')
ds_pop_perc_scrub.inputs.regexp_substitutions = [("(\/)[^\/]*$", "pop_percent_scrubbed.txt")]
ds_pop_perc_scrub.inputs.base_directory = SinkDir
# Generate the weird input for the scrubbing procedure which is done in afni
craft_scrub_input = pe.MapNode(utility.Function(input_names=['scrub_input', 'frames_in_1D_file'],
output_names=['scrub_input_string'],
function=get_indx),
iterfield=['scrub_input', 'frames_in_1D_file'],
name='scrubbing_craft_input_string')
# Scrub the image
scrubbed_preprocessed = pe.MapNode(utility.Function(input_names=['scrub_input'],
output_names=['scrubbed_image'],
function=scrub_image),
iterfield=['scrub_input'],
name='scrubbed_preprocessed')
myqc = qc.timecourse2png("timeseries", tag="040_censored")
outputspec = pe.Node(utility.IdentityInterface(fields=['scrubbed_image', 'FD_scrubbed']),
name='outputspec')
# save data out with Datasink
ds=pe.Node(interface=io.DataSink(),name='ds')
ds.inputs.base_directory=SinkDir
#TODO_ready: some plot for quality checking
# Create workflow
analysisflow = pe.Workflow(wf_name)
###Calculating mean Framewise Displacement (FD) as Power et al., 2012
# Calculating frames to exclude and include after scrubbing
analysisflow.connect(inputspec, 'FD', above_thr, 'in_file')
analysisflow.connect(inputspec, 'threshold', above_thr, 'threshold')
# Create the proper format for the scrubbing procedure
analysisflow.connect(above_thr, 'frames_in_idx', craft_scrub_input, 'frames_in_1D_file')
analysisflow.connect(above_thr, 'percent_scrubbed_file', ds, 'percentFD') # TODO save this in separate folder for QC
analysisflow.connect(inputspec, 'func', craft_scrub_input, 'scrub_input')
# Do the scrubbing
analysisflow.connect(craft_scrub_input, 'scrub_input_string', scrubbed_preprocessed, 'scrub_input')
# Output
analysisflow.connect(scrubbed_preprocessed, 'scrubbed_image', outputspec, 'scrubbed_image')
analysisflow.connect(above_thr, 'fd_scrubbed_file', outputspec, 'FD_scrubbed') #TODO_ready: scrub FD file, as well
analysisflow.connect(above_thr, 'fd_scrubbed_file', ds_fd_scrub, 'FD_scrubbed')
analysisflow.connect(above_thr, 'percent_scrubbed_file', pop_perc_scrub, 'in_list')
analysisflow.connect(pop_perc_scrub, 'txt_file', ds_pop_perc_scrub, 'pop')
# Save a few files
analysisflow.connect(scrubbed_preprocessed, 'scrubbed_image', ds, 'scrubbed_image')
#analysisflow.connect(above_thr, 'percentFD', ds, 'scrubbed_image.@numberofvols')
analysisflow.connect(scrubbed_preprocessed, 'scrubbed_image', myqc, 'inputspec.func')
return analysisflow
def spikereg_workflow(SinkTag="func_preproc", wf_name="data_censoring_despike"):
"""
Description:
Calculates volumes to be excluded, creates the despike regressor matrix
Workflow inputs:
:param FD: the frame wise displacement calculated by the MotionCorrecter.py script
:param threshold: threshold of FD volumes which should be excluded
:param SinkDir:
:param SinkTag: The output directory in which the returned images (see workflow outputs) can be found, in a subdirectory specific to this workflow.
Workflow outputs:
:return: spikereg_workflow - workflow
<NAME>
<EMAIL>
2018
References
----------
.. [1] <NAME>., <NAME>., <NAME>., <NAME>., & <NAME>. (2012). Spurious
but systematic correlations in functional connectivity MRI networks arise from subject motion. NeuroImage, 59(3),
2142-2154. doi:10.1016/j.neuroimage.2011.10.018
.. [2] <NAME>., <NAME>., <NAME>., <NAME>., & <NAME>. (2012). Steps
toward optimizing motion artifact removal in functional connectivity MRI; a reply to Carp.
NeuroImage. doi:10.1016/j.neuroimage.2012.03.017
.. [3] <NAME>., <NAME>., <NAME>., <NAME>., 2002. Improved optimization for the robust
and accurate linear registration and motion correction of brain images. Neuroimage 17, 825-841.
"""
import os
import nipype
import nipype.pipeline as pe
import nipype.interfaces.utility as utility
import nipype.interfaces.io as io
import PUMI.utils.globals as globals
import PUMI.utils.QC as qc
SinkDir = os.path.abspath(globals._SinkDir_ + "/" + SinkTag)
if not os.path.exists(SinkDir):
os.makedirs(SinkDir)
# Identity mapping for input variables
inputspec = pe.Node(utility.IdentityInterface(fields=['func',
'FD',
'threshold',]),
name='inputspec')
inputspec.inputs.threshold = 5
#TODO_ready check CPAC.generate_motion_statistics.generate_motion_statistics script. It may use the FD of Jenkinson to index volumes which violate the upper threshold limit, no matter what we set.
# - we use the power method to calculate FD
# Determine the indices of the upper part (which is defined by the threshold, default 5%) of values based on their FD values
calc_upprperc = pe.MapNode(utility.Function(input_names=['in_file',
'threshold'],
output_names=['frames_in_idx', 'frames_out_idx', 'percentFD', 'out_file', 'nvol'],
function=calculate_upperpercent),
iterfield=['in_file'],
name='calculate_upperpercent')
#create despiking matrix, to be included into nuisance correction
despike_matrix = pe.MapNode(utility.Function(input_names=['frames_excluded', 'total_vols'],
output_names=['despike_mat'],
function=create_despike_regressor_matrix),
iterfield=['frames_excluded', 'total_vols'],
name='create_despike_matrix')
outputspec = pe.Node(utility.IdentityInterface(fields=['despike_mat', 'FD']),
name='outputspec')
# save data out with Datasink
ds=pe.Node(interface=io.DataSink(),name='ds')
ds.inputs.base_directory=SinkDir
#TODO_ready: some plot for quality checking
# Create workflow
analysisflow = pe.Workflow(wf_name)
###Calculating mean Framewise Displacement (FD) as Power et al., 2012
# Calculating frames to exclude and include after scrubbing
analysisflow.connect(inputspec, 'FD', calc_upprperc, 'in_file')
analysisflow.connect(inputspec, 'threshold', calc_upprperc, 'threshold')
# Create the proper format for the scrubbing procedure
analysisflow.connect(calc_upprperc, 'frames_out_idx', despike_matrix, 'frames_excluded')
analysisflow.connect(calc_upprperc, 'nvol', despike_matrix, 'total_vols')
analysisflow.connect(calc_upprperc, 'out_file', ds, 'percentFD') # TODO save this in a separate folder for QC
# Output
analysisflow.connect(despike_matrix, 'despike_mat', outputspec, 'despike_mat')
analysisflow.connect(inputspec, 'FD', outputspec, 'FD')
return analysisflow
def above_threshold(in_file, threshold=0.2, frames_before=1, frames_after=2):
import os
import numpy as np
from numpy import loadtxt, savetxt
powersFD_data = loadtxt(in_file, skiprows=1)
powersFD_data = np.insert(powersFD_data, 0, 0) # np.insert is not in-place # TODO_ready: why do we need this: see output of nipype.algorithms.confounds.FramewiseDisplacement
frames_in_idx = np.argwhere(powersFD_data < threshold)[:, 0]
frames_out = np.argwhere(powersFD_data >= threshold)[:, 0]
extra_indices = []
for i in frames_out:
# remove preceding frames
if i > 0:
count = 1
while count <= frames_before:
extra_indices.append(i - count)
count += 1
# remove following frames
count = 1
while count <= frames_after:
if i+count < len(powersFD_data): # do not censor nonexistent data
extra_indices.append(i + count)
count += 1
indices_out = list(set(frames_out) | set(extra_indices))
indices_out.sort()
frames_out_idx = indices_out
frames_in_idx = np.setdiff1d(frames_in_idx, indices_out)
FD_scrubbed = powersFD_data[frames_in_idx]
fd_scrubbed_file = os.path.join(os.getcwd(), 'FD_scrubbed.csv')
savetxt(fd_scrubbed_file, FD_scrubbed, delimiter=",")
frames_in_idx_str = ','.join(str(x) for x in frames_in_idx)
frames_in_idx = frames_in_idx_str.split()
percentFD = (len(frames_out_idx) * 100 / (len(powersFD_data) + 1)) # % of frames censored
percent_scrubbed_file = os.path.join(os.getcwd(), 'percent_scrubbed.txt')
f = open(percent_scrubbed_file, 'w')
f.write("%.3f" % (percentFD))
f.close()
nvol = len(powersFD_data)
return frames_in_idx, frames_out_idx, percentFD, percent_scrubbed_file, fd_scrubbed_file, nvol
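# Worked example of the censoring logic above (illustrative values only):
# with the zero-padded FD series [0.0, 0.1, 0.3, 0.1, 0.1, 0.1], threshold=0.2,
# frames_before=1 and frames_after=2, only frame 2 violates the threshold,
# so frames 1-4 are censored and frames_in_idx reduces to [0, 5].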
def calculate_upperpercent(in_file,threshold, frames_before=1, frames_after=2):
import os
import numpy as np
from numpy import loadtxt
# Receives the FD file to calculate the upper percent of violating volumes
powersFD_data = loadtxt(in_file, skiprows=1)
powersFD_data = np.insert(powersFD_data, 0, 0) # np.insert is not in-place # TODO_ready: why do we need this: see output of nipype.algorithms.confounds.FramewiseDisplacement
sortedpwrsFDdata = sorted(powersFD_data)
limitvalueindex = int(len(sortedpwrsFDdata) * threshold / 100)
limitvalue = sortedpwrsFDdata[len(sortedpwrsFDdata) - limitvalueindex]
frames_in_idx = np.argwhere(powersFD_data < limitvalue)[:,0]
frames_out = np.argwhere(powersFD_data >= limitvalue)[:, 0]
extra_indices = []
for i in frames_out:
# remove preceding frames
if i > 0:
count = 1
while count <= frames_before:
extra_indices.append(i - count)
count += 1
# remove following frames
count = 1
while count <= frames_after:
if i+count < len(powersFD_data): # do not censor nonexistent data
extra_indices.append(i + count)
count += 1
indices_out = list(set(frames_out) | set(extra_indices))
indices_out.sort()
frames_out_idx=indices_out
frames_in_idx = np.setdiff1d(frames_in_idx, indices_out)
import numpy
from SLIX import toolbox, io, visualization
import matplotlib
from matplotlib import pyplot as plt
import pytest
import shutil
import os
matplotlib.use('agg')
class TestVisualization:
def test_visualize_unit_vectors(self):
example = io.imread('tests/files/demo.nii')
peaks = toolbox.significant_peaks(example, use_gpu=False)
centroid = toolbox.centroid_correction(example, peaks, use_gpu=False)
direction = toolbox.direction(peaks, centroid, use_gpu=False)
unit_x, unit_y = toolbox.unit_vectors(direction, use_gpu=False)
visualization.unit_vectors(unit_x, unit_y, thinout=10)
plt.savefig('tests/output/vis/unit_vectors.tiff', dpi=100,
bbox_inches='tight')
orig = io.imread('tests/files/vis/unit_vectors.tiff')
to_compare = io.imread('tests/output/vis/unit_vectors.tiff')
if numpy.all(numpy.isclose(orig - to_compare, 0)):
assert True
else:
io.imwrite('tests/output/vis/unit_vectors-diff.tiff', orig - to_compare)
assert False
def test_visualize_unit_vector_distribution(self):
example = io.imread('tests/files/demo.nii')
peaks = toolbox.significant_peaks(example, use_gpu=False)
centroid = toolbox.centroid_correction(example, peaks, use_gpu=False)
direction = toolbox.direction(peaks, centroid, use_gpu=False)
unit_x, unit_y = toolbox.unit_vectors(direction, use_gpu=False)
visualization.unit_vector_distribution(unit_x, unit_y, thinout=15, vector_width=5, alpha=0.01)
plt.savefig('tests/output/vis/unit_vector_distribution.tiff', dpi=100,
bbox_inches='tight')
orig = io.imread('tests/files/vis/unit_vector_distribution.tiff')
to_compare = io.imread('tests/output/vis/unit_vector_distribution.tiff')
if numpy.all(numpy.isclose(orig - to_compare, 0)):
assert True
else:
io.imwrite('tests/output/vis/unit_vector_distribution-diff.tiff', orig - to_compare)
assert False
def test_visualize_parameter_map(self):
example = io.imread('tests/files/demo.nii')
prominence = toolbox.mean_peak_prominence(example, kind_of_normalization=1, use_gpu=False)
visualization.parameter_map(prominence, colorbar=False)
plt.savefig('tests/output/vis/parameter_map.tiff', dpi=100,
bbox_inches='tight')
orig = io.imread('tests/files/vis/parameter_map.tiff')
to_compare = io.imread('tests/output/vis/parameter_map.tiff')
assert numpy.all(numpy.isclose(orig - to_compare, 0))
def test_visualize_direction_one_dir(self):
image = numpy.arange(0, 180)
hsv_image = visualization.direction(image)
assert numpy.all(hsv_image[0, :] == [1, 0, 0])
assert numpy.all(hsv_image[30, :] == [1, 1, 0])
assert numpy.all(hsv_image[60, :] == [0, 1, 0])
assert numpy.all(hsv_image[90, :] == [0, 1, 1])
assert numpy.all(hsv_image[120, :] == [0, 0, 1])
assert numpy.all(hsv_image[150, :] == [1, 0, 1])
def test_visualize_direction_multiple_dir(self):
first_dir = numpy.arange(0, 180)[..., numpy.newaxis, numpy.newaxis]
second_dir = (first_dir + 30) % 180
second_dir[0:45] = -1
third_dir = (first_dir + 60) % 180
third_dir[0:90] = -1
fourth_dir = (first_dir + 90) % 180
fourth_dir[0:135] = -1
stack_direction = numpy.concatenate((first_dir,
second_dir,
third_dir,
fourth_dir),
axis=-1)
hsv_image = visualization.direction(stack_direction)
print(hsv_image)
# Check first direction
assert numpy.all(hsv_image[0, 0, :] == [1, 0, 0])
assert numpy.all(hsv_image[1, 1, :] == [1, 0, 0])
assert numpy.all(hsv_image[0, 1, :] == [1, 0, 0])
assert numpy.all(hsv_image[1, 0, :] == [1, 0, 0])
assert numpy.all(hsv_image[60, 0, :] == [1, 1, 0])
assert numpy.all(hsv_image[61, 1, :] == [1, 1, 0])
assert numpy.all(hsv_image[60, 1, :] == [1, 1, 0])
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
import numpy as np
from oneflow.compatible import single_client as flow
from oneflow.compatible.single_client import typing as oft
# @flow.unittest.skip_unless_1n2d()
# TODO(zhangwenxiao, jiangxuefei): refine in multi-client
@unittest.skipIf(True, "skip for now because of single-client tensor_list removed")
class TestDynamicReshape(flow.unittest.TestCase):
def test_dynamic_reshape(test_case):
data_shape = (10, 10, 10)
flow.config.gpu_device_num(2)
func_config = flow.FunctionConfig()
func_config.default_data_type(flow.float)
func_config.default_logical_view(flow.scope.mirrored_view())
@flow.global_function(type="train", function_config=func_config)
def DynamicReshapeJob(x: oft.ListNumpy.Placeholder(data_shape)):
reshape_out1 = flow.reshape(x, (-1, 20))
my_model = flow.get_variable(
"my_model",
shape=(20, 32),
dtype=flow.float,
initializer=flow.random_uniform_initializer(minval=-10, maxval=10),
trainable=True,
)
my_model = flow.cast_to_current_logical_view(my_model)
mm_out = flow.matmul(reshape_out1, my_model)
reshape_out2 = flow.reshape(mm_out, (-1, 8, 4))
flow.optimizer.SGD(
flow.optimizer.PiecewiseConstantScheduler([], [1e-4]), momentum=0
).minimize(reshape_out2)
return reshape_out1
data = [np.random.rand(*data_shape).astype(np.float32) for i in range(2)]
import os, time, sys, platform
import numpy as np
import array, random
import glob
from scipy.io import wavfile
dataset_link = "https://storage.cloud.google.com/download.tensorflow.org/data/speech_commands_v0.01.tar.gz"
filedir = "D:\\speech_commands_v0.01/"
if platform.system().lower() != "windows":
filedir = "/usr/local/speech_commands_v0.01/"
commands = ['yes', 'no', 'up', 'down', 'left', 'right', 'on', 'off', 'stop', 'go']
for command in commands:
if (os.path.exists(filedir + command)):
continue
print("Please download the speech commands dataset from " + dataset_link + " and extract it into " + filedir)
os._exit(0)
random.seed(2)
X_SIZE = 16000
train_in_bytes = bytearray()
train_in_head = np.zeros(8).astype('int32')
train_in_head[1:4] = [X_SIZE, 1, 1]
train_in_bytes += train_in_head.tobytes()
train_out_bytes = bytearray()
train_out_head = np.zeros(8).astype('int32')
train_out_head[1:4] = [1, 1, 1]
train_out_bytes += train_out_head.tobytes()
test_in_bytes = bytearray()
test_in_bytes += train_in_head.tobytes()
test_out_bytes = bytearray()
test_out_bytes += train_out_head.tobytes()
Y = np.array([0])
#!/usr/bin/env python
u"""
fit.py
Written by <NAME> (05/2021)
Utilities for calculating average fits from ATL03 Geolocated Photon Data
PYTHON DEPENDENCIES:
numpy: Scientific Computing Tools For Python
https://numpy.org
https://numpy.org/doc/stable/user/numpy-for-matlab-users.html
scipy: Scientific Tools for Python
https://docs.scipy.org/doc/
scikit-learn: Machine Learning in Python
http://scikit-learn.org/stable/index.html
https://github.com/scikit-learn/scikit-learn
UPDATE HISTORY:
Written 05/2021
"""
import operator
import itertools
import numpy as np
import scipy.stats
import scipy.signal
import scipy.optimize
import sklearn.neighbors
# PURPOSE: compress complete list of valid indices into a set of ranges
def compress_list(i,n):
"""
Compress complete list of valid indices into a set of ranges
Arguments
---------
i: indices to compress
n: largest gap between indices to accept for range
"""
for a,b in itertools.groupby(enumerate(i), lambda v: ((v[1]-v[0])//n)*n):
group = list(map(operator.itemgetter(1),b))
yield (group[0], group[-1])
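# Example: list(compress_list([1, 2, 3, 7, 8, 9], 1)) yields [(1, 3), (7, 9)],
# i.e. each contiguous run of indices is compressed into a (start, stop) pair.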
# PURPOSE: centers the transmit-echo-path histogram reported by ATL03
# using an iterative edit to distinguish between signal and noise
def extract_tep_histogram(tep_hist_time,tep_hist,tep_range_prim):
"""
Centers the transmit-echo-path histogram reported by ATL03
using an iterative edit to distinguish between signal and noise
"""
# ATL03 recommends subset between 15-30 ns to avoid secondary
# using primary histogram range values from ATL03 tep attributes
i, = np.nonzero((tep_hist_time >= tep_range_prim[0]) &
(tep_hist_time < tep_range_prim[1]))
t_tx = np.copy(tep_hist_time[i])
n_tx = len(t_tx)
# noise samples of tep_hist (first 5ns and last 10 ns)
ns,ne = (tep_range_prim[0]+5e-9,tep_range_prim[1]-10e-9)
noise, = np.nonzero((t_tx <= ns) | (t_tx >= ne))
noise_p1 = []
# signal samples of tep_hist
signal = sorted(set(np.arange(n_tx)) - set(noise))
# number of iterations
n_iter = 0
while (set(noise) != set(noise_p1)) & (n_iter < 10):
# value of noise in tep histogram
tep_noise_value = np.sqrt(np.sum(tep_hist[i][noise]**2)/n_tx)
p_tx = np.abs(np.copy(tep_hist[i]) - tep_noise_value)
# calculate centroid of tep_hist
t0_tx = np.sum(t_tx[signal]*p_tx[signal])/np.sum(p_tx[signal])
# calculate cumulative distribution function
TX_cpdf = np.cumsum(p_tx[signal]/np.sum(p_tx[signal]))
# linearly interpolate to 16th and 84th percentile for RDE
TX16,TX84 = np.interp([0.16,0.84],TX_cpdf,t_tx[signal]-t0_tx)
# calculate width of transmitted pulse (RDE)
W_TX = 0.5*(TX84 - TX16)
# recalculate noise
noise_p1 = np.copy(noise)
ns,ne = (t0_tx-6.0*W_TX,t0_tx+6.0*W_TX)
noise, = np.nonzero((t_tx <= ns) | (t_tx >= ne))
signal = sorted(set(np.arange(n_tx)) - set(noise))
# add 1 to counter
n_iter += 1
# valid primary TEP return has full-width at half max < 3 ns
mx = np.argmax(p_tx[signal])
halfmax = np.max(p_tx[signal])/2.0
H1 = np.interp(halfmax,p_tx[signal][:mx],t_tx[signal][:mx])
H2 = np.interp(halfmax,p_tx[signal][:mx:-1],t_tx[signal][:mx:-1])
FWHM = H2 - H1
# return values
return (t_tx[signal]-t0_tx,p_tx[signal],W_TX,FWHM,ns,ne)
# PURPOSE: calculate the interquartile range (Pritchard et al, 2009) and
# robust dispersion estimator (Smith et al, 2017) of the model residuals
def filter_elevation(r0):
"""
Calculates the interquartile range (Pritchard et al, 2009) and
robust dispersion estimator (Smith et al, 2017) of the model residuals
Arguments
---------
r0: height residuals
"""
# calculate percentiles for IQR and RDE
# IQR: first and third quartiles (25th and 75th percentiles)
# RDE: 16th and 84th percentiles
# median: 50th percentile
Q1,Q3,P16,P84,MEDIAN = np.percentile(r0,[25,75,16,84,50])
# calculate interquartile range
IQR = Q3 - Q1
# calculate robust dispersion estimator (RDE)
RDE = P84 - P16
# IQR pass: residual-(median value) is within 75% of IQR
# RDE pass: residual-(median value) is within 50% of P84-P16
return (0.75*IQR,0.5*RDE,MEDIAN)
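# Note (for intuition, assuming roughly Gaussian residuals): Q3-Q1 is about 1.35*sigma
# and P84-P16 is about 2*sigma, so both 0.75*IQR and 0.5*RDE are robust, outlier-resistant
# estimates of the residual standard deviation.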
# PURPOSE: try fitting a surface to the signal photons with progressively
# less confidence if no valid surface is found
def try_surface_fit(x, y, z, confidence_mask, dist_along, SURF_TYPE='linear',
ITERATE=25, CONFIDENCE=[4,3,2,1,0]):
"""
Try fitting a surface to the signal photons with progressively
less confidence if no valid surface is found
"""
# try with progressively less confidence
for i,conf in enumerate(CONFIDENCE):
ind, = np.nonzero(confidence_mask >= conf)
centroid = dict(x=dist_along, y=np.mean(y[ind]))
try:
surf = reduce_surface_fit(x[ind], y[ind], z[ind], centroid, ind,
SURF_TYPE=SURF_TYPE, ITERATE=ITERATE)
except (ValueError, np.linalg.LinAlgError):
pass
else:
return (i+1,surf,centroid)
# if still no values found: return infinite values
# will need to attempt a backup algorithm
surf = dict(error=np.full(1,np.inf))
centroid = None
return (None,surf,centroid)
# PURPOSE: iteratively fit a polynomial surface to the elevation data to
# reduce to within a valid window
def reduce_surface_fit(x, y, z, centroid, ind, SURF_TYPE='linear', ITERATE=25):
"""
Iteratively fit a polynomial surface to the elevation data to reduce to
within a valid surface window
"""
# calculate x and y relative to centroid point
rel_x = x - centroid['x']
# Constant Term
Z0 = np.ones_like((z))
if (SURF_TYPE == 'linear'):# linear fit
SURFMAT = np.transpose([Z0,rel_x])
elif (SURF_TYPE == 'quadratic'):# quadratic fit
SURFMAT = np.transpose([Z0,rel_x,rel_x**2])
# number of points for fit and number of terms in fit
n_max,n_terms = np.shape(SURFMAT)
# run only if number of points is above number of terms
FLAG1 = ((n_max - n_terms) > 10)
# maximum allowable window size
H_win_max = 20.0
# minimum allowable window size
H_win_min = 3.0
# set initial window to the full z range
window = z.max() - z.min()
window_p1 = np.copy(window)
# initial indices for reducing to window
filt = np.arange(n_max)
filt_p1 = np.copy(filt)
filt_p2 = np.copy(filt_p1)
if FLAG1:
# save initial indices for fitting all photons for confidence level
indices = ind.copy()
# run fit program for polynomial type
fit = fit_surface(x, y, z, centroid, SURF_TYPE=SURF_TYPE)
# number of iterations performed
n_iter = 1
# save beta coefficients
beta_mat = np.copy(fit['beta'])
error_mat = np.copy(fit['error'])
# residuals of model fit
resid = z - np.dot(SURFMAT,beta_mat)
# standard deviation of the residuals
resid_std = np.std(resid)
# save MSE and DOF for error analysis
MSE = np.copy(fit['MSE'])
DOF = np.copy(fit['DOF'])
# Root mean square error
RMSE = np.sqrt(fit['MSE'])
# Normalized root mean square error
NRMSE = RMSE/(np.max(z)-np.min(z))
# IQR pass: residual-(median value) is within 75% of IQR
# RDE pass: residual-(median value) is within 50% of P84-P16
IQR,RDE,MEDIAN = filter_elevation(resid)
# checking if any residuals are outside of the window
window = np.max([H_win_min,6.0*RDE,0.5*window_p1])
filt, = np.nonzero(np.abs(resid-MEDIAN) <= (window/2.0))
# save iteration of window
window_p1 = np.copy(window)
# run only if number of points is above number of terms
n_rem = np.count_nonzero(np.abs(resid-MEDIAN) <= (window/2.0))
FLAG1 = ((n_rem - n_terms) > 10)
# maximum number of iterations to prevent infinite loops
FLAG2 = (n_iter <= ITERATE)
# compare indices over two iterations to prevent false stoppages
FLAG3 = (set(filt) != set(filt_p1)) | (set(filt_p1) != set(filt_p2))
# iterate until there are no additional removed photons
while FLAG1 & FLAG2 & FLAG3:
# fit selected photons for window
x_filt,y_filt,z_filt,indices = (x[filt],y[filt],z[filt],ind[filt])
# run fit program for polynomial type
fit = fit_surface(x_filt,y_filt,z_filt,centroid,SURF_TYPE=SURF_TYPE)
# add to number of iterations performed
n_iter += 1
# save model coefficients
beta_mat = np.copy(fit['beta'])
error_mat = np.copy(fit['error'])
# save MSE and DOF for error analysis
MSE = np.copy(fit['MSE'])
DOF = np.copy(fit['DOF'])
# Root mean square error
RMSE = np.sqrt(fit['MSE'])
# Normalized root mean square error
NRMSE = RMSE/(np.max(z_filt)-np.min(z_filt))
# save number of points
n_max = len(z_filt)
# residuals of model fit
resid = z - np.dot(SURFMAT,beta_mat)
# standard deviation of the residuals
resid_std = np.std(resid)
# IQR pass: residual-(median value) is within 75% of IQR
# RDE pass: residual-(median value) is within 50% of P84-P16
IQR,RDE,MEDIAN = filter_elevation(resid)
# checking if any residuals are outside of the window
window = np.max([H_win_min,6.0*RDE,0.5*window_p1])
# filter out using median statistics and refit
filt_p2 = np.copy(filt_p1)
filt_p1 = np.copy(filt)
filt, = np.nonzero(np.abs(resid-MEDIAN) <= (window/2.0))
# save iteration of window
window_p1 = np.copy(window)
# run only if number of points is above number of terms
n_rem = np.count_nonzero(np.abs(resid-MEDIAN) <= (window/2.0))
FLAG1 = ((n_rem - n_terms) > 10)
# maximum number of iterations to prevent infinite loops
FLAG2 = (n_iter <= ITERATE)
# compare indices over two iterations to prevent false stoppages
FLAG3 = (set(filt) != set(filt_p1)) | (set(filt_p1) != set(filt_p2))
# return reduced model fit
FLAG3 = (set(filt) == set(filt_p1))
if FLAG1 & FLAG3 & (window <= H_win_max):
return {'beta':beta_mat, 'error':error_mat, 'MSE':MSE, 'NRMSE':NRMSE,
'DOF':DOF, 'count':n_max, 'indices':indices, 'iterations':n_iter,
'window':window, 'RDE':RDE}
else:
raise ValueError('No valid data points found')
# PURPOSE: fit a polynomial surface to the elevation data
def fit_surface(x, y, z, centroid, SURF_TYPE='linear'):
"""
Fit a polynomial surface to the elevation data
"""
# calculate x and y relative to centroid point
rel_x = x - centroid['x']
# Constant Term
Z0 = np.ones_like((z))
# Surface design matrix
if (SURF_TYPE == 'linear'):# linear fit
SURFMAT = np.transpose([Z0,rel_x])
elif (SURF_TYPE == 'quadratic'):# quadratic fit
SURFMAT = np.transpose([Z0,rel_x,rel_x**2])
# number of points for fit and number of terms in fit
n_max,n_terms = np.shape(SURFMAT)
# Standard Least-Squares fitting (the [0] denotes coefficients output)
beta_mat = np.linalg.lstsq(SURFMAT,z,rcond=-1)[0]
# modelled surface elevation
model = np.dot(SURFMAT,beta_mat)
# residual of fit
res = z - model
# nu = Degrees of Freedom = number of measurements-number of parameters
nu = n_max - n_terms
# Mean square error
# MSE = (1/nu)*sum((Y-X*B)**2)
MSE = np.dot(np.transpose(z - model),(z - model))/nu
# elevation surface error analysis
Hinv = np.linalg.inv(np.dot(np.transpose(SURFMAT),SURFMAT))
# Taking the diagonal components of the cov matrix
hdiag = np.diag(Hinv)
# Default is 95% confidence interval
alpha = 1.0 - (0.95)
# Student T-Distribution with D.O.F. nu
# t.ppf parallels tinv in matlab
tstar = scipy.stats.t.ppf(1.0-(alpha/2.0),nu)
# beta_err = t(nu,1-alpha/2)*standard error
std_error = np.sqrt(MSE*hdiag)
model_error = np.dot(SURFMAT,tstar*std_error)
return {'beta':beta_mat, 'error':tstar*std_error, 'model':model,
'model_error': model_error, 'residuals':res, 'MSE':MSE, 'DOF':nu}
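# Illustrative use with synthetic along-track data (assumed variable names, not from
# the original module):
# x = np.arange(0.0, 40.0, 0.7) # along-track distance [m]
# y = np.zeros_like(x) # across-track distance [m]
# z = 10.0 + 0.05*(x - x.mean()) + np.random.normal(0.0, 0.05, x.size)
# fit = fit_surface(x, y, z, dict(x=x.mean(), y=0.0), SURF_TYPE='linear')
# fit['beta'] # approximately [10.0, 0.05]: height at the centroid and along-track slope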
# PURPOSE: try fitting a function to the signal photon histograms
# with progressively less confidence if no valid fit is found
def try_histogram_fit(x, y, z, confidence_mask, dist_along, dt,
FIT_TYPE='gaussian', ITERATE=25, BACKGROUND=0, CONFIDENCE=[2,1,0]):
"""
Try fitting a function to the signal photon histograms with
progressively less confidence if no valid fit is found
"""
# try with progressively less confidence
for i,conf in enumerate(CONFIDENCE):
ind, = np.nonzero(confidence_mask >= conf)
centroid = dict(x=dist_along, y=np.mean(y[ind]))
try:
surf = reduce_histogram_fit(x[ind], y[ind], z[ind], ind,
dt, FIT_TYPE=FIT_TYPE, ITERATE=ITERATE, PEAKS=2,
BACKGROUND=BACKGROUND)
except (ValueError, RuntimeError, SyntaxError):
pass
else:
return (i+1,surf,centroid)
# if still no values found: return infinite values
# will need to attempt a backup algorithm
surf = dict(error=np.full(1,np.inf))
centroid = None
return (None,surf,centroid)
# PURPOSE: iteratively use decomposition fitting to the elevation data to
# reduce to within a valid window
def reduce_histogram_fit(x, y, z, ind, dt, FIT_TYPE='gaussian',
ITERATE=25, PEAKS=2, BACKGROUND=0):
"""
Iteratively use decomposition fitting to the elevation data to reduce
to within a valid surface window
"""
# speed of light
c = 299792458.0
# use same delta time as calculating first photon bias
# so that the residuals will be the same
dz = dt*c
# number of background photons in each bin
N_BG = dz*BACKGROUND
# create a histogram of the heights
zmin,zmax = (z.min(),z.max())
z_full = np.arange(zmin,zmax+dz,dz)
nz = len(z_full)
# maximum allowable window size
H_win_max = 20.0
# minimum allowable window size
H_win_min = 3.0
# set initial window to the full z range
window = zmax - zmin
window_p1 = np.copy(window)
# number of data points
n_max = len(z)
# number of terms in fit
if (FIT_TYPE == 'gaussian'):# gaussian fit
n_terms = 3
elif (FIT_TYPE == 'general'):# generalized gaussian fit
n_terms = 4
# run only if number of histogram points is above number of terms
FLAG1 = ((nz - n_terms) > 10)
# using kernel density functions from scikit-learn neighbors
# gaussian kernels will reflect more accurate distributions of the data
# with less sensitivity to sampling width than histograms (tophat kernels)
kde = sklearn.neighbors.KernelDensity(bandwidth=dz,kernel='gaussian')
kde.fit(z[:,None])
# kde score_samples outputs are normalized log density functions
hist = np.exp(kde.score_samples(z_full[:,None]) + np.log(n_max*dz))
# smooth histogram before determining differentials
gw = scipy.signal.gaussian(nz,4)
hist_smooth = scipy.signal.convolve(hist, gw/gw.sum(), mode='same')
# First differentials to find zero crossings
# histogram 1st differential
dhist = np.zeros((nz))
# forward differentiation for starting point
dhist[0] = hist_smooth[1] - hist_smooth[0]
# backward differentiation for end point
dhist[-1] = hist_smooth[-1] - hist_smooth[-2]
# centered differentiation for all others
dhist[1:-1] = (hist_smooth[2:] - hist_smooth[0:-2])/2.0
# find positive peaks above amplitude threshold (percent of max)
# by calculating the histogram differentials
# signal amplitude threshold greater than 10% of max or 5.5xbackground rate
AmpThreshold = 0.10
HistThreshold = np.max([5.5*N_BG, AmpThreshold*np.max(hist_smooth)])
n_peaks = np.count_nonzero((np.sign(dhist[0:-1]) >= 0) & (np.sign(dhist[1:]) < 0) &
((hist_smooth[0:-1] > HistThreshold) | (hist_smooth[1:] > HistThreshold)))
n_peaks = np.min([n_peaks,PEAKS])
peak_index, = np.nonzero((np.sign(dhist[0:-1]) >= 0) & (np.sign(dhist[1:]) < 0) &
((hist_smooth[0:-1] > HistThreshold) | (hist_smooth[1:] > HistThreshold)))
# initial indices for reducing to window
filt = np.arange(n_max)
filt_p1 = np.copy(filt)
filt_p2 = np.copy(filt_p1)
if FLAG1 and (n_peaks > 0):
# save initial indices for fitting all photons for confidence level
indices = ind.copy()
# sort peak index by amplitude of peaks (descending from max to min)
# and truncate to a finite number of peaks
sorted_peaks = np.argsort(hist[peak_index])[::-1]
peak_index = peak_index[sorted_peaks][:n_peaks]
# amplitude of the maximum peak
max_amp = hist[peak_index][0]
# cumulative probability distribution function of initial histogram
hist_cpdf = np.cumsum(hist/np.sum(hist))
# IQR: first and third quartiles (25th and 75th percentiles)
# RDE: 16th and 84th percentiles
Q1,Q3,P16,P84 = np.interp([0.25,0.75,0.16,0.84],hist_cpdf,z_full)
# create priors list
priors = []
lower_bound = []
upper_bound = []
for i,p in enumerate(peak_index):
if (FIT_TYPE == 'gaussian'):
# Fit Gaussian functions to photon event histogram
# a*: amplitude of waveform
# r*: range from differential index
# w*: width as 0.75*IQR
priors.append([hist[p],z_full[p],0.75*(Q3-Q1)])
# bounds of each parameter
# amplitude: 0 to histogram max+5.5xbackground rate
# range: zmin to zmax
# width: sz to half width of z
lower_bound.extend([0,zmin,dz])
upper_bound.extend([max_amp+5.5*N_BG,zmax,(zmax-zmin)/2.0])
elif (FIT_TYPE == 'general'):
# Fit Generalized Gaussian functions to photon event histogram
# a*: amplitude of waveform
# r*: range from differential index
# w*: width as 0.75*IQR
# p*: shape parameter = gaussian sqrt(2)
priors.append([hist[p],z_full[p],0.75*(Q3-Q1),np.sqrt(2)])
# bounds of each parameter
# amplitude: 0 to histogram max+5.5xbackground rate
# range: zmin to zmax
# width: sz to half width of z
# shape: positive
lower_bound.extend([0,zmin,dz,0])
upper_bound.extend([max_amp+5.5*N_BG,zmax,(zmax-zmin)/2.0,np.inf])
# run optimized curve fit with Levenberg-Marquardt algorithm
fit = fit_histogram(z_full,hist,priors,lower_bound,upper_bound,
FIT_TYPE=FIT_TYPE)
# number of iterations performed
n_iter = 1
# height fits and height fit errors
height = fit['height'].copy()
amplitude = fit['amplitude'].copy()
height_errors = fit['error'].copy()
# minimum and maximum heights
min_peak = np.min(fit['height'])
max_peak = np.max(fit['height'])
# save MSE and DOF for error analysis
MSE = np.copy(fit['MSE'])
DOF = np.copy(fit['DOF'])
# Root mean square error
RMSE = np.sqrt(fit['MSE'])
# Normalized root mean square error
NRMSE = RMSE/(zmax-zmin)
# histogram fit
model = np.copy(fit['model'])
# histogram fit residuals
resid = np.copy(fit['residuals'])
# cumulative probability distribution function of initial histogram
cpdf = np.cumsum(fit['residuals']/np.sum(fit['residuals']))
# interpolate residuals to percentiles of interest for statistics
Q1,Q3,MEDIAN,P16,P84 = np.interp([0.25,0.75,0.5,0.16,0.84],cpdf,z_full)
# IQR: first and third quartiles (25th and 75th percentiles)
# RDE: 16th and 84th percentiles
IQR = 0.75*(Q3-Q1)
RDE = 0.50*(P84-P16)
# checking if any residuals are outside of the window
window = np.max([H_win_min,6.0*RDE,0.5*window_p1])
filt, = np.nonzero((z > (min_peak-window/2.0)) & (z < (max_peak+window/2.0)))
# run only if number of points is above number of terms
n_rem = np.count_nonzero((z > (min_peak-window/2.0)) & (z < (max_peak+window/2.0)))
nz = (np.max(z[filt])-np.min(z[filt]))//dz + 1
FLAG1 = ((nz - n_terms) > 10) & ((n_rem - n_terms) > 10)
# maximum number of iterations to prevent infinite loops
FLAG2 = (n_iter <= ITERATE)
# compare indices over two iterations to prevent false stoppages
FLAG3 = (set(filt) != set(filt_p1)) | (set(filt_p1) != set(filt_p2))
# iterate until there are no additional removed photons
while FLAG1 & FLAG2 & FLAG3:
# fit selected photons for window
x_filt,y_filt,z_filt,indices = (x[filt],y[filt],z[filt],ind[filt])
zmin,zmax = (z_filt.min(),z_filt.max())
z_full = np.arange(zmin,zmax+dz,dz)
nz = len(z_full)
# using kernel density functions from scikit-learn neighbors
# gaussian kernels will reflect more accurate distributions of the data
# with less sensitivity to sampling width than histograms (tophat kernels)
kde = sklearn.neighbors.KernelDensity(bandwidth=dz,kernel='gaussian')
kde.fit(z_filt[:,None])
# kde score_samples outputs are normalized log density functions
hist = np.exp(kde.score_samples(z_full[:,None]) + np.log(nz*dz))
# smooth histogram before determining differentials
gw = scipy.signal.gaussian(nz,4)
hist_smooth = scipy.signal.convolve(hist, gw/gw.sum(), mode='same')
# First differentials to find zero crossings
# histogram 1st differential
dhist = np.zeros((nz))
# forward differentiation for starting point
dhist[0] = hist_smooth[1] - hist_smooth[0]
# backward differentiation for end point
dhist[-1] = hist_smooth[-1] - hist_smooth[-2]
# centered differentiation for all others
dhist[1:-1] = (hist_smooth[2:] - hist_smooth[0:-2])/2.0
# find positive peaks above amplitude threshold (percent of max)
# by calculating the histogram differentials
# signal amplitude threshold greater than 10% of max or 5.5xbackground rate
HistThreshold = np.max([5.5*N_BG, AmpThreshold*np.max(hist_smooth)])
n_peaks = np.count_nonzero((np.sign(dhist[0:-1]) >= 0) & (np.sign(dhist[1:]) < 0) &
((hist_smooth[0:-1] > HistThreshold) | (hist_smooth[1:] > HistThreshold)))
n_peaks = np.min([n_peaks,PEAKS])
peak_index, = np.nonzero((np.sign(dhist[0:-1]) >= 0) & (np.sign(dhist[1:]) < 0) &
((hist_smooth[0:-1] > HistThreshold) | (hist_smooth[1:] > HistThreshold)))
# sort peak index by amplitude of peaks (descending from max to min)
# and truncate to a finite number of peaks
sorted_peaks = np.argsort(hist[peak_index])[::-1]
peak_index = peak_index[sorted_peaks][:n_peaks]
# amplitude of the maximum peak
max_amp = hist[peak_index][0]
# cumulative probability distribution function of initial histogram
hist_cpdf = np.cumsum(hist/np.sum(hist))
# IQR: first and third quartiles (25th and 75th percentiles)
# RDE: 16th and 84th percentiles
Q1,Q3,P16,P84 = np.interp([0.25,0.75,0.16,0.84],hist_cpdf,z_full)
# create priors list
priors = []
lower_bound = []
upper_bound = []
for i,p in enumerate(peak_index):
if (FIT_TYPE == 'gaussian'):
# Fit Gaussian functions to photon event histogram
# a*: amplitude of waveform
# r*: range from differential index
# w*: width as 0.75*IQR
priors.append([hist[p],z_full[p],0.75*(Q3-Q1)])
# bounds of each parameter
# amplitude: 0 to histogram max+5.5xbackground rate
# range: zmin to zmax
# width: sz to half width of z
lower_bound.extend([0,zmin,dz])
upper_bound.extend([max_amp+5.5*N_BG,zmax,(zmax-zmin)/2.0])
elif (FIT_TYPE == 'general'):
# Fit Generalized Gaussian functions to photon event histogram
# a*: amplitude of waveform
# r*: range from differential index
# w*: width as 0.75*IQR
# p*: shape parameter = gaussian sqrt(2)
priors.append([hist[p],z_full[p],0.75*(Q3-Q1),np.sqrt(2)])
# bounds of each parameter
# amplitude: 0 to histogram max+5.5xbackground rate
# range: zmin to zmax
# width: sz to half width of z
# shape: positive
lower_bound.extend([0,zmin,dz,0])
upper_bound.extend([max_amp+5.5*N_BG,zmax,(zmax-zmin)/2.0,np.inf])
# run optimized curve fit with Levenberg-Marquardt algorithm
fit = fit_histogram(z_full,hist,priors,lower_bound,upper_bound,
FIT_TYPE=FIT_TYPE)
# add to number of iterations performed
n_iter += 1
# height fits and height fit errors
height = fit['height'].copy()
amplitude = fit['amplitude'].copy()
height_errors = fit['error'].copy()
# minimum and maximum heights
min_peak = np.min(fit['height'])
max_peak = np.max(fit['height'])
# save MSE and DOF for error analysis
MSE = np.copy(fit['MSE'])
DOF = np.copy(fit['DOF'])
# Root mean square error
RMSE = np.sqrt(fit['MSE'])
# Normalized root mean square error
NRMSE = RMSE/(zmax-zmin)
# histogram fit
model = np.copy(fit['model'])
# histogram fit residuals
resid = np.copy(fit['residuals'])
# cumulative probability distribution function of initial histogram
cpdf = np.cumsum(resid/np.sum(resid))
# interpolate residuals to percentiles of interest for statistics
Q1,Q3,MEDIAN,P16,P84 = np.interp([0.25,0.75,0.5,0.16,0.84],cpdf,z_full)
# IQR: first and third quartiles (25th and 75th percentiles)
# RDE: 16th and 84th percentiles
IQR = 0.75*(Q3-Q1)
RDE = 0.50*(P84-P16)
# checking if any residuals are outside of the window
window = np.max([H_win_min,6.0*RDE,0.5*window_p1])
# filter out using median statistics and refit
filt_p2 = np.copy(filt_p1)
filt_p1 = np.copy(filt)
filt, = np.nonzero((z > (min_peak-window/2.0)) & (z < (max_peak+window/2.0)))
# save iteration of window
window_p1 = np.copy(window)
# run only if number of points is above number of terms
n_rem = np.count_nonzero((z > (min_peak-window/2.0)) & (z < (max_peak+window/2.0)))
nz = (np.max(z[filt])-np.min(z[filt]))//dz + 1
FLAG1 = ((nz - n_terms) > 10) & ((n_rem - n_terms) > 10)
# maximum number of iterations to prevent infinite loops
FLAG2 = (n_iter <= ITERATE)
# compare indices over two iterations to prevent false stoppages
FLAG3 = (set(filt) != set(filt_p1)) | (set(filt_p1) != set(filt_p2))
# return reduced model fit
FLAG3 = (set(filt) == set(filt_p1))
if FLAG1 & FLAG3 & (window <= H_win_max) & (n_peaks > 0):
# calculate time with respect to mean of fit heights
t_full = -2*(z_full-np.mean(height))/c
# return values
return {'height':height, 'error':height_errors, 'amplitude':amplitude,
'MSE':MSE, 'NRMSE':NRMSE, 'residuals':resid, 'time': t_full,
'model':model, 'DOF':DOF, 'count':n_max, 'indices':indices,
'iterations':n_iter, 'window':window, 'RDE':RDE, 'peaks':n_peaks}
else:
raise ValueError('No valid fit found')
# PURPOSE: optimally fit a function to the photon event histogram
# with Levenberg-Marquardt algorithm
def fit_histogram(z, hist, priors, lower_bound, upper_bound, FIT_TYPE=None):
"""
Optimally fit a function to the photon event histogram with
Levenberg-Marquardt algorithm
"""
# create lists for the initial parameters
# parameters, and functions for each maximum
plist = []
flist = []
n_peaks = len(priors)
# function formatting string and parameter list for each fit type
if (FIT_TYPE == 'gaussian'):
# summation of gaussian functions with:
# peak amplitudes a*
# peak ranges r* (mean)
# peak widths w* (standard deviation)
# Gaussian function formatting string and parameters
function = 'a{0:d}*np.exp(-(x-r{0:d})**2.0/(2*w{0:d}**2))'
parameters = 'a{0:d}, r{0:d}, w{0:d}'
elif (FIT_TYPE == 'general'):
# summation of generalized gaussian functions with:
# peak amplitudes a*
# peak ranges r* (mean)
# peak widths w* (standard deviation)
# shape parameter p* (gaussian=sqrt(2))
# Generalized Gaussian function formatting string and parameters
function = 'a{0:d}*np.exp(-np.abs(x-r{0:d})**(p{0:d}**2.0)/(2*w{0:d}**2))'
parameters = 'a{0:d}, r{0:d}, w{0:d}, p{0:d}'
# fit decomposition functions to photon event histograms
for n,p in enumerate(priors):
# parameter list for peak n
plist.append(parameters.format(n))
# function definition list for peak n
flist.append(function.format(n))
# initial parameters for iteration n
p0 = np.concatenate((priors),axis=0)
# variables for iteration n
lambda_parameters = ', '.join([p for p in plist])
# full function for iteration n
lambda_function = ' + '.join([f for f in flist])
# tuple for parameter bounds (lower and upper)
bounds = (lower_bound, upper_bound)
# create lambda function for iteration n
# lambda functions are inline definitions
# with the parameters, variables and function definition
fsum = eval('lambda x, {0}: {1}'.format(lambda_parameters, lambda_function))
# optimized curve fit with Levenberg-Marquardt algorithm
# with the initial guess parameters p0 and parameter bounds
popt, pcov = scipy.optimize.curve_fit(fsum,z,hist,p0=p0,bounds=bounds)
# modelled histogram fit
model = fsum(z, *popt)
# 1 standard deviation errors in parameters
perr = np.sqrt(np.diag(pcov))
# number of points for fit and number of terms in fit
n_max = len(hist)
n_terms = len(p0)
# extract function outputs
if (FIT_TYPE == 'gaussian'):
# Gaussian function outputs
n = np.arange(n_peaks)*3
peak_amplitude = popt[n]
peak_height = popt[n+1]
peak_height_error = perr[n+1]
peak_stdev = popt[n+2]
elif (FIT_TYPE == 'general'):
# Generalized Gaussian function outputs
n = np.arange(n_peaks)*4
peak_amplitude = popt[n]
peak_height = popt[n+1]
peak_height_error = perr[n+1]
peak_stdev = popt[n+2]
# residual of fit
res = hist - model
# nu = Degrees of Freedom = number of measurements-number of parameters
nu = n_max - n_terms
# Mean square error
# MSE = (1/nu)*sum((Y-X*B)**2)
MSE = np.dot(np.transpose(hist - model),(hist - model))/nu
# Default is 95% confidence interval
alpha = 1.0 - (0.95)
# Student T-Distribution with D.O.F. nu
# t.ppf parallels tinv in matlab
tstar = scipy.stats.t.ppf(1.0-(alpha/2.0),nu)
return {'height':peak_height, 'amplitude':peak_amplitude,
'error':tstar*peak_height_error, 'stdev': peak_stdev,
'model':model, 'residuals':np.abs(res), 'MSE':MSE, 'DOF':nu}
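# For reference, with two gaussian peaks the dynamically built fsum above is
# equivalent to:
# lambda x, a0, r0, w0, a1, r1, w1: (a0*np.exp(-(x-r0)**2.0/(2*w0**2)) +
#     a1*np.exp(-(x-r1)**2.0/(2*w1**2)))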
# PURPOSE: calculate delta_time, latitude and longitude of the segment center
def fit_geolocation(var, distance_along_X, X_atc):
"""
Calculate the average of photon event variables by fitting with respect
to the center of the along-track coordinates
"""
# calculate x relative to centroid point
rel_x = distance_along_X - X_atc
# design matrix
XMAT = np.transpose([np.ones_like((distance_along_X)),rel_x])
# Standard Least-Squares fitting (the [0] denotes coefficients output)
beta_mat = np.linalg.lstsq(XMAT,var,rcond=-1)[0]
# return the fitted geolocation
return beta_mat[0]
# PURPOSE: calculate the average value from two segments
def segment_mean(var, **kwargs):
"""
Calculate the average value from two segments with possible invalid values
"""
# verify that data is masked array
if not isinstance(var, np.ma.MaskedArray):
var = np.ma.array(var)
# set default keyword arguments
kwargs.setdefault('fill_value',var.fill_value)
# verify mask is set for fill values or nan points
var.mask = ((var.data == var.fill_value) | np.isnan(var.data))
# update and replace fill values
var.data[var.mask] = var.fill_value
# calculate segment means
ave = np.ma.mean([var[0:-1],var[1:]],axis=0)
# update and replace fill values
ave.fill_value = kwargs['fill_value']
ave.data[ave.mask] = ave.fill_value
return ave
# PURPOSE: estimate mean and median first photon bias corrections
def calc_first_photon_bias(temporal_residuals,n_pulses,n_pixels,dead_time,dt,
METHOD='direct',ITERATE=20):
"""
Estimate mean and median first photon bias corrections
"""
# create a histogram of the temporal residuals
t_full = np.arange(temporal_residuals.min(),temporal_residuals.max()+dt,dt)
nt = len(t_full)
# number of input photon events
cnt = len(temporal_residuals)
# using kernel density functions from scikit-learn neighbors
# gaussian kernels will reflect more accurate distributions of the data
# with less sensitivity to sampling width than histograms (tophat kernels)
kde = sklearn.neighbors.KernelDensity(bandwidth=dt,kernel='gaussian')
kde.fit(temporal_residuals[:,None])
# kde score_samples outputs are normalized log density functions
hist = np.exp(kde.score_samples(t_full[:,None]) + np.log(cnt*dt))
N0_full = hist/(n_pulses*n_pixels)
# centroid of initial histogram
hist_centroid = np.sum(t_full*hist)/np.sum(hist)
'''
Spatial pyramid kernel implementation with Python.
Automates the process of reading the data and executes the SVM with the specified kernel parameters
through the terminal.
__authors__ = <NAME>, <NAME> and <NAME>
'''
#####################################################################################################
# IMPORTING LIBRARIES
#####################################################################################################
import argparse
import numpy as np
import pandas as pd
from time import time
from functools import wraps
from collections import Counter
from sklearn import svm
from sklearn.model_selection import train_test_split
#####################################################################################################
# UTILITY FUNCTIONS
#####################################################################################################
def print_duration(start, end):
''' Display the duration of an execution in the format -> 00:00:00.00
----------
PARAMETERS
- start, end: time.time() object representing CPU time at a certain moment
----------
RETURNS
- None
'''
hours, rem = divmod(end - start, 3600)
minutes, seconds = divmod(rem, 60)
print("{:0>2}:{:0>2}:{:05.2f}\n".format(int(hours), int(minutes), seconds))
def timing(f):
''' Decorator used to calculate the time used to compute a function
----------
PARAMETERS
- f: executable function
----------
RETURNS
- returns a wrapper for time calculation for the function f
'''
@wraps(f)
def wrapper(*args, **kwargs):
start = time()
result = f(*args, **kwargs)
end = time()
# Display the elapsed time in pretty format -> 00:00:00.00
print(f"Elapsed time for {f.__name__}(): ", end = "")
print_duration(start, end)
return result
return wrapper
@timing
def read_data(data_path):
''' Read a .csv file from the specified path
----------
PARAMETERS
- data_path: string with the name of the file to be read
(notice that data_path only contains the name of the file, and thus the file
must be located in the same directory as this 'histogram_svm.py' script)
----------
RETURNS
- a pd.DataFrame with the representation of the data
'''
# Specify dtypes and column names
length = int(data_path.split('.')[0][2:])**2
dtypes = {'pixel-' + str(i): 'uint8' for i in range(1, length +1)}
dtypes.update({'label' : 'category'})
colnames = list(dtypes.keys())
print('-' * 60)
print(f"Reading {data_path}...")
data = pd.read_csv(
data_path, header = None, names = colnames, dtype = dtypes
)
# Output some metrics of the data file
print(f"train.cv file has {data.shape[0]} rows and {data.shape[1]} columns")
print(f"Memory usage: {round(data.memory_usage().sum() / 1024**2, 3)} Mb")
print('-' * 60)
return data
@timing
def quantization(data, n_bins):
''' Returns the dataframe with all the values quantized into n_bins levels.
---------
PARAMETERS
- data: pd.DataFrame to be quantized (without the response value 'target')
- n_bins: integer representing the number of levels of the quantization
---------
RETURNS
- a pd.DataFrame with all the values quantized
'''
print('-' * 60)
print(f"Quantizing into {n_bins} levels...")
print('-' * 60)
    # Since the colormap is grayscale, we only have 255 levels
l = 255 // n_bins
# Define the mapping ranges
mapping = {range((i*l), (i*l + l)): i for i in range(n_bins)}
# Apply the quantization elementwise in the dataframe
return data.applymap(lambda x: next((v for k, v in mapping.items() if x in k), 0))
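# Hedged illustration (not part of the original script; values are made up): with
# n_bins = 4 the level width is 255 // 4 = 63, so pixels 0-62 map to 0, 63-125 to 1,
# 126-188 to 2, 189-251 to 3, and the few values outside every range (252-255)
# fall back to 0 through the `next(..., 0)` default.
def _quantization_example():
    demo = pd.DataFrame({'pixel-1': [0, 70, 130, 200, 255]})
    return quantization(demo, n_bins = 4)   # -> levels [0, 1, 2, 3, 0]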
#####################################################################################################
# HISTOGRAM KERNEL FUNCTION
#####################################################################################################
def histogram_kernel_wrapper(L: int = 0):
# Global wrapper variables (# parameters, and image resolution (res x res))
length, res = 0, 0
def divide_image(im, l):
''' Divide the image into equally sized blocks
----------
PARAMETERS
- im: flattened numpy.ndarray representing an image
- l: integer representing the level of divisions that should be performed
----------
RETURNS
- an array with a flattened numpy.ndarray of every block at every position
'''
# Compute resolution of each block
newres = res // (2**l)
# Unflatten the image array to its original size
im = np.reshape(im, newshape = (res, res))
# Split the image into 2^2l sub-blocks
splits = (im.reshape(res // newres, newres, -1, newres)
.swapaxes(1, 2)
.reshape(-1, newres, newres))
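        # e.g. with res = 8 and l = 1, newres = 4 and `splits` holds four 4x4
        # blocks in row-major order (top-left, top-right, bottom-left, bottom-right)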
# Return the flattened split of the image
return [split.flatten() for split in splits]
def compute_all_histograms(X):
        ''' Compute the Counter histograms of every image in X, for the whole
        image (level 0) and for every block of each of the L pyramid levels.
        ----------
        PARAMETERS
        - X: numpy.ndarray with one flattened image per row
        ----------
        RETURNS
        - a list of length L+1 whose i-th element holds the level-i histograms of all images
        '''
# We will have a total of L+1 different histograms
hist = []
for i in range(L + 1):
hist_i = []
# Compute the histogram for every image
for j in range(X.shape[0]):
# No splitting required if l = 0
if i == 0:
hist_i.append(Counter(X[j]))
# Else compute the image splits, and calculate the histogram for each block
else:
splits = divide_image(X[j], i)
# For every block
for k in range(len(splits)):
splits[k] = Counter(splits[k])
hist_i.append(splits)
# Append the histogram calculation of l = i to the global list
hist.append(hist_i)
return hist
def K(h1, h2):
''' Compute the histogram-based-kernel value between two images
----------
PARAMETERS
        - h1, h2: lists with the histograms (collections.Counter) of two images,
          one entry per pyramid level (the whole image plus the L block levels)
        ----------
        RETURNS
        - a float representing the value of the kernel between the two images
'''
# For I_0
k_ij = (1/2**L) * (sum((h1[0] & h2[0]).values()) / length)
# For every level of partitioning (I_1, I_2, ..., I_L):
for l in range(1, L + 1):
# Factor in the l-th iteration
factor = 1 / (2**(L - l + 1))
# Compute and add histogram intersection of every block
for k in range(len(h1[l])):
k_ij += factor * (sum((h1[l][k] & h2[l][k]).values()) / length)
return k_ij
def hist_kernel(X, Y):
''' Histogram kernel function -> computes the Gram Matrix of the kernel
----------
PARAMETERS
- X,Y: numpy.ndarray representing the data
(notice that while training, Y = X, and thus the Gram Matrix is symmetric)
----------
RETURNS
- Gram_matrix: numpy.ndarray representing the Gram_matrix of the histogram kernel
'''
# Update image resolution
nonlocal length
length = X.shape[1]
nonlocal res
res = int(np.sqrt(length))
# Initialize the Gram matrix with zeros (allocate memory)
Gram_matrix = np.zeros((X.shape[0], Y.shape[0]))
# If X == Y, i.e. we are training the SVM, the Gram Matrix will be symmetric, and
# thus we can halve the total number of computations, as G[i,j] = G[j,i]
if X.shape[0] == Y.shape[0]:
# Construct the histogram matrices
histograms = compute_all_histograms(X)
for i in range(X.shape[0]):
                # Get all the histograms (for all partitions) of image i
h1 = [histograms[k][i] for k in range(L + 1)]
for j in range(i, X.shape[0]):
                    # Get all the histograms (for all partitions) of image j
h2 = [histograms[k][j] for k in range(L + 1)]
# Compute the intersection of image i and image j (histograms)
Gram_matrix[i, j] = K(h1, h2)
Gram_matrix[j, i] = Gram_matrix[i, j].copy()
        # Otherwise the matrix is not symmetric, so we cannot reuse computations
else:
# Construct the histogram matrices
            # Note that they are not the same when we are not training
histogramsX = compute_all_histograms(X)
histogramsY = compute_all_histograms(Y)
for i in range(X.shape[0]):
                # Get all the histograms (for all partitions) for image i
h1 = [histogramsX[k][i] for k in range(L + 1)]
for j in range(Y.shape[0]):
                    # Get all the histograms (for all partitions) for image j
h2 = [histogramsY[k][j] for k in range(L + 1)]
# Compute the intersection of image i and image j (histograms)
Gram_matrix[i, j] = K(h1, h2)
return Gram_matrix
return hist_kernel
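# Hedged usage sketch (synthetic data, not part of the original pipeline): the
# wrapper returns a callable kernel, which scikit-learn evaluates into the Gram
# matrix, so it can be plugged straight into svm.SVC.
def _histogram_kernel_usage_example():
    rng = np.random.default_rng(0)
    X_demo = rng.integers(0, 4, size = (20, 64))        # twenty flattened 8x8 images
    y_demo = np.array([0, 1] * 10)                      # two balanced classes
    clf = svm.SVC(kernel = histogram_kernel_wrapper(L = 1))
    clf.fit(X_demo, y_demo)
    return clf.predict(X_demo[:5])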
#####################################################################################################
# MAIN PROGRAM (fit and predict with the SVM)
#####################################################################################################
if __name__ == '__main__':
# Parser arguments (arguments taken by the script)
parser = argparse.ArgumentParser(
description = 'Compute the histogram kernel SVM.')
parser.add_argument(
'--data', type = str, help = 'File from where to read image data.')
parser.add_argument(
'--quantization', type = int, default = None, help = 'Quantization level for the histograms.')
parser.add_argument(
'--L', type = int, default = 3, help = 'Level of image splitting in the kernel.')
parser.add_argument(
'--train_frac', type = float, default = .25,
help = 'Fraction of train samples to be taken from data.')
parser.add_argument(
'--test_frac', type = float, default = .1,
help = 'Fraction of test samples to be taken from data.')
# Get the arguments
args = parser.parse_args()
data_path = args.data # path to data file
n_bins = args.quantization; L = args.L # kernel parameters
train_frac = args.train_frac; test_frac = args.test_frac # train_test splitting
print(f"Histogram-kernel SVM with L = {L} and quantization = {n_bins}")
print(f"train size = {6379 // int(train_frac**(-1))}, test size = {6379 // int(test_frac**(-1))}")
# Read the data
df = read_data(data_path)
# Split the target (label) from data
Y = np.array(df.label)
df = df.drop(columns = ['label'])
# Check if quantization is required
if n_bins is not None:
df = quantization(df, n_bins)
# Convert data to a numpy ndarray
    X = np.array(df)
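    # Hedged driver sketch (assumptions: a plain stratified split and default SVC
    # settings; the exact evaluation steps intended by this script may differ)
    X_train, X_test, y_train, y_test = train_test_split(
        X, Y, train_size = train_frac, test_size = test_frac, stratify = Y)
    clf = svm.SVC(kernel = histogram_kernel_wrapper(L))
    clf.fit(X_train, y_train)
    print(f"Test accuracy: {clf.score(X_test, y_test):.4f}")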
# Copyright 2019 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Model-Test Coverage Metrics.
"""
from abc import abstractmethod
from collections import defaultdict
import math
import numpy as np
from mindspore import Tensor
from mindspore import Model
from mindspore.train.summary.summary_record import _get_summary_tensor_data
from mindarmour.utils._check_param import check_model, check_numpy_param, check_int_positive, \
check_param_type, check_value_positive
from mindarmour.utils.logger import LogUtil
LOGGER = LogUtil.get_instance()
TAG = 'CoverageMetrics'
class CoverageMetrics:
"""
The abstract base class for Neuron coverage classes calculating coverage metrics.
    As is well known, each neuron output of a network will have an output range after training (we call it the original
range), and test dataset is used to estimate the accuracy of the trained network. However, neurons' output
distribution would be different with different test datasets. Therefore, similar to function fuzz, model fuzz means
testing those neurons' outputs and estimating the proportion of original range that has emerged with test
datasets.
Reference: `DeepGauge: Multi-Granularity Testing Criteria for Deep Learning Systems
<https://arxiv.org/abs/1803.07519>`_
Args:
model (Model): The pre-trained model which waiting for testing.
incremental (bool): Metrics will be calculate in incremental way or not. Default: False.
batch_size (int): The number of samples in a fuzz test batch. Default: 32.
"""
def __init__(self, model, incremental=False, batch_size=32):
self._model = check_model('model', model, Model)
self.incremental = check_param_type('incremental', incremental, bool)
self.batch_size = check_int_positive('batch_size', batch_size)
self._activate_table = defaultdict(list)
@abstractmethod
def get_metrics(self, dataset):
"""
Calculate coverage metrics of given dataset.
Args:
dataset (numpy.ndarray): Dataset used to calculate coverage metrics.
Raises:
NotImplementedError: It is an abstract method.
"""
msg = 'The function get_metrics() is an abstract method in class `CoverageMetrics`, and should be' \
' implemented in child class.'
LOGGER.error(TAG, msg)
raise NotImplementedError(msg)
def _init_neuron_activate_table(self, data):
"""
Initialise the activate table of each neuron in the model with format:
{'layer1': [n1, n2, n3, ..., nn], 'layer2': [n1, n2, n3, ..., nn], ...}
Args:
data (numpy.ndarray): Data used for initialising the activate table.
Return:
            dict, an activate table with every neuron initialised to False.
"""
self._model.predict(Tensor(data))
layer_out = _get_summary_tensor_data()
if not layer_out:
msg = 'User must use TensorSummary() operation to specify the middle layer of the model participating in ' \
'the coverage calculation.'
LOGGER.error(TAG, msg)
raise ValueError(msg)
activate_table = defaultdict()
for layer, value in layer_out.items():
            activate_table[layer] = np.zeros(value.shape[1], bool)
return activate_table
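    # Hedged sketch (assumed MindSpore usage, not part of this module): the metrics
    # above can only see layers that the user exposes with a TensorSummary op, e.g.:
    #
    #     import mindspore.nn as nn
    #     import mindspore.ops as ops
    #
    #     class NetWithSummary(nn.Cell):
    #         def __init__(self):
    #             super().__init__()
    #             self.fc = nn.Dense(784, 10)
    #             self.summary = ops.TensorSummary()
    #
    #         def construct(self, x):
    #             out = self.fc(x)
    #             self.summary("fc_out", out)   # makes this layer visible to the metrics
    #             return out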
def _get_bounds(self, train_dataset):
"""
Update the lower and upper boundaries of neurons' outputs.
Args:
train_dataset (numpy.ndarray): Training dataset used for determine the neurons' output boundaries.
Return:
- numpy.ndarray, upper bounds of neuron' outputs.
- numpy.ndarray, lower bounds of neuron' outputs.
"""
upper_bounds = defaultdict(list)
lower_bounds = defaultdict(list)
batches = math.ceil(train_dataset.shape[0] / self.batch_size)
for i in range(batches):
inputs = train_dataset[i * self.batch_size: (i + 1) * self.batch_size]
self._model.predict(Tensor(inputs))
layer_out = _get_summary_tensor_data()
for layer, tensor in layer_out.items():
value = tensor.asnumpy()
value = np.mean(value, axis=tuple([i for i in range(2, len(value.shape))]))
min_value = np.min(value, axis=0)
max_value = np.max(value, axis=0)
if np.any(upper_bounds[layer]):
max_flag = upper_bounds[layer] > max_value
min_flag = lower_bounds[layer] < min_value
upper_bounds[layer] = upper_bounds[layer] * max_flag + max_value * (1 - max_flag)
lower_bounds[layer] = lower_bounds[layer] * min_flag + min_value * (1 - min_flag)
else:
upper_bounds[layer] = max_value
lower_bounds[layer] = min_value
return upper_bounds, lower_bounds
def _activate_rate(self):
"""
Calculate the activate rate of neurons.
"""
total_neurons = 0
activated_neurons = 0
for _, value in self._activate_table.items():
activated_neurons += np.sum(value)
total_neurons += len(value)
activate_rate = activated_neurons / total_neurons
return activate_rate
class NeuronCoverage(CoverageMetrics):
"""
Calculate the neurons activated coverage. Neuron is activated when its output is greater than the threshold.
Neuron coverage equals the proportion of activated neurons to total neurons in the network.
Args:
model (Model): The pre-trained model which waiting for testing.
threshold (float): Threshold used to determined neurons is activated or not. Default: 0.1.
incremental (bool): Metrics will be calculate in incremental way or not. Default: False.
batch_size (int): The number of samples in a fuzz test batch. Default: 32.
"""
def __init__(self, model, threshold=0.1, incremental=False, batch_size=32):
super(NeuronCoverage, self).__init__(model, incremental, batch_size)
threshold = check_param_type('threshold', threshold, float)
self.threshold = check_value_positive('threshold', threshold)
def get_metrics(self, dataset):
"""
Get the metric of neuron coverage: the proportion of activated neurons to total neurons in the network.
Args:
dataset (numpy.ndarray): Dataset used to calculate coverage metrics.
Returns:
float, the metric of 'neuron coverage'.
Examples:
>>> nc = NeuronCoverage(model, threshold=0.1)
>>> nc_metrics = nc.get_metrics(test_data)
"""
dataset = check_numpy_param('dataset', dataset)
batches = math.ceil(dataset.shape[0] / self.batch_size)
if not self.incremental or not self._activate_table:
self._activate_table = self._init_neuron_activate_table(dataset[0:1])
for i in range(batches):
inputs = dataset[i * self.batch_size: (i + 1) * self.batch_size]
self._model.predict(Tensor(inputs))
layer_out = _get_summary_tensor_data()
for layer, tensor in layer_out.items():
value = tensor.asnumpy()
value = np.mean(value, axis=tuple([i for i in range(2, len(value.shape))]))
activate = np.sum(value > self.threshold, axis=0) > 0
self._activate_table[layer] = np.logical_or(self._activate_table[layer], activate)
neuron_coverage = self._activate_rate()
return neuron_coverage
class TopKNeuronCoverage(CoverageMetrics):
"""
Calculate the top k activated neurons coverage. Neuron is activated when its output has the top k largest value in
that hidden layers. Top k neurons coverage equals the proportion of activated neurons to total neurons in the
network.
Args:
model (Model): The pre-trained model which waiting for testing.
top_k (int): Neuron is activated when its output has the top k largest value in that hidden layers. Default: 3.
incremental (bool): Metrics will be calculate in incremental way or not. Default: False.
batch_size (int): The number of samples in a fuzz test batch. Default: 32.
"""
def __init__(self, model, top_k=3, incremental=False, batch_size=32):
super(TopKNeuronCoverage, self).__init__(model, incremental=incremental, batch_size=batch_size)
self.top_k = check_int_positive('top_k', top_k)
def get_metrics(self, dataset):
"""
Get the metric of Top K activated neuron coverage.
Args:
dataset (numpy.ndarray): Dataset used to calculate coverage metrics.
Returns:
float, the metrics of 'top k neuron coverage'.
Examples:
>>> tknc = TopKNeuronCoverage(model, top_k=3)
>>> metrics = tknc.get_metrics(test_data)
"""
dataset = check_numpy_param('dataset', dataset)
batches = math.ceil(dataset.shape[0] / self.batch_size)
if not self.incremental or not self._activate_table:
self._activate_table = self._init_neuron_activate_table(dataset[0:1])
for i in range(batches):
inputs = dataset[i * self.batch_size: (i + 1) * self.batch_size]
self._model.predict(Tensor(inputs))
layer_out = _get_summary_tensor_data()
for layer, tensor in layer_out.items():
value = tensor.asnumpy()
if len(value.shape) > 2:
value = np.mean(value, axis=tuple([i for i in range(2, len(value.shape))]))
top_k_value = np.sort(value)[:, -self.top_k].reshape(value.shape[0], 1)
top_k_value = np.sum((value - top_k_value) >= 0, axis=0) > 0
self._activate_table[layer] = np.logical_or(self._activate_table[layer], top_k_value)
top_k_neuron_coverage = self._activate_rate()
return top_k_neuron_coverage
class SuperNeuronActivateCoverage(CoverageMetrics):
"""
Get the metric of 'super neuron activation coverage'. :math:`SNAC = |UpperCornerNeuron|/|N|`. SNAC refers to the
proportion of neurons whose neurons output value in the test set exceeds the upper bounds of the corresponding
neurons output value in the training set.
Args:
model (Model): The pre-trained model which waiting for testing.
train_dataset (numpy.ndarray): Training dataset used for determine the neurons' output boundaries.
incremental (bool): Metrics will be calculate in incremental way or not. Default: False.
batch_size (int): The number of samples in a fuzz test batch. Default: 32.
"""
def __init__(self, model, train_dataset, incremental=False, batch_size=32):
super(SuperNeuronActivateCoverage, self).__init__(model, incremental=incremental, batch_size=batch_size)
train_dataset = check_numpy_param('train_dataset', train_dataset)
self.upper_bounds, self.lower_bounds = self._get_bounds(train_dataset=train_dataset)
def get_metrics(self, dataset):
"""
Get the metric of 'strong neuron activation coverage'.
Args:
dataset (numpy.ndarray): Dataset used to calculate coverage metrics.
Returns:
float, the metric of 'strong neuron activation coverage'.
Examples:
>>> snac = SuperNeuronActivateCoverage(model, train_dataset)
>>> metrics = snac.get_metrics(test_data)
"""
dataset = check_numpy_param('dataset', dataset)
if not self.incremental or not self._activate_table:
self._activate_table = self._init_neuron_activate_table(dataset[0:1])
batches = math.ceil(dataset.shape[0] / self.batch_size)
for i in range(batches):
inputs = dataset[i * self.batch_size: (i + 1) * self.batch_size]
self._model.predict(Tensor(inputs))
layer_out = _get_summary_tensor_data()
for layer, tensor in layer_out.items():
value = tensor.asnumpy()
if len(value.shape) > 2:
value = np.mean(value, axis=tuple([i for i in range(2, len(value.shape))]))
activate = np.sum(value > self.upper_bounds[layer], axis=0) > 0
self._activate_table[layer] = np.logical_or(self._activate_table[layer], activate)
snac = self._activate_rate()
return snac
class NeuronBoundsCoverage(SuperNeuronActivateCoverage):
"""
Get the metric of 'neuron boundary coverage' :math:`NBC = (|UpperCornerNeuron| + |LowerCornerNeuron|)/(2*|N|)`,
where :math:`|N|` is the number of neurons, NBC refers to the proportion of neurons whose neurons output value in
the test dataset exceeds the upper and lower bounds of the corresponding neurons output value in the training
dataset.
Args:
model (Model): The pre-trained model which waiting for testing.
train_dataset (numpy.ndarray): Training dataset used for determine the neurons' output boundaries.
incremental (bool): Metrics will be calculate in incremental way or not. Default: False.
batch_size (int): The number of samples in a fuzz test batch. Default: 32.
"""
def __init__(self, model, train_dataset, incremental=False, batch_size=32):
super(NeuronBoundsCoverage, self).__init__(model, train_dataset, incremental=incremental, batch_size=batch_size)
def get_metrics(self, dataset):
"""
Get the metric of 'neuron boundary coverage'.
Args:
dataset (numpy.ndarray): Dataset used to calculate coverage metrics.
Returns:
float, the metric of 'neuron boundary coverage'.
Examples:
>>> nbc = NeuronBoundsCoverage(model, train_dataset)
>>> metrics = nbc.get_metrics(test_data)
"""
dataset = check_numpy_param('dataset', dataset)
if not self.incremental or not self._activate_table:
self._activate_table = self._init_neuron_activate_table(dataset[0:1])
batches = math.ceil(dataset.shape[0] / self.batch_size)
for i in range(batches):
inputs = dataset[i * self.batch_size: (i + 1) * self.batch_size]
self._model.predict(Tensor(inputs))
layer_out = _get_summary_tensor_data()
for layer, tensor in layer_out.items():
value = tensor.asnumpy()
if len(value.shape) > 2:
value = np.mean(value, axis=tuple([i for i in range(2, len(value.shape))]))
outer = np.logical_or(value > self.upper_bounds[layer], value < self.lower_bounds[layer])
activate = np.sum(outer, axis=0) > 0
self._activate_table[layer] = np.logical_or(self._activate_table[layer], activate)
nbc = self._activate_rate()
return nbc
class KMultisectionNeuronCoverage(SuperNeuronActivateCoverage):
"""
Get the metric of 'k-multisection neuron coverage'. KMNC measures how thoroughly the given set of test inputs
covers the range of neurons output values derived from training dataset.
Args:
model (Model): The pre-trained model which waiting for testing.
train_dataset (numpy.ndarray): Training dataset used for determine the neurons' output boundaries.
segmented_num (int): The number of segmented sections of neurons' output intervals. Default: 100.
incremental (bool): Metrics will be calculate in incremental way or not. Default: False.
batch_size (int): The number of samples in a fuzz test batch. Default: 32.
"""
def __init__(self, model, train_dataset, segmented_num=100, incremental=False, batch_size=32):
super(KMultisectionNeuronCoverage, self).__init__(model, train_dataset, incremental=incremental,
batch_size=batch_size)
self.segmented_num = check_int_positive('segmented_num', segmented_num)
self.intervals = defaultdict(list)
for keys in self.upper_bounds.keys():
self.intervals[keys] = (self.upper_bounds[keys] - self.lower_bounds[keys]) / self.segmented_num
def _init_k_multisection_table(self, data):
""" Initial the activate table."""
self._model.predict(Tensor(data))
layer_out = _get_summary_tensor_data()
activate_section_table = defaultdict()
for layer, value in layer_out.items():
            activate_section_table[layer] = np.zeros((value.shape[1], self.segmented_num), bool)
return activate_section_table
def get_metrics(self, dataset):
"""
Get the metric of 'k-multisection neuron coverage'.
Args:
dataset (numpy.ndarray): Dataset used to calculate coverage metrics.
Returns:
float, the metric of 'k-multisection neuron coverage'.
Examples:
>>> kmnc = KMultisectionNeuronCoverage(model, train_dataset, segmented_num=100)
>>> metrics = kmnc.get_metrics(test_data)
"""
dataset = check_numpy_param('dataset', dataset)
if not self.incremental or not self._activate_table:
self._activate_table = self._init_k_multisection_table(dataset[0:1])
batches = math.ceil(dataset.shape[0] / self.batch_size)
for i in range(batches):
inputs = dataset[i * self.batch_size: (i + 1) * self.batch_size]
self._model.predict(Tensor(inputs))
layer_out = _get_summary_tensor_data()
for layer, tensor in layer_out.items():
value = tensor.asnumpy()
value = np.mean(value, axis=tuple([i for i in range(2, len(value.shape))]))
                hits = np.floor((value - self.lower_bounds[layer]) / self.intervals[layer])
"""
Collection of tests for unified gradient functions
"""
# global
import pytest
import numpy as np
from numbers import Number
# local
import ivy
import ivy.numpy
import ivy_tests.helpers as helpers
from ivy.core.container import Container
# variable
@pytest.mark.parametrize(
"object_in", [[], [0.], [1], [True], [[1., 2.]]])
@pytest.mark.parametrize(
"dtype_str", ['float16', 'float32', 'float64'])
def test_variable(object_in, dtype_str, dev_str, call):
if call is helpers.tf_graph_call:
# cannot create variables as part of compiled tf graph
pytest.skip()
if call in [helpers.mx_call] and dtype_str == 'int16':
# mxnet does not support int16
pytest.skip()
if len(object_in) == 0 and call is helpers.mx_call:
# mxnet does not support 0-dimensional variables
pytest.skip()
# smoke test
ret = ivy.variable(ivy.array(object_in, dtype_str, dev_str))
# type test
if call is not helpers.np_call:
assert ivy.is_variable(ret)
# cardinality test
assert ret.shape == np.array(object_in).shape
# value test
assert np.allclose(call(ivy.variable, ivy.array(object_in, dtype_str, dev_str)),
np.array(object_in).astype(dtype_str))
# compilation test
if call in [helpers.torch_call]:
# pytorch scripting does not support string devices
return
if not ivy.wrapped_mode():
helpers.assert_compilable(ivy.variable)
# is_variable
@pytest.mark.parametrize(
"object_in", [[], [0.], [1], [True], [[1., 2.]]])
@pytest.mark.parametrize(
"dtype_str", ['float16', 'float32', 'float64'])
def test_is_variable(object_in, dtype_str, dev_str, call):
if call is helpers.tf_graph_call:
# cannot create variables as part of compiled tf graph
pytest.skip()
if call in [helpers.mx_call] and dtype_str == 'int16':
# mxnet does not support int16
pytest.skip()
if len(object_in) == 0 and call is helpers.mx_call:
# mxnet does not support 0-dimensional variables
pytest.skip()
# smoke test
non_var = ivy.array(object_in, dtype_str, dev_str)
var = ivy.variable(ivy.array(object_in, dtype_str, dev_str))
non_var_res = ivy.is_variable(non_var)
var_res = ivy.is_variable(var)
# type test
assert ivy.is_array(non_var)
if call is not helpers.np_call:
assert ivy.is_variable(var)
if call in [helpers.np_call, helpers.jnp_call]:
# numpy and jax do not support flagging variables
pytest.skip()
# value test
assert non_var_res is False
assert var_res is True
# compilation test
if not ivy.wrapped_mode():
helpers.assert_compilable(ivy.is_variable)
# variable data
@pytest.mark.parametrize(
"object_in", [[], [0.], [1], [True], [[1., 2.]]])
@pytest.mark.parametrize(
"dtype_str", ['float16', 'float32', 'float64'])
def test_variable_data(object_in, dtype_str, dev_str, call):
if call is helpers.tf_graph_call:
# cannot create variables as part of compiled tf graph
pytest.skip()
if call in [helpers.mx_call] and dtype_str == 'int16':
# mxnet does not support int16
pytest.skip()
if len(object_in) == 0 and call is helpers.mx_call:
# mxnet does not support 0-dimensional variables
pytest.skip()
# smoke test
var = ivy.variable(ivy.array(object_in, dtype_str, dev_str))
var_data = ivy.variable_data(var)
# type test
if call is not helpers.np_call:
# numpy does not support variables
assert ivy.is_variable(var)
if call is not helpers.mx_call:
# jax variables and their data are the same instance
assert not ivy.is_variable(var_data, exclusive=True)
assert ivy.is_array(var_data)
# cardinality test
assert var_data.shape == var.shape
# value test
assert np.allclose(ivy.to_numpy(var), ivy.to_numpy(var_data))
# stop_gradient
@pytest.mark.parametrize(
"x_raw", [[0.]])
@pytest.mark.parametrize(
"dtype_str", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [('array', ivy.array), ('var', helpers.var_fn)])
def test_stop_gradient(x_raw, dtype_str, tensor_fn, dev_str, call):
# smoke test
fn_name, tensor_fn = tensor_fn
x = tensor_fn(x_raw, dtype_str, dev_str)
ret = ivy.stop_gradient(x)
# type test
if fn_name == 'array':
assert ivy.is_array(ret)
elif call is not helpers.np_call:
# Numpy does not support variables, is_variable() always returns False
assert ivy.is_variable(ret)
# cardinality test
assert ret.shape == x.shape
# value test
if call is not helpers.tf_graph_call:
# Tf graph mode cannot create variables as part of the computation graph
assert np.array_equal(call(ivy.stop_gradient, x), ivy.numpy.array(x_raw, dtype_str))
# compilation test
if call in [helpers.torch_call]:
# pytorch scripting does not support attribute setting
return
if not ivy.wrapped_mode():
helpers.assert_compilable(ivy.stop_gradient)
# execute_with_gradients
@pytest.mark.parametrize(
"func_n_xs_n_ty_n_te_n_tg", [(lambda xs_in: (xs_in['w'] * xs_in['w'])[0],
                                  Container({'w': [3.]}), np.array(9.),
import numpy as np
from plico_interferometer import interferometer
from plico_dm import deformableMirror
from astropy.io import fits
from functools import reduce
import matplotlib.pyplot as plt
from arte.types.mask import CircularMask
from arte.utils.zernike_generator import ZernikeGenerator
from tesi_ao.mems_command_to_position_linearization_measurer import CommandToPositionLinearizationMeasurer
from tesi_ao.mems_command_to_position_linearization_analyzer import CommandToPositionLinearizationAnalyzer
from tesi_ao.mems_command_linearization import MemsCommandLinearization
def create_devices():
wyko = interferometer('172.16.17.32', 7300)
bmc = deformableMirror('192.168.3.11', 7000)
return wyko, bmc
# def _2dgaussian(self, X, amplitude, x0, y0, sigmax, sigmay, offset):
# y, x = X
# z = np.zeros((len(y), len(x)), dtype='float')
# N = amplitude # *0.5 / (np.pi * sigmax * sigmay)
# for xi in np.arange(len(x)):
# a = 0.5 * ((xi - x0) / sigmax)**2
# for yi in np.arange(len(y)):
# b = 0.5 * ((yi - y0) / sigmay)**2
#
# z[yi, xi] = N * np.exp(-(a + b)) + offset
# return z.ravel()
#
# def _gaussian_fitting(self, act_idx, cmd_index):
# wf = self._wfs[act_idx, cmd_index]
# wf2 = self._wfs[act_idx, 2]
# b, t, l, r = self._get_max_roi(act_idx)
# wfroi = wf[b:t, l:r]
# wfroi2 = wf2[b:t, l:r]
# coord_max = np.argwhere(np.abs(wfroi2) == np.max(np.abs(wfroi2)))[0]
# x0 = coord_max[1]
# y0 = coord_max[0]
# #z = wfroi[wfroi.data != 0.]
#
# #z = wfroi
#
# NvalidX = (wfroi.mask[y0, :] == False).sum()
# NvalidY = (wfroi.mask[:, x0] == False).sum()
# x = np.arange(NvalidX, dtype='float')
# y = np.arange(NvalidY, dtype='float')
#
# Z = []
# for yi in range(wfroi.shape[0]):
# for xi in range(wfroi.shape[1]):
# if(wfroi[yi, xi].data != 0.):
# Z.append(wfroi[yi, xi])
#
# Z = np.array(Z, dtype='float')
#
# Z = wfroi.compressed()
#
# A0 = self._max_wavefront(act_idx, cmd_index)
#
# sigma0 = 25.
# sigmax = sigma0
# sigmay = sigma0
# offset = 0.
# starting_values = [A0, x0, y0, sigmax, sigmay, offset]
# X = y, x
#
# #err_z = Z.std() * np.ones(len(x) * len(y))
#
# fpar, fcov = curve_fit(self._2dgaussian, X, Z,
# p0=starting_values, absolute_sigma=True)
# #err_fpar = np.sqrt(np.diag(fcov))
# print('1curve_fit done')
# error = (Z - self._2dgaussian(X, *fpar))
# starting_values = [fpar[0], fpar[1],
# fpar[2], fpar[3], fpar[4], fpar[5]]
# fpar, fcov = curve_fit(
# self._2dgaussian, X, Z, p0=starting_values, sigma=error, absolute_sigma=True)
# print('2curve_fit done')
# return fpar[0]
#
# def _compute_gaussian_amplitude_deflection(self):
# self._max_deflection = np.zeros(
# (self._cmd_vector.shape[0], self._cmd_vector.shape[1]))
# for act in range(self._cmd_vector.shape[0]):
# for cmd_idx in range(self._cmd_vector.shape[1]):
# self._max_deflection[act, cmd_idx] = self._gaussian_fitting(
# act, cmd_idx)
#
# def compute_gaussian_linearization(self):
# self._compute_gaussian_amplitude_deflection()
#
# return MemsCommandLinearization(
# self._actuators_list,
# self._cmd_vector,
# self._max_deflection,
# self._reference_shape_tag)
# def plot_interpolated_function(mcl):
# '''
# F_int(pos)=cmd
# '''
# plt.figure()
# plt.clf()
# for idx, act in enumerate(mcl._actuators_list):
# a = np.min(mcl._deflection[act])
# b = np.max(mcl._deflection[act])
# xx = np.linspace(a, b, 1000)
# plt.plot(mcl._finter[act](xx), xx / 1.e-9, '.-')
# plt.xlabel('Command [au]', size=25)
# plt.ylabel('Deflection [nm]', size=25)
# plt.title('Calibration curve per actuator', size=25)
# plt.grid()
def _plot_acquired_measures(mcl):
plt.figure()
plt.clf()
for idx, act in enumerate(mcl._actuators_list):
plt.plot(mcl._cmd_vector[idx], mcl._deflection[idx] / 1.e-9, '.-')
plt.xlabel('Command [au]', size=25)
plt.ylabel('Deflection [nm]', size=25)
plt.title('Acquired Measures per actuator', size=25)
plt.grid()
# def plot_single_curve(mcl, act):
# '''
# F_int(pos)=cmd
# '''
# plt.figure()
# plt.clf()
# a = np.min(mcl._deflection[act])
# b = np.max(mcl._deflection[act])
# xx = np.linspace(a, b, 1000)
# plt.plot(mcl._cmd_vector[act], mcl._deflection[act] /
# 1.e-9, 'or', label='sampling points')
# plt.plot(mcl._finter[act](xx), xx / 1.e-9, '-', label='finter')
# plt.title('Calibration Curve: act#%d' % act, size=25)
# plt.xlabel('Commands [au]', size=25)
# plt.ylabel('Deflection [nm]', size=25)
# plt.grid()
# plt.legend(loc='best')
def _plot_pos_vs_cmd(mcl, act):
'''
F_int(cmd)=pos
'''
plt.figure()
plt.clf()
plt.plot(mcl._cmd_vector[act], mcl._deflection[act] /
1.e-9, 'or', label='sampling points')
plt.title('act=%d' % act, size=25)
plt.ylabel('pos[nm]')
plt.xlabel('cmd[au]')
plt.grid()
a = np.min(mcl._cmd_vector[act])
b = np.max(mcl._cmd_vector[act])
vv = np.linspace(a, b, 1000)
plt.plot(vv, mcl._finter[act](vv) / 1.e-9, '-', label='finter')
plt.legend(loc='best')
def _plot_all_int_funcs(mcl):
plt.figure()
plt.clf()
for idx, act in enumerate(mcl._actuators_list):
a = np.min(mcl._cmd_vector[act])
b = np.max(mcl._cmd_vector[act])
vv = np.linspace(a, b, 1000)
plt.plot(vv, mcl._finter[act](vv) / 1.e-9, '.-', label='finter')
plt.xlabel('Command [au]', size=25)
plt.ylabel('Deflection [nm]', size=25)
plt.title('Calibration curve per actuator', size=25)
plt.grid()
class PupilMaskBuilder():
def __init__(self, wfmask):
self._wfmask = wfmask # is the interferometer mask!
def get_circular_mask(self, radius, center):
mask = CircularMask(self._wfmask.shape,
maskRadius=radius, maskCenter=center)
return mask # .mask()
def get_centred_circular_mask_wrt_interferometer_mask(self):
        # TODO: check that the False-valued data form a rectangular map
        # take a generic False pixel to reconstruct the width
        # and height of the rectangular False map
yFalsePixel = np.where(self._wfmask == False)[0][0]
xFalsePixel = np.where(self._wfmask == False)[1][0]
HeightInPixels = (self._wfmask[:, xFalsePixel] == False).sum()
WidthInPixels = (self._wfmask[yFalsePixel, :] == False).sum()
offsetX = (self._wfmask[yFalsePixel, 0:xFalsePixel] == True).sum()
offsetY = (self._wfmask[0:yFalsePixel, xFalsePixel] == True).sum()
# center of False map and origin of circular pupil in pixel
yc0 = offsetY + 0.5 * HeightInPixels
xc0 = offsetX + 0.5 * WidthInPixels
MaxRadiusInPixel = min(WidthInPixels, HeightInPixels) * 0.5
cmask = self.get_circular_mask(MaxRadiusInPixel, (yc0, xc0))
return cmask
def get_barycenter_of_false_pixels(self):
N_of_pixels = self._wfmask.shape[0] * self._wfmask.shape[1]
True_pixels = self._wfmask.sum()
False_pixels = N_of_pixels - True_pixels
coord_yi = np.where(self._wfmask == False)[0]
coord_xi = np.where(self._wfmask == False)[1]
yc = coord_yi.sum() / float(False_pixels)
xc = coord_xi.sum() / float(False_pixels)
return yc, xc
def get_number_of_false_pixels_along_barycenter_axis(self):
y, x = self.get_barycenter_of_false_pixels()
y = int(y)
x = int(x)
n_pixels_along_x = (self._wfmask[y, :] == False).sum()
n_pixels_along_y = (self._wfmask[:, x] == False).sum()
return n_pixels_along_y, n_pixels_along_x
def get_number_of_false_pixels_along_pixel_axis(self, yp, xp):
y = int(yp)
x = int(xp)
n_pixels_along_x = (self._wfmask[y, :] == False).sum()
n_pixels_along_y = (self._wfmask[:, x] == False).sum()
return n_pixels_along_y, n_pixels_along_x
def get_number_of_false_pixels_along_frame_axis(self):
n_pixels_along_x_axis = np.zeros(
self._wfmask.shape[1]) # shape[1]== len(y_axis)
n_pixels_along_y_axis = np.zeros(
self._wfmask.shape[0]) # shape[0]== len(x_axis)
n_pixels_along_x_axis = (self._wfmask == False).sum(axis=1)
n_pixels_along_y_axis = (self._wfmask == False).sum(axis=0)
return n_pixels_along_y_axis, n_pixels_along_x_axis
def build_max_radius_and_pupil_in_imask(self):
        self._max_pupil_in_imask = self.get_centred_circular_mask_wrt_interferometer_mask()
        self._max_radius_in_imask = self._max_pupil_in_imask.radius()
# to be tested on the cplm_all_fixed file produced on 17/3
class ModeGenerator():
NORM_AT_THIS_CMD = 19 # such that wyko noise and saturation are avoided
VISIBLE_AT_THIS_CMD = 19 # related cmd for actuators visibility in the given mask
THRESHOLD_RMS = 0.5 # threshold for nasty actuators outside a given mask
def __init__(self, cpla, mcl):
self._cpla = cpla
self._mcl = mcl
self._n_of_act = self._cpla._wfs.shape[0]
self._build_intersection_mask()
def _build_intersection_mask(self):
self._imask = reduce(lambda a, b: np.ma.mask_or(
a, b), self._cpla._wfs[:, self.NORM_AT_THIS_CMD].mask)
def _check_actuators_visibility(self, cmd=None):
if cmd is None:
cmd = self.VISIBLE_AT_THIS_CMD
self._rms_wf = np.zeros(self._n_of_act)
for act in range(self._n_of_act):
self._rms_wf[act] = np.ma.array(data=self._cpla._wfs[act, cmd],
mask=self._pupil_mask).std()
def _show_actuators_visibility(self):
plt.figure()
plt.clf()
plt.ion()
plt.plot(self._rms_wf / 1.e-9, 'o', label='cmd=%d' %
self.VISIBLE_AT_THIS_CMD)
plt.xlabel('#N actuator', size=25)
plt.ylabel('Wavefront rms [nm]', size=25)
plt.grid()
plt.legend(loc='best')
def _build_valid_actuators_list(self, cmd=None):
self._check_actuators_visibility(cmd)
self._acts_in_pupil = np.where(
self._rms_wf > self.THRESHOLD_RMS * self._rms_wf.max())[0]
self._n_of_selected_acts = len(self._acts_in_pupil)
def _normalize_influence_function(self, act):
return (self._cpla._wfs[act, self.NORM_AT_THIS_CMD][self._pupil_mask == False] /
self._mcl._deflection[act, self.NORM_AT_THIS_CMD]).data
def _build_interaction_matrix(self):
if self._acts_in_pupil is None:
selected_act_list = self._cpla._actuators_list
else:
selected_act_list = self._acts_in_pupil
self._im = np.column_stack([self._normalize_influence_function(
act) for act in selected_act_list])
def _build_reconstruction_matrix(self):
self._rec = np.linalg.pinv(self._im)
def compute_reconstructor(self, mask_obj=None):
# TODO: check that mask.shape is equal to self._imask.shape
# WARNING: zernike_generator uses the pupil mask as the object!!!
# while self._pupil_mask is a bool array!
if mask_obj is None:
mask = self._imask # bool array
else:
self._pupil_mask_obj = mask_obj
mask = self._pupil_mask_obj.mask()
assert self._imask.shape == mask.shape, f"mask has not the same dimension of self._imask!\nGot:{mask.shape}\nShould be:{self._imask.shape}"
self._pupil_mask = np.ma.mask_or(self._imask, mask)
self._build_valid_actuators_list()
self._build_interaction_matrix()
self._build_reconstruction_matrix()
def generate_mode(self, wfmap):
self._wfmode = np.ma.array(data=wfmap, mask=self._pupil_mask)
def generate_zernike_mode(self, j, AmpInMeters):
zg = ZernikeGenerator(self._pupil_mask_obj)
self._wfmode = np.zeros(self._pupil_mask.shape)
self._wfmode = np.ma.array(data=self._wfmode, mask=self._pupil_mask)
z_mode = zg.getZernike(j)
a = (z_mode.mask == False).sum()
b = (self._wfmode.mask == False).sum()
        assert a == b, f"zernike valid points: {a} wfmode valid points: {b}\nShould be equal!"
# should be useless
unmasked_index_wf = np.ma.where(self._wfmode.mask == False)
unmasked_index_zernike = np.ma.where(z_mode.mask == False)
self._wfmode[unmasked_index_wf[0], unmasked_index_wf[1]
] = z_mode.data[unmasked_index_zernike[0], unmasked_index_zernike[1]]
self._wfmode = self._wfmode * AmpInMeters
def generate_tilt(self):
self._wfmode = np.tile(np.linspace(-100.e-9, 100.e-9, 640), (486, 1))
self._wfmode = np.ma.array(data=self._wfmode, mask=self._pupil_mask)
def get_position_cmds_from_wf(self, wfmap=None):
if wfmap is None:
wfmap = self._wfmode
pos = np.dot(self._rec, wfmap[self._pupil_mask == False])
# check and clip cmds
# should I clip voltage or stroke cmds?
# act's stroke increases when moved with its neighbour
self._clip_recorder = np.zeros((self._n_of_selected_acts, 2))
for idx in range(len(pos)):
max_stroke = np.max(
self._mcl._deflection[self._acts_in_pupil[idx]])
min_stroke = np.min(
self._mcl._deflection[self._acts_in_pupil[idx]])
if(pos[idx] > max_stroke):
pos[idx] = max_stroke
self._clip_recorder[idx
] = self._acts_in_pupil[idx], pos[idx]
print('act%d reached max stroke' % self._acts_in_pupil[idx])
if(pos[idx] < min_stroke):
pos[idx] = min_stroke
self._clip_recorder[idx
] = self._acts_in_pupil[idx], pos[idx]
print('act%d reached min stroke' % self._acts_in_pupil[idx])
return pos
def build_fitted_wavefront(self, wfmap=None):
if wfmap is None:
wfmap = self._wfmode
pos_from_wf = self.get_position_cmds_from_wf(wfmap)
self._wffitted = np.zeros(
(self._cpla._wfs.shape[2], self._cpla._wfs.shape[3]))
self._wffitted[self._pupil_mask == False] = np.dot(
self._im, pos_from_wf)
self._wffitted = np.ma.array(
data=self._wffitted, mask=self._pupil_mask)
def plot_generated_and_expected_wf(self):
plt.figure()
plt.clf()
plt.imshow(self._wfmode / 1.e-9)
plt.colorbar(label='[nm]')
plt.title('Generated Mode', size=25)
plt.figure()
plt.clf()
plt.imshow(self._wffitted / 1.e-9)
plt.colorbar(label='[nm]')
plt.title('Fitted Mode', size=25)
plt.figure()
plt.clf()
plt.imshow((self._wffitted - self._wfmode) / 1.e-9)
plt.colorbar(label='[nm]')
plt.title('Mode difference', size=25)
print("Expectations:")
amp = self._wffitted.std()
amp = amp / 1.e-9
print("mode amplitude: %g nm rms " % amp)
fitting_error = (self._wffitted - self._wfmode).std()
fitting_error = fitting_error / 1.e-9
print("fitting error: %g nm rms " % fitting_error)
def vector_to_map(self, wf_vector):
mappa = np.zeros(
(self._cpla._wfs.shape[2], self._cpla._wfs.shape[3]))
mappa[self._pupil_mask == False] = wf_vector
return np.ma.array(data=mappa, mask=self._pupil_mask)
def _show_clipped_act(self):
for idx in range(self._n_of_selected_acts):
if(self._clip_recorder[idx][-1] != 0):
print('Act %d' % self._clip_recorder[idx][0]
+ ' clipped to %g [m]' % self._clip_recorder[idx][-1])
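# Hedged usage sketch: how the classes above fit together. The `cpla` (influence
# function measurements) and `mcl` (command/position calibration) objects are
# assumed to be already available; their loading API lives elsewhere in tesi_ao.
def _example_mode_generation_workflow(cpla, mcl):
    mg = ModeGenerator(cpla, mcl)
    # restrict the reconstructor to a centred circular pupil
    pmb = PupilMaskBuilder(mg._imask)
    pupil = pmb.get_centred_circular_mask_wrt_interferometer_mask()
    mg.compute_reconstructor(mask_obj=pupil)
    # generate a Zernike focus term (j = 4) with 100 nm amplitude and fit it
    mg.generate_zernike_mode(4, 100.e-9)
    mg.build_fitted_wavefront()
    # actuator positions that best reproduce the requested wavefront
    return mg.get_position_cmds_from_wf()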
# class ShapeReconstructionCommands():
# '''
# the aim of this class is to get new flat reference
# shape commands for DM, erasing any membrane deformations
# as far as possible
# '''
# TIME_OUT = 10
#
# def __init__(self, interferometer, mems_deformable_mirror):
# self._interf = interferometer
# self._bmc = mems_deformable_mirror
#
# def _get_new_reference_cmds(self, mcl, mg):
#
# Nacts = self._bmc.get_number_of_actuators()
# cmd0 = np.zeros(Nacts)
# self._bmc.set_shape(cmd0)
# wf_meas = self._interf.wavefront(timeout_in_sec=self.TIME_OUT)
# mg._imask = wf_meas.mask
# mg.compute_reconstructor()
# # compute positions from reconstructor
# pos = np.dot(mg._rec, wf_meas.compressed())
# pos_of_all_acts = np.zeros(Nacts)
# pos_of_all_acts[mg._acts_in_pupil] = pos
# # compute position from bmc cmds
# bmc_cmds = self._bmc.get_shape()
# bmc_pos = np.zeros(Nacts)
# for i in range(Nacts):
# bmc_pos[i] = mcl._finter[i](bmc_cmds[i])
# # compute required cmd
# delta_pos = bmc_pos - pos_of_all_acts
# delta_cmd = np.zeros(Nacts)
# for i in range(Nacts):
# delta_cmd[i] = mcl._sampled_p2c(i, delta_pos[i])
# self._bmc.set_shape(delta_cmd)
# return delta_cmd
class ModeMeasurer():
# fraction of valid pixels in wf measures: avoids nasty maps
THRESHOLD_RATIO = 0.99
fnpath = 'prova/static_zernike_modes/'
ffmt = '.png'
AmpInNanometer = 100
TIME_OUT = 10 # s
def __init__(self, interferometer, mems_deformable_mirror):
self._interf = interferometer
self._bmc = mems_deformable_mirror
def execute_measure(self, mcl, mg, pos=None):
if pos is None:
pos = mg.get_position_cmds_from_wf()
flat_cmd = np.zeros(self._bmc.get_number_of_actuators())
self._bmc.set_shape(flat_cmd)
#ref_cmd = self._bmc.get_reference_shape()
expected_valid_points = (mg._pupil_mask == False).sum()
wfflat = self._interf.wavefront(timeout_in_sec=self.TIME_OUT)
wfflat = np.ma.array(data=wfflat, mask=mg._pupil_mask)
# avoid nasty wf maps
measured_valid_points = (wfflat.mask == False).sum()
ratio = measured_valid_points / expected_valid_points
while(ratio < self.THRESHOLD_RATIO):
print('Warning: Nasty map acquired!Reloading...')
wfflat = self._interf.wavefront(timeout_in_sec=self.TIME_OUT)
wfflat = np.ma.array(data=wfflat, mask=mg._pupil_mask)
measured_valid_points = (wfflat.mask == False).sum()
ratio = measured_valid_points / expected_valid_points
act_list = mg._acts_in_pupil
cmd = np.zeros(self._bmc.get_number_of_actuators())
# TODO: clip voltage!
assert len(act_list) == len(
pos), "Error: act_list and pos must have the same shape!"
for idx, act in enumerate(act_list):
cmd[int(act)] = mcl.linear_p2c(int(act), pos[idx])
# for idx in range(len(pos)):
# cmd[act_list[idx]] = mcl.linear_p2c(act_list[idx], pos[idx])
        # as expected, if I clip in voltage...
# ValueError: A value in x_new is above the interpolation range.
# volt_control = cmd[act_list[idx]] + ref_cmd[act_list[idx]]
# if(volt_control > 1.):
# print('act%d reaches min stroke!' % act_list[idx])
# cmd[act_list[idx]] = 1. - ref_cmd[act_list[idx]]
# if(volt_control < 0.):
# print('act%d reaches max stroke!' % act_list[idx])
# cmd[act_list[idx]] = 0. - ref_cmd[act_list[idx]]
self._bmc.set_shape(cmd)
#_get_wavefront_flat_subtracted
wfflatsub = self._interf.wavefront(
timeout_in_sec=self.TIME_OUT) - wfflat
self._wfmeas = wfflatsub - np.ma.median(wfflatsub)
self._wfmeas = np.ma.array(data=self._wfmeas, mask=mg._pupil_mask)
# avoid nasty wf maps
measured_valid_points = (self._wfmeas.mask == False).sum()
ratio = measured_valid_points / expected_valid_points
while(ratio < self.THRESHOLD_RATIO):
print('Warning: Nasty map acquired!Reloading...')
wfflatsub = self._interf.wavefront(
timeout_in_sec=self.TIME_OUT) - wfflat
self._wfmeas = wfflatsub - np.ma.median(wfflatsub)
self._wfmeas = np.ma.array(data=self._wfmeas, mask=mg._pupil_mask)
measured_valid_points = (self._wfmeas.mask == False).sum()
ratio = measured_valid_points / expected_valid_points
def plot_expected_and_measured_mode(self, wfexpected):
plt.figure()
plt.clf()
plt.ion()
plt.imshow(self._wfmeas / 1.e-9)
plt.colorbar(label='[nm]')
plt.title('Observed Mode', size=25)
plt.figure()
plt.clf()
plt.ion()
plt.imshow((self._wfmeas - wfexpected) / 1.e-9)
plt.colorbar(label='[nm]')
plt.title('Difference Observed-Expected', size=25)
print("Observation:")
amp_mode = self._wfmeas.std()
amp_mode = amp_mode / 1.e-9
print("mode amplitude: %g nm rms " % amp_mode)
fitting_meas_error = (self._wfmeas - wfexpected).std()
fitting_meas_error = fitting_meas_error / 1.e-9
print("fitting error: %g nm rms " % fitting_meas_error)
def _test_modes_measure(self, mcl, mg, Nmodes):
j_modes = np.arange(2, Nmodes + 1, dtype='int')
Nmodes = len(j_modes)
a_j = self.AmpInNanometer * 1.e-9
A = self.AmpInNanometer
expected_modes_stat = np.zeros((Nmodes, 1, 2))
measured_modes_stat = np.zeros((Nmodes, 1, 2))
for idx, j in enumerate(j_modes):
j = int(j)
mg.generate_zernike_mode(j, a_j)
mg.build_fitted_wavefront()
exp_wf_rms = mg._wfmode.std()
exp_fitting_error = (mg._wffitted - mg._wfmode).std()
self.execute_measure(mcl, mg)
meas_wf_rms = self._wfmeas.std()
meas_fitting_error = (self._wfmeas - mg._wfmode).std()
plt.ioff()
plt.figure()
plt.clf()
plt.imshow(mg._wfmode / 1.e-9)
plt.colorbar(label='[nm]')
plt.title('Generated Mode')
plt.savefig(fname=self.fnpath + 'Z%d' %
j + '_1gen' + '_A%d' % A + self.ffmt, bbox_inches='tight')
plt.close()
plt.figure()
plt.clf()
plt.imshow((mg._wffitted) / 1.e-9)
plt.colorbar(label='[nm]')
plt.title('Fitted')
plt.savefig(fname=self.fnpath + 'Z%d' %
j + '_2fitted' + '_A%d' % A + self.ffmt, bbox_inches='tight')
plt.close()
plt.figure()
plt.clf()
plt.imshow((mg._wffitted - mg._wfmode) / 1.e-9)
plt.colorbar(label='[nm]')
a = exp_fitting_error / 1.e-9
plt.title('Fitted - Generated: rms %g nm' % a)
plt.savefig(fname=self.fnpath + 'Z%d' %
j + '_3fitgendiff' + '_A%d' % A + self.ffmt, bbox_inches='tight')
plt.close()
plt.figure()
plt.clf()
plt.imshow((self._wfmeas) / 1.e-9)
plt.colorbar(label='[nm]')
plt.title('Observed Mode')
plt.savefig(fname=self.fnpath + 'Z%d' %
j + '_4obs' + '_A%d' % A + self.ffmt, bbox_inches='tight')
plt.close()
plt.figure()
plt.clf()
plt.imshow((self._wfmeas - mg._wfmode) / 1.e-9)
plt.colorbar(label='[nm]')
a = meas_fitting_error / 1.e-9
plt.title('Observed - Generated: rms %g nm' % a)
plt.savefig(fname=self.fnpath + 'Z%d' %
j + '_5obsgendiff' + '_A%d' % A + self.ffmt, bbox_inches='tight')
plt.close()
expected_modes_stat[idx, 0, 0] = exp_wf_rms
expected_modes_stat[idx, 0, 1] = exp_fitting_error
measured_modes_stat[idx, 0, 0] = meas_wf_rms
measured_modes_stat[idx, 0, 1] = meas_fitting_error
return expected_modes_stat, measured_modes_stat
class SuitableActuatorsInPupilAnalyzer():
    # TODO: given the pupil and the modes to be generated,
    # determine the actuator list / threshold that minimises the observed fitting error
    # of the modes to be generated
# to be continued...
# white spectra
THRESHOLD_SPAN = np.array([0.01, 0.1, 0.2, 0.3, 0.4, 0.5])
Aj_SPAN = 100.e-9 * np.arange(1, 6) # meters
def __init__(self, mcl, mg, mm, pupil_mask_obj):
self._calibration = mcl
self._mode_generator = mg
self._mode_measurer = mm
self._pupil_mask = pupil_mask_obj
def _test_measure(self, NumOfZmodes):
# except Z1
jmodes = np.arange(2, NumOfZmodes + 1)
self._generated_jmodes = jmodes
num_of_gen_modes = len(jmodes)
num_of_threshold = len(self.THRESHOLD_SPAN)
num_of_ampj = len(self.Aj_SPAN)
frame_shape = self._pupil_mask.mask().shape
        # for a given threshold, mode and amplitude, measure the
        # expected and the measured fitting error
self._fitting_sigmas = np.zeros(
(num_of_threshold, num_of_ampj, num_of_gen_modes, 2))
self._wfs_gen = np.ma.zeros(
(num_of_threshold, num_of_ampj, num_of_gen_modes, frame_shape[0], frame_shape[1]))
self._wfs_fitted = np.ma.zeros(
(num_of_threshold, num_of_ampj, num_of_gen_modes, frame_shape[0], frame_shape[1]))
self._wfs_meas = np.ma.zeros(
(num_of_threshold, num_of_ampj, num_of_gen_modes, frame_shape[0], frame_shape[1]))
self._valid_act_per_thres = []
for thres_idx, threshold in enumerate(self.THRESHOLD_SPAN):
print("Threshold set to:%g" % threshold)
self._mode_generator.THRESHOLD_RMS = threshold
self._mode_generator.compute_reconstructor(
mask_obj=self._pupil_mask)
self._valid_act_per_thres.append(
self._mode_generator._acts_in_pupil)
for amp_idx, aj in enumerate(self.Aj_SPAN):
print("Generating Zmodes with amplitude[m] set to: %g" % aj)
for j_idx, j in enumerate(jmodes):
self._mode_generator.generate_zernike_mode(int(j), aj)
self._mode_generator.build_fitted_wavefront()
# expected_amplitude = (self._mode_generator._wffitted).std()
expected_fitting_error = (
self._mode_generator._wffitted - self._mode_generator._wfmode).std()
self._mode_measurer.execute_measure(
self._calibration, self._mode_generator)
# measured_amplitude = (self._mode_measurer._wfmeas).std()
measured_fitting_error = (
self._mode_measurer._wfmeas - self._mode_generator._wfmode).std()
self._fitting_sigmas[thres_idx, amp_idx,
j_idx] = expected_fitting_error, measured_fitting_error
self._wfs_gen[thres_idx, amp_idx,
j_idx] = self._mode_generator._wfmode
self._wfs_fitted[thres_idx, amp_idx,
j_idx] = self._mode_generator._wffitted
self._wfs_meas[thres_idx, amp_idx,
j_idx] = self._mode_measurer._wfmeas
def _show_fitting_errors_for(self, threshold, amplitude, jmode):
thres_idx = np.where(self.THRESHOLD_SPAN == threshold)[0][0]
amp_idx = np.where(self.Aj_SPAN == amplitude)[0][0]
j_idx = np.where(self._generated_jmodes == jmode)[0][0]
print("Threshold = {}; Amplitude[m] = {}; Mode = Z{}".format(
threshold, amplitude, jmode))
print("Expected fitting error[m] = {} \nMeasured fitting error[m] = {} ".format(
self._fitting_sigmas[thres_idx, amp_idx, j_idx, 0], self._fitting_sigmas[thres_idx, amp_idx, j_idx, 1]))
def _test_recontruct_zmodes_up_to(self, NumOfZmodes, threshold, AmplitudeInMeters):
'''
        For a fixed threshold and AmplitudeInMeters, reconstructs
        the first NumOfZmodes Zernike modes that the MEMS is
        potentially able to reproduce
'''
jmodes = np.arange(2, NumOfZmodes + 1)
self._expected_fitting_error = np.zeros(len(jmodes))
self._mode_generator.THRESHOLD_RMS = threshold
self._mode_generator.compute_reconstructor(
mask_obj=self._pupil_mask)
for idx, j in enumerate(jmodes):
self._mode_generator.generate_zernike_mode(
int(j), AmplitudeInMeters)
self._mode_generator.build_fitted_wavefront()
self._expected_fitting_error[idx] = (
self._mode_generator._wffitted - self._mode_generator._wfmode).std()
print(self._mode_generator._acts_in_pupil)
print('Suitable actuators #N = %d' %
len(self._mode_generator._acts_in_pupil))
plt.figure()
plt.clf()
plt.plot(jmodes, self._expected_fitting_error /
1.e-9, 'bo-', label='expected')
plt.title('expected fitting error for: amp = %g[m]' %
AmplitudeInMeters + ' threshold = %g' % threshold, size=25)
plt.xlabel(r'$Z_j$', size=25)
plt.ylabel(r'$WF_{fit}-WF_{gen} rms [nm]$', size=25)
plt.grid()
plt.legend(loc='best')
return self._expected_fitting_error
def _test_compute_exp_fitting_err_up_to(self, NumOfZmodes, AmplitudeInMeters):
'''
        Determine up to which Zernike mode Zj the MEMS is potentially able to reproduce,
        as the number of actuators (i.e. the visibility threshold) and the
        mode amplitude vary
'''
jmodes = np.arange(2, NumOfZmodes + 1)
num_of_jmodes = len(jmodes)
num_of_threshold = len(self.THRESHOLD_SPAN)
num_of_valid_acts = np.zeros(len(self.THRESHOLD_SPAN))
fitting_error = np.zeros((num_of_threshold, num_of_jmodes))
for thres_idx, threshold in enumerate(self.THRESHOLD_SPAN):
self._mode_generator.THRESHOLD_RMS = threshold
self._mode_generator.compute_reconstructor(
mask_obj=self._pupil_mask)
num_of_valid_acts[thres_idx] = len(
self._mode_generator._acts_in_pupil)
for j_idx, j in enumerate(jmodes):
self._mode_generator.generate_zernike_mode(
int(j), AmplitudeInMeters)
self._mode_generator.build_fitted_wavefront()
fitting_error[thres_idx, j_idx] = (
self._mode_generator._wffitted - self._mode_generator._wfmode).std()
plt.figure()
plt.clf()
plt.title('expected fitting error for: amp = %g[m]' %
AmplitudeInMeters)
self._test_plot_exp_fitting_err(fitting_error, num_of_valid_acts)
return fitting_error, num_of_valid_acts
def _test_plot_exp_fitting_err(self, fitting_error, num_of_valid_acts):
jmodes = 2 + np.arange(fitting_error.shape[1])
for thres_idx, thres in enumerate(self.THRESHOLD_SPAN):
plt.plot(jmodes, fitting_error[thres_idx] /
1.e-9, 'o-', label='thres=%g' % thres + '#Nact=%d' % num_of_valid_acts[thres_idx])
plt.xlabel(r'$Z_j$', size=25)
plt.ylabel(r'$WF_{fit}-WF_{gen} rms [nm]$', size=25)
plt.grid()
plt.legend(loc='best')
def _plot_fitting_errors_for(self, threshold, amplitude):
thres_idx = np.where(self.THRESHOLD_SPAN == threshold)[0][0]
amp_idx = np.where(self.Aj_SPAN == amplitude)[0][0]
plt.figure()
plt.clf()
exp_fitting_err = self._fitting_sigmas[thres_idx, amp_idx, :, 0]
meas_fitting_err = self._fitting_sigmas[thres_idx, amp_idx, :, 1]
exp_fitting_err = exp_fitting_err / 1.e-9
meas_fitting_err = meas_fitting_err / 1.e-9
plt.plot(self._generated_jmodes, exp_fitting_err,
'bo-', label='expected')
plt.plot(self._generated_jmodes, meas_fitting_err,
'ro-', label='measured')
plt.legend(loc='best')
plt.title('fitting error: amp[m]=%g' %
amplitude + ' threshold=%g' % threshold, size=25)
plt.xlabel('Zmode j index', size=25)
plt.ylabel('WF rms [nm]', size=25)
def save_results(self, fname):
# syntax error see astropy
hdr = fits.Header()
#hdr['CMASK'] = self._pupil_mask
#hdr['AMP_INM'] = self.Aj_SPAN
fits.writeto(fname, self._fitting_sigmas, hdr)
fits.append(fname, self.THRESHOLD_SPAN)
fits.append(fname, self.Aj_SPAN)
fits.append(fname, self._generated_jmodes)
fits.append(fname, self._pupil_mask.mask().astype(int))
fits.append(fname, self._wfs_gen)
fits.append(fname, self._wfs_fitted)
fits.append(fname, self._wfs_meas)
#fits.append(fname, self._valid_act_per_thres)
@staticmethod
def load(fname):
header = fits.getheader(fname)
hduList = fits.open(fname)
sigma_data = hduList[0].data
thres_data = hduList[1].data
amp_data = hduList[2].data
jmodes_data = hduList[3].data
cmask2d = hduList[4].data.astype(bool)
wfs_gen_data = hduList[5].data
wfs_fit_data = hduList[6].data
wfs_meas_data = hduList[7].data
wfs_mask = np.ma.zeros(wfs_gen_data.shape)
wfs_mask[:, :, :] = cmask2d
ma_wfsgen = np.ma.array(data=wfs_gen_data, mask=wfs_mask)
ma_wfsfit = np.ma.array(data=wfs_fit_data, mask=wfs_mask)
ma_wfsmeas = np.ma.array(data=wfs_meas_data, mask=wfs_mask)
#valid_act_data = hduList[4].data
return{'sigmas': sigma_data,
'thres': thres_data,
'amp': amp_data,
'jmode': jmodes_data,
'wfsmask': wfs_mask,
'wfsgen': ma_wfsgen,
'wfsfit': ma_wfsfit,
'wfsmeas': ma_wfsmeas
#'valid_acts': valid_act_data
}
class _test_saipa_load():
def __init__(self, fname):
res = SuitableActuatorsInPupilAnalyzer.load(fname)
self._sigmas = res['sigmas']
self._threshold_span = res['thres']
self._amplitude_span = res['amp']
self._jmodes = res['jmode']
self._wfs_mask = res['wfsmask']
self._wfs_gen = res['wfsgen']
self._wfs_fitted = res['wfsfit']
self._wfs_meas = res['wfsmeas']
self._wfs_mask = res['wfsmask']
#self._act_list_per_thres = res['valid_acts']
def _test_plot_meas_vs_exp_fitting_errors(self):
for amp_idx, amplitude in enumerate(self._amplitude_span):
plt.figure()
plt.clf()
for thres_idx, threshold in enumerate(self._threshold_span):
exp_fitting_err = self._sigmas[thres_idx, amp_idx, :, 0]
meas_fitting_err = self._sigmas[thres_idx,
amp_idx, :, 1]
exp_fitting_err = exp_fitting_err / 1.e-9
meas_fitting_err = meas_fitting_err / 1.e-9
plt.plot(self._jmodes, exp_fitting_err,
'.--', label='exp: threshold=%g' % threshold)
plt.plot(self._jmodes, meas_fitting_err,
'o-', label='meas: threshold=%g' % threshold, color=plt.gca().lines[-1].get_color())
plt.legend(loc='best')
plt.title('fitting error: amp[m]=%g' %
amplitude, size=25)
plt.xlabel(r'$Z_j$', size=25)
plt.ylabel(r'$WF_-WF_{gen} rms [nm]$', size=25)
plt.grid()
class TestSvd():
def __init__(self, mg):
self.mg = mg
# mg = ModeGenerator
self.u, self.s, self.vh = np.linalg.svd(
self.mg._im, full_matrices=False)
def autovettori(self, eigenvalue_index):
wf = np.zeros((486, 640))
wf[self.mg._imask == False] = np.dot(
self.mg._im, self.vh.T[:, eigenvalue_index])
return np.ma.array(wf, mask=self.mg._imask)
def rec(self, eigenvalue_to_use):
large = np.zeros(self.s.shape).astype(bool)
large[eigenvalue_to_use] = True
s = np.divide(1, self.s, where=large)
s[~large] = 0
res = np.matmul(np.transpose(self.vh), np.multiply(
s[..., np.newaxis], np.transpose(self.u)))
return res
def animate(self, interval=100):
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation
fig = plt.figure()
self._ani_index = 0
im = plt.imshow(self.autovettori(0), animated=True)
def updatefig(*args):
self._ani_index += 1
im.set_array(self.autovettori(self._ani_index % 140))
plt.title("Eigenmode %d" % self._ani_index)
return im,
self._ani = animation.FuncAnimation(
fig, updatefig, interval=interval, blit=True)
plt.show()
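# --- Illustrative sketch (not part of the original module) ---
# TestSvd.rec() assembles a truncated pseudo-inverse V * diag(1/s) * U^T from the SVD
# of the interaction matrix, keeping only the singular values listed in
# `eigenvalue_to_use`. The self-contained check below repeats the same construction
# on a random stand-in matrix; all names here are hypothetical.
def _example_truncated_pseudo_inverse():
    import numpy as np
    im = np.random.randn(20, 10)              # stand-in for ModeGenerator._im
    u, s, vh = np.linalg.svd(im, full_matrices=False)
    large = np.zeros(s.shape, dtype=bool)
    large[np.arange(len(s))] = True           # keep every mode -> plain pseudo-inverse
    inv_s = np.divide(1, s, where=large)
    inv_s[~large] = 0
    rec = vh.T @ (inv_s[..., np.newaxis] * u.T)
    assert np.allclose(rec, np.linalg.pinv(im))
    return rec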
class TestRepeatedMeasures():
ffmt = '.fits'
fpath = 'prova/misure_ripetute/trm_'
def __init__(self, interferometer, mems_deformable_mirror):
self._interf = interferometer
self._bmc = mems_deformable_mirror
def _execute_repeated_measure(self, act_list, Ntimes):
        act_list = np.array(act_list)
"""Functions for working with features in a raster dataset."""
import logging
import warnings
import math
import os
import numpy as np
import rasterio
from rasterio._features import _shapes, _sieve, _rasterize, _bounds
from rasterio.crs import CRS
from rasterio.dtypes import validate_dtype, can_cast_dtype, get_minimum_dtype
from rasterio.enums import MergeAlg
from rasterio.env import ensure_env
from rasterio.rio.helpers import coords
from rasterio.transform import Affine
from rasterio.transform import IDENTITY, guard_transform
from rasterio.windows import Window
from rasterio import warp
log = logging.getLogger(__name__)
@ensure_env
def geometry_mask(
geometries,
out_shape,
transform,
all_touched=False,
invert=False):
"""Create a mask from shapes.
By default, mask is intended for use as a
numpy mask, where pixels that overlap shapes are False.
Parameters
----------
geometries : iterable over geometries (GeoJSON-like objects)
out_shape : tuple or list
Shape of output numpy ndarray.
transform : Affine transformation object
Transformation from pixel coordinates of `source` to the
coordinate system of the input `shapes`. See the `transform`
property of dataset objects.
all_touched : boolean, optional
If True, all pixels touched by geometries will be burned in. If
false, only pixels whose center is within the polygon or that
are selected by Bresenham's line algorithm will be burned in.
invert: boolean, optional
If True, mask will be True for pixels that overlap shapes.
False by default.
Returns
-------
out : numpy ndarray of type 'bool'
Result
"""
fill, mask_value = (0, 1) if invert else (1, 0)
return rasterize(
geometries,
out_shape=out_shape,
transform=transform,
all_touched=all_touched,
fill=fill,
default_value=mask_value).astype('bool')
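# --- Illustrative usage sketch (the geometry, shape and transform are made up) ---
# geometry_mask() is typically fed GeoJSON-like geometries plus the shape and
# transform of the raster being masked; the boolean result can be used directly
# as the mask of a numpy masked array.
def _example_geometry_mask():
    import numpy as np
    from rasterio.transform import from_origin
    geom = {'type': 'Polygon',
            'coordinates': [[(0, 0), (0, 5), (5, 5), (5, 0), (0, 0)]]}
    transform = from_origin(0, 10, 1, 1)      # 1x1 pixels, upper-left corner at (0, 10)
    mask = geometry_mask([geom], out_shape=(10, 10), transform=transform)
    return np.ma.array(np.ones((10, 10)), mask=mask)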
@ensure_env
def shapes(source, mask=None, connectivity=4, transform=IDENTITY):
"""Yield (polygon, value for each set of adjacent pixels of the same value.
Parameters
----------
source : array or dataset object opened in 'r' mode or Band or tuple(dataset, bidx)
Data type must be one of rasterio.int16, rasterio.int32,
rasterio.uint8, rasterio.uint16, or rasterio.float32.
mask : numpy ndarray or rasterio Band object, optional
Values of False or 0 will be excluded from feature generation
Must evaluate to bool (rasterio.bool_ or rasterio.uint8)
connectivity : int, optional
Use 4 or 8 pixel connectivity for grouping pixels into features
transform : Affine transformation, optional
If not provided, feature coordinates will be generated based on pixel
coordinates
Yields
-------
tuple
A pair of (polygon, value) for each feature found in the image.
Polygons are GeoJSON-like dicts and the values are the associated value
from the image, in the data type of the image.
Note: due to floating point precision issues, values returned from a
floating point image may not exactly match the original values.
Notes
-----
The amount of memory used by this algorithm is proportional to the number
and complexity of polygons produced. This algorithm is most appropriate
for simple thematic data. Data with high pixel-to-pixel variability, such
as imagery, may produce one polygon per pixel and consume large amounts of
memory.
"""
transform = guard_transform(transform)
for s, v in _shapes(source, mask, connectivity, transform):
yield s, v
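# --- Illustrative usage sketch (synthetic array; values are arbitrary) ---
# shapes() walks an array and yields one (GeoJSON geometry, value) pair per
# connected region of equal pixel values, here with the default identity transform.
def _example_shapes():
    import numpy as np
    image = np.zeros((4, 4), dtype=np.uint8)
    image[:2, :2] = 1                         # a single 2x2 block of ones
    # Expect two polygons: the block of ones and the surrounding zeros.
    return [(geom, value) for geom, value in shapes(image)]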
@ensure_env
def sieve(source, size, out=None, mask=None, connectivity=4):
"""Replace small polygons in `source` with value of their largest neighbor.
Polygons are found for each set of neighboring pixels of the same value.
Parameters
----------
source : array or dataset object opened in 'r' mode or Band or tuple(dataset, bidx)
Must be of type rasterio.int16, rasterio.int32, rasterio.uint8,
rasterio.uint16, or rasterio.float32
size : int
minimum polygon size (number of pixels) to retain.
out : numpy ndarray, optional
Array of same shape and data type as `source` in which to store results.
mask : numpy ndarray or rasterio Band object, optional
Values of False or 0 will be excluded from feature generation
Must evaluate to bool (rasterio.bool_ or rasterio.uint8)
connectivity : int, optional
Use 4 or 8 pixel connectivity for grouping pixels into features
Returns
-------
out : numpy ndarray
Result
Notes
-----
GDAL only supports values that can be cast to 32-bit integers for this
operation.
The amount of memory used by this algorithm is proportional to the number
and complexity of polygons found in the image. This algorithm is most
appropriate for simple thematic data. Data with high pixel-to-pixel
variability, such as imagery, may produce one polygon per pixel and consume
large amounts of memory.
"""
if out is None:
out = np.zeros(source.shape, source.dtype)
_sieve(source, size, out, mask, connectivity)
return out
@ensure_env
def rasterize(
shapes,
out_shape=None,
fill=0,
out=None,
transform=IDENTITY,
all_touched=False,
merge_alg=MergeAlg.replace,
default_value=1,
dtype=None):
"""Return an image array with input geometries burned in.
Parameters
----------
shapes : iterable of (geometry, value) pairs or iterable over
geometries. `geometry` can either be an object that implements
the geo interface or GeoJSON-like object.
out_shape : tuple or list with 2 integers
Shape of output numpy ndarray.
fill : int or float, optional
Used as fill value for all areas not covered by input
geometries.
out : numpy ndarray, optional
Array of same shape and data type as `source` in which to store
results.
transform : Affine transformation object, optional
Transformation from pixel coordinates of `source` to the
coordinate system of the input `shapes`. See the `transform`
property of dataset objects.
all_touched : boolean, optional
If True, all pixels touched by geometries will be burned in. If
false, only pixels whose center is within the polygon or that
are selected by Bresenham's line algorithm will be burned in.
merge_alg : str, optional
Merge algorithm to use. One of:
MergeAlg.replace (default): the new value will overwrite the
existing value.
MergeAlg.add: the new value will be added to the existing raster.
default_value : int or float, optional
Used as value for all geometries, if not provided in `shapes`.
dtype : rasterio or numpy data type, optional
Used as data type for results, if `out` is not provided.
Returns
-------
out : numpy ndarray
Results
Notes
-----
Valid data types for `fill`, `default_value`, `out`, `dtype` and
shape values are rasterio.int16, rasterio.int32, rasterio.uint8,
rasterio.uint16, rasterio.uint32, rasterio.float32,
rasterio.float64.
"""
valid_dtypes = (
'int16', 'int32', 'uint8', 'uint16', 'uint32', 'float32', 'float64'
)
def format_invalid_dtype(param):
return '{0} dtype must be one of: {1}'.format(
param, ', '.join(valid_dtypes)
)
def format_cast_error(param, dtype):
return '{0} cannot be cast to specified dtype: {1}'.format(param, dtype)
if fill != 0:
fill_array = np.array([fill])
if not validate_dtype(fill_array, valid_dtypes):
raise ValueError(format_invalid_dtype('fill'))
if dtype is not None and not can_cast_dtype(fill_array, dtype):
raise ValueError(format_cast_error('fill', dtype))
if default_value != 1:
default_value_array = np.array([default_value])
if not validate_dtype(default_value_array, valid_dtypes):
raise ValueError(format_invalid_dtype('default_value'))
if dtype is not None and not can_cast_dtype(default_value_array, dtype):
            raise ValueError(format_cast_error('default_value', dtype))
if dtype is not None and np.dtype(dtype).name not in valid_dtypes:
raise ValueError(format_invalid_dtype('dtype'))
valid_shapes = []
shape_values = []
for index, item in enumerate(shapes):
if isinstance(item, (tuple, list)):
geom, value = item
else:
geom = item
value = default_value
geom = getattr(geom, '__geo_interface__', None) or geom
# geom must be a valid GeoJSON geometry type and non-empty
if not is_valid_geom(geom):
raise ValueError(
'Invalid geometry object at index {0}'.format(index)
)
if geom['type'] == 'GeometryCollection':
# GeometryCollections need to be handled as individual parts to
# avoid holes in output:
# https://github.com/mapbox/rasterio/issues/1253.
# Only 1-level deep since GeoJSON spec discourages nested
# GeometryCollections
for part in geom['geometries']:
valid_shapes.append((part, value))
else:
valid_shapes.append((geom, value))
shape_values.append(value)
if not valid_shapes:
raise ValueError('No valid geometry objects found for rasterize')
shape_values = np.array(shape_values)
if not validate_dtype(shape_values, valid_dtypes):
raise ValueError(format_invalid_dtype('shape values'))
if dtype is None:
dtype = get_minimum_dtype(np.append(shape_values, fill))
elif not can_cast_dtype(shape_values, dtype):
raise ValueError(format_cast_error('shape values', dtype))
if out is not None:
if np.dtype(out.dtype).name not in valid_dtypes:
raise ValueError(format_invalid_dtype('out'))
if not can_cast_dtype(shape_values, out.dtype):
raise ValueError(format_cast_error('shape values', out.dtype.name))
elif out_shape is not None:
if len(out_shape) != 2:
raise ValueError('Invalid out_shape, must be 2D')
out = np.empty(out_shape, dtype=dtype)
out.fill(fill)
else:
raise ValueError('Either an out_shape or image must be provided')
if min(out.shape) == 0:
raise ValueError("width and height must be > 0")
transform = guard_transform(transform)
_rasterize(valid_shapes, out, transform, all_touched, merge_alg)
return out
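# --- Illustrative usage sketch (geometry, burn value and transform are made up) ---
# rasterize() burns each (geometry, value) pair into a new array; pixels not
# covered by any geometry keep the `fill` value.
def _example_rasterize():
    from rasterio.transform import from_origin
    geom = {'type': 'Polygon',
            'coordinates': [[(0, 0), (0, 3), (3, 3), (3, 0), (0, 0)]]}
    transform = from_origin(0, 10, 1, 1)
    return rasterize([(geom, 255)], out_shape=(10, 10),
                     transform=transform, fill=0, dtype='uint8')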
def bounds(geometry, north_up=True, transform=None):
"""Return a (left, bottom, right, top) bounding box.
From Fiona 1.4.8. Modified to return bbox from geometry if available.
Parameters
----------
geometry: GeoJSON-like feature (implements __geo_interface__),
feature collection, or geometry.
Returns
-------
tuple
Bounding box: (left, bottom, right, top)
"""
geometry = getattr(geometry, '__geo_interface__', None) or geometry
if 'bbox' in geometry:
return tuple(geometry['bbox'])
geom = geometry.get('geometry') or geometry
return _bounds(geom, north_up=north_up, transform=transform)
def geometry_window(dataset, shapes, pad_x=0, pad_y=0, north_up=True,
rotated=False, pixel_precision=3):
"""Calculate the window within the raster that fits the bounds of the
geometry plus optional padding. The window is the outermost pixel indices
that contain the geometry (floor of offsets, ceiling of width and height).
If shapes do not overlap raster, a WindowError is raised.
Parameters
----------
dataset: dataset object opened in 'r' mode
Raster for which the mask will be created.
shapes: iterable over geometries.
A geometry is a GeoJSON-like object or implements the geo interface.
Must be in same coordinate system as dataset.
pad_x: float
Amount of padding (as fraction of raster's x pixel size) to add to left
and right side of bounds.
pad_y: float
Amount of padding (as fraction of raster's y pixel size) to add to top
and bottom of bounds.
north_up: bool
If True (default), the origin point of the raster's transform is the
northernmost point and y pixel values are negative.
rotated: bool
If true, some rotation terms exist in the dataset transform (this
requires special attention.)
pixel_precision: int
Number of places of rounding precision for evaluating bounds of shapes.
Returns
-------
window: rasterio.windows.Window instance
"""
if pad_x:
pad_x = abs(pad_x * dataset.res[0])
if pad_y:
pad_y = abs(pad_y * dataset.res[1])
if not rotated:
all_bounds = [bounds(shape, north_up=north_up) for shape in shapes]
lefts, bottoms, rights, tops = zip(*all_bounds)
left = min(lefts) - pad_x
right = max(rights) + pad_x
if north_up:
bottom = min(bottoms) - pad_y
top = max(tops) + pad_y
else:
bottom = max(bottoms) + pad_y
top = min(tops) - pad_y
else:
# get the bounds in the pixel domain by specifying a transform to the bounds function
all_bounds_px = [bounds(shape, transform=~dataset.transform) for shape in shapes]
# get left, right, top, and bottom as above
lefts, bottoms, rights, tops = zip(*all_bounds_px)
left = min(lefts) - pad_x
right = max(rights) + pad_x
top = min(tops) - pad_y
bottom = max(bottoms) + pad_y
# do some clamping if there are any values less than zero or greater than dataset shape
left = max(0, left)
top = max(0, top)
right = min(dataset.shape[1], right)
bottom = min(dataset.shape[0], bottom)
# convert the bounds back to the CRS domain
left, top = (left, top) * dataset.transform
right, bottom = (right, bottom) * dataset.transform
window = dataset.window(left, bottom, right, top)
window = window.round_offsets(op='floor', pixel_precision=pixel_precision)
window = window.round_shape(op='ceil', pixel_precision=pixel_precision)
# Make sure that window overlaps raster
raster_window = Window(0, 0, dataset.width, dataset.height)
# This will raise a WindowError if windows do not overlap
window = window.intersection(raster_window)
return window
def is_valid_geom(geom):
"""
Checks to see if geometry is a valid GeoJSON geometry type or
GeometryCollection.
Geometries must be non-empty, and have at least x, y coordinates.
Note: only the first coordinate is checked for validity.
Parameters
----------
geom: an object that implements the geo interface or GeoJSON-like object
Returns
-------
bool: True if object is a valid GeoJSON geometry type
"""
geom_types = {'Point', 'MultiPoint', 'LineString', 'MultiLineString',
'Polygon', 'MultiPolygon'}
if 'type' not in geom:
return False
try:
geom_type = geom['type']
if geom_type not in geom_types.union({'GeometryCollection'}):
return False
except TypeError:
return False
if geom_type in geom_types:
if 'coordinates' not in geom:
return False
coords = geom['coordinates']
if geom_type == 'Point':
# Points must have at least x, y
return len(coords) >= 2
if geom_type == 'MultiPoint':
# Multi points must have at least one point with at least x, y
return len(coords) > 0 and len(coords[0]) >= 2
if geom_type == 'LineString':
# Lines must have at least 2 coordinates and at least x, y for
# a coordinate
return len(coords) >= 2 and len(coords[0]) >= 2
if geom_type == 'MultiLineString':
# Multi lines must have at least one LineString
return (len(coords) > 0 and len(coords[0]) >= 2 and
len(coords[0][0]) >= 2)
if geom_type == 'Polygon':
# Polygons must have at least 1 ring, with at least 4 coordinates,
# with at least x, y for a coordinate
return (len(coords) > 0 and len(coords[0]) >= 4 and
len(coords[0][0]) >= 2)
if geom_type == 'MultiPolygon':
# Muti polygons must have at least one Polygon
return (len(coords) > 0 and len(coords[0]) > 0 and
len(coords[0][0]) >= 4 and len(coords[0][0][0]) >= 2)
if geom_type == 'GeometryCollection':
if 'geometries' not in geom:
return False
if not len(geom['geometries']) > 0:
# While technically valid according to GeoJSON spec, an empty
# GeometryCollection will cause issues if used in rasterio
return False
for g in geom['geometries']:
if not is_valid_geom(g):
return False # short-circuit and fail early
return True
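# --- Small sanity-check sketch (illustrative inputs only) ---
# is_valid_geom() only inspects the structure of the mapping, so it can be used to
# pre-filter user input before handing it to rasterize() or geometry_mask().
def _example_is_valid_geom():
    good = {'type': 'Point', 'coordinates': (102.0, 0.5)}
    bad = {'type': 'Polygon', 'coordinates': []}   # empty -> rejected
    assert is_valid_geom(good)
    assert not is_valid_geom(bad)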
def dataset_features(
src,
bidx=None,
sampling=1,
band=True,
as_mask=False,
with_nodata=False,
geographic=True,
precision=-1):
"""Yield GeoJSON features for the dataset
The geometries are polygons bounding contiguous regions of the same raster value.
Parameters
----------
src: Rasterio Dataset
bidx: int
band index
sampling: int (DEFAULT: 1)
Inverse of the sampling fraction; a value of 10 decimates
band: boolean (DEFAULT: True)
extract features from a band (True) or a mask (False)
as_mask: boolean (DEFAULT: False)
Interpret band as a mask and output only one class of valid data shapes?
with_nodata: boolean (DEFAULT: False)
Include nodata regions?
    geographic: bool (DEFAULT: True)
Output shapes in EPSG:4326? Otherwise use the native CRS.
precision: int (DEFAULT: -1)
Decimal precision of coordinates. -1 for full float precision output
Yields
------
GeoJSON-like Feature dictionaries for shapes found in the given band
"""
if bidx is not None and bidx > src.count:
raise ValueError('bidx is out of range for raster')
img = None
msk = None
# Adjust transforms.
transform = src.transform
if sampling > 1:
# Determine the target shape (to decimate)
shape = (int(math.ceil(src.height / sampling)),
int(math.ceil(src.width / sampling)))
# Calculate independent sampling factors
x_sampling = src.width / shape[1]
y_sampling = src.height / shape[0]
# Decimation of the raster produces a georeferencing
# shift that we correct with a translation.
transform *= Affine.translation(
src.width % x_sampling, src.height % y_sampling)
# And follow by scaling.
transform *= Affine.scale(x_sampling, y_sampling)
# Most of the time, we'll use the valid data mask.
# We skip reading it if we're extracting every possible
# feature (even invalid data features) from a band.
if not band or (band and not as_mask and not with_nodata):
if sampling == 1:
msk = src.read_masks(bidx)
else:
msk_shape = shape
if bidx is None:
msk = np.zeros(
(src.count,) + msk_shape, 'uint8')
else:
                msk = np.zeros(msk_shape, 'uint8')
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Apr 26 08:48:00 2018
@author: prmiles
"""
from pymcmcstat.structures.ParameterSet import ParameterSet
from pymcmcstat.samplers.Metropolis import Metropolis
import test.general_functions as gf
import unittest
from mock import patch
import numpy as np
# --------------------------
# Evaluation
# --------------------------
class UnpackSet(unittest.TestCase):
def test_unpack_set(self):
CL = {'theta':1.0, 'ss': 1.0, 'prior':0.0, 'sigma2': 0.0}
parset = ParameterSet(theta = CL['theta'], ss = CL['ss'], prior = CL['prior'], sigma2 = CL['sigma2'])
MA = Metropolis()
oldpar, ss, oldprior, sigma2 = MA.unpack_set(parset)
NL = {'theta':oldpar, 'ss': ss, 'prior':oldprior, 'sigma2': sigma2}
self.assertDictEqual(CL,NL)
# --------------------------
class CalculatePosteriorRatio(unittest.TestCase):
@classmethod
def setup_size_2(cls):
MA = Metropolis()
ss1 = np.array([1., 2.])
        ss2 = np.array([1.1, 2.4])
import numpy as np
import progressbar
import os
from sklearn.model_selection import train_test_split
ALL_CPU = -1
def read_dataset_and_reshape_for_conv(X_list=None, y_list=None, X_paths=None,
y_paths=None, validation_size=None):
"""
Reads X and y from given paths and reshapes them for applying in
convolutional networks.
If X_list and y_list are given, X_paths and y_paths are ignored.
Reshaping is done by splitting different letters in separate channels,
eg. letter 'A' has it's own channel, letter 'C' has it's own channel, etc.
:param X_list: list of X pileup dataset
:type X_list: list of np.ndarray
:param y_list: list of y pileup dataset
:type y_list: list of np.ndarray
:param X_paths: list of paths to X data
:type X_paths: list of str
:param y_paths: list of paths to y data
:type y_paths: list of str
:param validation_size: specifies percentage of dataset used for validation
:type validation_size: float
:return: If validation_size is None, returns just X and y reshaped. If
validation_size is float, returns a tuple in following order: (X, y,
X_train, X_validate, y_train, y_validate).
:rtype: tuple of np.ndarray
"""
if validation_size is not None:
if validation_size < 0 or validation_size > 1.0:
raise ValueError('Validation size must be float from [0, 1], but {}'
' given.'.format(validation_size))
if X_paths is not None:
if not len(X_paths) == 1:
raise ValueError(
'Validation size can only be provided if there is only '
'one X path and y_path.')
if not ((X_list is None and y_list is None)
or (X_paths is None and y_paths is None)):
raise ValueError('Either X_list and y_list or X_paths and y_paths '
'must be provided!')
if X_list is None and y_list is None:
X_list = [np.load(X_path) for X_path in X_paths]
y_list = [np.load(y_path) for y_path in y_paths]
reshaped_X_list, reshaped_y_list = list(), list()
for X, y in zip(X_list, y_list):
print('X shape before reshaping:', X.shape)
print('y shape before reshaping:', y.shape)
new_X = list()
neighbourhood_size = X[0].shape[0]
# Number of columns is equal to the number of letters in dataset (A, C,
# G, T, I, D, ...).
num_columns = X[0].shape[1]
num_data = X.shape[0]
with progressbar.ProgressBar(max_value=num_data) as progress_bar:
for i, xi in enumerate(X):
new_xi = np.dstack(
[xi[:, col_index].reshape(neighbourhood_size, 1)
for col_index in range(num_columns)]
)
new_X.append(new_xi)
progress_bar.update(i)
new_X = np.array(new_X)
X = new_X
print('X shape after reshaping:', X.shape)
print('y shape after reshaping:', y.shape)
reshaped_X_list.append(X), reshaped_y_list.append(y)
if validation_size is None:
return reshaped_X_list, reshaped_y_list
else:
# There is only one X and y (because, all datasets are concatenated
# for training).
X, y = reshaped_X_list[0], reshaped_y_list[0]
print('Splitting to train and validation set.')
X_train, X_validate, y_train, y_validate = train_test_split(
X, y, test_size=validation_size)
print('X_train shape:', X_train.shape)
print('X_validate shape:', X_validate.shape)
print('y_train:', y_train.shape)
print('y_validate:', y_validate.shape)
return X, y, X_train, X_validate, y_train, y_validate
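# --- Minimal sketch of the per-example reshaping step (synthetic, hypothetical sizes) ---
# Each pileup example of shape (neighbourhood_size, num_letters) is rearranged into a
# (neighbourhood_size, 1, num_letters) stack so that every letter (A, C, G, T, I, D, ...)
# ends up in its own channel, as expected by 2D convolutional layers.
def _example_reshape_single_pileup():
    import numpy as np
    neighbourhood_size, num_letters = 7, 6
    xi = np.random.rand(neighbourhood_size, num_letters)
    new_xi = np.dstack([xi[:, col].reshape(neighbourhood_size, 1)
                        for col in range(num_letters)])
    assert new_xi.shape == (neighbourhood_size, 1, num_letters)
    return new_xi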
def _calc_empty_rows(X):
"""
Calculates which rows in X are empty rows (i.e. all numbers in that row
are equal to 0).
:param X: 2-D data
:type X: np.ndarray
:return: 1-D array with 1s on positions which correspond to empty rows.
:rtype: np.ndarray
"""
empty_row = np.zeros((1, X.shape[1])) # size is second axis of X
empty_rows = [int(v) for v in np.all(empty_row == X, axis=1)]
return empty_rows
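# --- Illustrative check of _calc_empty_rows() on a tiny synthetic matrix ---
# Rows that are entirely zero are flagged with 1; those positions are skipped when
# neighbourhood examples are built further below.
def _example_calc_empty_rows():
    import numpy as np
    X = np.array([[0, 0, 0], [1, 2, 0], [0, 0, 0]])
    assert _calc_empty_rows(X) == [1, 0, 1]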
def create_dataset_with_neighbourhood(neighbourhood_size, mode, X_list=None,
y_list=None, X_paths=None,
y_paths=None, save_directory_path=None):
"""
Creates datasets by mixing all pileups with given neighbourhood_size.
If X_list and y_list are given, X_paths and y_paths are ignored.
Datasets are concatenated after extracting neighbourhood_size positions in
given datasets separately if 'training' mode is selected. If 'inference'
mode is selected, X_paths are assumed to be paths to different contigs of
same
Dataset at i-th position in X_paths should match given labels at i-th
position in y_paths.
If save_directory_path is provided, generated pileups are stored in that
directory.
:param neighbourhood_size: number of neighbours to use from one size (eg.
if you set this parameter to 3, it will take 3 neighbours from both
sides so total number of positions in one example will be 7 -
counting the middle position)
:type neighbourhood_size: int
:param mode: either 'training' or 'inference' string, representing the
mode for pileups generation
:type mode: str
:param X_list: list of X pileup dataset
:type X_list: list of np.ndarray
:param y_list: list of y pileup dataset
:type y_list: list of np.ndarray
:param X_paths: list of paths to X pileup dataset
:type X_paths: list of str
:param y_paths: list of paths to y pileup dataset
:type y_paths: list of str
:param save_directory_path: path to directory for storing dataset
:type save_directory_path: str
:return:
:rtype tuple of np.ndarray or tuple of np.array and list of str
"""
_check_mode(mode)
if not ((X_list is None and y_list is None)
or (X_paths is None and y_paths is None)):
raise ValueError('Either X_list and y_list or X_paths and y_paths '
'must be provided!')
# If training mode is selected, all pileups will be concatenated.
if mode == 'training':
total_pileups = 1
else:
if X_list is not None:
total_pileups = len(X_list)
else:
total_pileups = len(X_paths)
X_save_paths, y_save_paths = None, None
if save_directory_path is not None:
X_save_paths, y_save_paths = _generate_save_paths(neighbourhood_size,
save_directory_path,
total_pileups)
X_neighbourhood_list, y_neighbourhood_list = list(), list()
if X_list is None and y_list is None:
X_list = [np.load(X_path) for X_path in X_paths]
y_list = [np.load(y_path) for y_path in y_paths]
total_length = np.sum([curr_X.shape[0] for curr_X in X_list])
progress_counter = 0
with progressbar.ProgressBar(max_value=total_length) as progress_bar:
for pileup_pair_id, (curr_X, curr_y) in enumerate(zip(X_list, y_list)):
# Removing last column which contains everything which was not
# 'A' nor 'C' nor 'G' nor 'T'.
curr_y = curr_y[:, :-1]
new_curr_X, new_curr_y = list(), list()
empty_rows = _calc_empty_rows(curr_X)
print('Creating dataset with neighbourhood ...')
# TODO(ajuric): Check if this can be speed up.
for i in range(curr_X.shape[0]):
progress_bar.update(progress_counter)
progress_counter += 1
if empty_rows[i] == 1:
continue # current row is empty row
if i < neighbourhood_size or \
i >= curr_X.shape[0] - neighbourhood_size:
# Current position is not suitable to build an example.
continue
                zeros_to_left = np.sum(empty_rows[i - neighbourhood_size:i])
import os
import datetime
import json
from PIL import Image
import numpy as np
from glob import glob
from PIL import Image, ImageDraw
from scipy import ndimage, misc
import scipy.misc
import imageio
import cv2
# http://scikit-image.org/docs/dev/auto_examples/filters/plot_denoise.html
from skimage.util import random_noise
"""
IMAGE PROCESSING
- read_dataset_image_path
- read_dataset_images
- read_lst_images
- read_image
- get_noisy_data
"""
def get_noisy_data(data):
lst_noisy = []
sigma = 0.155
for image in data:
noisy = random_noise(image, var=sigma ** 2)
lst_noisy.append(noisy)
return np.array(lst_noisy)
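# --- Illustrative usage sketch (synthetic images; sigma is the hard-coded value above) ---
# get_noisy_data() adds Gaussian noise with variance sigma**2 to every image; the
# denoising/reconstruction experiments below consume the noisy copies.
def _example_get_noisy_data():
    import numpy as np
    clean = np.zeros((2, 8, 8, 1))
    noisy = get_noisy_data(clean)
    assert noisy.shape == (2, 8, 8, 1)
    return noisy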
def read_dataset_image_path(s_dataset_url, n_number_count=None):
lst_dir_inner_images_path = []
for s_dir_path in glob(os.path.join(s_dataset_url, "*")):
for s_image_path in glob(os.path.join(s_dir_path, "*")):
lst_dir_inner_images_path.append(s_image_path)
if n_number_count is not None:
if len(lst_dir_inner_images_path) >= n_number_count:
return np.array(lst_dir_inner_images_path)
return lst_dir_inner_images_path
def read_image_w_noise(s_image_path):
tmp_image = read_image(s_image_path)
sigma = 0.155
noisy = random_noise(tmp_image, var=sigma ** 2)
return np.array(noisy)
def read_lst_images_w_noise2(lst_images_path, nd_patch_size, n_patch_step):
lst_images = []
for image_path in lst_images_path:
lst_images.append(read_image_w_noise(image_path))
return np.array(lst_images)
def read_lst_images_w_noise(lst_images_path, nd_patch_size, n_patch_step):
lst_slices = []
lst_location = []
for image_path in lst_images_path:
tmp_img = read_image_w_noise(image_path)
tmp_slices, tmp_location_slice = get_image_patches(
[tmp_img], nd_patch_size, n_patch_step
)
lst_slices.extend(tmp_slices)
lst_location.extend(tmp_location_slice)
return np.array(lst_slices), lst_location
def read_lst_images(lst_images_path, nd_patch_size, n_patch_step, b_work_on_patch=True):
if b_work_on_patch:
lst_slices = []
lst_location = []
for image_path in lst_images_path:
tmp_img = read_image(image_path)
tmp_slices, tmp_location_slice = get_image_patches(
[tmp_img], nd_patch_size, n_patch_step
)
lst_slices.extend(tmp_slices)
lst_location.extend(tmp_location_slice)
return lst_slices, lst_location
else:
lst_images = [read_image(image_path) for image_path in lst_images_path]
return np.array(lst_images)
def read_dataset_images(s_dataset_url, nd_img_size, n_number_count):
lst_images = []
for s_dir_path in glob(os.path.join(s_dataset_url, "*")):
for s_image_path in glob(os.path.join(s_dir_path, "*")):
lst_images.append(read_image(s_image_path, nd_img_size))
if n_number_count is not None:
if len(lst_images) >= n_number_count:
return np.array(lst_images)
return np.array(lst_images)
def read_image(s_image_path):
tmp_image = cv2.imread(s_image_path)
tmp_image = cv2.cvtColor(tmp_image, cv2.COLOR_BGR2GRAY)
tmp_image = (
tmp_image[
int(0.125 * 1080) : int(0.875 * 1080), int(0.125 * 720) : int(0.875 * 720)
]
/ 127.5
- 1.0
)
tmp_image = cv2.resize(tmp_image, (180, 270), interpolation=cv2.INTER_NEAREST)
tmp_image = tmp_image[..., np.newaxis]
return np.array(tmp_image)
def get_patch_video(lst_images, nd_patch_size, nd_stride, n_depth):
lst_video_slice = []
lst_video_location = []
n_video_numbers = len(lst_images) // n_depth
flag = True
n_video_slices_number = 0
for i in range(0, n_video_numbers):
tmp_video = read_lst_images(lst_images[i * n_depth : ((i + 1) * n_depth)])
lst_tmp_video, lst_tmp_location = get_image_patches(
tmp_video, nd_patch_size, nd_stride
)
if flag:
flag = False
n_video_slices_number = len(lst_tmp_video)
lst_video_slice.extend(lst_tmp_video)
lst_video_location.extend(lst_tmp_location)
print("video patches is ready ({} patches)".format(len(lst_video_slice)))
return np.array(lst_video_slice), lst_video_location
def get_image_patches(image_src, nd_patch_size, nd_stride):
image_src = np.array(image_src)
lst_patches = []
lst_locations = []
n_stride_h = nd_stride[0]
n_stride_w = nd_stride[1]
tmp_frame = image_src[0].shape
n_frame_h = tmp_frame[0]
n_frame_w = tmp_frame[1]
# for i in range(0,n_frame_h,n_stride_h):
# np.array(lst_patches[10])[0,:,:]
flag_permission_h = flag_permission_w = True
i = 0
while i < n_frame_h and flag_permission_h:
flag_permission_w = True
start_h = i
end_h = i + nd_patch_size[0]
if end_h > n_frame_h:
end_h = n_frame_h
start_h = n_frame_h - nd_patch_size[0]
# break
# for j in range(0,n_frame_w,n_stride_w):
j = 0
while j < n_frame_w and flag_permission_w:
start_w = j
end_w = j + nd_patch_size[1]
if end_w > n_frame_w:
end_w = n_frame_w
start_w = n_frame_w - nd_patch_size[1]
# break
# print(start_w,end_w,'**',start_h,end_h)
tmp_slices = np.array(image_src[:, start_h:end_h, start_w:end_w])
lst_patches.append(tmp_slices)
lst_locations.append([start_h, start_w])
j += n_stride_w
if j > n_frame_w:
flag_permission_w = False
j = n_frame_w - nd_patch_size[1]
i += n_stride_h
if i > n_frame_h:
flag_permission_h = False
i = n_frame_h - nd_patch_size[0]
return np.array(lst_patches), lst_locations
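# --- Illustrative usage sketch (synthetic frame; sizes are arbitrary) ---
# get_image_patches() slides a window of nd_patch_size over every frame with the
# given stride and also records the top-left corner of each patch.
def _example_get_image_patches():
    import numpy as np
    frames = np.zeros((1, 64, 64, 1))
    patches, locations = get_image_patches(frames, nd_patch_size=(32, 32),
                                           nd_stride=(32, 32))
    assert len(locations) == 4                # a 2x2 grid of non-overlapping patches
    return patches, locations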
def kh_isDirExist(path):
if not os.path.exists(path):
os.makedirs(path)
print("path ", path, " is created")
return
def kh_crop(img, nStartX, nEndX, nStartY, nEndY):
return img[nStartY:nEndY, nStartX:nEndX]
def kh_extractPatches(sImg, nStride=1, ndSliceSize=(10, 10), bSaveImages=False):
i = 0
j = 0
imgArray = np.zeros([Image.open(sImg[0]).size[1], Image.open(sImg[0]).size[0], 3])
while i < len(sImg):
# read Images
imgTmp1 = Image.open(sImg[i])
imgTmp2 = Image.open(sImg[i + 1])
# Image to Numpy array
imgArray1 = np.array(imgTmp1)
        imgArray2 = np.array(imgTmp2)
A = imgArray1
A = (A - np.mean(A)) / np.std(A)
imgArray1 = A
A = imgArray2
A = (A - np.mean(A)) / np.std(A)
imgArray2 = A
imgArray[:, :, j] = np.add(imgArray1, imgArray2) / 2
i = i + 2
j = j + 1
# =========================================================
nImgArrayH = imgArray.shape[0]
nImgArrayW = imgArray.shape[1]
best_rg = imgArray[100 : nImgArrayH - 14, 0:nImgArrayW]
# best_rg = imgArray[0:nImgArrayH, 0:nImgArrayW]
ndMainSize = (best_rg.shape[0], best_rg.shape[1])
ndSliceSizeWidth = ndSliceSize[0]
ndSliceSizeHeight = ndSliceSize[1]
# Copy master
path = os.path.dirname(sImg[0])
base = os.path.basename(sImg[0])
# slice the image to 1000 x 1000 tiles
slice_size = ndSliceSizeWidth
lst_fNamesTmp = []
lst_Patches = []
beforeViewedX = []
beforeViewedY = []
for y in range(0, nImgArrayH - ndSliceSizeHeight + 1, nStride):
for x in range(0, nImgArrayW - ndSliceSizeWidth + 1, nStride):
# fname = os.path.join(path, sPrefixOutput+"/p-%d-%d-%s" % (x, y, base))
# basePosition = "%s--[%d,%d]--(%d,%d)" % (sFileAddress,ndSliceSizeWidth,ndSliceSizeHeight,x, y)
# print("Creating tile:" + basePosition)
minX = x
minY = y
if (x + slice_size) >= nImgArrayW:
minX = x - slice_size - 1
else:
minX = x
if (y + slice_size) >= nImgArrayH:
minY = y - slice_size - 1
else:
minY = y
mx = min(x + slice_size, nImgArrayW)
my = min(y + slice_size, nImgArrayH)
if (mx or x) > nImgArrayW and (my or y) > nImgArrayH:
continue
sSaveBasePatchImg = "./" # +'/' + base
basePosition = "(%d,%d)" % (minX, minY)
saveAddress = (
sSaveBasePatchImg
+ "/"
+ path[(len(path) - 8) : len(path)]
+ "_"
+ base[0:3]
+ "_"
+ basePosition
)
# buffer = Image.new("RGB", [slice_size, slice_size], (255, 255, 255))
# buffer = Image.new("YCbCr", [slice_size, slice_size])
# tile = imgTmp.crop((minX, minY, mx, my))
crp = kh_crop(imgArray, minX, mx, minY, my)
tile = np.resize(crp, [slice_size, slice_size, 3])
# tmpArr=np.array(tile)
# tile = Image.fromarray(tmpArr)
# buffer.paste(tile.resize(ndSliceSize), (0, 0))
if bSaveImages:
kh_isDirExist(sSaveBasePatchImg)
# buffer.save(saveAddress, "JPEG")
npTile = np.array(tile.resize(ndSliceSize))
scipy.misc.imsave(saveAddress + ".jpg", npTile)
if True: # basePosition not in lst_fNamesTmp:
lst_fNamesTmp.append(basePosition)
# Image to Numpy array
# imgBuffer = np.array(buffer)
# imgBuffer = np.expand_dims(np.array(tile.resize(ndSliceSize)), axis=-1)
# expand_tile = np.expand_dims(tile,-1)
# buffer = Image.new("RGB", [slice_size, slice_size], (100, 10, 100))
# buffer.paste(Image.fromarray(tile))
# img = np.zeros([ndSliceSizeWidth, ndSliceSizeHeight, 3])
# img[:, :, 0] = tile
# img[:, :, 1] = tile
# img[:, :, 2] = tile
lst_Patches.append(tile)
# print('add => ', saveAddress)
else:
print("it is copy => ", basePosition)
if bSaveImages:
# buffer = Image.new("RGB", [ndMainSize[1], ndMainSize[0]], (255, 255, 255))
# buffer.paste(imgTmp, (0, 0))
        npImgTmp = np.array(tile)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Collection of tools for creating test patterns for evaluation.
"""
import os
import cv2
import matplotlib.pyplot as plt
import numpy as np
from colour.colorimetry import CMFS, ILLUMINANTS
from colour.models import XYZ_to_xy, xy_to_XYZ, XYZ_to_RGB, RGB_to_XYZ
from colour.models import xy_to_xyY, xyY_to_XYZ, Lab_to_XYZ
from colour.models import BT709_COLOURSPACE
from colour.utilities import normalise_maximum
from colour import models
from colour import RGB_COLOURSPACES, COLOURCHECKERS
from scipy.spatial import Delaunay
from scipy.ndimage.filters import convolve
import math
import transfer_functions as tf
CMFS_NAME = 'CIE 1931 2 Degree Standard Observer'
D65_WHITE = ILLUMINANTS[CMFS_NAME]['D65']
YCBCR_CHECK_MARKER = [0, 0, 0]
UNIVERSAL_COLOR_LIST = ["#F6AA00", "#FFF100", "#03AF7A",
"#005AFF", "#4DC4FF", "#804000"]
def preview_image(img, order='rgb', over_disp=False):
if order == 'rgb':
cv2.imshow('preview', img[:, :, ::-1])
elif order == 'bgr':
cv2.imshow('preview', img)
elif order == 'mono':
cv2.imshow('preview', img)
else:
raise ValueError("order parameter is invalid")
if over_disp:
cv2.resizeWindow('preview', )
cv2.waitKey(0)
cv2.destroyAllWindows()
def equal_devision(length, div_num):
"""
    # Overview
    Split `length` into `div_num` parts.
    If there is a remainder, it is spread over the parts using error diffusion.
"""
base = length / div_num
ret_array = [base for x in range(div_num)]
    # Reconcile the fractional parts using error diffusion
# -------------------------------------------
diff = 0
for idx in range(div_num):
diff += math.modf(ret_array[idx])[0]
if diff >= 1.0:
diff -= 1.0
ret_array[idx] = int(math.floor(ret_array[idx]) + 1)
else:
ret_array[idx] = int(math.floor(ret_array[idx]))
    # Handle the case where rounding error leaves the last element short by 1
# -------------------------------------------
diff = length - sum(ret_array)
if diff != 0:
ret_array[-1] += diff
    # Final sanity check
# -------------------------------------------
if length != sum(ret_array):
raise ValueError("the output of equal_division() is abnormal.")
return ret_array
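# --- Illustrative check ---
# equal_devision() splits `length` into `div_num` integer parts whose sum is exactly
# `length`, spreading the rounding remainder with simple error diffusion.
def _example_equal_devision():
    parts = equal_devision(10, 3)
    assert sum(parts) == 10 and len(parts) == 3
    return parts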
def do_matrix(img, mtx):
"""
    Apply the matrix `mtx` to the image `img`.
"""
base_shape = img.shape
r, g, b = img[..., 0], img[..., 1], img[..., 2]
ro = r * mtx[0][0] + g * mtx[0][1] + b * mtx[0][2]
go = r * mtx[1][0] + g * mtx[1][1] + b * mtx[1][2]
bo = r * mtx[2][0] + g * mtx[2][1] + b * mtx[2][2]
out_img = np.dstack((ro, go, bo)).reshape(base_shape)
return out_img
def _get_cmfs_xy():
"""
    Compute the xy values of the horseshoe-shaped outline (spectral locus) used to plot the xy chromaticity diagram.
Returns
-------
array_like
xy coordinate for chromaticity diagram
"""
    # Basic parameter setup
# ------------------
cmf = CMFS.get(CMFS_NAME)
d65_white = D65_WHITE
    # Compute the xy values of the spectral locus (horseshoe)
# --------------------------
cmf_xy = XYZ_to_xy(cmf.values, d65_white)
return cmf_xy
def get_primaries(name='ITU-R BT.2020'):
"""
    Return the chromaticity coordinates of the primary colors.
Parameters
----------
name : str
a name of the color space.
Returns
-------
array_like
        primaries. [[rx, ry], [gx, gy], [bx, by], [rx, ry]]
"""
primaries = RGB_COLOURSPACES[name].primaries
primaries = np.append(primaries, [primaries[0, :]], axis=0)
rgb = np.array([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]])
return primaries, rgb
def xy_to_rgb(xy, name='ITU-R BT.2020', normalize='maximum', specific=None):
"""
    Convert xy values to RGB values.
    The result is also normalized in a reasonable way.
Parameters
----------
xy : array_like
xy value.
name : string
color space name.
normalize : string
normalize method. You can select 'maximum', 'specific' or None.
Returns
-------
array_like
rgb value. the value is normalized.
"""
illuminant_XYZ = D65_WHITE
illuminant_RGB = D65_WHITE
chromatic_adaptation_transform = 'CAT02'
large_xyz_to_rgb_matrix = get_xyz_to_rgb_matrix(name)
if normalize == 'specific':
xyY = xy_to_xyY(xy)
xyY[..., 2] = specific
large_xyz = xyY_to_XYZ(xyY)
else:
large_xyz = xy_to_XYZ(xy)
rgb = XYZ_to_RGB(large_xyz, illuminant_XYZ, illuminant_RGB,
large_xyz_to_rgb_matrix,
chromatic_adaptation_transform)
"""
    The raw values may come out at a low video level, so, if requested,
    normalize and maximize the RGB values per pixel.
"""
if normalize == 'maximum':
rgb = normalise_maximum(rgb, axis=-1)
else:
if(np.sum(rgb > 1.0) > 0):
print("warning: over flow has occured at xy_to_rgb")
if(np.sum(rgb < 0.0) > 0):
print("warning: under flow has occured at xy_to_rgb")
rgb[rgb < 0] = 0
rgb[rgb > 1.0] = 1.0
return rgb
def get_white_point(name):
"""
    Return the white point (CIE 1931 based).
"""
if name != "DCI-P3":
illuminant = RGB_COLOURSPACES[name].illuminant
white_point = ILLUMINANTS[CMFS_NAME][illuminant]
else:
white_point = ILLUMINANTS[CMFS_NAME]["D65"]
return white_point
def get_secondaries(name='ITU-R BT.2020'):
"""
    Return the chromaticity coordinates of the secondary colors.
Parameters
----------
name : str
a name of the color space.
Returns
-------
array_like
secondaries. the order is magenta, yellow, cyan.
"""
secondary_rgb = np.array([[1.0, 0.0, 1.0],
[1.0, 1.0, 0.0],
[0.0, 1.0, 1.0]])
illuminant_XYZ = D65_WHITE
illuminant_RGB = D65_WHITE
chromatic_adaptation_transform = 'CAT02'
rgb_to_xyz_matrix = get_rgb_to_xyz_matrix(name)
large_xyz = RGB_to_XYZ(secondary_rgb, illuminant_RGB,
illuminant_XYZ, rgb_to_xyz_matrix,
chromatic_adaptation_transform)
xy = XYZ_to_xy(large_xyz, illuminant_XYZ)
return xy, secondary_rgb.reshape((3, 3))
# def plot_chromaticity_diagram(
# rate=480/755.0*2, xmin=0.0, xmax=0.8, ymin=0.0, ymax=0.9, **kwargs):
# # Set default values of keyword arguments
# # ------------------------------------
# monitor_primaries = kwargs.get('monitor_primaries', None)
# secondaries = kwargs.get('secondaries', None)
# test_scatter = kwargs.get('test_scatter', None)
# intersection = kwargs.get('intersection', None)
# # Prepare data for plotting
# # ---------------------------------
# xy_image = get_chromaticity_image(
# xmin=xmin, xmax=xmax, ymin=ymin, ymax=ymax)
# cmf_xy = _get_cmfs_xy()
# bt709_gamut, _ = get_primaries(name=cs.BT709)
# bt2020_gamut, _ = get_primaries(name=cs.BT2020)
# dci_p3_gamut, _ = get_primaries(name=cs.P3_D65)
# ap0_gamut, _ = get_primaries(name=cs.ACES_AP0)
# ap1_gamut, _ = get_primaries(name=cs.ACES_AP1)
# xlim = (min(0, xmin), max(0.8, xmax))
# ylim = (min(0, ymin), max(0.9, ymax))
# ax1 = pu.plot_1_graph(fontsize=20 * rate,
# figsize=((xmax - xmin) * 10 * rate,
# (ymax - ymin) * 10 * rate),
# graph_title="CIE1931 Chromaticity Diagram",
# graph_title_size=None,
# xlabel=None, ylabel=None,
# axis_label_size=None,
# legend_size=18 * rate,
# xlim=xlim, ylim=ylim,
# xtick=[x * 0.1 + xmin for x in
# range(int((xlim[1] - xlim[0])/0.1) + 1)],
# ytick=[x * 0.1 + ymin for x in
# range(int((ylim[1] - ylim[0])/0.1) + 1)],
# xtick_size=17 * rate,
# ytick_size=17 * rate,
# linewidth=4 * rate,
# minor_xtick_num=2,
# minor_ytick_num=2)
# ax1.plot(cmf_xy[..., 0], cmf_xy[..., 1], '-k', lw=3.5*rate, label=None)
# ax1.plot((cmf_xy[-1, 0], cmf_xy[0, 0]), (cmf_xy[-1, 1], cmf_xy[0, 1]),
# '-k', lw=3.5*rate, label=None)
# ax1.plot(bt709_gamut[:, 0], bt709_gamut[:, 1],
# c=UNIVERSAL_COLOR_LIST[0], label="BT.709", lw=2.75*rate)
# ax1.plot(bt2020_gamut[:, 0], bt2020_gamut[:, 1],
# c=UNIVERSAL_COLOR_LIST[1], label="BT.2020", lw=2.75*rate)
# ax1.plot(dci_p3_gamut[:, 0], dci_p3_gamut[:, 1],
# c=UNIVERSAL_COLOR_LIST[2], label="DCI-P3", lw=2.75*rate)
# ax1.plot(ap1_gamut[:, 0], ap1_gamut[:, 1],
# c=UNIVERSAL_COLOR_LIST[3], label="ACES AP1", lw=2.75*rate)
# ax1.plot(ap0_gamut[:, 0], ap0_gamut[:, 1],
# c=UNIVERSAL_COLOR_LIST[4], label="ACES AP0", lw=2.75*rate)
# if monitor_primaries is not None:
# ax1.plot(monitor_primaries[:, 0], monitor_primaries[:, 1],
# c="#202020", label="???", lw=3*rate)
# if secondaries is not None:
# xy, rgb = secondaries
# ax1.scatter(xy[..., 0], xy[..., 1], s=700*rate, marker='s', c=rgb,
# edgecolors='#404000', linewidth=2*rate)
# if test_scatter is not None:
# xy, rgb = test_scatter
# ax1.scatter(xy[..., 0], xy[..., 1], s=300*rate, marker='s', c=rgb,
# edgecolors='#404040', linewidth=2*rate)
# if intersection is not None:
# ax1.scatter(intersection[..., 0], intersection[..., 1],
# s=300*rate, marker='s', c='#CCCCCC',
# edgecolors='#404040', linewidth=2*rate)
# ax1.imshow(xy_image, extent=(xmin, xmax, ymin, ymax))
# plt.legend(loc='upper right')
# plt.savefig('temp_fig.png', bbox_inches='tight')
# plt.show()
def get_chromaticity_image(samples=1024, antialiasing=True, bg_color=0.9,
xmin=0.0, xmax=1.0, ymin=0.0, ymax=1.0):
"""
    Generate the horseshoe-shaped image of the xy chromaticity diagram.
Returns
-------
ndarray
rgb image.
"""
"""
    Gamut setting. sRGB was too narrow and looked a bit odd, so BT.2020 is used here.
    The drawback is that colors get slightly washed out; to be improved when time allows.
"""
# color_space = models.BT2020_COLOURSPACE
# color_space = models.S_GAMUT3_COLOURSPACE
color_space = models.ACES_CG_COLOURSPACE
    # Compute the xy values of the spectral locus (horseshoe)
# --------------------------
cmf_xy = _get_cmfs_xy()
"""
    To decide whether a point lies inside or outside the horseshoe, the region is
    divided into triangles (a Delaunay triangulation is built). Once the Delaunay
    triangulation exists, inside/outside can be determined with cross-product tests.
    The resulting triangulation can be plotted with the code below.
    One note: the third argument of ```plt.triplot``` is a list of **indices**
    that build triangles from the first two arguments, e.g. [[0, 1, 2], [2, 4, 3], ...].
```python
plt.figure()
plt.triplot(xy[:, 0], xy[:, 1], triangulation.simplices.copy(), '-o')
plt.title('triplot of Delaunay triangulation')
plt.show()
```
"""
triangulation = Delaunay(cmf_xy)
"""
    ```triangulation.find_simplex()``` reports which simplex (by index) each xy
    point falls into. A return value of ```-1``` means the point lies outside the
    region, so a mask for the region test can be built from the "index below zero" results.
"""
xx, yy\
= np.meshgrid(np.linspace(xmin, xmax, samples),
np.linspace(ymax, ymin, samples))
xy = np.dstack((xx, yy))
mask = (triangulation.find_simplex(xy) < 0).astype(np.float)
    # Anti-alias to smooth the alpha channel
# ------------------------------------------------
if antialiasing:
kernel = np.array([
[0, 1, 0],
[1, 2, 1],
[0, 1, 0],
]).astype(np.float)
kernel /= np.sum(kernel)
mask = convolve(mask, kernel)
    # Invert (negative/positive flip)
# --------------------------------
mask = 1 - mask[:, :, np.newaxis]
    # Recover colors from the xy mesh
# ------------------------
illuminant_XYZ = D65_WHITE
illuminant_RGB = color_space.whitepoint
chromatic_adaptation_transform = 'XYZ Scaling'
large_xyz_to_rgb_matrix = color_space.XYZ_to_RGB_matrix
    xy[xy == 0.0] = 1.0  # guard against division by zero
large_xyz = xy_to_XYZ(xy)
rgb = XYZ_to_RGB(large_xyz, illuminant_XYZ, illuminant_RGB,
large_xyz_to_rgb_matrix,
chromatic_adaptation_transform)
"""
    The raw values may come out at a low video level, so normalize and
    maximize the RGB values per pixel.
"""
    rgb[rgb == 0] = 1.0  # guard against division by zero
rgb = normalise_maximum(rgb, axis=-1)
    # Apply the mask
# -------------------------------------
mask_rgb = np.dstack((mask, mask, mask))
rgb *= mask_rgb
    # Change the background color to gray
# -------------------------------------
bg_rgb = np.ones_like(rgb)
bg_rgb *= (1 - mask_rgb) * bg_color
rgb += bg_rgb
rgb = rgb ** (1/2.2)
return rgb
def get_csf_color_image(width=640, height=480,
lv1=np.uint16(np.array([1.0, 1.0, 1.0]) * 1023 * 0x40),
lv2=np.uint16(np.array([1.0, 1.0, 1.0]) * 512 * 0x40),
stripe_num=18):
"""
長方形を複数個ズラして重ねることでCSFパターンっぽいのを作る。
入力信号レベルは16bitに限定する。
Parameters
----------
width : numeric.
width of the pattern image.
height : numeric.
height of the pattern image.
lv1 : numeric
video level 1. this value must be 10bit.
lv2 : numeric
video level 2. this value must be 10bit.
stripe_num : numeric
number of the stripe.
Returns
-------
array_like
a cms pattern image.
"""
width_list = equal_devision(width, stripe_num)
height_list = equal_devision(height, stripe_num)
h_pos_list = equal_devision(width // 2, stripe_num)
v_pos_list = equal_devision(height // 2, stripe_num)
lv1_16bit = lv1
lv2_16bit = lv2
img = np.zeros((height, width, 3), dtype=np.uint16)
width_temp = width
height_temp = height
h_pos_temp = 0
v_pos_temp = 0
for idx in range(stripe_num):
lv = lv1_16bit if (idx % 2) == 0 else lv2_16bit
temp_img = np.ones((height_temp, width_temp, 3), dtype=np.uint16)
# temp_img *= lv
temp_img[:, :] = lv
ed_pos_h = h_pos_temp + width_temp
ed_pos_v = v_pos_temp + height_temp
img[v_pos_temp:ed_pos_v, h_pos_temp:ed_pos_h] = temp_img
width_temp -= width_list[stripe_num - 1 - idx]
height_temp -= height_list[stripe_num - 1 - idx]
h_pos_temp += h_pos_list[idx]
v_pos_temp += v_pos_list[idx]
return img
def plot_xyY_color_space(name='ITU-R BT.2020', samples=1024,
antialiasing=True):
"""
    Create an xyY figure like the one found in SONY's HDR explanatory material.
Parameters
----------
name : str
name of the target color space.
Returns
-------
None
"""
    # Build data for deciding whether points lie inside the horseshoe region
# --------------------------
primary_xy, _ = get_primaries(name=name)
triangulation = Delaunay(primary_xy)
xx, yy\
= np.meshgrid(np.linspace(0, 1, samples), np.linspace(1, 0, samples))
xy = | np.dstack((xx, yy)) | numpy.dstack |
# -*- coding: utf-8 -*-
"""base class for columns"""
from ELDAmwl.component.interface import ILogger
from ELDAmwl.utils.constants import NC_FILL_BYTE
from ELDAmwl.utils.constants import NC_FILL_INT
from zope import component
import numpy as np
import xarray as xr
class Columns(object):
"""
base column class (2 dimensional: (time, level))
"""
def __init__(self):
self.ds = xr.Dataset(
{'data': (['time', 'level'], np.empty((0, 0))),
             'err': (['time', 'level'], np.empty((0, 0))),
import os
import glob
import random
from PIL import Image
import numpy as np
import trimesh
from lib.data.core import Field
from lib.common import random_crop_occ
class IndexField(Field):
''' Basic index field.'''
# def load(self, model_path, idx, category):
def load(self, model_path, idx, start_idx=0, dataset_folder=None, **kwargs):
''' Loads the index field.
Args:
model_path (str): path to model
idx (int): ID of data point
start_idx (int): id of sequence start
dataset_folder (str): dataset folder
'''
return idx
def check_complete(self, files):
''' Check if field is complete.
Args:
files: files
'''
return True
class PointsSubseqField(Field):
''' Points subsequence field class.
Args:
folder_name (str): points folder name
transform (transform): transform
seq_len (int): length of sequence
all_steps (bool): whether to return all time steps
fixed_time_step (int): if and which fixed time step to use
unpackbits (bool): whether to unpack bits
scale_type (str, optional): Specifies the type of transformation to apply to the point cloud:
``'cr'`` | ``'oflow'``. ``'cr'``: transform the point cloud to align with the output,
``'oflow'``: scale the point cloud w.r.t. the first point cloud of the sequence
spatial_completion (bool): whether to remove some points for 4D spatial completion experiment
'''
def __init__(self, folder_name, transform=None, seq_len=17,
all_steps=False, fixed_time_step=None, unpackbits=False,
scale_type=None, spatial_completion=False, **kwargs):
self.folder_name = folder_name
self.transform = transform
self.seq_len = seq_len
self.all_steps = all_steps
self.sample_padding = 0.1
self.fixed_time_step = fixed_time_step
self.unpackbits = unpackbits
self.scale_type = scale_type
self.spatial_completion = spatial_completion
if scale_type is not None:
assert scale_type in ['oflow', 'cr']
def get_loc_scale(self, mesh):
''' Returns location and scale of mesh.
Args:
mesh (trimesh): mesh
'''
bbox = mesh.bounding_box.bounds
# Compute location and scale with padding of 0.1
loc = (bbox[0] + bbox[1]) / 2
scale = (bbox[1] - bbox[0]).max() / (1 - self.sample_padding)
return loc, scale
def normalize_mesh(self, mesh, loc, scale):
''' Normalize mesh.
Args:
mesh (trimesh): mesh
loc (tuple): location for normalization
scale (float): scale for normalization
'''
# Transform input mesh
mesh.apply_translation(-loc)
mesh.apply_scale(1 / scale)
return mesh
def load_files(self, model_path, start_idx):
''' Loads the model files.
Args:
model_path (str): path to model
start_idx (int): id of sequence start
'''
folder = os.path.join(model_path, self.folder_name)
files = glob.glob(os.path.join(folder, '*.npz'))
files.sort()
files = files[start_idx:start_idx+self.seq_len]
return files
def load_all_steps(self, files, loc0, scale0, loc_global, scale_global, dataset_folder):
''' Loads data for all steps.
Args:
files (list): list of files
points_dict (dict): points dictionary for first step of sequence
loc0 (tuple): location of first time step mesh
scale0 (float): scale of first time step mesh
'''
p_list = []
o_list = []
t_list = []
for i, f in enumerate(files):
points_dict = np.load(f)
# Load points
points = points_dict['points']
if (points.dtype == np.float16):
# break symmetry (nec. for some version)
points = points.astype(np.float32)
points += 1e-4 * np.random.randn(*points.shape)
occupancies = points_dict['occupancies']
if self.unpackbits:
occupancies = np.unpackbits(occupancies)[:points.shape[0]]
occupancies = occupancies.astype(np.float32)
loc = points_dict['loc'].astype(np.float32)
scale = points_dict['scale'].astype(np.float32)
model_id, _, frame_id = f.split('/')[-3:]
# Remove some points for 4D spatial completion experiment
if self.spatial_completion:
data_folder = os.path.join(dataset_folder, 'test', 'D-FAUST', model_id)
mask_folder = os.path.join(dataset_folder, 'spatial_mask', model_id)
if not os.path.exists(mask_folder):
os.makedirs(mask_folder)
mask_file = os.path.join(mask_folder, frame_id.replace('.npz', '.npy'))
if os.path.exists(mask_file):
mask = np.load(mask_file)
else:
pcl = np.load(os.path.join(data_folder, 'pcl_seq', frame_id))['points']
mask, _, _ = random_crop_occ(points, pcl)
np.save(mask_file, mask)
points = points[mask, :]
occupancies = occupancies[mask]
if self.scale_type is not None:
# Transform to loc0, scale0
if self.scale_type == 'oflow':
points = (loc + scale * points - loc0) / scale0
# Align the testing data of the original D-FAUST with the output of our model
if self.scale_type == 'cr':
trans = np.load(os.path.join(dataset_folder, 'smpl_params', model_id, frame_id))['trans']
loc -= trans
points = (loc + scale * points - loc_global) / scale_global
points = points.astype(np.float32)
time = np.array(i / (self.seq_len - 1), dtype=np.float32)
p_list.append(points)
o_list.append(occupancies)
t_list.append(time)
if not self.spatial_completion:
data = {
None: np.stack(p_list),
'occ': np.stack(o_list),
'time': np.stack(t_list),
}
else:
data = {
None: p_list,
'occ': o_list,
'time': np.stack(t_list),
}
return data
def load_single_step(self, files, points_dict, loc0, scale0):
''' Loads data for a single step.
Args:
files (list): list of files
points_dict (dict): points dictionary for first step of sequence
loc0 (tuple): location of first time step mesh
scale0 (float): scale of first time step mesh
'''
if self.fixed_time_step is None:
# Random time step
time_step = np.random.choice(self.seq_len)
else:
time_step = int(self.fixed_time_step)
if time_step != 0:
points_dict = np.load(files[time_step])
# Load points
points = points_dict['points'].astype(np.float32)
occupancies = points_dict['occupancies']
if self.unpackbits:
occupancies = np.unpackbits(occupancies)[:points.shape[0]]
occupancies = occupancies.astype(np.float32)
if self.scale_type == 'oflow':
loc = points_dict['loc'].astype(np.float32)
scale = points_dict['scale'].astype(np.float32)
# Transform to loc0, scale0
points = (loc + scale * points - loc0) / scale0
if self.seq_len > 1:
time = np.array(
time_step / (self.seq_len - 1), dtype=np.float32)
else:
time = np.array([1], dtype=np.float32)
data = {
None: points,
'occ': occupancies,
'time': time,
}
return data
def load(self, model_path, idx, c_idx=None, start_idx=0, dataset_folder=None, **kwargs):
''' Loads the points subsequence field.
Args:
model_path (str): path to model
idx (int): ID of data point
start_idx (int): id of sequence start
dataset_folder (str): dataset folder
'''
files = self.load_files(model_path, start_idx)
# Load loc and scale from t_0, we use the global loc and scale calculated from the whole training set
points_dict = np.load(files[0])
loc0 = points_dict['loc'].astype(np.float32)
scale0 = points_dict['scale'].astype(np.float32)
loc_global = np.array([-0.005493, -0.1888, 0.07587]).astype(np.float32)
scale_global = 2.338
if self.all_steps:
data = self.load_all_steps(files, loc0, scale0, loc_global, scale_global, dataset_folder)
else:
data = self.load_single_step(files, points_dict, loc0, scale0)
if self.transform is not None:
data = self.transform(data)
return data
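# --- Minimal sketch of the unpackbits step used above (synthetic bytes, hypothetical sizes) ---
# When `unpackbits` is enabled, occupancies are stored one bit per point and expanded
# back into a float array aligned with the sampled points.
def _example_unpack_occupancies():
    import numpy as np
    n_points = 12
    packed = np.packbits(np.random.randint(0, 2, n_points).astype(np.uint8))
    occ = np.unpackbits(packed)[:n_points].astype(np.float32)
    assert occ.shape == (n_points,)
    return occ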
class PointCloudSubseqField(Field):
''' Point cloud subsequence field class.
Args:
folder_name (str): points folder name
transform (transform): transform
seq_len (int): length of sequence
only_end_points (bool): whether to only return end points
scale_type (str, optional): Specifies the type of transformation to apply to the input point cloud:
``'cr'`` | ``'oflow'``. ``'cr'``: transform the point cloud the original scale and location of SMPL model,
``'oflow'``: scale the point cloud w.r.t. the first point cloud of the sequence
'''
def __init__(self, folder_name, transform=None, seq_len=17,
only_end_points=False, scale_type=None, eval_mode=False):
self.folder_name = folder_name
self.transform = transform
self.seq_len = seq_len
self.only_end_points = only_end_points
self.scale_type = scale_type
self.eval_mode = eval_mode
if scale_type is not None:
assert scale_type in ['oflow', 'cr']
def return_loc_scale(self, mesh):
''' Returns location and scale of mesh.
Args:
mesh (trimesh): mesh
'''
bbox = mesh.bounding_box.bounds
# Compute location and scale
loc = (bbox[0] + bbox[1]) / 2
scale = (bbox[1] - bbox[0]).max() / (1 - 0)
return loc, scale
def apply_normalization(self, mesh, loc, scale):
''' Normalizes the mesh.
Args:
mesh (trimesh): mesh
loc (tuple): location for normalization
scale (float): scale for normalization
'''
mesh.apply_translation(-loc)
mesh.apply_scale(1/scale)
return mesh
def load_files(self, model_path, start_idx):
''' Loads the model files.
Args:
model_path (str): path to model
start_idx (int): id of sequence start
'''
folder = os.path.join(model_path, self.folder_name)
files = glob.glob(os.path.join(folder, '*.npz'))
files.sort()
files = files[start_idx:start_idx+self.seq_len]
if self.only_end_points:
files = [files[0], files[-1]]
return files
def load_single_file(self, file_path):
''' Loads a single file.
Args:
file_path (str): file path
'''
pointcloud_dict = np.load(file_path)
points = pointcloud_dict['points'].astype(np.float32)
loc = pointcloud_dict['loc'].astype(np.float32)
scale = pointcloud_dict['scale'].astype(np.float32)
return points, loc, scale
def get_time_values(self):
''' Returns the time values.
'''
if self.seq_len > 1:
time = \
np.array([i/(self.seq_len - 1) for i in range(self.seq_len)],
dtype=np.float32)
else:
time = np.array([1]).astype(np.float32)
return time
def load(self, model_path, idx, c_idx=None, start_idx=0, dataset_folder=None, **kwargs):
''' Loads the point cloud sequence field.
Args:
model_path (str): path to model
idx (int): ID of data point
c_idx (int): index of category
start_idx (int): id of sequence start
dataset_folder (str): dataset folder
'''
pc_seq = []
# Get file paths
files = self.load_files(model_path, start_idx)
# Load first pcl file
_, loc0, scale0 = self.load_single_file(files[0])
loc_global = np.array([-0.005493, -0.1888, 0.07587]).astype(np.float32)
scale_global = 2.338
for f in files:
points, loc, scale = self.load_single_file(f)
if self.scale_type is not None:
# Transform mesh to loc0 / scale0
if self.scale_type == 'oflow':
points = (loc + scale * points - loc0) / scale0
# Transform to original scale and location of SMPL model
if self.scale_type == 'cr':
points = loc + scale * points
model_id, _, frame_id = f.split('/')[-3:]
trans = np.load(os.path.join(dataset_folder, 'smpl_params', model_id, frame_id))['trans']
points = points - trans
# Only for evaluation, align the output with the testing data in D-FAUST
if self.eval_mode:
points = (points - loc_global) / scale_global
pc_seq.append(points)
data = {
            None: np.stack(pc_seq),
import numpy as np
import os
import re
import requests
import sys
import time
from netCDF4 import Dataset
import pandas as pd
from bs4 import BeautifulSoup
from tqdm import tqdm
# setup constants used to access the data from the different M2M interfaces
BASE_URL = 'https://ooinet.oceanobservatories.org/api/m2m/' # base M2M URL
SENSOR_URL = '12576/sensor/inv/' # Sensor Information
# setup access credentials
AUTH = ['OOIAPI-853A3LA6QI3L62', '<KEY>']
def M2M_Call(uframe_dataset_name, start_date, end_date):
options = '?beginDT=' + start_date + '&endDT=' + end_date + '&format=application/netcdf'
r = requests.get(BASE_URL + SENSOR_URL + uframe_dataset_name + options, auth=(AUTH[0], AUTH[1]))
if r.status_code == requests.codes.ok:
data = r.json()
else:
return None
# wait until the request is completed
print('Waiting for OOINet to process and prepare data request, this may take up to 20 minutes')
url = [url for url in data['allURLs'] if re.match(r'.*async_results.*', url)][0]
check_complete = url + '/status.txt'
with tqdm(total=400, desc='Waiting') as bar:
for i in range(400):
r = requests.get(check_complete)
bar.update(1)
if r.status_code == requests.codes.ok:
bar.n = 400
bar.last_print_n = 400
bar.refresh()
print('\nrequest completed in %f minutes.' % ((i * 3) / 60))
break
else:
time.sleep(3)
elapsed = (i * 3) / 60
return data
def M2M_Files(data, tag=''):
"""
Use a regex tag combined with the results of the M2M data request to collect the data from the THREDDS catalog.
The matching file paths can then be passed to M2M_Data for loading into var objects.
:param data: JSON object returned from M2M data request with details on where the data is to be found for download
:param tag: regex tag to use in discriminating the data files, so we only collect the correct ones
:return: list of NetCDF files in the THREDDS catalog that match the regex tag
"""
# Create a list of the files from the request above using a simple regex as a tag to discriminate the files
url = [url for url in data['allURLs'] if re.match(r'.*thredds.*', url)][0]
files = list_files(url, tag)
return files
def list_files(url, tag=''):
"""
Function to create a list of the NetCDF data files in the THREDDS catalog created by a request to the M2M system.
:param url: URL to user's THREDDS catalog specific to a data request
:param tag: regex pattern used to distinguish files of interest
:return: list of files in the catalog with the URL path set relative to the catalog
"""
page = requests.get(url).text
soup = BeautifulSoup(page, 'html.parser')
pattern = re.compile(tag)
return [node.get('href') for node in soup.find_all('a', text=pattern)]
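# Illustrative call (the regex tag below is an assumption, not taken from this
# script): restrict the catalog listing to NetCDF files for one sensor, e.g.
#   files = list_files(catalog_url, tag='.*METBK.*\\.nc$')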
def M2M_Data(nclist,variables):
thredds = 'https://opendap.oceanobservatories.org/thredds/dodsC/ooi/'
#nclist is going to contain more than one url eventually
for jj in range(len(nclist)):
url=nclist[jj]
url=url[25:]
dap_url = thredds + url + '#fillmismatch'
openFile = Dataset(dap_url,'r')
for ii in range(len(variables)):
dum = openFile.variables[variables[ii].name]
variables[ii].data = np.append(variables[ii].data, dum[:].data)
tmp = variables[0].data/60/60/24
time_converted = pd.to_datetime(tmp, unit='D', origin=pd.Timestamp('1900-01-01'))
return variables, time_converted
class var(object):
def __init__(self):
"""A Class that generically holds data with a variable name
and the units as attributes"""
self.name = ''
self.data = np.array([])
self.units = ''
def __repr__(self):
return_str = "name: " + self.name + '\n'
return_str += "units: " + self.units + '\n'
return_str += "data: size: " + str(self.data.shape)
return return_str
class structtype(object):
def __init__(self):
""" A class that imitates a Matlab structure type
"""
self._data = []
def __getitem__(self, index):
"""implement index behavior in the struct"""
if index == len(self._data):
self._data.append(var())
return self._data[index]
def __len__(self):
return len(self._data)
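# Minimal end-to-end sketch (not part of the original script) showing how the
# helpers above chain together: M2M_Call issues the request, M2M_Files lists the
# resulting NetCDF files, and M2M_Data appends each file's values to the var
# objects. The dataset name is copied from one of the M2M_URLs entries below;
# the dates, regex tag and function name are illustrative assumptions. Note that
# structtype grows one slot at a time, so variables must be populated with
# consecutive indices, and the paths returned by M2M_Files may need to be
# expanded to full THREDDS URLs before M2M_Data can open them.
def _example_m2m_workflow():
    variables = structtype()
    variables[0].name = 'time'
    variables[0].data = np.array([])
    variables[0].units = 'seconds since 1900-01-01'
    variables[1].name = 'sea_surface_temperature'
    variables[1].data = np.array([])
    variables[1].units = 'degC'
    dataset = 'CE02SHSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument'
    data = M2M_Call(dataset, '2019-01-01T00:00:00.000Z', '2019-01-31T23:59:59.999Z')
    if data is None:
        return None
    nclist = M2M_Files(data, tag='.*METBK.*\\.nc$')
    return M2M_Data(nclist, variables)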
def M2M_URLs(platform_name,node,instrument_class,method):
var_list = structtype()
#MOPAK
if platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/SBD17/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD11/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD11/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/SBD17/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD11/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD11/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSPM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSPM/SBS01/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#METBK
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m'
var_list[10].units = 'W/m'
var_list[11].units = 'mm'
var_list[12].units = 'W/m'
var_list[13].units = 'W/m'
var_list[14].units = 'W/m'
var_list[15].units = 'W/m'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m'
var_list[10].units = 'W/m'
var_list[11].units = 'mm'
var_list[12].units = 'W/m'
var_list[13].units = 'W/m'
var_list[14].units = 'W/m'
var_list[15].units = 'W/m'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m'
var_list[10].units = 'W/m'
var_list[11].units = 'mm'
var_list[12].units = 'W/m'
var_list[13].units = 'W/m'
var_list[14].units = 'W/m'
var_list[15].units = 'W/m'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m'
var_list[10].units = 'W/m'
var_list[11].units = 'mm'
var_list[12].units = 'W/m'
var_list[13].units = 'W/m'
var_list[14].units = 'W/m'
var_list[15].units = 'W/m'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
#FLORT
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/SBD17/06-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/SBD17/06-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID27/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID27/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID27/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID27/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSPM/WFP01/04-FLORTK000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
#FDCHP
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'FDCHP' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD12/08-FDCHPA000/telemetered/fdchp_a_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#DOSTA
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID27/04-DOSTAD000/telemetered/dosta_abcdjm_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID27/04-DOSTAD000/telemetered/dosta_abcdjm_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID27/04-DOSTAD000/telemetered/dosta_abcdjm_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID27/04-DOSTAD000/telemetered/dosta_abcdjm_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD37/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD37/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD37/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD37/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSPM/WFP01/02-DOFSTK000/telemetered/dofst_k_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dofst_k_oxygen_l2'
var_list[2].name = 'dofst_k_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'Hz'
var_list[3].units = 'dbar'
#ADCP
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID26/01-ADCPTA000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID26/01-ADCPTC000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID26/01-ADCPTA000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID26/01-ADCPTC000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD35/04-ADCPTM000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD35/04-ADCPTM000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD35/04-ADCPTC000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD35/04-ADCPSJ000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
#ZPLSC
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD37/07-ZPLSCC000/telemetered/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD37/07-ZPLSCC000/telemetered/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD37/07-ZPLSCC000/telemetered/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD37/07-ZPLSCC000/telemetered/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD37/07-ZPLSCC000/recovered_host/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD37/07-ZPLSCC000/recovered_host/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD37/07-ZPLSCC000/recovered_host/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD37/07-ZPLSCC000/recovered_host/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#WAVSS
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_statistics'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_statistics'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_statistics'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_statistics'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
#VELPT
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/SBD17/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD11/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD11/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/SBD17/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD11/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD11/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID26/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID26/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID26/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID26/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
#PCO2W
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD35/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD35/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD35/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD35/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
#PHSEN
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID26/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID26/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID26/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID26/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD35/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD35/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD35/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD35/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
#SPKIR
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID26/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID26/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID26/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID26/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
#PRESF
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD35/02-PRESFA000/telemetered/presf_abc_dcl_tide_measurement'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD35/02-PRESFA000/telemetered/presf_abc_dcl_tide_measurement'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD35/02-PRESFB000/telemetered/presf_abc_dcl_tide_measurement'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD35/02-PRESFC000/telemetered/presf_abc_dcl_tide_measurement'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
#CTDBP
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD37/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/SBD17/06-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD37/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/SBD17/06-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID27/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID27/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID27/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID27/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD37/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD37/03-CTDBPE000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
#VEL3D
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD35/01-VEL3DD000/telemetered/vel3d_cd_dcl_velocity_data'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD35/01-VEL3DD000/telemetered/vel3d_cd_dcl_velocity_data'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD35/01-VEL3DD000/telemetered/vel3d_cd_dcl_velocity_data'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD35/01-VEL3DD000/telemetered/vel3d_cd_dcl_velocity_data'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
#VEL3DK
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSPM/WFP01/01-VEL3DK000/telemetered/vel3d_k_wfp_stc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_k_eastward_velocity'
var_list[2].name = 'vel3d_k_northward_velocity'
var_list[3].name = 'vel3d_k_upward_velocity'
var_list[4].name = 'vel3d_k_heading'
var_list[5].name = 'vel3d_k_pitch'
var_list[6].name = 'vel3d_k_roll'
var_list[7].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'ddegrees'
var_list[5].units = 'ddegrees'
var_list[6].units = 'ddegrees'
var_list[7].units = 'dbar'
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSPM/WFP01/03-CTDPFK000/telemetered/ctdpf_ckl_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'ctdpf_ckl_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdpf_ckl_seawater_pressure'
var_list[5].name = 'ctdpf_ckl_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
#PCO2A
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD12/04-PCO2AA000/telemetered/pco2a_a_dcl_instrument_water'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD12/04-PCO2AA000/telemetered/pco2a_a_dcl_instrument_water'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD12/04-PCO2AA000/telemetered/pco2a_a_dcl_instrument_water'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD12/04-PCO2AA000/telemetered/pco2a_a_dcl_instrument_water'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
        var_list[3].data = np.array([])
        var_list[0].units = 'seconds since 1900-01-01'
        var_list[1].units = 'uatm'
        var_list[2].units = 'uatm'
        var_list[3].units = 'mol m-2 s-1'
# -*- coding: utf-8 -*-
"""
Created on Sat Jan 8 21:39:07 2022
@author: rainn
"""
import numpy as np
from PIL import Image
# from https://stackoverflow.com/questions/34913005/color-space-mapping-ycbcr-to-rgb
def ycbcr2rgb(im):
xform = np.array([[1, 0, 1.402], [1, -0.34414, -.71414], [1, 1.772, 0]])
rgb = im.astype(float)
rgb[:,:,[1,2]] -= 128
rgb = rgb.dot(xform.T)
np.putmask(rgb, rgb > 255, 255)
np.putmask(rgb, rgb < 0, 0)
return np.uint8(rgb)
# from https://stackoverflow.com/questions/16388110/double-the-length-of-a-python-numpy-array-with-interpolated-new-values
def ntrpl8(Y):
N = len(Y)
X = np.arange(0, 2*N, 2)
X_new = np.arange(2*N) # Where you want to interpolate
    Y_new = np.interp(X_new, X, Y)
    return Y_new
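# Hedged usage sketch (editor addition, not part of the original script): a quick
# check of the two helpers above. The sample values and image size are
# illustrative assumptions.
if __name__ == "__main__":
    samples = np.array([0.0, 1.0, 4.0, 9.0])
    doubled = ntrpl8(samples)            # length-8 array with interpolated values
    ycbcr = np.full((2, 2, 3), 128, dtype=np.uint8)
    rgb = ycbcr2rgb(ycbcr)               # (2, 2, 3) uint8 RGB image
    print(doubled, rgb.shape)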
from __future__ import print_function, division, absolute_import
import numpy as np
import six.moves as sm
from .. import imgaug as ia
class HeatmapsOnImage(object):
"""
Object representing heatmaps on images.
Parameters
----------
arr : (H,W) ndarray or (H,W,C) ndarray
Array representing the heatmap(s).
Must be of dtype float32.
If multiple heatmaps are provided, then ``C`` is expected to denote their number.
shape : tuple of int
Shape of the image on which the heatmap(s) is/are placed. NOT the shape of the
heatmap(s) array, unless it is identical to the image shape (note the likely
difference between the arrays in the number of channels).
If there is not a corresponding image, use the shape of the heatmaps array.
min_value : float, optional
Minimum value for the heatmaps that `arr` represents. This will usually be ``0.0``.
max_value : float, optional
Maximum value for the heatmaps that `arr` represents. This will usually be ``1.0``.
"""
def __init__(self, arr, shape, min_value=0.0, max_value=1.0):
"""Construct a new HeatmapsOnImage object."""
ia.do_assert(ia.is_np_array(arr), "Expected numpy array as heatmap input array, got type %s" % (type(arr),))
# TODO maybe allow 0-sized heatmaps? in that case the min() and max() must be adjusted
ia.do_assert(arr.shape[0] > 0 and arr.shape[1] > 0,
"Expected numpy array as heatmap with height and width greater than 0, got shape %s." % (
arr.shape,))
ia.do_assert(arr.dtype.type in [np.float32],
"Heatmap input array expected to be of dtype float32, got dtype %s." % (arr.dtype,))
ia.do_assert(arr.ndim in [2, 3], "Heatmap input array must be 2d or 3d, got shape %s." % (arr.shape,))
ia.do_assert(len(shape) in [2, 3],
"Argument 'shape' in HeatmapsOnImage expected to be 2d or 3d, got shape %s." % (shape,))
ia.do_assert(min_value < max_value)
if np.min(arr.flat[0:50]) < min_value - np.finfo(arr.dtype).eps \
or np.max(arr.flat[0:50]) > max_value + np.finfo(arr.dtype).eps:
import warnings
warnings.warn(
("Value range of heatmap was chosen to be (%.8f, %.8f), but "
"found actual min/max of (%.8f, %.8f). Array will be "
"clipped to chosen value range.") % (
min_value, max_value, np.min(arr), np.max(arr)))
            arr = np.clip(arr, min_value, max_value)
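# Hedged usage sketch (editor addition, not part of the imgaug source): how the
# constructor documented above is typically called. The array sizes below are
# illustrative assumptions; `shape` describes the image the heatmap belongs to,
# not the heatmap array itself.
def _example_heatmaps_on_image():
    heatmap_arr = np.zeros((128, 128, 1), dtype=np.float32)  # one float32 heatmap
    return HeatmapsOnImage(heatmap_arr, shape=(128, 128, 3),
                           min_value=0.0, max_value=1.0)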
from sklearn.metrics import roc_curve, roc_auc_score
import tensorflow as tf
from tensorflow import keras
from keras.models import load_model
import mtcnn
from os import listdir
from os.path import isdir
from PIL import Image
from matplotlib import pyplot
from numpy import savez_compressed
from numpy import asarray
from numpy import load
from numpy import expand_dims
from mtcnn.mtcnn import MTCNN
from typing import Tuple
from typing import List
from sklearn.metrics import accuracy_score
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import Normalizer
from sklearn.svm import SVC
from random import choice
import matplotlib.pyplot as plt
import numpy as np
import time
from PIL import Image
import requests
from io import BytesIO
import pickle
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import confusion_matrix
from sklearn.model_selection import cross_val_predict
import seaborn as sns
import pandas as pd
TRAIN_SET_PATH = r"C:\Users\thiag\OneDrive\Área de Trabalho\virtual_environments\ufc_project\ufc_2fighters\2-ufc-fighters\train"
TESTE_SET_PATH = r'C:\Users\thiag\OneDrive\Área de Trabalho\virtual_environments\ufc_project\ufc_2fighters\2-ufc-fighters\test'
PATH_TO_LOAD_FACENET_MODEL = r"C:\Users\thiag\OneDrive\Área de Trabalho\virtual_environments\ufc_project\ufc_2fighters\FaceNet\model\facenet_keras.h5"
FACES_PATH = '2-ufc-fighters/'
TRAINED_MODEL = 'trained_model.sav'
TRAINED_OUT_ENCODER = 'trained_out_encoder.sav'
URL_TEST = 'https://i.pinimg.com/originals/8b/95/b5/8b95b5db2d2b315dd75ddeddfe388538.jpg'
UFC_PROJECT_PATH = r'C:\Users\thiag\OneDrive\Área de Trabalho\virtual_environments\ufc_project\ufc_2fighters'
facenet_model = load_model(PATH_TO_LOAD_FACENET_MODEL, compile=False)
detector = MTCNN()
def extract_image_from_path(filename: str) -> np.ndarray:
"""Receives a path to an image, open it and convert it to RGB. Finally,
returns it as a numpy array.
Args:
filename (str): path to the photo in disk.
Returns:
np.ndarray: photo in a numpy array format.
"""
image = Image.open(filename).convert('RGB')
pixels = np.asarray(image)
return pixels
def extract_face_from_image_array(image_array: np.ndarray) -> np.ndarray:
"""Receives a photo represented in a numpy array. Then, makes use of the
method detect_faces of the neural network MTCNN. This method returns the
position of the bounding box. It, then, returns the numpy array containing
only the faces in the image.
Args:
image_array (np.ndarray): numpy array representation of the photo.
Returns:
np.ndarray: numpy array representation of the photo containing only the
face found in the photo by the neural network MTCNN.
"""
faces = detector.detect_faces(image_array)
x1, y1, width, height = faces[0]['box']
# Fixing a bug, since sometimes the positions may come as negative numbers
x1, y1 = abs(x1), abs(y1)
x2, y2 = x1 + width, y1 + height
face = image_array[y1:y2, x1:x2]
return face
def resize_extracted_face(image_array: np.ndarray,
required_size: Tuple[int, int] = (160, 160)) -> np.ndarray:
"""Receives the a numpy array representation of the image, and returns it
reshaped by the requireded size.
Args:
image_array (np.ndarray): numpy array representation of the photo.
required_size (Tuple[int, int], optional): Image size. Defaults to
(160, 160).
Returns:
np.ndarray: Resized image.
"""
image = Image.fromarray(image_array).resize(required_size)
    face_array = asarray(image)
    return face_array
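# Hedged usage sketch (editor addition): chaining the helpers above to go from an
# image file on disk to a 160x160 face crop ready for the FaceNet model. The
# function name is hypothetical.
def extract_face_pipeline(filename: str) -> np.ndarray:
    pixels = extract_image_from_path(filename)      # load photo as an RGB array
    face = extract_face_from_image_array(pixels)    # crop the first detected face
    return resize_extracted_face(face)              # resize to (160, 160, 3)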
import copy
import logging
from abc import ABC
from collections import namedtuple
from enum import Enum
import numpy as np
import open3d as o3d
from hydra.conf import dataclass, MISSING, ConfigStore, field
# Hydra and OmegaConf
from omegaconf import DictConfig, OmegaConf
# Project Imports
from slam.backend import Backend
from slam.common.modules import _with_cv2, _with_o3d
from slam.common.pointcloud import grid_sample
from slam.common.pose import transform_pointcloud
from slam.common.utils import assert_debug, check_tensor, ObjectLoaderEnum
# ----------------------------------------------------------------------------------------------------------------------
@dataclass
class LoopClosureConfig:
"""Configuration for a LoopClosure Algorithm"""
type: str = MISSING
class LoopClosure(ABC):
"""An abstract class for a LoopClosure Algorithm
The Loop Closure Algorithm searches the data_dict for KeyFrames
Which are typically aggregated PointCloud from Odometry Algorithms
And Return optionally some constraints on the trajectory
Loop Closure Algorithms typically need to store a lot of information
"""
def __init__(self, config: LoopClosureConfig, **kwargs):
self.config = config
def init(self):
"""Cleans and Initializes the Loop Closure Algorithm"""
self.clean()
def clean(self):
"""Delete all previous data of the LoopClosure"""
raise NotImplementedError("")
def process_next_frame(self, data_dict: dict):
raise NotImplementedError("")
def update_positions(self, trajectory: np.ndarray):
"""Updates trajectory
Args:
trajectory (np.ndarray): The absolute poses making the trajectory `(N, 4, 4)`
"""
pass
@staticmethod
def pointcloud_key() -> str:
"""Returns the key in the keyframe dict for a new pointcloud"""
return "lc_pointcloud"
@staticmethod
def relative_pose_key() -> str:
"""Returns the key in the keyframe dict for a new pose"""
return "lc_relative_pose"
# ----------------------------------------------------------------------------------------------------------------------
if _with_cv2:
import cv2
from slam.common.registration import ElevationImageRegistration
if _with_o3d:
import open3d as o3d
@dataclass
class EILoopClosureConfig:
"""Configuration for a ElevationImageLoopClosure Algorithm"""
type: str = "elevation_image"
local_map_size: int = 50 # The number of frames in the stored local map
overlap: int = 20 # The number of frames overlapping in the stored local map
debug: bool = False
max_num_candidates: int = 10 # Maximum number of candidates to inspect
max_distance: float = 100 # Limit the maximum distance to search for loops
min_id_distance: int = 200 # Do not try to detect loop closure between temporally close poses
stride: int = 1
icp_distance_threshold: float = 1.0
with_icp_refinement: bool = _with_o3d # Only activated if open3d can be loaded
ei_registration_config: DictConfig = field(default_factory=lambda: OmegaConf.create({
"features": "akaze",
"pixel_size": 0.1,
"z_min": -3.0,
"z_max": 5,
"sigma": 0.1,
"im_height": 1200,
"im_width": 1200,
"color_map": "jet",
"inlier_threshold": 50,
"distance_threshold": 2.0,
"flip_z_axis": False
}))
@dataclass
class MapData:
local_map_data: list = field(default_factory=lambda: [])
last_inserted_pose: np.ndarray = field(default_factory=lambda: np.eye(4, dtype=np.float64))
current_frame_id: int = 0
all_frames_absolute_poses: list = field(default_factory=lambda: [])
maps_absolute_poses: np.ndarray = field(default_factory=lambda: np.zeros((0, 4, 4), dtype=np.float64))
maps_frame_ids: list = field(default_factory=lambda: [])
current_map_pcs: list = field(default_factory=lambda: []) # Store the pointclouds
current_map_poses: list = field(default_factory=lambda: []) # Absolute poses
current_map_frameids: list = field(default_factory=lambda: [])
LocalMapData = namedtuple("LocalMapData", ['keypoints', 'descriptors', 'pointcloud', 'frame_id'])
if _with_o3d:
import open3d as o3d
def draw_registration_result(source, target, transformation):
source_temp = copy.deepcopy(source)
target_temp = copy.deepcopy(target)
source_temp.paint_uniform_color([1, 0.706, 0])
target_temp.paint_uniform_color([0, 0.651, 0.929])
source_temp.transform(transformation)
o3d.visualization.draw_geometries([source_temp, target_temp],
zoom=0.4459,
front=[0.9288, -0.2951, -0.2242],
lookat=[1.6784, 2.0612, 1.4451],
up=[-0.3402, -0.9189, -0.1996])
class ElevationImageLoopClosure(LoopClosure):
"""
An Implementation of a Loop Detection and Estimation Algorithm
"""
def __init__(self, config: EILoopClosureConfig, **kwargs):
super().__init__(config, **kwargs)
self.registration_2D = ElevationImageRegistration(config.ei_registration_config)
self.with_window = config.debug
self.winname = "Loop Closure Map"
if config.debug:
cv2.namedWindow(self.winname, cv2.WINDOW_KEEPRATIO | cv2.WINDOW_NORMAL)
self.data = MapData()
self.maps_saved_data: list = []
def serialize(self) -> MapData:
# Return MapData (convert cv2.KeyPoint which are not handled by the pickling protocol)
def convert_tuple(nt: namedtuple):
keypoints, descriptors, pointcloud, frame_id = nt
return ([(kpt.pt, kpt.size, kpt.angle, kpt.response, kpt.octave,
kpt.class_id) for kpt in keypoints], descriptors, pointcloud, frame_id)
self.data.local_map_data = [convert_tuple(nt) for nt in self.maps_saved_data]
return self.data
def update_positions(self, trajectory: np.ndarray):
check_tensor(trajectory, [self.data.current_frame_id, 4, 4])
if self.data.current_frame_id == 0:
return
num_saved_poses = len(self.data.all_frames_absolute_poses)
self.data.all_frames_absolute_poses = [trajectory[idx] for idx in range(num_saved_poses)]
self.data.maps_absolute_poses = np.array([trajectory[frame_id] for frame_id in self.data.maps_frame_ids])
self.data.last_inserted_pose = trajectory[-1]
num_poses_in_current_map = len(self.data.current_map_pcs)
for idx in range(num_poses_in_current_map):
self.data.current_map_poses[-idx] = trajectory[-idx]
def load(self, map_data: MapData):
            # Restore MapData (rebuild cv2.KeyPoint objects, which are not handled by the pickling protocol)
def convert_tuple(_tuple):
keypoints, descriptors, pointcloud, frame_id = _tuple
return LocalMapData(
[cv2.KeyPoint(kpt[0][0], kpt[0][1], kpt[1], kpt[2], kpt[3], kpt[4], kpt[5]) for kpt in keypoints],
descriptors, pointcloud, frame_id)
self.data = map_data
self.maps_saved_data = [convert_tuple(_tuple) for _tuple in map_data.local_map_data]
def __del__(self):
if self.with_window:
cv2.destroyWindow(self.winname)
def clean(self):
self.data.current_map_pcs.clear()
self.data.current_map_poses.clear()
self.data.current_map_frameids.clear()
self.data.all_frames_absolute_poses.clear()
self.data.maps_frame_ids.clear()
self.data.last_inserted_pose = np.eye(4, dtype=np.float64)
self.data.current_frame_id = 0
self.data.maps_absolute_poses = np.zeros((0, 4, 4), dtype=np.float64)
self.maps_saved_data.clear()
def _compute_transform(self, initial_transform, candidate_pc, target_pc):
if not _with_o3d:
return initial_transform
assert isinstance(self.config, EILoopClosureConfig)
# Refine the transform by an ICP on the point cloud
source = o3d.geometry.PointCloud()
source.points = o3d.utility.Vector3dVector(candidate_pc)
target = o3d.geometry.PointCloud()
target.points = o3d.utility.Vector3dVector(target_pc)
result = o3d.pipelines.registration.registration_icp(
source, target, self.config.icp_distance_threshold, initial_transform.astype(np.float64),
o3d.pipelines.registration.TransformationEstimationPointToPoint())
return np.linalg.inv(result.transformation), candidate_pc, target_pc
def _match_candidates(self, candidate_ids, feat, desc, points, frame_id, data_dict: dict):
assert isinstance(self.config, EILoopClosureConfig)
for candidate in candidate_ids:
cd_feat, cd_desc, cd_pc_image, cd_frame_id = self.maps_saved_data[candidate]
transform, points_2D, inlier_matches = self.registration_2D.align_2d(feat, desc, cd_feat,
cd_desc, None, None)
if self.config.debug:
logging.info(f"Found {len(inlier_matches)}")
if transform is not None:
if self.config.with_icp_refinement and _with_o3d:
cd_points = cd_pc_image.reshape(-1, 3)
                        # NOTE: the comparison below is reconstructed; the original filter
                        # condition was truncated. Keeping only non-zero points is assumed.
                        cd_points = cd_points[np.linalg.norm(cd_points, axis=1) > 0]
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# This file is part of the
# Apode Project (https://github.com/mchalela/apode).
# Copyright (c) 2020, <NAME> and <NAME>
# License: MIT
# Full Text: https://github.com/ngrion/apode/blob/master/LICENSE.txt
from apode import datasets
from apode.basic import ApodeData
import numpy as np
import pandas as pd
import pytest
# =============================================================================
# TESTS COMMON
# =============================================================================
def test_default_call():
ad = datasets.make_uniform(seed=42, size=300, mu=1, nbin=None)
pline = 0.5 * np.median(ad.data.values)
call_result = ad.poverty("headcount", pline=pline)
method_result = ad.poverty.headcount(pline=pline)
assert call_result == method_result
def test_invalid():
ad = datasets.make_uniform(seed=42, size=300, mu=1, nbin=None)
with pytest.raises(AttributeError):
ad.poverty("foo")
def test_get_pline_none():
ad = datasets.make_uniform(seed=42, size=300, mu=1, nbin=None)
# pline is None
pline = 0.5 * np.median(ad.data.values)
assert ad.poverty.headcount(pline=None) == ad.poverty.headcount(
pline=pline
)
def test_get_pline_factor():
ad = datasets.make_uniform(seed=42, size=300, mu=1, nbin=None)
pline = 0.5 * np.median(ad.data.values)
# factor < 0:
with pytest.raises(ValueError):
ad.poverty.hagenaars(pline=pline, factor=-3)
with pytest.raises(ValueError):
ad.poverty.chakravarty(pline=pline, factor=-3)
with pytest.raises(ValueError):
ad.poverty.hagenaars(pline=None, factor=-3)
with pytest.raises(ValueError):
ad.poverty.chakravarty(pline=None, factor=-3)
def test_get_pline_median():
ad = datasets.make_uniform(seed=42, size=300, mu=1, nbin=None)
pline = 0.5 * np.median(ad.data.values)
factor = 0.3
pline = factor * np.median(ad.data.values)
assert ad.poverty.headcount(
pline="median", factor=factor
) == ad.poverty.headcount(pline=pline)
def test_get_pline_mean():
ad = datasets.make_uniform(seed=42, size=300, mu=1, nbin=None)
factor = 0.3
pline = factor * np.mean(ad.data.values)
assert ad.poverty.headcount(
pline="mean", factor=factor
) == ad.poverty.headcount(pline=pline)
def test_get_pline_quantile():
ad = datasets.make_uniform(seed=42, size=300, mu=1, nbin=None)
# pline = "quantile"
q = 0.3
factor = 0.3
pline = factor * np.quantile(ad.data.values, q)
assert ad.poverty.chakravarty(
pline="quantile", factor=factor, q=q
) == ad.poverty.chakravarty(pline=pline)
assert ad.poverty.hagenaars(
pline="quantile", factor=factor, q=q
) == ad.poverty.hagenaars(pline=pline)
# pline = "quantile", q out of range
with pytest.raises(ValueError):
ad.poverty.hagenaars(pline="quantile", q=1.2)
with pytest.raises(ValueError):
ad.poverty.hagenaars(pline="quantile", q=-0.2)
# =============================================================================
# TESTS HEADCOUNT
# =============================================================================
def test_headcount_method():
ad = datasets.make_uniform(seed=42, size=300, mu=1, nbin=None)
pline = 0.5 * np.median(ad.data.values)
assert ad.poverty.headcount(pline=pline) == 0.27
def test_headcount_call():
ad = datasets.make_uniform(seed=42, size=300, mu=1, nbin=None)
pline = 0.5 * np.median(ad.data.values)
assert ad.poverty("headcount", pline=pline) == 0.27
def test_headcount_call_equal_method():
ad = datasets.make_uniform(seed=42, size=300, mu=1, nbin=None)
pline = 0.5 * np.median(ad.data.values)
call_result = ad.poverty("headcount", pline=pline)
method_result = ad.poverty.headcount(pline=pline)
assert call_result == method_result
def test_headcount_valid_pline():
ad = datasets.make_uniform(seed=42, size=300, mu=1, nbin=None)
with pytest.raises(ValueError):
ad.poverty("headcount", pline=-1)
with pytest.raises(ValueError):
ad.poverty("headcount", pline=0)
def test_headcount_extreme_values():
ad = datasets.make_uniform(seed=42, size=300, mu=100, nbin=None)
pline_min = np.min(ad.data.values) / 2
pline_max = np.max(ad.data.values) + 1
assert ad.poverty("headcount", pline=pline_min) == 0
assert ad.poverty("headcount", pline=pline_max) == 1
def test_headcount_symmetry():
ad = datasets.make_uniform(seed=42, size=300, mu=1, nbin=None)
pline = np.mean(ad.data.values)
y = ad.data["x"].tolist()
np.random.shuffle(y)
df2 = pd.DataFrame({"x": y})
ad2 = ApodeData(df2, income_column="x")
assert ad.poverty(method="headcount", pline=pline) == ad2.poverty(
method="headcount", pline=pline
)
def test_headcount_replication():
ad = datasets.make_uniform(seed=42, size=300, mu=1, nbin=None)
k = 2 # factor
pline = np.mean(ad.data.values)
y = k * ad.data["x"].tolist()
df2 = pd.DataFrame({"x": y})
ad2 = ApodeData(df2, income_column="x")
assert ad.poverty("headcount", pline=pline) == ad2.poverty(
"headcount", pline=pline
)
def test_headcount_homogeneity():
ad = datasets.make_uniform(seed=42, size=300, mu=1, nbin=None)
k = 2 # factor
pline = np.mean(ad.data.values)
y = ad.data["x"].tolist()
y = [yi * k for yi in y]
df2 = pd.DataFrame({"x": y})
ad2 = ApodeData(df2, income_column="x")
assert ad.poverty("headcount", pline=pline) == ad2.poverty(
"headcount", pline=pline * k
)
# =============================================================================
# TESTS GAP
# =============================================================================
def test_gap_method():
ad = datasets.make_uniform(seed=42, size=300, mu=1, nbin=None)
pline = 0.5 * np.median(ad.data.values)
assert ad.poverty.gap(pline=pline) == 0.13715275200855706
def test_gap_call():
ad = datasets.make_uniform(seed=42, size=300, mu=1, nbin=None)
pline = 0.5 * np.median(ad.data.values)
assert ad.poverty("gap", pline=pline) == 0.13715275200855706
def test_gap_call_equal_method():
ad = datasets.make_uniform(seed=42, size=300, mu=1, nbin=None)
pline = 0.5 * np.median(ad.data.values)
call_result = ad.poverty("gap", pline=pline)
method_result = ad.poverty.gap(pline=pline)
assert call_result == method_result
def test_gap_valid_pline():
ad = datasets.make_uniform(seed=42, size=300, mu=1, nbin=None)
with pytest.raises(ValueError):
ad.poverty("gap", pline=-1)
def test_gap_extreme_values():
ad = datasets.make_uniform(seed=42, size=300, mu=1, nbin=None)
pline_min = np.min(ad.data.values) / 2
pline_max = np.max(ad.data.values) + 1
assert ad.poverty("gap", pline=pline_min) == 0
assert ad.poverty("gap", pline=pline_max) <= 1
def test_gap_symmetry():
ad = datasets.make_uniform(seed=42, size=300, mu=1, nbin=None)
pline = np.mean(ad.data.values)
y = ad.data["x"].tolist()
np.random.shuffle(y)
df2 = pd.DataFrame({"x": y})
ad2 = ApodeData(df2, income_column="x")
assert ad.poverty(method="gap", pline=pline) == ad2.poverty(
method="gap", pline=pline
)
def test_gap_replication():
ad = datasets.make_uniform(seed=42, size=300, mu=1, nbin=None)
k = 2 # factor
pline = np.mean(ad.data.values)
y = k * ad.data["x"].tolist()
df2 = pd.DataFrame({"x": y})
ad2 = ApodeData(df2, income_column="x")
np.testing.assert_allclose(
ad.poverty("gap", pline=pline), ad2.poverty("gap", pline=pline)
)
def test_gap_homogeneity():
ad = datasets.make_uniform(seed=42, size=300, mu=1, nbin=None)
k = 2 # factor
pline = np.mean(ad.data.values)
y = ad.data["x"].tolist()
y = [yi * k for yi in y]
df2 = pd.DataFrame({"x": y})
ad2 = ApodeData(df2, income_column="x")
assert ad.poverty("gap", pline=pline) == ad2.poverty(
"gap", pline=pline * k
)
# =============================================================================
# TESTS SEVERITY
# =============================================================================
def test_severity_method():
ad = datasets.make_uniform(seed=42, size=300, mu=1, nbin=None)
pline = 0.5 * np.median(ad.data.values)
assert ad.poverty.severity(pline=pline) == 0.0925444945807559
def test_severity_call():
ad = datasets.make_uniform(seed=42, size=300, mu=1, nbin=None)
pline = 0.5 * np.median(ad.data.values)
assert ad.poverty("severity", pline=pline) == 0.0925444945807559
def test_severity_call_equal_method():
ad = datasets.make_uniform(seed=42, size=300, mu=1, nbin=None)
pline = 0.5 * np.median(ad.data.values)
call_result = ad.poverty("severity", pline=pline)
method_result = ad.poverty.severity(pline=pline)
assert call_result == method_result
def test_severity_valid_pline():
ad = datasets.make_uniform(seed=42, size=300, mu=1, nbin=None)
with pytest.raises(ValueError):
ad.poverty("severity", pline=-1)
def test_severity_extreme_values():
ad = datasets.make_uniform(seed=42, size=300, mu=1, nbin=None)
pline_min = np.min(ad.data.values) / 2
pline_max = np.max(ad.data.values) + 1
assert ad.poverty("severity", pline=pline_min) == 0
assert ad.poverty("severity", pline=pline_max) <= 1
def test_severity_symmetry():
ad = datasets.make_uniform(seed=42, size=300, mu=1, nbin=None)
pline = np.mean(ad.data.values)
y = ad.data["x"].tolist()
np.random.shuffle(y)
df2 = pd.DataFrame({"x": y})
ad2 = ApodeData(df2, income_column="x")
assert ad.poverty(method="severity", pline=pline) == ad2.poverty(
method="severity", pline=pline
)
def test_severity_replication():
ad = datasets.make_uniform(seed=42, size=300, mu=1, nbin=None)
k = 2 # factor
pline = np.mean(ad.data.values)
y = k * ad.data["x"].tolist()
df2 = pd.DataFrame({"x": y})
ad2 = ApodeData(df2, income_column="x")
np.testing.assert_allclose(
ad.poverty("severity", pline=pline),
ad2.poverty("severity", pline=pline),
)
def test_severity_homogeneity():
ad = datasets.make_uniform(seed=42, size=300, mu=1, nbin=None)
k = 2 # factor
pline = np.mean(ad.data.values)
y = ad.data["x"].tolist()
y = [yi * k for yi in y]
df2 = pd.DataFrame({"x": y})
ad2 = ApodeData(df2, income_column="x")
assert ad.poverty("severity", pline=pline) == ad2.poverty(
"severity", pline=pline * k
)
# =============================================================================
# TESTS FGT
# =============================================================================
def test_fgt_method():
ad = datasets.make_uniform(seed=42, size=300, mu=1, nbin=None)
pline = 0.5 * np.median(ad.data.values)
assert ad.poverty.fgt(pline=pline) == 0.27
def test_fgt_call():
ad = datasets.make_uniform(seed=42, size=300, mu=1, nbin=None)
pline = 0.5 * np.median(ad.data.values)
assert ad.poverty("fgt", pline=pline) == 0.27
def test_fgt_call_equal_method():
ad = datasets.make_uniform(seed=42, size=300, mu=1, nbin=None)
pline = 0.5 * np.median(ad.data.values)
call_result = ad.poverty("fgt", pline=pline)
method_result = ad.poverty.fgt(pline=pline)
assert call_result == method_result
def test_fgt_valid_pline():
ad = datasets.make_uniform(seed=42, size=300, mu=1, nbin=None)
with pytest.raises(ValueError):
ad.poverty("fgt", pline=-1)
with pytest.raises(ValueError):
ad.poverty("fgt", pline=0)
def test_fgt_valid_alpha():
ad = datasets.make_uniform(seed=42, size=300, mu=1, nbin=None)
with pytest.raises(ValueError):
ad.poverty.fgt(pline=1, alpha=-2)
def test_fgt_alpha_values():
ad = datasets.make_uniform(seed=42, size=300, mu=1, nbin=None)
pline = np.mean(ad.data.values)
assert ad.poverty.fgt(pline=pline, alpha=1) == 0.26003924372489007
assert ad.poverty.fgt(pline=pline, alpha=0) == 0.4766666666666667
assert ad.poverty.fgt(pline=pline, alpha=10) == 0.049479474144909996
def test_fgt_extreme_values():
ad = datasets.make_uniform(seed=42, size=300, mu=1, nbin=None)
pline_min = np.min(ad.data.values) / 2
pline_max = np.max(ad.data.values) + 1
assert ad.poverty("fgt", pline=pline_min) == 0
assert ad.poverty("fgt", pline=pline_max) <= 1
def test_fgt_symmetry():
ad = datasets.make_uniform(seed=42, size=300, mu=1, nbin=None)
pline = np.mean(ad.data.values)
y = ad.data["x"].tolist()
np.random.shuffle(y)
df2 = pd.DataFrame({"x": y})
ad2 = ApodeData(df2, income_column="x")
assert ad.poverty(method="fgt", pline=pline) == ad2.poverty(
method="fgt", pline=pline
)
def test_fgt_replication():
ad = datasets.make_uniform(seed=42, size=300, mu=1, nbin=None)
k = 2 # factor
pline = np.mean(ad.data.values)
y = k * ad.data["x"].tolist()
df2 = pd.DataFrame({"x": y})
ad2 = ApodeData(df2, income_column="x")
assert ad.poverty("fgt", pline=pline) == ad2.poverty("fgt", pline=pline)
def test_fgt_homogeneity():
ad = datasets.make_uniform(seed=42, size=300, mu=1, nbin=None)
k = 2 # factor
pline = np.mean(ad.data.values)
y = ad.data["x"].tolist()
y = [yi * k for yi in y]
df2 = pd.DataFrame({"x": y})
ad2 = ApodeData(df2, income_column="x")
assert ad.poverty("fgt", pline=pline) == ad2.poverty(
"fgt", pline=pline * k
)
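# -----------------------------------------------------------------------------
# Reference sketch (not necessarily apode's exact implementation): the tests in
# this block exercise the Foster-Greer-Thorbecke (FGT) family of indices,
#     FGT_alpha = (1/n) * sum over poor i of ((pline - y_i) / pline) ** alpha
# alpha=0 gives the headcount ratio, alpha=1 the poverty gap, and alpha=2 is
# commonly called the "severity" index tested in the previous block. A minimal
# NumPy version (np is already imported by this test module) would be:
def _fgt_reference_sketch(y, pline, alpha=0):
    y = np.asarray(y, dtype=float)
    gaps = (pline - y[y < pline]) / pline
    return np.sum(gaps ** alpha) / y.size
# -----------------------------------------------------------------------------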
# =============================================================================
# TESTS SEN
# =============================================================================
def test_sen_method():
ad = datasets.make_uniform(seed=42, size=300, mu=1, nbin=None)
pline = 0.5 * np.median(ad.data.values)
assert ad.poverty.sen(pline=pline) == 0.1826297337125855
def test_sen_call():
ad = datasets.make_uniform(seed=42, size=300, mu=1, nbin=None)
pline = 0.5 * np.median(ad.data.values)
assert ad.poverty("sen", pline=pline) == 0.1826297337125855
def test_sen_call_equal_method():
ad = datasets.make_uniform(seed=42, size=300, mu=1, nbin=None)
pline = 0.5 * np.median(ad.data.values)
call_result = ad.poverty("sen", pline=pline)
method_result = ad.poverty.sen(pline=pline)
assert call_result == method_result
def test_sen_valid_pline():
ad = datasets.make_uniform(seed=42, size=300, mu=1, nbin=None)
with pytest.raises(ValueError):
ad.poverty("sen", pline=-1)
with pytest.raises(ValueError):
ad.poverty("sen", pline=0)
def test_sen_extreme_values():
ad = datasets.make_uniform(seed=42, size=300, mu=1, nbin=None)
pline_min = np.min(ad.data.values) / 2
pline_max = np.max(ad.data.values) + 1
assert ad.poverty("sen", pline=pline_min) == 0
assert ad.poverty("sen", pline=pline_max) <= 1
def test_sen_symmetry():
ad = datasets.make_uniform(seed=42, size=300, mu=1, nbin=None)
pline = np.mean(ad.data.values)
y = ad.data["x"].tolist()
np.random.shuffle(y)
df2 = pd.DataFrame({"x": y})
ad2 = ApodeData(df2, income_column="x")
assert ad.poverty(method="sen", pline=pline) == ad2.poverty(
method="sen", pline=pline
)
def test_sen_homogeneity():
ad = datasets.make_uniform(seed=42, size=300, mu=1, nbin=None)
k = 2 # factor
pline = np.mean(ad.data.values)
y = ad.data["x"].tolist()
y = [yi * k for yi in y]
df2 = pd.DataFrame({"x": y})
ad2 = ApodeData(df2, income_column="x")
assert ad.poverty("sen", pline=pline) == ad2.poverty(
"sen", pline=pline * k
)
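# Reference note (one common textbook form; apode's implementation may differ in
# small-sample details): the Sen index combines the headcount ratio H, the mean
# normalized income gap among the poor I, and the Gini coefficient of incomes
# among the poor G_p as S = H * (I + (1 - I) * G_p), which is why it inherits
# the symmetry and homogeneity properties checked above.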
# =============================================================================
# TESTS SST
# =============================================================================
def test_sst_method():
ad = datasets.make_uniform(seed=42, size=300, mu=1, nbin=None)
pline = 0.5 * np.median(ad.data.values)
assert ad.poverty.sst(pline=pline) == 0.24950968072455512
def test_sst_call():
ad = datasets.make_uniform(seed=42, size=300, mu=1, nbin=None)
pline = 0.5 * np.median(ad.data.values)
assert ad.poverty("sst", pline=pline) == 0.24950968072455512
def test_sst_call_equal_method():
ad = datasets.make_uniform(seed=42, size=300, mu=1, nbin=None)
pline = 0.5 * np.median(ad.data.values)
call_result = ad.poverty("sst", pline=pline)
method_result = ad.poverty.sst(pline=pline)
assert call_result == method_result
def test_sst_valid_pline():
ad = datasets.make_uniform(seed=42, size=300, mu=1, nbin=None)
with pytest.raises(ValueError):
ad.poverty("sst", pline=-1)
with pytest.raises(ValueError):
ad.poverty("sst", pline=0)
# @pytest.mark.xfail
def test_sst_extreme_values():
ad = datasets.make_uniform(seed=42, size=300, mu=1, nbin=None)
pline_min = np.min(ad.data.values) / 2
pline_max = np.max(ad.data.values) + 1
assert ad.poverty("sst", pline=pline_min) == 0
assert ad.poverty("sst", pline=pline_max) <= 1 # CHECK, fails
def test_sst_symmetry():
ad = datasets.make_uniform(seed=42, size=300, mu=1, nbin=None)
pline = np.mean(ad.data.values)
y = ad.data["x"].tolist()
np.random.shuffle(y)
df2 = pd.DataFrame({"x": y})
ad2 = ApodeData(df2, income_column="x")
assert ad.poverty(method="sst", pline=pline) == ad2.poverty(
method="sst", pline=pline
)
def test_sst_homogeneity():
ad = datasets.make_uniform(seed=42, size=300, mu=1, nbin=None)
k = 2 # factor
pline = np.mean(ad.data.values)
y = ad.data["x"].tolist()
y = [yi * k for yi in y]
df2 = pd.DataFrame({"x": y})
ad2 = ApodeData(df2, income_column="x")
assert ad.poverty("sst", pline=pline) == ad2.poverty(
"sst", pline=pline * k
)
# =============================================================================
# TESTS WATTS
# =============================================================================
def test_watts_method():
ad = datasets.make_uniform(seed=42, size=300, mu=1, nbin=None)
pline = 0.5 * np.median(ad.data.values)
assert ad.poverty.watts(pline=pline) == 0.2724322042654472
def test_watts_call():
ad = datasets.make_uniform(seed=42, size=300, mu=1, nbin=None)
pline = 0.5 * np.median(ad.data.values)
assert ad.poverty("watts", pline=pline) == 0.2724322042654472
def test_watts_call_equal_method():
ad = datasets.make_uniform(seed=42, size=300, mu=1, nbin=None)
pline = 0.5 * np.median(ad.data.values)
call_result = ad.poverty("watts", pline=pline)
method_result = ad.poverty.watts(pline=pline)
assert call_result == method_result
def test_watts_valid_pline():
ad = datasets.make_uniform(seed=42, size=300, mu=1, nbin=None)
with pytest.raises(ValueError):
ad.poverty("watts", pline=-1)
with pytest.raises(ValueError):
ad.poverty("watts", pline=0)
def test_watts_extreme_values():
ad = datasets.make_uniform(seed=42, size=300, mu=1, nbin=None)
pline_min = np.min(ad.data.values) / 2
assert ad.poverty("watts", pline=pline_min) == 0
def test_watts_symmetry():
ad = datasets.make_uniform(seed=42, size=300, mu=1, nbin=None)
pline = np.mean(ad.data.values)
y = ad.data["x"].tolist()
np.random.shuffle(y)
df2 = pd.DataFrame({"x": y})
ad2 = ApodeData(df2, income_column="x")
assert ad.poverty(method="watts", pline=pline) == ad2.poverty(
method="watts", pline=pline
)
def test_watts_replication():
ad = datasets.make_uniform(seed=42, size=300, mu=1, nbin=None)
k = 2 # factor
pline = np.mean(ad.data.values)
y = k * ad.data["x"].tolist()
df2 = pd.DataFrame({"x": y})
ad2 = ApodeData(df2, income_column="x")
np.testing.assert_allclose(
ad.poverty("watts", pline=pline), ad2.poverty("watts", pline=pline)
)
def test_watts_homogeneity():
ad = datasets.make_uniform(seed=42, size=300, mu=1, nbin=None)
k = 2 # factor
pline = np.mean(ad.data.values)
y = ad.data["x"].tolist()
y = [yi * k for yi in y]
df2 = pd.DataFrame({"x": y})
ad2 = ApodeData(df2, income_column="x")
assert ad.poverty("watts", pline=pline) == ad2.poverty(
"watts", pline=pline * k
)
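# Reference sketch (standard definition; edge-case handling may differ from
# apode's implementation): the Watts index averages log poverty gaps,
#     W = (1/n) * sum over poor i of ln(pline / y_i)
# A minimal NumPy version, assuming strictly positive incomes:
def _watts_reference_sketch(y, pline):
    y = np.asarray(y, dtype=float)
    poor = (y > 0) & (y < pline)
    return np.sum(np.log(pline / y[poor])) / y.size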
# =============================================================================
# TESTS CUH
# =============================================================================
def test_cuh_method():
ad = datasets.make_uniform(seed=42, size=300, mu=1, nbin=None)
pline = 0.5 * np.median(ad.data.values)
assert ad.poverty.cuh(pline=pline) == 0.18341653809400216
def test_cuh_call():
ad = datasets.make_uniform(seed=42, size=300, mu=1, nbin=None)
pline = 0.5 * np.median(ad.data.values)
assert ad.poverty.cuh(pline=pline) == 0.18341653809400216
def test_cuh_call_equal_method():
ad = datasets.make_uniform(seed=42, size=300, mu=1, nbin=None)
pline = 0.5 * np.median(ad.data.values)
call_result = ad.poverty("cuh", pline=pline)
method_result = ad.poverty.cuh(pline=pline)
assert call_result == method_result
def test_cuh_valid_pline():
ad = datasets.make_uniform(seed=42, size=300, mu=1, nbin=None)
with pytest.raises(ValueError):
ad.poverty("cuh", pline=-1)
with pytest.raises(ValueError):
ad.poverty("cuh", pline=0)
def test_cuh_valid_alpha():
ad = datasets.make_uniform(seed=42, size=300, mu=1, nbin=None)
pline = np.mean(ad.data.values)
with pytest.raises(ValueError):
ad.poverty(method="cuh", pline=pline, alpha=-2)
with pytest.raises(ValueError):
ad.poverty(method="cuh", pline=pline, alpha=2)
def test_cuh_alpha_values():
ad = datasets.make_uniform(seed=42, size=300, mu=1, nbin=None)
pline = np.mean(ad.data.values)
assert (
ad.poverty(method="cuh", pline=pline, alpha=0.4) == 0.3739168025918481
)
assert (
ad.poverty(method="cuh", pline=pline, alpha=0) == 0.14377616581364483
)
def test_cuh_extreme_values():
ad = datasets.make_uniform(seed=42, size=300, mu=100, nbin=None)
pline_min = np.min(ad.data.values) / 2
pline_max = np.max(ad.data.values) + 1
assert ad.poverty("cuh", pline=pline_min) == 0 # CHECK, Fails
assert ad.poverty("cuh", pline=pline_max) <= 1
def test_cuh_symmetry():
ad = datasets.make_uniform(seed=42, size=300, mu=1, nbin=None)
pline = np.mean(ad.data.values)
y = ad.data["x"].tolist()
np.random.shuffle(y)
df2 = pd.DataFrame({"x": y})
ad2 = ApodeData(df2, income_column="x")
assert ad.poverty(method="cuh", pline=pline) == ad2.poverty(
method="cuh", pline=pline
)
def test_cuh_homogeneity():
ad = datasets.make_uniform(seed=42, size=300, mu=1, nbin=None)
k = 2 # factor
pline = np.mean(ad.data.values)
y = ad.data["x"].tolist()
y = [yi * k for yi in y]
df2 = pd.DataFrame({"x": y})
ad2 = ApodeData(df2, income_column="x")
assert ad.poverty("cuh", pline=pline) == ad2.poverty(
"cuh", pline=pline * k
)
# =============================================================================
# TESTS TAKAYAMA
# =============================================================================
def test_takayama_method():
ad = datasets.make_uniform(seed=42, size=300, mu=1, nbin=None)
pline = 0.5 * np.median(ad.data.values)
# assert ad.poverty.takayama(pline=pline) == 0.13021647687646376
np.testing.assert_allclose(
ad.poverty.takayama(pline=pline),
0.13021647687646376,
)
def test_takayama_call():
ad = datasets.make_uniform(seed=42, size=300, mu=1, nbin=None)
pline = 0.5 * np.median(ad.data.values)
# assert ad.poverty("takayama", pline=pline) == 0.13021647687646376
np.testing.assert_allclose(
ad.poverty("takayama", pline=pline),
0.13021647687646376,
)
def test_takayama_call_equal_method():
ad = datasets.make_uniform(seed=42, size=300, mu=1, nbin=None)
pline = 0.5 * np.median(ad.data.values)
call_result = ad.poverty("takayama", pline=pline)
method_result = ad.poverty.takayama(pline=pline)
assert call_result == method_result
def test_takayama_valid_pline():
ad = datasets.make_uniform(seed=42, size=300, mu=1, nbin=None)
with pytest.raises(ValueError):
ad.poverty("takayama", pline=-1)
with pytest.raises(ValueError):
ad.poverty("takayama", pline=0)
def test_takayama_extreme_values():
ad = datasets.make_uniform(seed=42, size=300, mu=1, nbin=None)
pline_min = np.min(ad.data.values) / 2
pline_max = np.max(ad.data.values) + 1
assert ad.poverty("takayama", pline=pline_min) == 0
assert ad.poverty("takayama", pline=pline_max) <= 1 # CHE¶CK, fails
def test_takayama_symmetry():
ad = datasets.make_uniform(seed=42, size=300, mu=1, nbin=None)
pline = np.mean(ad.data.values)
y = ad.data["x"].tolist()
np.random.shuffle(y)
df2 = pd.DataFrame({"x": y})
ad2 = ApodeData(df2, income_column="x")
assert ad.poverty(method="takayama", pline=pline) == ad2.poverty(
method="takayama", pline=pline
)
def test_takayama_replication():
ad = datasets.make_uniform(seed=42, size=300, mu=1, nbin=None)
k = 2 # factor
pline = np.mean(ad.data.values)
y = k * ad.data["x"].tolist()
df2 = pd.DataFrame({"x": y})
ad2 = ApodeData(df2, income_column="x")
np.testing.assert_allclose(
ad.poverty("takayama", pline=pline),
ad2.poverty("takayama", pline=pline),
)
def test_takayama_homogeneity():
ad = datasets.make_uniform(seed=42, size=300, mu=1, nbin=None)
k = 2 # factor
pline = np.mean(ad.data.values)
y = ad.data["x"].tolist()
y = [yi * k for yi in y]
df2 = pd.DataFrame({"x": y})
ad2 = ApodeData(df2, income_column="x")
assert ad.poverty("takayama", pline=pline) == ad2.poverty(
"takayama", pline=pline * k
)
def test_takayama_avoid_zero_div_error():
# u = 0
df = pd.DataFrame({"x": np.zeros(10)})
ad = ApodeData(df, income_column="x")
pline = 0.2
assert ad.poverty.takayama(pline=pline) == 0
# n = 0
# df = pd.DataFrame({"x": []})
# ad = ApodeData(df, income_column="x")
# assert ad.poverty.takayama(pline=pline) == 0
# =============================================================================
# TESTS KAKWANI
# =============================================================================
def test_kakwani_method():
ad = datasets.make_uniform(seed=42, size=300, mu=1, nbin=None)
pline = 0.5 * np.median(ad.data.values)
# assert ad.poverty.kakwani(pline=pline) == 0.2027705302170293
np.testing.assert_allclose(
ad.poverty.kakwani(pline=pline), 0.2027705302170293
)
def test_kakwani_call():
ad = datasets.make_uniform(seed=42, size=300, mu=1, nbin=None)
pline = 0.5 * np.median(ad.data.values)
# assert ad.poverty("kakwani", pline=pline) == 0.2027705302170293
np.testing.assert_allclose(
ad.poverty("kakwani", pline=pline), 0.2027705302170293
)
def test_kakwani_call_equal_method():
ad = datasets.make_uniform(seed=42, size=300, mu=1, nbin=None)
pline = 0.5 * np.median(ad.data.values)
call_result = ad.poverty("kakwani", pline=pline)
method_result = ad.poverty.kakwani(pline=pline)
assert call_result == method_result
def test_kakwani_valid_pline():
ad = datasets.make_uniform(seed=42, size=300, mu=1, nbin=None)
with pytest.raises(ValueError):
ad.poverty("kakwani", pline=-1)
with pytest.raises(ValueError):
ad.poverty("kakwani", pline=0)
def test_kakwani_extreme_values():
ad = datasets.make_uniform(seed=42, size=300, mu=1, nbin=None)
pline_min = np.min(ad.data.values) / 2
pline_max = np.max(ad.data.values) + 1
assert ad.poverty("kakwani", pline=pline_min) == 0
assert ad.poverty("kakwani", pline=pline_max) <= 1
def test_kakwani_symmetry():
ad = datasets.make_uniform(seed=42, size=300, mu=1, nbin=None)
pline = np.mean(ad.data.values)
y = ad.data["x"].tolist()
np.random.shuffle(y)
df2 = pd.DataFrame({"x": y})
ad2 = ApodeData(df2, income_column="x")
assert ad.poverty(method="kakwani", pline=pline) == ad2.poverty(
method="kakwani", pline=pline
)
def test_kakwani_homogeneity():
ad = datasets.make_uniform(seed=42, size=300, mu=1, nbin=None)
k = 2 # factor
pline = np.mean(ad.data.values)
y = ad.data["x"].tolist()
y = [yi * k for yi in y]
df2 = pd.DataFrame({"x": y})
ad2 = ApodeData(df2, income_column="x")
assert ad.poverty("kakwani", pline=pline) == ad2.poverty(
"kakwani", pline=pline * k
)
# =============================================================================
# TESTS THON
# =============================================================================
def test_thon_method():
ad = datasets.make_uniform(seed=42, size=300, mu=1, nbin=None)
pline = 0.5 * np.median(ad.data.values)
assert ad.poverty.thon(pline=pline) == 0.24913640189161163
def test_thon_call():
ad = datasets.make_uniform(seed=42, size=300, mu=1, nbin=None)
pline = 0.5 * np.median(ad.data.values)
assert ad.poverty("thon", pline=pline) == 0.24913640189161163
def test_thon_call_equal_method():
ad = datasets.make_uniform(seed=42, size=300, mu=1, nbin=None)
pline = 0.5 * np.median(ad.data.values)
call_result = ad.poverty("thon", pline=pline)
method_result = ad.poverty.thon(pline=pline)
assert call_result == method_result
def test_thon_valid_pline():
ad = datasets.make_uniform(seed=42, size=300, mu=1, nbin=None)
with pytest.raises(ValueError):
ad.poverty("thon", pline=-1)
with pytest.raises(ValueError):
ad.poverty("thon", pline=0)
def test_thon_extreme_values():
ad = datasets.make_uniform(seed=42, size=300, mu=1, nbin=None)
pline_min = np.min(ad.data.values) / 2
pline_max = np.max(ad.data.values) + 1
assert ad.poverty("thon", pline=pline_min) == 0
assert ad.poverty("thon", pline=pline_max) <= 1
def test_thon_symmetry():
ad = datasets.make_uniform(seed=42, size=300, mu=1, nbin=None)
pline = np.mean(ad.data.values)
y = ad.data["x"].tolist()
np.random.shuffle(y)
df2 = pd.DataFrame({"x": y})
ad2 = ApodeData(df2, income_column="x")
assert ad.poverty(method="thon", pline=pline) == ad2.poverty(
method="thon", pline=pline
)
def test_thon_homogeneity():
ad = datasets.make_uniform(seed=42, size=300, mu=1, nbin=None)
k = 2 # factor
pline = np.mean(ad.data.values)
y = ad.data["x"].tolist()
y = [yi * k for yi in y]
df2 = pd.DataFrame({"x": y})
ad2 = ApodeData(df2, income_column="x")
assert ad.poverty("thon", pline=pline) == ad2.poverty(
"thon", pline=pline * k
)
# =============================================================================
# TESTS BD
# =============================================================================
def test_bd_method():
ad = datasets.make_uniform(seed=42, size=300, mu=1, nbin=None)
pline = 0.5 * np.median(ad.data.values)
assert ad.poverty.bd(pline=pline) == 0.2170854187584956
def test_bd_call():
ad = datasets.make_uniform(seed=42, size=300, mu=1, nbin=None)
pline = 0.5 * np.median(ad.data.values)
assert ad.poverty("bd", pline=pline) == 0.2170854187584956
assert ad.poverty("bd", pline=30) == 0.9950410832744983
def test_bd_call_equal_method():
ad = datasets.make_uniform(seed=42, size=300, mu=1, nbin=None)
pline = 0.5 * np.median(ad.data.values)
call_result = ad.poverty("bd", pline=pline)
method_result = ad.poverty.bd(pline=pline)
assert call_result == method_result
def test_bd_valid_pline():
ad = datasets.make_uniform(seed=42, size=300, mu=1, nbin=None)
with pytest.raises(ValueError):
ad.poverty("bd", pline=-1)
with pytest.raises(ValueError):
ad.poverty("bd", pline=0)
def test_bd_extreme_values():
ad = datasets.make_uniform(seed=42, size=300, mu=1, nbin=None)
pline_min = np.min(ad.data.values)
"""General Kalman filter
Description:
------------
Kalman filter and the modified Bryson-Frazier smoother is covered in the book "Factorization Methods for Discrete
Sequential Estimation" by Bierman :cite:`bierman2006`. However, there are some typos but a corrected version of the
algorithm is listed in :cite:`gibbs2011`.
"""
# External library imports
import numpy as np
import h5py
# Standard library import
import os
# Midgard imports
from midgard.math.unit import Unit
# Where imports
from where.lib import log
from where.lib import config
class KalmanFilter(object):
"""A general Kalman filter
See for instance https://en.wikipedia.org/wiki/Kalman_filter#Details for information about a general Kalman
filter. We use the Modified Bryson-Frazier smoother, which is described at
https://en.wikipedia.org/wiki/Kalman_filter#Modified_Bryson.E2.80.93Frazier_smoother
Notation:
h: Partial derivatives # num_obs x n x 1
x: Predicted state estimate (x-tilde) # num_obs x n x 1
x_hat: Updated state estimate (x-hat) # num_obs x n x 1
sigma: Residual covariance # num_obs
innovation: Measurement residual # num_obs
z: Observed residual # num_obs
r: Observation noise covariance # num_obs
Q: Process noise covariance # dict()
k: Kalman gain # num_obs x n x 1
p: Predicted estimate covariance (p-tilde) # n x n (not stored)
p_hat: Updated estimate covariance (p-hat) # num_obs x n x n
phi: State transition # n x n (not stored)
x_smooth: Smoothed state estimates # num_obs x n x 1
lam: (lambda) # num_obs x n x 1
"""
def __init__(self, h, z=None, apriori_stdev=None, phi=None, r=None, Q=None, param_names=None):
"""Initialize the Kalman filter
Args:
h (Numpy array): Partial derivatives (num_obs x n x 1)
z (Numpy array): Observations (num_obs)
apriori_stdev (Numpy array): Apriori standard deviation (n)
phi (Numpy array): State transition (num_obs x n x n)
r (Numpy array): Observation noise covariance (num_obs)
Q (Numpy array): Process noise covariance (num_obs x n x n)
"""
self.h = h
self.num_obs, self.n, _ = self.h.shape
self.apriori_stdev = np.ones(self.n) if apriori_stdev is None else apriori_stdev
self.z = np.zeros((self.num_obs)) if z is None else z
self.phi = np.eye(self.n).repeat(self.num_obs).reshape(self.n, self.n, -1).T if phi is None else phi
self.r = np.ones((self.num_obs)) if r is None else r
self.Q = dict() if Q is None else Q
self.x_hat = np.zeros((self.num_obs, self.n, 1))
self.x_hat_ferr = np.zeros((self.num_obs, self.n))
self.x_smooth = np.zeros((self.num_obs, self.n, 1))
self.param_names = param_names if param_names else []
self.p_hat_file_path = config.files.path("output_covariance_matrix")
self.p_hat_file = h5py.File(self.p_hat_file_path, "w")
self.p_hat_file.attrs["labels"] = ", ".join(self.param_names)
self.p_hat_file.close()
def filter(self):
"""Run the Kalman filter forward and backward
"""
# Initialize
x_tilde = np.zeros((self.n, 1))
p_tilde = np.diag(self.apriori_stdev ** 2)
sigma = np.zeros(self.num_obs)
innovation = np.zeros(self.num_obs)
k = np.zeros((self.num_obs, self.n, 1))
lam = np.zeros((self.n, 1))
# Makes calculations easier to read (and gives a slight speed-up)
h = self.h
z = self.z
phi = self.phi
r = self.r
Q = self.Q
x_hat = self.x_hat
x_smooth = self.x_smooth
I = np.eye(self.n)
# Run filter forward over all observations
for epoch in range(self.num_obs):
innovation[epoch] = z[epoch] - h[epoch].T @ x_tilde
sigma[epoch] = (h[epoch].T @ p_tilde @ h[epoch]) + r[epoch]
k[epoch] = p_tilde @ h[epoch] / sigma[epoch]
x_hat[epoch] = x_tilde + k[epoch] * innovation[epoch]
p_hat = (I - k[epoch] @ h[epoch].T) @ p_tilde
x_tilde = phi[epoch] @ x_hat[epoch]
p_tilde = phi[epoch] @ p_hat @ phi[epoch].T
for (idx1, idx2), noise in Q.get(epoch, {}).items():
p_tilde[idx1, idx2] += noise
self._set_p_hat(epoch, p_hat)
self.x_hat_ferr[epoch, :] = np.sqrt(np.diagonal(p_hat))
# Run smoother backwards over all observations
for epoch in range(self.num_obs - 1, -1, -1):
# TODO smooth covariance matrix
p_hat = self._get_p_hat(epoch)
x_smooth[epoch] = x_hat[epoch] + p_hat.T @ lam
lam = (
phi[epoch - 1].T @ h[epoch] * innovation[epoch] / sigma[epoch]
+ phi[epoch - 1].T @ (I - k[epoch] @ h[epoch].T).T @ lam
)
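# Summary of the recursions implemented above, in the notation of the class
# docstring (documentation only, no additional functionality):
# innovation_k = z_k - h_k' x_tilde (predicted residual)
# sigma_k = h_k' P_tilde h_k + r_k (residual covariance)
# K_k = P_tilde h_k / sigma_k (Kalman gain)
# x_hat_k = x_tilde + K_k * innovation_k (measurement update)
# P_hat_k = (I - K_k h_k') P_tilde (covariance update)
# x_tilde <- Phi_k x_hat_k, P_tilde <- Phi_k P_hat_k Phi_k' + Q_k (prediction)
# The backward pass is the modified Bryson-Frazier smoother,
# x_smooth_k = x_hat_k + P_hat_k' lambda_k, with lambda propagated backwards.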
def update_dataset(self, dset, param_names, normal_idx, num_unknowns):
"""Update the given dataset with results from the filtering
Args:
dset (Dataset): The dataset.
param_names (List): Strings with names of parameters. Used to form field names.
normal_idx (Slice): Slice denoting which parameters should be used for the normal equations.
num_unknowns (Int): Number of unknowns.
"""
# Update dataset with state and estimation fields and calculate new residuals
self._add_fields(dset, param_names)
dset.residual[:] = dset.est - (dset.obs - dset.calc)
num_unknowns += dset.meta.get("num_clock_coeff", 0)
# Calculate normal equations, and add statistics about estimation to dataset
N, b = self._normal_equations(normal_idx, dset.num_obs - 1)
g = self.x_hat[dset.num_obs - 1, normal_idx, :]
deg_freedom = dset.num_obs - num_unknowns
v = dset.residual[:, None]
P = np.diag(1 / self.r[: dset.num_obs])
sq_sum_residuals = np.asscalar(v.T @ P @ v)
sq_sum_omc_terms = np.asscalar(2 * b.T @ g - g.T @ N @ g)
import matplotlib.pyplot as plt
from helpers import d2df
import pandas as pd
import numpy as np
from matplotlib.patches import Rectangle
def plotMain(type, dict_structure, processed, *args):
# This function plots the contents of the analysed files.
# The following values of "type" are accepted:
# - "off" [default]: does not print anything
# - "prompt": prompts the user for what s/he wants to print (processed data)
# - "prompt_raw": prompts the user for raw (non-processed) variables to plot
# - "csv": reads info on what is to be read from a csv file
if type == "prompt":
plot_info = plotPrompt(dict_structure)
data_type = "processed"
elif type == "prompt_raw":
hd = args[0]
plot_info = plotPromptRaw(dict_structure,hd)
data_type = "raw"
elif type == "csv":
filename = args[0]
plot_info = plotFromCSV(filename)
data_type = "processed"
else:
print("Either the -type- input was not correct, or you don't wish to print anything. No printout")
return
plottingFunction(plot_info, processed, data_type)
def plotPrompt(dict_structure):
# This function prompts the user for the required inputs to plot.
# Two alternatives are available:
# - The default is "automatic". It is the one that is set by default. The input needs to be provided using a
# specific format
# - The alternative is "simple", that is activated either typing "S" or "s" or "Simple" or "simple". This helps the
# user plotting by specifically asking for the required input
#
# This function outputs an object structured in this way:
# - The main object is a LIST.
# - Every list element is a DICTIONARY, representing a different plot. There are two fields:
# --- A "plot_mode" field, which is just a STRING
# --- A "variables" field, which is a LIST of DICTIONARIES. Each element is a different line in the plot
# - Each line is identified by the following fields:
# --- System
# --- Component
# --- Flow
# --- Property
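# As an illustration (hypothetical values), a single time-series figure with two
# lines would be represented as:
# [{"plot_mode": "timeSeries",
# "variables": [{"system": "ME1", "unit": "TC", "flow": "EG_in", "property": "T"},
# {"system": "ME1", "unit": "TC", "flow": "EG_out", "property": "T"}]}]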
output = []
accepted_plot_mode = ["sankey", "hist", "timeSeries"]
input1 = input("Provide input for advanced plotting. If you wish to switch to simple mode, type ""s"" ")
if input1 == "s" or input1 == "S" or input1 == "Simple" or input1 == "simple":
output.append({})
output[0]["variables"] = []
while True:
plot_mode = input("Switched to simple mode. Please enter the plot type. Available choices are: ""sankey"", ""hist"", ""timeSeries"" ")
if plot_mode in accepted_plot_mode:
output[0]["plot_mode"] = plot_mode
break
else:
print("The input you gave (" + plot_mode + ") for the plot type is not correct! Maybe there was a typo? Try again!")
while True:
# We start here a loop to allow for printing more things in the same figure
line_info = {}
while True:
plot_system = input("Now input what system you like to plot for. Available are choices are ""ME#"" (#=1,2,3,4), ""AE#"" (#=1,2,3,4), and ""Other"" ")
if plot_system in dict_structure["systems"]:
line_info["system"] = plot_system
break
else:
print("The input you gave (" + plot_system + ") for the system name is not correct! Maybe there was a typo? Try again!")
while True:
plot_unit = input("Now input what component of the chosen system you like to plot for:")
if plot_unit in dict_structure["systems"][line_info["system"]]["units"]:
line_info["unit"] = plot_unit
break
else:
print("The input you gave (" + plot_unit + ") for the component name is not correct! Maybe there was a typo? Try again!")
while True:
plot_flow = input("Now input what flow of the chosen component you like to plot for:")
if plot_flow in dict_structure["systems"][line_info["system"]]["units"][line_info["unit"]]["flows"]:
line_info["flow"] = plot_flow
break
else:
print("The input you gave (" + plot_flow + ") for the flow name is not correct! Maybe there was a typo? Try again!")
while True:
plot_property = input("Finally, input what property of the chosen flow you like to plot for:")
if plot_property in dict_structure["systems"][line_info["system"]]["units"][line_info["unit"]]["flows"][line_info["flow"]]["properties"]:
line_info["property"] = plot_property
break
else:
print("The input you gave (" + plot_property + ") for the property name is not correct! Maybe there was a typo? Try again!")
output[0]["variables"].append(line_info)
plot_cycle = input("Thanks! We have everything we need! Do you wish to add anything more to the same plot? if so, type ""y"". Otherwise, type any other character ")
if plot_cycle == "y":
print("Cool! Let's add a line to the plot, shall we?")
else:
print("OK, so no more inputs. Here comes the plot!")
break
else:
# The automatic reading is based on the following structure
# - First you give the plot type, followed by "->"
# - Then you give the inputs in the order as above: system, component, flow, property. Separated by a comma ","
# - To plot more than one variable in the same plot, a new entry is added with ";" as separator
# - To add a new plot, the separator to be used is "%"
# Example: "hist->ME1,TC,EG_in,T;ME1,TC,EG_out,T%timeSeries->ME1,TC,EG_in,T;ME1,TC,EG_out,T"
split1 = input1.split("%")
for plot in split1:
plot_info = {}
plot_info["variables"] = []
split2 = plot.split("->")
plot_mode = split2[0]
if plot_mode in accepted_plot_mode:
plot_info["plot_mode"] = plot_mode
else:
print("The input you gave (" + plot_mode + ") for the plot type is not correct! Maybe there was a typo? Try again!")
break
split3 = split2[1].split(";")
for line in split3:
line_info = {}
split4 = line.split(",")
plot_system = split4[0]
plot_unit = split4[1]
plot_flow = split4[2]
plot_property = split4[3]
if plot_system in dict_structure["systems"]:
line_info["system"] = plot_system
else:
print("The input you gave (" + plot_system + ") for the system name is not correct! Maybe there was a typo? Try again!")
break
if plot_unit in dict_structure["systems"][line_info["system"]]["units"]:
line_info["unit"] = plot_unit
else:
print("The input you gave (" + plot_unit + ") for the component name is not correct! Maybe there was a typo? Try again!")
break
if plot_flow in dict_structure["systems"][line_info["system"]]["units"][line_info["unit"]]["flows"]:
line_info["flow"] = plot_flow
else:
print("The input you gave (" + plot_flow + ") for the flow name is not correct! Maybe there was a typo? Try again!")
break
if plot_property in dict_structure["systems"][line_info["system"]]["units"][line_info["unit"]]["flows"][line_info["flow"]]["properties"]:
line_info["property"] = plot_property
else:
print("The input you gave (" + plot_property + ") for the property name is not correct! Maybe there was a typo? Try again!")
break
plot_info["variables"].append(line_info)
output.append(plot_info)
return output
def plotPromptRaw(structure, hd):
output = []
accepted_plot_mode = ["sankey", "hist", "timeSeries"]
output.append({})
output[0]["variables"] = []
while True:
plot_mode = input(
"Please enter the plot type. Available choices are: ""sankey"", ""hist"", ""timeSeries"" ")
if plot_mode in accepted_plot_mode:
output[0]["plot_mode"] = plot_mode
break
else:
print(
"The input you gave (" + plot_mode + ") for the plot type is not correct! Maybe there was a typo? Try again!")
while True:
# We start here a loop to allow for printing more things in the same figure
while True:
plot_var = input("Now input what variable you like to plot for. ")
if hd[plot_var] in structure.keys():
output[0]["variables"].append(hd[plot_var])
break
else:
print("The input you gave (" + plot_var + ") for the plot type is not correct! Maybe there was a typo? Try again!")
plot_cycle = input(
"Thanks! We have everything we need! Do you wish to add anything more to the same plot? if so, type ""y"". Otherwise, type any other character ")
if plot_cycle == "y":
print("Cool! Let's add a line to the plot, shall we?")
else:
print("OK, so no more inputs. Here comes the plot!")
break
return output
def plotFromCSV(filename):
# Plots a list of things defined in a CSV files
print("Still to be defined")
def plottingFunction(plot_info, processed, data_type):
# Here we go
if data_type == "processed":
for figure in plot_info:
if figure["plot_mode"] == "sankey":
print("Plot Sankey diagram...as if this was easy")
else:
fig = plt.figure()
for plot in figure["variables"]:
x = processed[d2df(plot["system"], plot["unit"], plot["flow"], plot["property"])]
if figure["plot_mode"] == "hist":
num_bins = 50
# the histogram of the data
n, bins, patches = plt.hist(x, num_bins, normed=1, alpha=0.5)
# add a 'best fit' line
plt.xlabel(plot["property"]+" of "+plot["flow"]+" of "+plot["unit"]+" in "+plot["system"])
plt.ylabel('Probability')
if figure["plot_mode"] == "timeSeries":
plt.plot(x)
plt.ylabel("Time [YYY:MM]")
plt.ylabel(plot["property"] + " of " + plot["flow"] + " of " + plot["unit"] + " in " + plot["system"])
plt.show()
elif data_type == "raw":
for figure in plot_info:
if figure["plot_mode"] == "sankey":
print("Plot Sankey diagram...as if this was easy")
else:
fig = plt.figure()
for plot in figure["variables"]:
x = processed[plot]
if figure["plot_mode"] == "hist":
num_bins = 50
# the histogram of the data
n, bins, patches = plt.hist(x, num_bins, normed=1, alpha=0.5)
# add a 'best fit' line
plt.xlabel("Variable of interest")
plt.ylabel('Probability')
if figure["plot_mode"] == "timeSeries":
x.plot()
else:
print("Data type is wrong. It should be either -raw- or -processed-")
def predefinedPlots(processed, dataset_raw, CONSTANTS, dict_structure, filenames):
for filename in filenames:
fig, ax = plt.subplots()
### TIME SERIES ###
if filename == "TimeSeries:Heat_vs_time":
# Contribution from the HRSGs
temp = (processed["ME2:HRSG:Steam_in:mdot"] * CONSTANTS["Steam"]["DH_STEAM"] + processed["ME3:HRSG:Steam_in:mdot"] * CONSTANTS["Steam"]["DH_STEAM"] + processed["AE1:HRSG:Steam_in:mdot"] * CONSTANTS["Steam"]["DH_STEAM"] + processed["AE2:HRSG:Steam_in:mdot"] * CONSTANTS["Steam"]["DH_STEAM"] + processed["AE3:HRSG:Steam_in:mdot"] * CONSTANTS["Steam"]["DH_STEAM"] + processed["AE4:HRSG:Steam_in:mdot"] * CONSTANTS["Steam"]["DH_STEAM"])
hrsg = temp.resample("D").sum() * 60 * 15
# Contribution from the HTHR
temp = processed["HTHR:HTHR13:HRWater_in:mdot"] * CONSTANTS["General"]["CP_WATER"] * (processed["HTHR:HTHR13:HRWater_out:T"] - processed["HTHR:HTHR24:HRWater_in:T"])
hthr = temp.resample("D").sum() * 60 * 15
# Maximum theoretical contribution from the HT systems
engine_set = {"ME1" , "ME2" , "ME3" , "ME4" , "AE1" , "AE2" , "AE3" , "AE4"}
temp = (sum((processed[d2df(idx, "CAC_HT", "HTWater_out", "T")] - processed[d2df(idx, "JWC", "HTWater_in", "T")]) *
processed[d2df(idx, "CAC_HT", "HTWater_out", "mdot")] * CONSTANTS["General"]["CP_WATER"] for idx in engine_set))
hthr_max = temp.resample("D").sum() * 60 * 15
# Contribution from the auxiliary boilers
boilers_measured = (dataset_raw["Boiler_Port"].resample("D").mean() + dataset_raw["Boiler_starbord"].resample("D").mean()) * CONSTANTS["General"]["HFO"]["LHV"]
boilers_calculated = processed["Steam:Boiler1:FuelPh_in:mdot"].resample("D").sum() * 60 * 15 * CONSTANTS["General"]["HFO"]["LHV"]
# Total demand
total = processed["Demands:Heat:Total:Edot"].resample("D").sum() * 60 * 15
# Actual plotting
ax.plot(hrsg, 'k-', label="HRSG")
ax.plot(hthr, 'b-', label="HTHR")
# plt.plot(hthr_max, 'b:', label="HTHR max")
ax.plot(boilers_measured, 'r-', label="Boilers (M)")
ax.plot(boilers_calculated, 'r:', label="Boilers (C)")
ax.plot(total, 'g-', label='Total heating demand')
plt.legend()
if filename == "TimeSeries:TypicalWinterDay":
ax.plot(processed["Demands:Mechanical:Total:Edot"]["2014-01-31"], "g-", label = "Propulsion power")
ax.plot(processed["Demands:Electricity:Total:Edot"]["2014-01-31"], "b--", label="Electric power")
ax.plot(processed["Demands:Heat:Total:Edot"]["2014-01-31"], "r:", label="Heat")
fig.autofmt_xdate()
plt.xlabel("Time [MM-DD HH]")
plt.ylabel("Power [kW]")
plt.legend()
if filename == "TimeSeries:TypicalSummerDay":
ax.plot(processed["Demands:Mechanical:Total:Edot"]["2014-07-31"], "g-", label = "Propulsion power")
ax.plot(processed["Demands:Electricity:Total:Edot"]["2014-07-31"], "b--", label="Electric power")
ax.plot(processed["Demands:Heat:Total:Edot"]["2014-07-31"], "r:", label="Heat")
fig.autofmt_xdate()
plt.xlabel("Time, [MM-DD HH]")
plt.ylabel("Power [kW]")
plt.legend()
if filename == "TimeSeries:El+Tair_vs_time":
# Plotting with two different y axis
ax.plot(processed["Demands:Electricity:Total:Edot"]["2014-04-01":"2014-11-01"].resample('D').mean(), "b--", label="Electric power")
ax.set_xlabel('Time [YYYY:MM]')
ax.set_ylabel('Power [kW]')
plt.legend()
# Adding the second axis
ax2 = ax.twinx()
ax2.plot(processed["T_air"]["2014-04-01":"2014-11-01"].resample('D').mean(), 'r--', label="Ambient air temperature")
ax2.set_ylabel('Temperature [K]')
plt.legend()
if filename == "TimeSeries:HeatBalance":
# Contribution from the HRSGs
Qdot_hrsg = (processed["ME2:HRSG:Steam_in:mdot"] * CONSTANTS["Steam"]["DH_STEAM"] +
processed["ME3:HRSG:Steam_in:mdot"] * CONSTANTS["Steam"]["DH_STEAM"] +
processed["AE1:HRSG:Steam_in:mdot"] * CONSTANTS["Steam"]["DH_STEAM"] +
processed["AE2:HRSG:Steam_in:mdot"] * CONSTANTS["Steam"]["DH_STEAM"] +
processed["AE3:HRSG:Steam_in:mdot"] * CONSTANTS["Steam"]["DH_STEAM"] +
processed["AE4:HRSG:Steam_in:mdot"] * CONSTANTS["Steam"]["DH_STEAM"])
# Contribution from the HTHR
Qdot_hthr = processed["HTHR:HTHR13:HRWater_in:mdot"] * CONSTANTS["General"]["CP_WATER"] * (processed["HTHR:HTHR13:HRWater_out:T"] - processed["HTHR:HTHR24:HRWater_in:T"])
Qdot_ab = processed["Steam:Boiler1:FuelPh_in:mdot"] * CONSTANTS["General"]["HFO"]["LHV"] * CONSTANTS["OtherUnits"]["BOILER"]["ETA_DES"]
Qdot_dumped = processed["Steam:HotWell:LTWater_in:mdot"] * (
processed["Steam:HotWell:LTWater_out:T"] - processed["Steam:HotWell:LTWater_in:T"]
) * CONSTANTS["General"]["CP_WATER"]
Qdot_balance = Qdot_hrsg + Qdot_hthr + Qdot_ab - processed["Demands:Heat:Total:Edot"] - Qdot_dumped
Qdot_balance.plot()
ax.plot(Qdot_balance.cumsum())
plt.title("Heat balance")
plt.xlabel("Time")
plt.ylabel("Heat balance [kW]")
if filename == "TimeSeries:HeatGenerationStacked":
Qdot_hrsg = (processed["ME2:HRSG:Steam_in:mdot"] * CONSTANTS["Steam"]["DH_STEAM"] +
processed["ME3:HRSG:Steam_in:mdot"] * CONSTANTS["Steam"]["DH_STEAM"] +
processed["AE1:HRSG:Steam_in:mdot"] * CONSTANTS["Steam"]["DH_STEAM"] +
processed["AE2:HRSG:Steam_in:mdot"] * CONSTANTS["Steam"]["DH_STEAM"] +
processed["AE3:HRSG:Steam_in:mdot"] * CONSTANTS["Steam"]["DH_STEAM"] +
processed["AE4:HRSG:Steam_in:mdot"] * CONSTANTS["Steam"]["DH_STEAM"]).resample('D').mean()
Qdot_hthr = (processed["HTHR:HTHR13:HRWater_in:mdot"] * CONSTANTS["General"]["CP_WATER"] * (
processed["HTHR:HTHR13:HRWater_out:T"] - processed["HTHR:HTHR24:HRWater_in:T"])).resample('D').mean()
Qdot_ab = (processed["Steam:Boiler1:Steam_HotWell_in:mdot"] * CONSTANTS["Steam"]["DH_STEAM"]).resample('D').mean()
x = Qdot_ab.index
ax.stackplot(x, Qdot_hrsg, Qdot_hthr, Qdot_ab, colors = ("0.66", "0.33", "0")) # label = ["HRSG", "HTHR", "Aux boiler"]
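# The stackplot call above is not given labels directly, so proxy Rectangle
# artists are created below purely to build the corresponding legend entries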
p1 = Rectangle((0, 0), 1, 1, fc="0.66")
p2 = Rectangle((0, 0), 1, 1, fc="0.33")
p3 = Rectangle((0, 0), 1, 1, fc="0")
plt.legend([p1, p2, p3], ["HRSG", "HTHR", "Aux boiler"])
plt.xlabel("Time [YYYY-MM]")
plt.ylabel("Power [kW]")
### PIE CHARTS ###
if filename == "Pie:TotalEnergySimple":
quantities = [processed["Demands:Mechanical:Total:Edot"].sum() , processed["Demands:Electricity:Total:Edot"].sum() , processed["Demands:Heat:Total:Edot"].sum()]
labels = ["Mechanical Power" , "Electric Power" , "Thermal Power"]
explode = (0.05 , 0.05 , 0.05)
ax.pie(quantities, labels=labels, explode=explode, autopct='%1.1f%%', shadow=True,)
if filename == "Pie:DemandFull":
quantities = [processed["Demands:Mechanical:Propeller1:Edot"].sum() ,
processed["Demands:Mechanical:Propeller2:Edot"].sum(),
processed["Demands:Electricity:HVAC:Edot"].sum() ,
processed["Demands:Electricity:Thrusters:Edot"].sum(),
processed["Demands:Electricity:Other:Edot"].sum(),
processed["Demands:Heat:HVACpreheater:Edot"].sum(),
processed["Demands:Heat:HVACreheater:Edot"].sum(),
processed["Demands:Heat:HotWaterHeater:Edot"].sum(),
processed["Demands:Heat:MachinerySpaceHeaters:Edot"].sum(),
processed["Demands:Heat:OtherTanks:Edot"].sum(),
processed["Demands:Heat:TankHeating:Edot"].sum(),
processed["Demands:Heat:HFOheater:Edot"].sum(),
processed["Demands:Heat:HFOtankHeating:Edot"].sum(),
processed["Demands:Heat:Galley:Edot"].sum()]
labels = ["Propeller-1" , "Propeller-2" ,
"HVAC", "Thrusters", "Other users",
"HVAC Preheater", "HVAC Reheater", "Hot water heater",
"Machinery space heaters", "Other tanks heating", "Fuel tanks heating", "HFO tanks heating", "HFO pre-injection heating", "Galley"]
#explode = [0, 0, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2]
explode = []
for idx in range(len(labels)):
explode.append((1-(quantities[idx]/sum(quantities))) * 0.05)
colors = ["green", "green",
"blue", "blue", "blue",
"sandybrown","sandybrown","sandybrown",
"red","red","red","red","red","red"]
patches, texts, autotexts = ax.pie(quantities, labels=labels, explode=explode, autopct='%1.1f%%', colors=colors, pctdistance=0.9)
[_.set_fontsize(14) for _ in texts]
[_.set_fontsize(14) for _ in autotexts]
if filename == "Pie:GenerationFull":
quantities = [processed["ME1:Cyl:FuelPh_in:mdot"].sum(),
processed["ME2:Cyl:FuelPh_in:mdot"].sum(),
processed["ME3:Cyl:FuelPh_in:mdot"].sum(),
processed["ME4:Cyl:FuelPh_in:mdot"].sum(),
processed["AE1:Cyl:FuelPh_in:mdot"].sum(),
processed["AE2:Cyl:FuelPh_in:mdot"].sum(),
processed["AE3:Cyl:FuelPh_in:mdot"].sum(),
processed["AE4:Cyl:FuelPh_in:mdot"].sum(),
processed["Steam:Boiler1:FuelPh_in:mdot"].sum()]
labels = ["ME1", "ME2", "ME3", "ME4", "AE1", "AE2", "AE3", "AE4", "AB"]
colors = ["0.33", "0.33", "0.33", "0.33", "0.66", "0.66", "0.66", "0.66", "black"]
ax.pie(quantities, labels=labels, explode=(0.05, )*len(labels), autopct='%1.1f%%', colors=colors)
if filename == "Pie:HeatDemand":
quantities = []
labels = []
explode = []
for demand in dict_structure["systems"]["Demands"]["units"]["Heat"]["flows"]:
quantities.append(processed[d2df("Demands","Heat",demand,"Edot")].sum())
labels.append(demand)
explode.append(0.05)
ax.pie(quantities, labels=labels, explode=tuple(explode), autopct='%1.1f%%', shadow=True)
if filename == "Pie:HeatGeneration":
quantities = []
quantities.append((processed["ME2:HRSG:Steam_in:mdot"] * CONSTANTS["Steam"]["DH_STEAM"]).sum())
quantities.append((processed["ME3:HRSG:Steam_in:mdot"] * CONSTANTS["Steam"]["DH_STEAM"]).sum())
quantities.append((processed["AE1:HRSG:Steam_in:mdot"] * CONSTANTS["Steam"]["DH_STEAM"]).sum())
quantities.append((processed["AE2:HRSG:Steam_in:mdot"] * CONSTANTS["Steam"]["DH_STEAM"]).sum())
quantities.append((processed["AE3:HRSG:Steam_in:mdot"] * CONSTANTS["Steam"]["DH_STEAM"]).sum())
quantities.append((processed["AE4:HRSG:Steam_in:mdot"] * CONSTANTS["Steam"]["DH_STEAM"]).sum())
quantities.append((processed["HTHR:HTHR13:HRWater_in:mdot"] * CONSTANTS["General"]["CP_WATER"] * (
processed["HTHR:HTHR13:HRWater_out:T"] - processed["HTHR:HTHR13:HRWater_in:T"])).sum())
quantities.append((processed["HTHR:HTHR13:HRWater_in:mdot"] * CONSTANTS["General"]["CP_WATER"] * (
processed["HTHR:HTHR24:HRWater_out:T"] - processed["HTHR:HTHR24:HRWater_in:T"])).sum())
quantities.append((processed["Steam:Boiler1:Steam_HotWell_in:mdot"] * CONSTANTS["Steam"]["DH_STEAM"]).sum())
labels = ["HRSG - ME2", "HRSG - ME3",
"HRSG - AE1", "HRSG - AE2", "HRSG - AE3", "HRSG - AE4",
"HTHR - ER13", "HTHR - ER24", "Aux Boiler"]
explode = (0.05, ) * len(labels)
colors = ("0.33", "0.33", "0.33", "0.33", "0.33", "0.33", "0.66", "0.66", "1.0")
ax.pie(quantities, labels=labels, explode=tuple(explode), autopct='%1.1f%%', shadow=True, colors=colors)
if filename == "Pie:OperationalMode":
quantities = []
labels = []
colors = []
temp = pd.Series(processed["operationalMode"].values, dtype="category")
for category in temp.cat.categories:
quantities.append(sum(temp == category))
labels.append(category)
colors.append("gray")
patches = ax.pie(quantities, labels=labels, explode=(0.05, 0.05, 0.05, 0.05), autopct='%1.1f%%', shadow=True, colors=colors)[0]
patches[0].set_hatch('/')
patches[1].set_hatch('\\')
patches[2].set_hatch('x')
### HISTOGRAMS ###
if filename == "Hist:WHR":
temp = processed["HTHR:HTHR13:HRWater_in:mdot"] * CONSTANTS["General"]["CP_WATER"] * (processed["HTHR:HTHR13:HRWater_out:T"] - processed["HTHR:HTHR24:HRWater_in:T"])
temp2 = (processed["ME2:HRSG:Steam_in:mdot"] * CONSTANTS["Steam"]["DH_STEAM"] + processed["ME3:HRSG:Steam_in:mdot"] * CONSTANTS["Steam"]["DH_STEAM"] + processed["AE1:HRSG:Steam_in:mdot"] * CONSTANTS["Steam"]["DH_STEAM"] + processed["AE2:HRSG:Steam_in:mdot"] * CONSTANTS["Steam"]["DH_STEAM"] + processed["AE3:HRSG:Steam_in:mdot"] * CONSTANTS["Steam"]["DH_STEAM"] + processed["AE4:HRSG:Steam_in:mdot"] * CONSTANTS["Steam"]["DH_STEAM"])
ax.hist(temp, 50, normed=1, alpha=0.5, label="HTHR")
ax.hist(temp2, 50, normed=1, alpha=0.5, label="HRSG")
plt.legend()
if filename == "Hist:AuxEngines":
temp1 = []
for engine in {"AE1", "AE2", "AE3", "AE4"}:
temp1.append(processed[engine+":Cyl:Power_out:Edot"][processed[engine+":on"]]/CONSTANTS["AuxEngines"]["MCR"])
ax.hist(tuple(temp1), normed=False, alpha=0.8, label=["AE1", "AE2", "AE3", "AE4"])
plt.legend()
plt.xlabel("Engine load")
plt.ylabel("Number of observations")
if filename == "Hist:MainEngines":
temp1 = []
for engine in {"ME1", "ME2", "ME3", "ME4"}:
temp1.append(processed[engine + ":Cyl:Power_out:Edot"][processed[engine + ":on"]] / CONSTANTS["MainEngines"]["MCR"])
ax.hist(tuple(temp1), normed=False, alpha=0.8, label=["ME1", "ME2", "ME3", "ME4"])
plt.legend()
plt.xlabel("Engine load")
plt.ylabel("Number of observations")
### SCATTER PLOTS ###
if filename == "Scatter:Pmech_vs_Vship":
enginesOn = processed["ME1:on"].astype(int) + processed["ME2:on"].astype(int) + processed["ME3:on"].astype(int) + processed["ME4:on"].astype(int)
for idx in range(5):
ax.scatter(
dataset_raw["SHIPS SPEED:79025:knot:Average:900"][enginesOn==idx],
processed["Demands:Mechanical:Total:Edot"][enginesOn==idx],
label=(str(idx) + "Engines on"))
plt.legend()
### BAR CHARTS ###
if filename == "Bar:PercentageWHR":
# Exhaust gas
Qdot_hrsg = 0
Qdot_eg = 0
Qdot_eg160 = 0
Bdot_hrsg = 0
Bdot_eg = 0
Bdot_eg160diff = 0
Qdot_hthr = (processed["HTHR:HTHR13:HRWater_out:Edot"] - processed["HTHR:HTHR24:HRWater_in:Edot"]).sum()
Qdot_ht = 0
Qdot_lt = 0
Bdot_hthr = (processed["HTHR:HTHR13:HRWater_out:Bdot"] - processed["HTHR:HTHR24:HRWater_in:Bdot"]).sum()
Bdot_ht = 0
Bdot_lt = 0
for system in {"AE1", "AE2", "AE3", "AE4", "ME1" , "ME2", "ME3", "ME4"}:
# Exhaust gas, Energy
if system in {"AE1", "AE2", "AE3", "AE4", "ME2", "ME3"}:
Qdot_hrsg = Qdot_hrsg + (processed[system+":HRSG:Steam_in:mdot"] * CONSTANTS["Steam"]["DH_STEAM"]).sum()
Bdot_hrsg = Bdot_hrsg + (processed[system + ":HRSG:Steam_out:Bdot"] - processed[system + ":HRSG:Steam_in:Bdot"]).sum()
Qdot_eg = Qdot_eg + processed[system+":Turbine:Mix_out:Edot"].sum()
Qdot_eg160 = Qdot_eg160 + (processed[system+":Turbine:Mix_out:mdot"] * CONSTANTS["General"]["CP_EG"] * (processed[system+":Turbine:Mix_out:T"] - 160-273.15)).sum()
# Exhaust gas, Exergy
Bdot_eg = Bdot_eg + processed[system + ":Turbine:Mix_out:Bdot"].sum()
Bdot_eg160diff = Bdot_eg160diff + (processed[system + ":Turbine:Mix_out:mdot"] * CONSTANTS["General"]["CP_EG"] * np.log((160+273.15)/(processed["T_0"])) * ((160+273.15-processed["T_0"]) / np.log((160+273.15)/(processed["T_0"]))
import os
import numpy as np
import mdtraj as md
from simtk import unit
from foldamers.cg_model.cgmodel import CGModel
from foldamers.utilities.plot import plot_distribution
# These functions calculate and plot bond angle and torsion distributions from a CGModel object and pdb trajectory
def calc_bond_angle_distribution(
cgmodel,pdbfile,nbins=90,plotfile="angle_hist"
):
"""
Calculate and plot all bond angle distributions from a CGModel object and pdb trajectory
:param cgmodel: CGModel() object
:type cgmodel: class
:param pdbfile: path to pdb trajectory file
:type pdbfile: str
:param nbins: number of bins spanning the range of 0 to 180 degrees, default = 90
:type nbins: int
:param plotfile: Base filename for saving bond angle distribution pdf plots
:type plotfile: str
"""
# Load in a trajectory pdb file:
traj = md.load(pdbfile)
nframes = traj.n_frames
# Get angle list
angle_list = CGModel.get_bond_angle_list(cgmodel)
ang_types = [] # List of angle types for each angle in angle_list
ang_array = np.zeros((len(angle_list),3))
# Relevant angle types are added to a dictionary as they are discovered
ang_dict = {}
# Create an inverse dictionary for getting angle string name from integer type
inv_ang_dict = {}
# Counter for number of angle types found:
i_angle_type = 0
# Assign angle types:
for i in range(len(angle_list)):
ang_array[i,0] = angle_list[i][0]
ang_array[i,1] = angle_list[i][1]
ang_array[i,2] = angle_list[i][2]
particle_types = [
CGModel.get_particle_type_name(cgmodel,angle_list[i][0]),
CGModel.get_particle_type_name(cgmodel,angle_list[i][1]),
CGModel.get_particle_type_name(cgmodel,angle_list[i][2])
]
string_name = ""
reverse_string_name = ""
for particle in particle_types:
string_name += f"{particle}_"
string_name = string_name[:-1]
for particle in reversed(particle_types):
reverse_string_name += f"{particle}_"
reverse_string_name = reverse_string_name[:-1]
if (string_name in ang_dict.keys()) == False:
# New angle type found, add to angle dictionary
i_angle_type += 1
ang_dict[string_name] = i_angle_type
ang_dict[reverse_string_name] = i_angle_type
# For inverse dict we will use only the forward name based on first encounter
inv_ang_dict[str(i_angle_type)] = string_name
print(f"adding new angle type {i_angle_type}: {string_name} to dictionary")
print(f"adding reverse version {i_angle_type}: {reverse_string_name} to dictionary")
ang_types.append(ang_dict[string_name])
# Sort angles by type into separate sub arrays for mdtraj compute_angles
ang_sub_arrays = {}
for i in range(i_angle_type):
ang_sub_arrays[str(i+1)] = np.zeros((ang_types.count(i+1),3))
# Counter vector for all angle types
n_i = np.zeros((i_angle_type,1), dtype=int)
for i in range(len(angle_list)):
ang_sub_arrays[str(ang_types[i])][n_i[ang_types[i]-1],:] = ang_array[i,:]
n_i[ang_types[i]-1] += 1
# Create dictionary for saving angle histogram data:
angle_hist_data = {}
# Set bin edges:
angle_bin_edges = np.linspace(0,180,nbins+1)
angle_bin_centers = np.zeros((len(angle_bin_edges)-1,1))
for i in range(len(angle_bin_edges)-1):
angle_bin_centers[i] = (angle_bin_edges[i]+angle_bin_edges[i+1])/2
for i in range(i_angle_type):
# Compute all angle values in trajectory
# This returns an [nframes x n_angles] array
ang_val_array = md.compute_angles(traj,ang_sub_arrays[str(i+1)])
# Reshape arrays and convert to degrees:
ang_val_array = (180/np.pi)*np.reshape(ang_val_array, (nframes*n_i[i][0],1))
# Histogram and plot results:
n_out, bin_edges_out = np.histogram(
ang_val_array, bins=angle_bin_edges,density=True)
angle_hist_data[f"{inv_ang_dict[str(i+1)]}_density"]=n_out
angle_hist_data[f"{inv_ang_dict[str(i+1)]}_bin_centers"]=angle_bin_centers
plot_distribution(
inv_ang_dict,
angle_hist_data,
xlabel="Bond angle (degrees)",
ylabel="Probability density",
xlim=[0,180],
figure_title="Angle distributions",
file_name=f"{plotfile}",
marker_string='o-r',
)
return angle_hist_data
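# Example usage (a sketch; the pdb file name is a placeholder and cgmodel is
# assumed to be an already-built CGModel instance):
# angle_hist_data = calc_bond_angle_distribution(
#     cgmodel, "simulation.pdb", nbins=90, plotfile="angle_hist"
# )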
def calc_torsion_distribution(
cgmodel,pdbfile,nbins=180,plotfile="torsion_hist"
):
"""
Calculate and plot all torsion distributions from a CGModel object and pdb trajectory
:param cgmodel: CGModel() object
:type cgmodel: class
:param pdbfile: path to pdb trajectory file
:type pdbfile: str
:param nbins: number of bins spanning the range of -180 to 180 degrees, default = 180
:type nbins: int
:param plotfile: Base filename for saving torsion distribution pdf plots
:type plotfile: str
"""
# Load in a trajectory pdb file:
traj = md.load(pdbfile)
nframes = traj.n_frames
# Get torsion list
torsion_list = CGModel.get_torsion_list(cgmodel)
torsion_types = [] # List of torsion types for each torsion in torsion_list
torsion_array = np.zeros((len(torsion_list),4))
# Relevant torsion types are added to a dictionary as they are discovered
torsion_dict = {}
# Create an inverse dictionary for getting torsion string name from integer type
inv_torsion_dict = {}
# Counter for number of torsion types found:
i_torsion_type = 0
# Assign torsion types:
for i in range(len(torsion_list)):
torsion_array[i,0] = torsion_list[i][0]
torsion_array[i,1] = torsion_list[i][1]
torsion_array[i,2] = torsion_list[i][2]
torsion_array[i,3] = torsion_list[i][3]
particle_types = [
CGModel.get_particle_type_name(cgmodel,torsion_list[i][0]),
CGModel.get_particle_type_name(cgmodel,torsion_list[i][1]),
CGModel.get_particle_type_name(cgmodel,torsion_list[i][2]),
CGModel.get_particle_type_name(cgmodel,torsion_list[i][3])
]
string_name = ""
reverse_string_name = ""
for particle in particle_types:
string_name += f"{particle}_"
string_name = string_name[:-1]
for particle in reversed(particle_types):
reverse_string_name += f"{particle}_"
reverse_string_name = reverse_string_name[:-1]
if (string_name in torsion_dict.keys()) == False:
# New torsion type found, add to torsion dictionary
i_torsion_type += 1
torsion_dict[string_name] = i_torsion_type
torsion_dict[reverse_string_name] = i_torsion_type
# For inverse dict we will use only the forward name based on first encounter
inv_torsion_dict[str(i_torsion_type)] = string_name
print(f"adding new torsion type {i_torsion_type}: {string_name} to dictionary")
print(f"adding reverse version {i_torsion_type}: {reverse_string_name} to dictionary")
torsion_types.append(torsion_dict[string_name])
# Sort torsions by type into separate sub arrays for mdtraj compute_dihedrals
torsion_sub_arrays = {}
for i in range(i_torsion_type):
torsion_sub_arrays[str(i+1)] = np.zeros((torsion_types.count(i+1),4))
# Counter vector for all angle types
n_i = np.zeros((i_torsion_type,1), dtype=int)
import numpy as np
import pytest
from hamcrest import *
from src.data_preprocessing import *
# @pytest.mark.parametrize("data_in", GET_MODEL_REFERENCE_DATA)
def test_intersect():
box_a = np.array([[100, 100], [400, 400]])
import matplotlib.pyplot as plt
import numpy as np
# Functions
x = np.linspace(0,np.pi)
y_1 = np.sin(x)
y_2 = np.sin(x) + np.random.normal(0,0.1,50)
y_3 = np.sin(x) + np.random.uniform(-0.1,0.1,50)
import os
import hashlib
import numpy as np
from pybullet_envs import env_bases
from pybullet_envs import scene_abstract
from d4rl.pointmaze_bullet import bullet_robot
from d4rl.pointmaze import maze_model
from d4rl import offline_env
class MazeRobot(bullet_robot.MJCFBasedRobot):
def __init__(self, maze_spec):
model = maze_model.point_maze(maze_spec)
maze_hash = hashlib.md5(maze_spec.encode('ascii')).hexdigest()
filename = os.path.join(offline_env.DATASET_PATH, 'tmp_bullet_xml', maze_hash+'.xml')
if not os.path.exists(filename):
os.makedirs(os.path.dirname(filename), exist_ok=True)
with model.asfile() as f:
model_xml = f.read()
with open(filename, 'w') as f:
f.write(model_xml)
self.dt = 0.0165
self.last_qpos = None
super(MazeRobot, self).__init__(model_xml=filename,
robot_name='maze2d',
action_dim=2,
obs_dim=4,
self_collision=True)
@property
def qpos(self):
x = self.particle.get_position()[0:2]
return x
@property
def qvel(self):
#vx = self.particle.speed()[0:2]
#vx = np.array([self.ball_x.get_velocity(), self.ball_y.get_velocity()], dtype=np.float32)
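# Velocity is estimated by finite-differencing the particle position over one
# integration step (dt) rather than reading the joint velocities directly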
vx = (self.qpos - self.last_qpos) / self.dt
return vx
def calc_state(self):
#import pdb; pdb.set_trace()
return np.concatenate([self.qpos - 1.0, self.qvel])
def set_state(self, qpos, qvel):
self.particle.reset_position(np.array([qpos[0], qpos[1], 0.0]))
self.particle.reset_velocity(np.array([qvel[0], qvel[1], 0.0]))
self.last_qpos = self.qpos
#self.ball_x.set_velocity(qvel[0])
#self.ball_y.set_velocity(qvel[1])
def get_obs(self):
return self.calc_state()
def robot_specific_reset(self, bullet_client):
self._p = bullet_client
self.particle = self.parts["particle"]
self.ball_x = self.jdict["ball_x"]
self.ball_y = self.jdict["ball_y"]
#u = self.np_random.uniform(low=-.1, high=.1)
#self.j1.reset_current_position(u if not self.swingup else 3.1415 + u, 0)
self.ball_x.set_motor_torque(0)
self.ball_y.set_motor_torque(0)
self.last_qpos = self.qpos
def apply_action(self, a):
assert (np.isfinite(a).all())
self.last_qpos = self.qpos
self.ball_x.set_motor_torque(a[0]*10)
self.ball_y.set_motor_torque(a[1]*10)
class Maze2DBulletEnv(env_bases.MJCFBaseBulletEnv, offline_env.OfflineEnv):
def __init__(self, maze_spec,
reward_type='dense',
reset_target=False,
**kwargs):
self.robot = MazeRobot(maze_spec)
env_bases.MJCFBaseBulletEnv.__init__(self, self.robot)
offline_env.OfflineEnv.__init__(self, **kwargs)
self.stateId = -1
self.reset_target = reset_target
self.str_maze_spec = maze_spec
self.maze_arr = maze_model.parse_maze(maze_spec)
self.reward_type = reward_type
self.reset_locations = list(zip(*np.where(self.maze_arr == maze_model.EMPTY)))
self.reset_locations.sort()
self._target = np.array([0.0,0.0])
# Set the default goal (overriden by a call to set_target)
# Try to find a goal if it exists
self.goal_locations = list(zip(*np.where(self.maze_arr == maze_model.GOAL)))
if len(self.goal_locations) == 1:
self.set_target(self.goal_locations[0])
elif len(self.goal_locations) > 1:
raise ValueError("More than 1 goal specified!")
else:
# If no goal, use the first empty tile
self.set_target(np.array(self.reset_locations[0]).astype(self.observation_space.dtype))
self.empty_and_goal_locations = self.reset_locations + self.goal_locations
def create_single_player_scene(self, bullet_client):
return scene_abstract.SingleRobotEmptyScene(bullet_client, gravity=9.8, timestep=0.0165, frame_skip=1)
def reset(self):
if (self.stateId >= 0):
self._p.restoreState(self.stateId)
r = env_bases.MJCFBaseBulletEnv.reset(self)
if (self.stateId < 0):
self.stateId = self._p.saveState()
self.reset_model()
ob = self.robot.calc_state()
return ob
def step(self, action):
        action = np.clip(action, -1.0, 1.0)  # api: numpy.clip
import photutils
from astropy.io import fits, ascii
import sys
import os
from pkg_resources import resource_filename
import matplotlib.pyplot as plt
import glob
from photutils import CircularAperture, CircularAnnulus
from photutils import RectangularAperture
from photutils import centroid_2dg, aperture_photometry
import photutils
import numpy as np
from astropy.time import Time
import astropy.units as u
import pdb
from copy import deepcopy
import yaml
import warnings
from scipy.stats import binned_statistic
from astropy.table import Table
import multiprocessing
from multiprocessing import Pool
import time
import logging
import urllib
import tqdm
from .phot_pipeline import get_baseDir
def gauss_2d(x,y,x0,y0,sigx=1.0,sigy=1.0,norm=1.0):
"""
A 2D Gaussian function
"""
arg = -1.0 * ((x - x0)**2/(2. * sigx**2) +
(y - y0)**2/(2. * sigy**2))
## only evaluate exponential where it will avoid
    ## underflow errors for tiny results
high_pts = arg >= -15.
z = np.zeros_like(x,dtype=float)
z[high_pts] = np.exp(arg[high_pts]) /(2. * np.pi * sigx * sigy)
return z * norm
def make_gauss_star(dimen=30,cen=[15,15],flux=1.0):
x = np.arange(dimen)
y = np.arange(dimen)
#x = np.linspace(-halfdimen, halfdimen,)
#y = np.linspace(-halfdimen, halfdimen)
x, y = np.meshgrid(x, y) # get 2D variables instead of 1D
z = gauss_2d(x, y, cen[0],cen[1],norm=flux)
return z
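# A minimal usage sketch (not part of the pipeline above): build a synthetic
# 30x30 star with make_gauss_star and check that the summed flux roughly
# matches the requested value; the centre and flux here are arbitrary.
def _example_gauss_star():
    img = make_gauss_star(dimen=30, cen=[12.0, 18.0], flux=50.0)
    recovered_flux = img.sum()  # close to 50, minus flux falling off the grid
    return img, recovered_flux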
def sim_phot_w_large_shift():
nImg = 10
dimen = 30
xcen = np.linspace(4,25,nImg)
ycen = np.linspace(25,4,nImg)
time_start = Time('2020-05-04T00:00:00.0',format='fits')
time_obs = time_start + np.arange(nImg) * 10.0 * u.second
outDir = os.path.join(get_baseDir(),
'example_tshirt_data',
'sim_data','drift_phot')
np.random.seed(0)
fileNames = []
for ind in np.arange(nImg):
z = make_gauss_star(dimen=dimen,cen=[xcen[ind],ycen[ind]],flux=100.0)
        noise = np.random.randn(dimen,dimen)  # api: numpy.random.randn
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Jul 27 15:10:36 2020
@author: chitra
"""
import time
_start_time = time.time()
def tick():
global _start_time
_start_time = time.time()
def tock():
t_sec = round(time.time() - _start_time)
(t_min, t_sec) = divmod(t_sec,60)
(t_hour,t_min) = divmod(t_min,60)
print('Time passed: {}hour:{}min:{}sec'.format(t_hour,t_min,t_sec))
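# A minimal usage sketch of the tick()/tock() helpers defined above; the
# sleep duration is arbitrary and only stands in for real work.
def _example_timing():
    tick()
    time.sleep(1)
    tock()  # prints something like "Time passed: 0hour:0min:1sec"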
import numpy as np
import pyblp
import pandas as pd
# the standard deviation of log income is constant across years, but it has year-varying means
# 0.375 is calibrated to match OG diversion of 2nd choice data
def solve_nl_nevo(df,rho=0.375):
groups = df.groupby(['market_ids', 'nesting_ids'])
df['demand_instruments20'] = groups['shares'].transform(np.size)
nl_formulation = pyblp.Formulation('0 + prices')
problem = pyblp.Problem(nl_formulation, df)
res=problem.solve(rho=rho,optimization=pyblp.Optimization('return'))
og=res.extract_diagonals(res.compute_diversion_ratios()).mean()
print(og)
return problem,res
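# A minimal usage sketch, assuming the pyblp Nevo (fake cereal) product data;
# the single-nest nesting_ids column is an illustrative assumption and is not
# part of the shipped dataset.
def _example_solve_nl_nevo():
    df = pd.read_csv(pyblp.data.NEVO_PRODUCTS_LOCATION)
    df['nesting_ids'] = 1  # put every inside good in one nest
    problem, res = solve_nl_nevo(df, rho=0.375)
    return problem, res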
def draw_blp_agents(ndraws=10000):
log_income_sd = 1.72
log_income_means = {
1971: 2.01156,
1972: 2.06526,
1973: 2.07843,
1974: 2.05775,
1975: 2.02915,
1976: 2.05346,
1977: 2.06745,
1978: 2.09805,
1979: 2.10404,
1980: 2.07208,
1981: 2.06019,
1982: 2.06561,
1983: 2.07672,
1984: 2.10437,
1985: 2.12608,
1986: 2.16426,
1987: 2.18071,
1988: 2.18856,
1989: 2.21250,
1990: 2.18377,
}
# construct agent data year-by-year
market_ids = []
weights = []
nodes = []
income = []
for index, (year, log_income_mean) in enumerate(log_income_means.items()):
integration = pyblp.Integration('halton', ndraws, {'discard': 1000 + index * ndraws,'seed': index})
untransformed_agents = pyblp.build_integration(integration, 6)
market_ids.append(np.repeat(year, untransformed_agents.weights.size))
weights.append(untransformed_agents.weights)
nodes.append(untransformed_agents.nodes[:, :-1])
income.append(np.exp(log_income_mean + log_income_sd * untransformed_agents.nodes[:, -1]))
# concatenate the constructed agent data
agent_data = {
'market_ids': np.concatenate(market_ids),
'weights': np.concatenate(weights),
'nodes': np.vstack(nodes),
'income': np.concatenate(income),
}
# Make this a dataframe
agents=agent_data.copy()
del agents['nodes']
del agents['weights']
agent_df=pd.DataFrame.from_dict(agents)
for index, vi in enumerate(np.vstack(nodes).T):
agent_df[f'nodes{index}'] = vi
agent_df['weights']=np.concatenate(weights).flatten()
return agent_df
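# A minimal usage sketch: draw 1,000 Halton agents per year and inspect the
# resulting frame (market_ids, income, nodes0..nodes4, weights); the draw
# count is arbitrary.
def _example_draw_agents():
    agent_df = draw_blp_agents(ndraws=1000)
    return agent_df[['market_ids', 'income', 'weights']].describe()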
def save_pyblp_results(results, problem,filename):
## add in all the other things we could potentially be interested in
res_dict = results.to_dict()
res_dict['diversion_ratios'] = results.compute_diversion_ratios()
res_dict['quality_diversion_ratios'] = results.compute_diversion_ratios(name=None)
res_dict['own_diversion'] = results.extract_diagonals(res_dict['diversion_ratios'])
res_dict['long_run_diversion_ratios'] = results.compute_long_run_diversion_ratios()
res_dict['objective'] = results.objective.item()
res_dict['objective_scaled'] = results.objective.item()/problem.N
res_dict['elasticities'] = results.compute_elasticities()
res_dict['aggregate_elasticities'] = results.compute_aggregate_elasticities()
res_dict['diag_elasticities'] = results.extract_diagonals(res_dict['elasticities'])
res_dict['consumer_surplus'] = results.compute_consumer_surpluses()
res_dict['markups'] =results.compute_markups()
res_dict['probabilities'] = results.compute_probabilities()
np.save(filename, res_dict, allow_pickle =True)
def load_pyblp_dict(filename):
dict = np.load(filename, allow_pickle=True)
return dict
# this ONLY works for the base!
def load_blp_base(problem, filename):
base_res = np.load(filename, allow_pickle=True)
dict_W = base_res.item().get('W')
dict_delta = base_res.item().get('delta')
dict_gamma = base_res.item().get('gamma')
dict_beta = base_res.item().get('beta')
dict_sigma = base_res.item().get('sigma')
dict_pi = base_res.item().get('pi')
## Use these to quickly get the exact results as estimation
fast_options = dict(
method='1s',
check_optimality='gradient',
costs_bounds=(0.001, None),
W_type='clustered',
se_type='clustered',
initial_update=False,
iteration=pyblp.Iteration('squarem', {'atol': 1e-14}),
optimization=pyblp.Optimization('return'),
scale_objective=False,
W=dict_W,
delta=dict_delta,
beta=dict_beta,
gamma=dict_gamma,
sigma = dict_sigma,
pi = dict_pi
)
results_fast = problem.solve(**fast_options)
return results_fast
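# A minimal usage sketch of the save/reload round trip; `problem` and `results`
# are assumed to come from an earlier problem.solve() call and the .npy path is
# hypothetical.
def _example_save_and_reload(problem, results, filename='blp_base_results.npy'):
    save_pyblp_results(results, problem, filename)
    raw = load_pyblp_dict(filename)  # 0-d array wrapping the dict; use .item()
    results_fast = load_blp_base(problem, filename)  # re-solve from stored W, delta, etc.
    return raw, results_fast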
def get_params_nevo(results_dict, w=None):
elasticities = results_dict.item().get('diag_elasticities')
agg_elas = results_dict.item().get('aggregate_elasticities')
diversion0 = results_dict.item().get('own_diversion')
div = results_dict.item().get('diversion_ratios')
div[np.isnan(div)]=0
div[div==diversion0]=0
div.sort(axis=1)
top5=div[:,-5:].sum(axis=1)
price_param = results_dict.item().get('beta').item()
price_param_se = results_dict.item().get('beta_se').item()
cs = results_dict.item().get('consumer_surplus')*100
markups=results_dict.item().get('markups')
# CRM: Adding the interactions as pi
if results_dict.item().get('sigma').shape[0] == 0:
sigmas = np.zeros(5)
sigma_ses = np.zeros((5,5))
else:
sigma_ses = results_dict.item().get('sigma_se')
sigmas=np.abs(np.diag(results_dict.item().get('sigma')))
if results_dict.item().get('pi').shape[0] == 0 :
pis = np.zeros((5,5))
pi_ses = np.zeros((5,5))
else:
pis = results_dict.item().get('pi')
pi_ses = results_dict.item().get('pi_se')
objective = results_dict.item().get('objective')
objective_scaled = results_dict.item().get('objective_scaled')
return {'sigma_cons': sigmas[0],
'sigma_price': sigmas[1],
'sigma_sugar': sigmas[2],
'sigma_mushy': sigmas[3],
'sigma_cons_se': sigma_ses[0,0],
'sigma_price_se': sigma_ses[1,1],
'sigma_sugar_se': sigma_ses[2,2],
'sigma_mushy_se': sigma_ses[3,3],
'pi_cons_inc': pis[0,0],
'pi_cons_inc2': pis[0,1],
'pi_cons_age': pis[0,2],
'pi_price_inc': pis[1,0],
'pi_price_inc2': pis[1,1],
'pi_price_child': pis[1,3],
'pi_sugar_inc': pis[2,0],
'pi_sugar_age': pis[2,2],
'pi_mushy_inc': pis[3,0],
'pi_mushy_age': pis[3,2],
'pi_cons_inc_se': pi_ses[0,0],
'pi_cons_inc2_se': pi_ses[0,1],
'pi_cons_age_se': pi_ses[0,2],
'pi_price_inc_se': pi_ses[1,0],
'pi_price_inc2_se': pi_ses[1,1],
'pi_price_child_se': pi_ses[1,3],
'pi_sugar_inc_se': pi_ses[2,0],
'pi_sugar_age_se': pi_ses[2,2],
'pi_mushy_inc_se': pi_ses[3,0],
'pi_mushy_age_se': pi_ses[3,2],
'price_coeff': price_param,
'price_se': price_param_se,
'median_own_elas':np.median(elasticities),
'median_agg_elas': np.median(agg_elas),
'mean_og_div': np.average(diversion0,weights=w),
'median_og_div': np.median(diversion0),
'mean_top5_div': np.average(top5[:,None],weights=w),
'mean_markup': np.average(markups,weights=w),
'median_cs': np.median(cs),
'objective': objective,
'objective_scaled': objective_scaled,
}
def get_params_blp(results_dict, w=None):
elasticities = results_dict.item().get('diag_elasticities')
agg_elas = results_dict.item().get('aggregate_elasticities')
diversion0 = results_dict.item().get('own_diversion')
div = results_dict.item().get('diversion_ratios')
# set missing and outside good diversion =0
div[np.isnan(div)]=0
div[div==diversion0]=0
div.sort(axis=1)
top5=div[:,-5:].sum(axis=1)
# why the difference? weird
if results_dict.item().get('pi').shape[1]>0:
price_param = results_dict.item().get('pi')[1][0]
else:
price_param = results_dict.item().get('beta')[1][0]
price_se = results_dict.item().get('beta_se')[1][0]
cs = results_dict.item().get('consumer_surplus')
markups = results_dict.item().get('markups')
objective = results_dict.item().get('objective')
objective_scaled = results_dict.item().get('objective_scaled')
betas = results_dict.item().get('beta')[:,0]
beta_ses = results_dict.item().get('beta_se')[:,0]
sigmas=np.abs(np.diag(results_dict.item().get('sigma')))
sigma_ses = np.abs(np.diag(results_dict.item().get('sigma_se')))
if sigmas.shape[0] == 0:
sigmas = np.zeros(6)
sigma_ses = sigmas
other_sigmas=sigmas[-4:]
other_sigma_ses=sigma_ses[-4:]
# if pis are suppressed or not
if results_dict.item().get('pi').shape[1] == 0:
pis = np.zeros((results_dict.item().get('pi').shape[0],results_dict.item().get('pi').shape[0]))
else:
pis = results_dict.item().get('pi')[:,0]
if results_dict.item().get('pi_se').shape[1] == 0:
pi_ses = np.zeros(5)
else:
pi_ses = results_dict.item().get('pi_se')[:,0]
if results_dict.item().get('gamma').shape[0] == 0:
gammas = np.zeros(6)
gamma_ses = gammas
else:
gammas = results_dict.item().get('gamma')[:,0]
gamma_ses = results_dict.item().get('gamma_se')[:,0]
# defining the sigmas is weird
sigma_cons = sigmas[0]
sigma_hpwt = other_sigmas[0]
sigma_air = other_sigmas[1]
sigma_mpd = other_sigmas[2]
sigma_size = other_sigmas[3]
sigma_cons_se = sigma_ses[0]
sigma_hpwt_se = other_sigma_ses[0]
sigma_air_se = other_sigma_ses[1]
sigma_mpd_se = other_sigma_ses[2]
sigma_size_se = other_sigma_ses[3]
return {
'coeff_cons':betas[0],
'coeff_hpwt':betas[1],
'coeff_air':betas[2],
'coeff_mpd':betas[3],
'coeff_size':betas[4],
'se_cons':beta_ses[0],
'se_hpwt':beta_ses[1],
'se_air':beta_ses[2],
'se_mpd':beta_ses[3],
'se_size':beta_ses[4],
'sigma_cons':sigma_cons,
'sigma_hpwt':sigma_hpwt,
'sigma_air':sigma_air,
'sigma_mpd':sigma_mpd,
'sigma_size':sigma_size,
'sigma_cons_se':sigma_cons_se,
'sigma_hpwt_se':sigma_hpwt_se,
'sigma_air_se':sigma_air_se,
'sigma_mpd_se':sigma_mpd_se,
'sigma_size_se':sigma_size_se,
#not TOTALLY sure this should be absolute value
'price_term':price_param,
'price_se': price_se,
'gamma_cons':gammas[0],
'gamma_hpwt':gammas[1],
'gamma_air':gammas[2],
'gamma_mpg':gammas[3],
'gamma_size':gammas[4],
'gamma_trend':gammas[5],
'gamma_cons_se':gamma_ses[0],
'gamma_hpwt_se':gamma_ses[1],
'gamma_air_se':gamma_ses[2],
'gamma_mpg_se':gamma_ses[3],
'gamma_size_se':gamma_ses[4],
'gamma_trend_se':gamma_ses[5],
'median_own_elas':np.median(elasticities),
'median_agg_elas': np.median(agg_elas),
        'mean_own_elas': np.average(elasticities,weights=w),
'median_og_div': np.median(diversion0),
'mean_og_div': np.average(diversion0,weights=w),
'median_top5_div': np.median(top5[:,None]),
'mean_top5_div': np.average(top5[:,None],weights=w),
'median_markup': np.median(markups),
'mean_markup': np.average(markups,weights=w),
'median_cs': np.median(cs),
'objective': objective,
'objective_scaled': objective_scaled,
}
def make_df(x,stub):
df=pd.DataFrame(x)
df.columns=[stub+str(x) for x in df.columns]
return df
# for each market, do the WTP calculations
def do_single_market(results,product_data,ids):
prodlist = product_data[product_data.market_ids.isin(ids)]['product_ids'].unique()
base=results.compute_consumer_surpluses(keep_all=False,market_id=ids)
wtp=np.vstack([base-results.compute_consumer_surpluses(eliminate_product_ids=[x],keep_all=False,market_id=ids) for x in prodlist]).flatten()
div0=np.diag(results.compute_diversion_ratios(market_id=ids))
shares=product_data[product_data.market_ids.isin(ids)]['shares'].values
df=pd.DataFrame(np.vstack([wtp,div0,shares]).transpose(),columns=['wtp','div0','shares'])
df['market_ids']=ids[0]
df['product_ids']=product_data[product_data.market_ids.isin(ids)]['product_ids'].values
return df
def do_single_market_indiv(results,product_data,ids):
# get the relevant market and product IDs
mktslice = product_data[product_data.market_ids.isin(ids)].copy()
prodlist = mktslice['product_ids'].unique()
# compute consumer surplus in the market WITH every product
base=results.compute_consumer_surpluses(keep_all=True,market_id=ids)
# WTP is surplus WITH (base) MINUS surplus without (eliminate)
wtp=np.vstack([base-results.compute_consumer_surpluses(eliminate_product_ids=[x],keep_all=True,market_id=ids) for x in prodlist])
# get diversion ratios
div0=np.diag(results.compute_diversion_ratios(market_id=ids))
# get market share for i j t
sijt = results.compute_probabilities(market_id=ids)
    # Dij,0: individual-level diversion to the outside good,
    # D_ij0 = (1 - sum_k s_ikt) / (1 - s_ijt)
    div_i0=((1-sijt.sum(axis=0)[None,:])/(1-sijt))
shares=sijt.mean(axis=1)
df=pd.concat([make_df(wtp,'wtp_'), make_df(sijt,'sijt_'), make_df(div_i0,'divi0_')],axis=1)
df['market_ids']=ids[0]
df['product_ids']=product_data[product_data.market_ids.isin(ids)]['product_ids'].values
return df
def reshape_wtp(wide_df):
wide_df2=wide_df.set_index(['market_ids','product_ids'])
tmp=wide_df2.filter(regex='wtp_').stack()
draw_ids=np.array([int(str1.split('_')[1]) for str1 in tmp.index.get_level_values(2)])
long_df=pd.concat([
tmp.reset_index(level=2,drop=True),
wide_df2.filter(regex='sijt_').stack().reset_index(level=2,drop=True),
wide_df2.filter(regex='divi0_').stack().reset_index(level=2,drop=True)
],axis=1)
long_df.columns=['wtp','shares','div0']
long_df['draw_ids']=draw_ids
return long_df
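# A minimal usage sketch: run the individual-level WTP/diversion computation
# market by market and stack the wide frames into one long panel; `results`
# and `product_data` are assumed to come from an earlier BLP estimation.
def _example_wtp_panel(results, product_data):
    frames = [do_single_market_indiv(results, product_data, [m])
              for m in product_data['market_ids'].unique()]
    wide_df = pd.concat(frames, axis=0)
    return reshape_wtp(wide_df)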
def outreg(beta, sigma,names=None):
# assume everything is in the right order
# i won't do any rearranging here
# create a new table by drawing from each
paramnames = beta.index
paramnames_se = sigma.index
modelnames = beta.columns
# first, cut off each at three decimal places
tab_beta = beta.round(decimals=3)
tab_sigma= sigma.round(decimals=3)
# fill in NAs and Zeroes:
tab_sigma = tab_sigma.fillna('--')
#tab_sigma = tab_sigma.fillna('--')
tab_beta = tab_beta.replace(0, '--')
tab_beta = tab_beta.replace(0.0, '--')
#tab_beta = tab_beta.astype(str)
tab_sigma = tab_sigma.astype(str)
# replace the ZEROES with '--'
# which requires first converting to string
tab_new = pd.DataFrame()
# strip the rownames
for i in range(0, len(beta)):
name_p = paramnames[i]
name_s = paramnames_se[i]
new_beta = tab_beta.loc[name_p]
new_sigma = '(' + tab_sigma.loc[name_s] + ')'
#new_sigma = f'({tab_sigma.loc[name_s] + ')'
tab_new = tab_new.append(new_beta)
tab_new = tab_new.append(new_sigma)
# reset the index according to the paramnames
    if names is None:
names = paramnames
tab_new=tab_new.replace('(0.0)', '--')
tab_new=tab_new.replace('(--)', '--')
indexcol = []
for i in range(0, len(beta)):
print(names[i])
indexcol = np.append(indexcol,names[i]) # for the beta
        indexcol = np.append(indexcol,' ')  # api: numpy.append
import numpy as np
from rpi_d3m_primitives.featSelect.RecognizePC_BayesFactor import RecognizePC_BayesFactor
from rpi_d3m_primitives.featSelect.tian_checkDataSize import checkDataSize
from rpi_d3m_primitives.featSelect.helperFunctions import joint
from rpi_d3m_primitives.featSelect.conditionalMI import cmi
from rpi_d3m_primitives.featSelect.HypothesisTest import Bayesian_Factor_conditional as BF_CI
def STMB_BayesFactor(train_data, targets, bayesfactors):
numf = train_data.shape[1] # feature number
#targets = data[:, targetindex] # selected index data
# %% Recognize Target PC
CanMB = np.arange(numf) # candidates
PCD, Sepset_t, cutSetSize = RecognizePC_BayesFactor(targets, CanMB, train_data, bayesfactors)
spouse = [[]]*numf
#print("===========PC Result==========")
#print(PCD)
# print(Sepset_t)
# print(cutSetSize)
#scores = []
Des = [[]]*PCD.size
datasizeFlag = 0
#%% Find Markov blanket
for yind in range(PCD.size):
flag = 0
y = PCD[yind]
searchset = np.setdiff1d(CanMB, PCD)
for xind in range(searchset.size):
x = searchset[xind]
col = set(Sepset_t[x]).union(set([y]))
cmbVector = joint(train_data[:, np.array(list(col))])
# datasizeFlag = checkDataSize(train_data[:, x], targets, cmbVector)
#print("datasizeFlag",x,datasizeFlag)
if datasizeFlag != 1:
Independency = BF_CI(train_data[:,x], targets, cmbVector, bayesfactors)
if Independency == 0: # V structure
for s in np.setdiff1d(np.union1d(PCD,[x]), np.array([y])):
Independency = BF_CI(train_data[:,y], targets, train_data[:,s], bayesfactors)
if Independency == 1:
temp = set(Des[yind]).union(set([y]))
Des[yind] = np.array(list(temp))
flag = 1
break
else:
temp = set(spouse[y]).union(set([x]))
spouse[y]= np.array(list(temp))
if flag == 1:
break
des = [item for sublist in Des for item in sublist]
PCD = np.setdiff1d(PCD, des)
#print(PCD)
#assert(1==2)
#%% Shrink spouse
NonS = []
S = []
for i in np.setdiff1d(np.arange(numf), PCD):
spouse[i] = [] # empty
for y in np.arange(len(spouse)):
        if len(spouse[y]) > 0:
            S.append(y)  # Y has spouses
# shrink
spousecan = spouse[y]
for sind in np.arange(spousecan.size):
s = spousecan[sind]
col = set([y]).union(set(spousecan),set(PCD))
cmbVector = joint(train_data[:, np.setdiff1d(np.array(list(col)), s)])
# datasizeFlag = checkDataSize(train_data[:, s], targets, cmbVector)
if datasizeFlag != 1:
Independency = BF_CI(train_data[:,s], targets, cmbVector, bayesfactors)
if Independency == 1:
NonS = set(NonS).union(set([s]))
spouse[y] = np.setdiff1d(spousecan, np.array(list(NonS)))
NonS = []
b = []
for i in range(len(spouse)):
if len(spouse[i]) > 0:
b = set(b).union(set(spouse[i]))
# remove false spouse from PC
M = PCD # setdiff(PCD,S); % M has no spouses in PCD set
PCsize = M.size
testSet = set(S).union(set(b))
#testSet = np.array(list(temp))
C = np.zeros(shape = (PCsize, 1))
for x in M:
col = set(PCD).union(set(testSet))
cmbVector = joint(train_data[:, np.setdiff1d(np.array(list(col)), x)])
# datasizeFlag = checkDataSize(train_data[:, x], targets, cmbVector)
if datasizeFlag != 1:
Independency = BF_CI(train_data[:,x], targets, cmbVector, bayesfactors)
if Independency == 1:
                PCD = np.setdiff1d(PCD, x)  # api: numpy.setdiff1d