prompt (string, lengths 15–655k) | completion (string, lengths 3–32.4k) | api (string, lengths 8–52) |
---|---|---|
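The three columns above form a code-completion corpus: each row pairs a truncated Python source file (prompt) with the expression that finishes its last line (completion) and the fully qualified library call being completed (api, e.g. numpy.isnan). A minimal sketch of iterating rows with this schema, assuming they have been exported as JSON Lines with exactly these three string fields (the file name below is a placeholder):

import json

# Hypothetical JSONL export of the table: one object per row with the
# string fields "prompt", "completion", and "api".
with open("api_completion_rows.jsonl") as f:
    for line in f:
        row = json.loads(line)
        # e.g. api "numpy.isnan" paired with completion "np.isnan(mask_data)"
        print(row["api"], len(row["prompt"]), len(row["completion"]))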
import nipype.pipeline.engine as pe
import nipype.interfaces.io as nio
import nipype.interfaces.utility as util
import nipype.interfaces.fsl as fsl
import os
import pandas as pd
from CPAC.group_analysis.group_analysis import create_group_analysis
dropbox_root = "/scr/adenauer1/PowerFolder/Dropbox"
regressors_file = dropbox_root + "/papers/neural_correlates_of_mind_wandering/regressors.csv"
from variables import workingdir, resultsdir, subjects
derivatives = {
"reho": "reho_z/_subject_id_%s/*.nii.gz",
# # "alff": "alff_z/_subject_id_%s/*.nii.gz",
"falff": "falff_z/_subject_id_%s/*.nii.gz",
# # "left_pcc": "seed_based_z/_roi_-8.-56.26/_subject_id_%s/*.nii.gz",
# # "right_pcc": "seed_based_z/_roi_8.-56.26/_subject_id_%s/*.nii.gz",
# # "left_mpfc": "seed_based_z/_roi_-6.52.-2/_subject_id_%s/*.nii.gz",
# # "right_mpfc": "seed_based_z/_roi_6.52.-2/_subject_id_%s/*.nii.gz",
"centrality": "degree_centrality/_subject_id_%s/_z_score0/*.nii.gz",
# "falff_neg_past_c96": "post_hoc_seed_based_z/_seed_name_falff_neg_past_c96/_subject_id_%s/corr_map_calc.nii.gz",
# "falff_neg_words_c70": "post_hoc_seed_based_z/_seed_name_falff_neg_words_c70/_subject_id_%s/corr_map_calc.nii.gz",
# "falff_pos_negative_c81": "post_hoc_seed_based_z/_seed_name_falff_pos_negative_c81/_subject_id_%s/corr_map_calc.nii.gz",
# "reho_pos_friends_c93": "post_hoc_seed_based_z/_seed_name_reho_pos_friends_c93/_subject_id_%s/corr_map_calc.nii.gz",
# "falff_neg_positive_c90": "post_hoc_seed_based_z/_seed_name_falff_neg_positive_c90/_subject_id_%s/corr_map_calc.nii.gz",
# "falff_neg_words_c71": "post_hoc_seed_based_z/_seed_name_falff_neg_words_c71/_subject_id_%s/corr_map_calc.nii.gz",
# "falff_pos_positive_c101": "post_hoc_seed_based_z/_seed_name_falff_pos_positive_c101/_subject_id_%s/corr_map_calc.nii.gz",
# "reho_pos_specific_vague_c82": "post_hoc_seed_based_z/_seed_name_reho_pos_specific_vague_c82/_subject_id_%s/corr_map_calc.nii.gz",
# "falff_neg_specific_vague_c71": "post_hoc_seed_based_z/_seed_name_falff_neg_specific_vague_c71/_subject_id_%s/corr_map_calc.nii.gz",
# "falff_pos_friends_c83": "post_hoc_seed_based_z/_seed_name_falff_pos_friends_c83/_subject_id_%s/corr_map_calc.nii.gz",
# "reho_neg_future_c73": "post_hoc_seed_based_z/_seed_name_reho_neg_future_c73/_subject_id_%s/corr_map_calc.nii.gz",
# "falff_neg_words_c70_and_c71": "post_hoc_seed_based_z/_seed_name_falff_neg_words_c70/_subject_id_%s/corr_map_calc.nii.gz",
# "all_with_MeanFD_falff_neg_past_c97": "post_hoc_seed_based_z/_seed_name_all_with_MeanFD_falff_neg_past_c97/_subject_id_%s/corr_map_calc.nii.gz",
# "all_with_MeanFD_falff_neg_words_c75": "post_hoc_seed_based_z/_seed_name_all_with_MeanFD_falff_neg_words_c75/_subject_id_%s/corr_map_calc.nii.gz",
# "all_with_MeanFD_falff_pos_negative_c81": "post_hoc_seed_based_z/_seed_name_all_with_MeanFD_falff_pos_negative_c81/_subject_id_%s/corr_map_calc.nii.gz",
# "all_with_MeanFD_reho_pos_friends_c92": "post_hoc_seed_based_z/_seed_name_all_with_MeanFD_reho_pos_friends_c92/_subject_id_%s/corr_map_calc.nii.gz",
# 'all_with_MeanFD_falff_neg_positive_c88': "post_hoc_seed_based_z/_seed_name_all_with_MeanFD_falff_neg_positive_c88/_subject_id_%s/corr_map_calc.nii.gz",
# "all_with_MeanFD_falff_pos_friends_c84": "post_hoc_seed_based_z/_seed_name_all_with_MeanFD_falff_pos_friends_c84/_subject_id_%s/corr_map_calc.nii.gz",
# "all_with_MeanFD_falff_pos_positive_c98": "post_hoc_seed_based_z/_seed_name_all_with_MeanFD_falff_pos_positive_c98/_subject_id_%s/corr_map_calc.nii.gz",
# "all_with_MeanFD_reho_pos_specific_vague_c78": "post_hoc_seed_based_z/_seed_name_all_with_MeanFD_reho_pos_specific_vague_c78/_subject_id_%s/corr_map_calc.nii.gz",
# 'all_with_MeanFD_falff_neg_words_c74_and_c75': "post_hoc_seed_based_z/_seed_name_all_with_MeanFD_falff_neg_words_c74_and_c75/_subject_id_%s/corr_map_calc.nii.gz",
# 'all_with_MeanFD_falff_pos_images_c88': "post_hoc_seed_based_z/_seed_name_all_with_MeanFD_falff_pos_images_c88/_subject_id_%s/corr_map_calc.nii.gz",
# 'all_with_MeanFD_reho_neg_future_c75': "post_hoc_seed_based_z/_seed_name_all_with_MeanFD_reho_neg_future_c75/_subject_id_%s/corr_map_calc.nii.gz",
'all_with_MeanFD_falff_past_higher_than_future_c76': "post_hoc_seed_based_z/_seed_name_all_with_MeanFD_falff_past_higher_than_future_c76/_subject_id_%s/corr_map_calc.nii.gz",
'all_with_MeanFD_falff_neg_friends_c82': "post_hoc_seed_based_z/_seed_name_all_with_MeanFD_falff_neg_friends_c82/_subject_id_%s/corr_map_calc.nii.gz",
'all_with_MeanFD_falff_neg_specific_vague_c77': "post_hoc_seed_based_z/_seed_name_all_with_MeanFD_falff_neg_specific_vague_c77/_subject_id_%s/corr_map_calc.nii.gz",
'all_with_MeanFD_centrality_neg_past_c26': "post_hoc_seed_based_z/_seed_name_all_with_MeanFD_centrality_neg_past_c26_2mm/_subject_id_%s/corr_map_calc.nii.gz",
'all_with_MeanFD_reho_neg_negative_c87': "post_hoc_seed_based_z/_seed_name_all_with_MeanFD_reho_neg_negative_c87/_subject_id_%s/corr_map_calc.nii.gz",
'all_with_MeanFD_reho_positive_higher_than_negative_c75': "post_hoc_seed_based_z/_seed_name_all_with_MeanFD_reho_positive_higher_than_negative_c75/_subject_id_%s/corr_map_calc.nii.gz",
}
# for i, RSNid in enumerate([5, 15, 9, 6, 8, 1, 2, 7, 12, 11]):
# derivatives["RSN%d"%(i+1)] = "dual_regression_z/_subject_id_%s" + "/temp_reg_map_z_%04d.nii.gz"%RSNid
if __name__ == '__main__':
wf = pe.Workflow(name="group_analysis")
wf.base_dir = workingdir
wf.config['execution']['crashdump_dir'] = wf.base_dir + "/crash_files"
mask_datasource = pe.Node(nio.DataGrabber(infields=['subject_ids'], outfields = ['mask_files']), name="mask_datasource")
mask_datasource.inputs.base_directory = resultsdir
mask_datasource.inputs.template = 'functional_mask/_subject_id_%s/*.nii'
mask_datasource.inputs.template_args['mask_files'] = [['subject_ids']]
mask_datasource.inputs.sort_filelist = True
mask_datasource.inputs.subject_ids = subjects
def calculate_group_mask(list_of_subject_masks):
import nibabel as nb
import numpy as np
import os
first_nii = nb.load(list_of_subject_masks[0])
sum_mask = np.zeros(first_nii.get_shape())
for mask in list_of_subject_masks:
mask_data = nb.load(mask).get_data()
sum_mask[np.logical_and(np.logical_not(
| np.isnan(mask_data) | numpy.isnan |
#!/usr/bin/env python3
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for package mediapy.
To run this test:
pip install -r requirements.txt
./mediapy_test.py
"""
import io
import os
import pathlib
import re
import tempfile
import unittest.mock as mock
from absl.testing import absltest
from absl.testing import parameterized
import IPython
import mediapy as media
import numpy as np
_TEST_TYPES = ['uint8', 'uint16', 'uint32', 'float32', 'float64']
_TEST_SHAPES1 = [(13, 21, 3), (14, 38, 2), (16, 21, 1), (18, 20), (17, 19)]
_TEST_SHAPES2 = [(128, 128, 3), (128, 160, 1), (160, 128), (64, 64, 3),
(64, 64)]
def _rms_diff(a, b) -> float:
"""Compute the root-mean-square of the difference between two arrays."""
a = np.array(a, dtype=np.float64)
b = np.array(b, dtype=np.float64)
if a.shape != b.shape:
raise ValueError(f'Shapes {a.shape} and {b.shape} do not match.')
return np.sqrt(np.mean(np.square(a - b)))
class MediapyTest(parameterized.TestCase):
"""Tests for mediapy package."""
def assert_all_equal(self, a, b):
if not np.all(np.asarray(a) == np.asarray(b)):
self.fail(f'{a} and {b} differ.')
def assert_all_close(self, a, b, **kwargs):
if not np.allclose(a, b, **kwargs):
self.fail(f'{a} and {b} are not close enough.')
def _check_similar(self, original_array, new_array, max_rms, msg=None):
"""Verifies that the rms error between two arrays is less than max_rms."""
self.assert_all_equal(original_array.shape, new_array.shape)
rms = _rms_diff(new_array, original_array)
self.assertLess(rms, max_rms, msg)
def test_chunked(self):
self.assertEqual(list(media._chunked(range(0), 3)), [])
self.assertEqual(list(media._chunked(range(1), 3)), [(0,)])
self.assertEqual(list(media._chunked(range(2), 3)), [(0, 1)])
self.assertEqual(list(media._chunked(range(3), 3)), [(0, 1, 2)])
self.assertEqual(list(media._chunked(range(4), 3)), [(0, 1, 2), (3,)])
self.assertEqual(list(media._chunked(range(5), 3)), [(0, 1, 2), (3, 4)])
self.assertEqual(list(media._chunked(range(0), 1)), [])
self.assertEqual(list(media._chunked(range(1), 1)), [(0,)])
self.assertEqual(list(media._chunked(range(2), 1)), [(0,), (1,)])
self.assertEqual(list(media._chunked(range(3), 1)), [(0,), (1,), (2,)])
self.assertEqual(list(media._chunked(range(0), None)), [])
self.assertEqual(list(media._chunked(range(1), None)), [(0,)])
self.assertEqual(list(media._chunked(range(2), None)), [(0, 1)])
self.assertEqual(list(media._chunked(range(3), None)), [(0, 1, 2)])
def test_peek_first_on_generator(self):
generator = range(1, 5)
first, generator = media.peek_first(generator)
self.assertEqual(first, 1)
self.assert_all_equal(tuple(generator), [1, 2, 3, 4])
def test_peek_first_on_container(self):
container = [1, 2, 3, 4]
first, container = media.peek_first(container)
self.assertEqual(first, 1)
self.assert_all_equal(tuple(container), [1, 2, 3, 4])
def test_run_string(self):
with mock.patch('sys.stdout', io.StringIO()) as mock_stdout:
media.run('echo "$((17 + 22))"')
self.assertEqual(mock_stdout.getvalue(), '39\n')
with mock.patch('sys.stdout', io.StringIO()) as mock_stdout:
media.run('/bin/bash -c "echo $((17 + 22))"')
self.assertEqual(mock_stdout.getvalue(), '39\n')
with self.assertRaisesRegex(RuntimeError, 'failed with code 3'):
media.run('exit 3')
def test_run_args_sequence(self):
with mock.patch('sys.stdout', io.StringIO()) as mock_stdout:
media.run(['/bin/bash', '-c', 'echo $((17 + 22))'])
self.assertEqual(mock_stdout.getvalue(), '39\n')
def test_to_type(self):
def check(src, dtype, expected):
output = media.to_type(src, dtype)
self.assertEqual(output.dtype.type, np.dtype(dtype).type)
self.assert_all_equal(output, expected)
max32 = 4_294_967_295
b = np.array([False, True, False])
self.assertEqual(b.dtype, bool)
check(b, np.uint8, [0, 255, 0])
check(b, np.uint16, [0, 65535, 0])
check(b, np.uint32, [0, max32, 0])
check(b, np.float32, [0.0, 1.0, 0.0])
check(b, np.float64, [0.0, 1.0, 0.0])
u8 = np.array([3, 255], dtype=np.uint8)
check(u8, 'uint8', [3, 255])
check(u8, 'uint16', [int(3 / 255 * 65535 + 0.5), 65535])
check(u8, 'uint32', [int(3 / 255 * max32 + 0.5), max32])
check(u8, 'float32', [np.float32(3 / 255), 1.0])
check(u8, 'float64', [3 / 255, 1.0])
u16 = np.array([57, 65535], dtype=np.uint16)
check(u16, np.uint8, [0, 255])
check(u16, np.uint16, [57, 65535])
check(u16, np.uint32, [int(57 / 65535 * max32 + 0.5), max32])
check(u16, np.float32, [np.float32(57 / 65535), 1.0])
check(u16, 'float', [57 / 65535, 1.0])
u32 = np.array([100_000, max32], dtype=np.uint32)
check(u32, 'uint8', [0, 255])
check(u32, 'uint16', [2, 65535])
check(u32, 'uint32', u32)
check(u32, 'float32', [np.float32(100_000 / max32), 1.0])
check(u32, 'float64', [100_000 / max32, 1.0])
f32 = np.array([0.0, 0.4, 1.0], dtype=np.float32)
check(f32, np.uint8, [0, int(np.float32(0.4) * 255 + 0.5), 255])
check(f32, np.uint16, [0, int(np.float32(0.4) * 65535 + 0.5), 65535])
check(f32, np.uint32, [0, int(np.float32(0.4) * max32 + 0.5), max32])
check(f32, np.float32, [0.0, np.float32(0.4), 1.0])
check(f32, np.float64, [0.0, np.float32(0.4), 1.0])
f64 = np.array([0.0, 0.4, 1.0], dtype=np.float64)
check(f64, np.uint8, [0, int(0.4 * 255 + 0.5), 255])
check(f64, np.uint16, [0, int(0.4 * 65535 + 0.5), 65535])
check(f64, np.uint32, [0, int(0.4 * max32 + 0.5), max32])
check(f64, np.float32, [0.0, np.float32(0.4), 1.0])
check(f64, float, [0.0, 0.4, 1.0])
# An array with data type 'uint64' is possible, but it is awkward to process
# exactly because it requires more than float64 intermediate precision.
def test_to_type_extreme_value(self):
types = ['uint8', 'uint16', 'uint32', 'uint64', 'float32', 'float64']
max_of_type = dict(
bool=True,
uint8=255,
uint16=65535,
uint32=4294967295,
uint64=18446744073709551615,
float32=1.0,
float64=1.0)
for src_dtype in types + ['bool']:
for dst_dtype in types:
for shape in [(), (1,), (2, 2)]:
src_value = max_of_type[src_dtype]
src = np.full(shape, src_value, dtype=src_dtype)
dst = media.to_type(src, dst_dtype)
dst_value = dst.flat[0]
expected_value = max_of_type[dst_dtype]
msg = f'{src_dtype} {dst_dtype} {shape} {src} {dst}'
self.assertEqual(dst.dtype, dst_dtype, msg=msg)
self.assertEqual(dst.shape, src.shape, msg=msg)
self.assertEqual(dst_value, expected_value, msg=msg)
def test_to_float01(self):
self.assert_all_close(
media.to_float01(np.array([0, 1, 128, 254, 255], dtype=np.uint8)),
[0 / 255, 1 / 255, 128 / 255, 254 / 255, 255 / 255])
self.assert_all_close(
media.to_float01(np.array([0, 1, 128, 254, 65535], dtype=np.uint16)),
[0 / 65535, 1 / 65535, 128 / 65535, 254 / 65535, 65535 / 65535])
a = np.array([0.0, 0.1, 0.5, 0.9, 1.0])
self.assertIs(media.to_float01(a), a)
a = np.array([0.0, 0.1, 0.5, 0.9, 1.0], dtype=np.float32)
self.assertIs(media.to_float01(a), a)
def test_to_uint8(self):
self.assert_all_equal(
media.to_uint8(np.array([0, 1, 128, 254, 255], dtype=np.uint8)),
[0, 1, 128, 254, 255])
self.assert_all_close(
media.to_uint8([-0.2, 0.0, 0.1, 0.5, 0.9, 1.0, 1.1]), [
0, 0,
int(0.1 * 255 + 0.5),
int(0.5 * 255 + 0.5),
int(0.9 * 255 + 0.5), 255, 255
])
def test_color_ramp_float(self):
shape = (2, 3)
image = media.color_ramp(shape=shape)
self.assert_all_equal(image.shape[:2], shape)
self.assert_all_close(image, [
[
[0.5 / shape[0], 0.5 / shape[1], 0.0],
[0.5 / shape[0], 1.5 / shape[1], 0.0],
[0.5 / shape[0], 2.5 / shape[1], 0.0],
],
[
[1.5 / shape[0], 0.5 / shape[1], 0.0],
[1.5 / shape[0], 1.5 / shape[1], 0.0],
[1.5 / shape[0], 2.5 / shape[1], 0.0],
],
])
def test_color_ramp_uint8(self):
shape = (1, 3)
image = media.color_ramp(shape=shape, dtype=np.uint8)
self.assert_all_equal(image.shape[:2], shape)
expected = [[
[int(0.5 / shape[0] * 255 + 0.5),
int(0.5 / shape[1] * 255 + 0.5), 0],
[int(0.5 / shape[0] * 255 + 0.5),
int(1.5 / shape[1] * 255 + 0.5), 0],
[int(0.5 / shape[0] * 255 + 0.5),
int(2.5 / shape[1] * 255 + 0.5), 0],
]]
self.assert_all_equal(image, expected)
@parameterized.parameters(np.uint8, 'uint8', 'float32')
def test_moving_circle(self, dtype):
video = media.moving_circle(shape=(256, 256), num_images=10, dtype=dtype)
self.assert_all_equal(video.shape, (10, 256, 256, 3))
mean_image = np.mean(video, axis=0)
expected_mean = 0.329926 if dtype == 'float32' else 84.295
self.assertAlmostEqual(np.std(mean_image), expected_mean, delta=0.001)
def test_rgb_yuv_roundtrip(self):
image = np.array(
[[0, 0, 0], [255, 0, 0], [0, 255, 0], [0, 0, 255], [255, 255, 0],
[0, 255, 255], [255, 0, 255], [255, 255, 255], [128, 128, 128]],
dtype=np.uint8)
new = media.to_uint8(media.rgb_from_yuv(media.yuv_from_rgb(image)))
self.assert_all_close(image, new, atol=1)
def test_rgb_ycbcr_roundtrip(self):
image = np.array(
[[0, 0, 0], [255, 0, 0], [0, 255, 0], [0, 0, 255], [255, 255, 0],
[0, 255, 255], [255, 0, 255], [255, 255, 255], [128, 128, 128]],
dtype=np.uint8)
new = media.to_uint8(media.rgb_from_ycbcr(media.ycbcr_from_rgb(image)))
self.assert_all_close(image, new, atol=1)
def test_pil_image(self):
im = media._pil_image(
np.array([[[10, 11, 12], [40, 41, 42]]], dtype=np.uint8))
self.assertEqual(im.width, 2)
self.assertEqual(im.height, 1)
self.assertEqual(im.mode, 'RGB')
a = np.array(im)
self.assert_all_equal(a.shape, (1, 2, 3))
self.assert_all_equal(a, [[[10, 11, 12], [40, 41, 42]]])
@parameterized.parameters(zip(_TEST_TYPES, _TEST_SHAPES1))
def test_resize_image(self, str_dtype, shape):
dtype = np.dtype(str_dtype)
def create_image(shape):
image = media.color_ramp(shape[:2], dtype=dtype)
return image.mean(
axis=-1).astype(dtype) if len(shape) == 2 else image[..., :shape[2]]
image = create_image(shape)
self.assertEqual(image.dtype, dtype)
new_shape = (17, 19) + shape[2:]
new_image = media.resize_image(image, new_shape[:2])
self.assertEqual(new_image.dtype, dtype)
expected_image = create_image(new_shape)
atol = 0.0 if new_shape == shape else 0.015
self.assert_all_close(
media.to_float01(new_image),
media.to_float01(expected_image),
atol=atol)
@parameterized.parameters(zip(_TEST_TYPES, _TEST_SHAPES2))
def test_resize_video(self, str_dtype, shape):
dtype = np.dtype(str_dtype)
def create_video(shape, num_images=5):
video = media.moving_circle(shape[:2], num_images, dtype=dtype)
return video.mean(
axis=-1).astype(dtype) if len(shape) == 2 else video[..., :shape[2]]
video = create_video(shape)
self.assertEqual(video.dtype, dtype)
new_shape = (17, 19) + shape[2:]
new_video = media.resize_video(video, new_shape[:2])
self.assertEqual(new_video.dtype, dtype)
expected_video = create_video(new_shape)
self._check_similar(
media.to_float01(new_video),
media.to_float01(expected_video),
max_rms=(0.0 if new_shape == shape else 0.07))
def test_read_contents(self):
data = b'Test data'
temp_file = self.create_tempfile(content=data)
new_data = media.read_contents(temp_file)
self.assertEqual(new_data, data)
new_data = media.read_contents(pathlib.Path(temp_file))
self.assertEqual(new_data, data)
def test_read_via_local_file_on_local_file(self):
with tempfile.TemporaryDirectory() as directory_name:
filename = os.path.join(directory_name, 'file')
with open(filename, 'w') as f:
f.write('text')
with media.read_via_local_file(filename) as local_filename:
self.assertEqual(local_filename, filename)
def test_write_via_local_file_on_local_file(self):
with tempfile.TemporaryDirectory() as directory_name:
filename = os.path.join(directory_name, 'file')
with media.write_via_local_file(filename) as local_filename:
self.assertEqual(local_filename, filename)
@parameterized.parameters('uint8', 'uint16')
def test_image_write_read_roundtrip(self, dtype):
image = media.color_ramp((27, 63), dtype=dtype)
if dtype == 'uint16':
# Unfortunately PIL supports only single-channel 16-bit images for now.
image = image[..., 0]
with tempfile.TemporaryDirectory() as directory_name:
path = pathlib.Path(directory_name) / 'test.png'
media.write_image(path, image)
new_image = media.read_image(path, dtype=dtype)
self.assert_all_equal(image.shape, new_image.shape)
self.assertEqual(image.dtype, new_image.dtype)
self.assert_all_equal(image, new_image)
def test_write_image(self):
image = media.color_ramp(shape=(500, 500), dtype=np.uint8)
np.random.seed(1)
image += np.random.randint(0, 10, size=image.shape, dtype=np.uint8)
def get_num_bytes(**kwargs):
with tempfile.TemporaryDirectory() as directory_name:
filename = os.path.join(directory_name, 'test.png')
media.write_image(filename, image, **kwargs)
return os.path.getsize(filename)
self.assertAlmostEqual(get_num_bytes(), 383588, delta=300)
self.assertAlmostEqual(get_num_bytes(optimize=True), 382909, delta=300)
def test_to_rgb(self):
a = np.array([[-0.2, 0.0, 0.2, 0.8, 1.0, 1.2]])
gray_color = lambda x: [x, x, x]
self.assert_all_close(
media.to_rgb(a), [[
gray_color(0.0 / 1.4),
gray_color(0.2 / 1.4),
gray_color(0.4 / 1.4),
gray_color(1.0 / 1.4),
gray_color(1.2 / 1.4),
gray_color(1.4 / 1.4),
]],
atol=0.002)
self.assert_all_close(
media.to_rgb(a, vmin=0.0, vmax=1.0), [[
gray_color(0.0),
gray_color(0.0),
gray_color(0.2),
gray_color(0.8),
gray_color(1.0),
gray_color(1.0),
]],
atol=0.002)
a = np.array([-0.4, 0.0, 0.2])
self.assert_all_close(
media.to_rgb(a, vmin=-1.0, vmax=1.0, cmap='bwr'),
[[0.596078, 0.596078, 1.0], [1.0, 0.996078, 0.996078], [1.0, 0.8, 0.8]],
atol=0.002)
@parameterized.parameters('uint8', 'uint16')
def test_compress_decompress_image_roundtrip(self, dtype):
image = media.color_ramp((27, 63), dtype=dtype)
if dtype == 'uint16':
# Unfortunately PIL supports only single-channel 16-bit images for now.
image = image[..., 0]
data = media.compress_image(image)
new_image = media.decompress_image(data, dtype=dtype)
self.assertEqual(image.shape, new_image.shape)
self.assertEqual(image.dtype, new_image.dtype)
self.assert_all_equal(image, new_image)
def test_show_image(self):
htmls = []
with mock.patch('IPython.display.display', htmls.append):
media.show_image(media.color_ramp())
self.assertLen(htmls, 1)
self.assertIsInstance(htmls[0], IPython.display.HTML)
self.assertLen(re.findall('(?s)<table', htmls[0].data), 1)
self.assertRegex(htmls[0].data, '(?s)<img width=[^<>]*/>')
self.assertLen(re.findall('(?s)<img', htmls[0].data), 1)
def test_show_save_image(self):
with tempfile.TemporaryDirectory() as directory_name:
with media.show_save.to_dir(directory_name):
with mock.patch('IPython.display.display'):
media.show_images({'ramp': media.color_ramp((128, 128))})
filename = os.path.join(directory_name, 'ramp.png')
self.assertTrue(os.path.isfile(filename))
self.assertBetween(os.path.getsize(filename), 200, 1000)
def test_show_image_downsampled(self):
| np.random.seed(1) | numpy.random.seed |
import numpy as np
import os
from astropy.io import fits
from astropy.utils.data import get_pkg_data_filename
from astropy.wcs import WCS
from astropy.table import Table
from astropy.time import Time
from .utils_for_test import create_test_ffis
from .. import cutout_processing, CubeFactory, CutoutFactory
# Example FFI WCS for testing
with open(get_pkg_data_filename('data/ex_ffi_wcs.txt'), "r") as FLE:
WCS_STR = FLE.read()
def test_combine_headers():
header_1 = fits.Header(cards=[('KWD_SHR', 20, 'Shared keyword'),
('KWD_DIF', 'one', 'Different keyword'),
('CHECKSUM', 1283726182378, "Keyword to drop")])
header_2 = fits.Header(cards=[('KWD_SHR', 20, 'Shared keyword'),
('KWD_DIF', 'two', 'Different keyword'),
('CHECKSUM', 1248721378218, "Keyword to drop")])
combined_header = cutout_processing._combine_headers([header_1, header_2])
assert len(combined_header) == 7
assert 'KWD_SHR' in combined_header
assert 'KWD_DIF' not in combined_header
assert 'CHECKSUM' not in combined_header
assert combined_header['F01_K01'] == combined_header['F02_K01']
assert combined_header['F01_V01'] != combined_header['F02_V01']
assert combined_header['F01_V01'] == header_1[combined_header['F01_K01']]
assert 'F01_K02' not in combined_header
combined_header = cutout_processing._combine_headers([header_1, header_2], constant_only=True)
assert len(combined_header) == 1
assert 'KWD_SHR' in combined_header
assert 'KWD_DIF' not in combined_header
assert 'F01_K01' not in combined_header
def test_get_bounds():
x = [5, 10]
y = [2, 20]
size = [3, 5]
bounds = cutout_processing._get_bounds(x, y, size)
assert (bounds == np.array([[[4, 7], [0, 5]], [[8, 11], [18, 23]]])).all()
for nx, ny in bounds:
assert nx[1]-nx[0] == size[0]
assert ny[1]-ny[0] == size[1]
# test that if we move the center a small amount, we still get the same integer bounds
x = [5.9, 9.8]
y = [2.2, 20.2]
assert (cutout_processing._get_bounds(x, y, size) == bounds).all()
def test_combine_bounds():
x = [5, 10]
y = [2, 20]
size = [3, 5]
bounds = cutout_processing._get_bounds(x, y, size)
big_bounds = cutout_processing._combine_bounds(bounds[0], bounds[1])
assert big_bounds.dtype == int
for bx, by in bounds:
assert big_bounds[0, 0] <= bx[0]
assert big_bounds[0, 1] >= bx[1]
assert big_bounds[1, 0] <= by[0]
assert big_bounds[1, 1] >= by[1]
def test_area():
x = [5, 10]
y = [2, 20]
size = [3, 5]
area = np.multiply(*size)
bounds = cutout_processing._get_bounds(x, y, size)
area_0 = cutout_processing._area(bounds[0])
area_1 = cutout_processing._area(bounds[1])
assert area_0 == area
assert area_0 == area_1
def test_get_args():
wcs_obj = WCS(WCS_STR, relax=True)
bounds = np.array([[0, 4], [0, 6]])
args = cutout_processing._get_args(bounds, wcs_obj)
assert args["coordinates"] == wcs_obj.pixel_to_world(2, 3)
assert args["size"] == (6, 4)
def test_path_to_footprints():
img_wcs = WCS(WCS_STR, relax=True)
size = [4, 5]
xs = [10, 20, 30, 40, 50]
ys = [1000, 950, 900, 810, 800]
path = img_wcs.pixel_to_world(xs, ys)
footprints = cutout_processing.path_to_footprints(path, size, img_wcs)
assert len(footprints) == 1
assert (np.max(xs) - np.min(xs) + size[0]) == footprints[0]["size"][1]
assert (np.max(ys) - np.min(ys) + size[1]) == footprints[0]["size"][0]
cent_x = (np.max(xs) - np.min(xs) + size[0])//2 + np.min(xs) - size[0]/2
cent_y = (np.max(ys) - np.min(ys) + size[1])//2 +
| np.min(ys) | numpy.min |
################################################################################
# Copyright (C) 2013-2015 <NAME>
#
# This file is licensed under the MIT License.
################################################################################
"""
Unit tests for `gaussian` module.
"""
import numpy as np
from scipy import special
from numpy import testing
from .. import gaussian
from bayespy.nodes import (Gaussian,
GaussianARD,
GaussianGamma,
Gamma,
Wishart,
ConcatGaussian)
from ..wishart import WishartMoments
from ...vmp import VB
from bayespy.utils import misc
from bayespy.utils import linalg
from bayespy.utils import random
from bayespy.utils.misc import TestCase
class TestGaussianFunctions(TestCase):
def test_rotate_covariance(self):
"""
Test the Gaussian array covariance rotation.
"""
# Check matrix
R = np.random.randn(2,2)
Cov = np.random.randn(2,2)
self.assertAllClose(gaussian.rotate_covariance(Cov, R),
np.einsum('ik,kl,lj', R, Cov, R.T))
# Check matrix with plates
R = np.random.randn(2,2)
Cov = np.random.randn(4,3,2,2)
self.assertAllClose(gaussian.rotate_covariance(Cov, R),
np.einsum('...ik,...kl,...lj', R, Cov, R.T))
# Check array, first axis
R = np.random.randn(2,2)
Cov = np.random.randn(2,3,3,2,3,3)
self.assertAllClose(gaussian.rotate_covariance(Cov, R,
ndim=3,
axis=-3),
np.einsum('...ik,...kablcd,...lj->...iabjcd',
R,
Cov,
R.T))
self.assertAllClose(gaussian.rotate_covariance(Cov, R,
ndim=3,
axis=0),
np.einsum('...ik,...kablcd,...lj->...iabjcd',
R,
Cov,
R.T))
# Check array, middle axis
R = np.random.randn(2,2)
Cov = np.random.randn(3,2,3,3,2,3)
self.assertAllClose(gaussian.rotate_covariance(Cov, R,
ndim=3,
axis=-2),
np.einsum('...ik,...akbcld,...lj->...aibcjd',
R,
Cov,
R.T))
self.assertAllClose(gaussian.rotate_covariance(Cov, R,
ndim=3,
axis=1),
np.einsum('...ik,...akbcld,...lj->...aibcjd',
R,
Cov,
R.T))
# Check array, last axis
R = np.random.randn(2,2)
Cov = np.random.randn(3,3,2,3,3,2)
self.assertAllClose(gaussian.rotate_covariance(Cov, R,
ndim=3,
axis=-1),
np.einsum('...ik,...abkcdl,...lj->...abicdj',
R,
Cov,
R.T))
self.assertAllClose(gaussian.rotate_covariance(Cov, R,
ndim=3,
axis=2),
np.einsum('...ik,...abkcdl,...lj->...abicdj',
R,
Cov,
R.T))
# Check array, middle axis with plates
R = np.random.randn(2,2)
Cov = np.random.randn(4,4,3,2,3,3,2,3)
self.assertAllClose(gaussian.rotate_covariance(Cov, R,
ndim=3,
axis=-2),
np.einsum('...ik,...akbcld,...lj->...aibcjd',
R,
Cov,
R.T))
self.assertAllClose(gaussian.rotate_covariance(Cov, R,
ndim=3,
axis=1),
np.einsum('...ik,...akbcld,...lj->...aibcjd',
R,
Cov,
R.T))
pass
class TestGaussianARD(TestCase):
def test_init(self):
"""
Test the constructor of GaussianARD
"""
def check_init(true_plates, true_shape, mu, alpha, **kwargs):
X = GaussianARD(mu, alpha, **kwargs)
self.assertEqual(X.dims, (true_shape, true_shape+true_shape),
msg="Constructed incorrect dimensionality")
self.assertEqual(X.plates, true_plates,
msg="Constructed incorrect plates")
#
# Create from constant parents
#
# Use ndim=0 for constant mu
check_init((),
(),
0,
1)
check_init((3,2),
(),
np.zeros((3,2,)),
np.ones((2,)))
check_init((4,2,2,3),
(),
np.zeros((2,1,3,)),
np.ones((4,1,2,3)))
# Use ndim
check_init((4,2),
(2,3),
np.zeros((2,1,3,)),
np.ones((4,1,2,3)),
ndim=2)
# Use shape
check_init((4,2),
(2,3),
np.zeros((2,1,3,)),
np.ones((4,1,2,3)),
shape=(2,3))
# Use ndim and shape
check_init((4,2),
(2,3),
np.zeros((2,1,3,)),
np.ones((4,1,2,3)),
ndim=2,
shape=(2,3))
#
# Create from node parents
#
# ndim=0 by default
check_init((3,),
(),
GaussianARD(0, 1,
plates=(3,)),
Gamma(1, 1,
plates=(3,)))
check_init((4,2,2,3),
(),
GaussianARD(np.zeros((2,1,3)),
np.ones((2,1,3)),
ndim=3),
Gamma(np.ones((4,1,2,3)),
np.ones((4,1,2,3))))
# Use ndim
check_init((4,),
(2,2,3),
GaussianARD(np.zeros((4,1,2,3)),
np.ones((4,1,2,3)),
ndim=2),
Gamma(np.ones((4,2,1,3)),
np.ones((4,2,1,3))),
ndim=3)
# Use shape
check_init((4,),
(2,2,3),
GaussianARD(np.zeros((4,1,2,3)),
np.ones((4,1,2,3)),
ndim=2),
Gamma(np.ones((4,2,1,3)),
np.ones((4,2,1,3))),
shape=(2,2,3))
# Use ndim and shape
check_init((4,2),
(2,3),
GaussianARD(np.zeros((2,1,3)),
np.ones((2,1,3)),
ndim=2),
Gamma(np.ones((4,1,2,3)),
np.ones((4,1,2,3))),
ndim=2,
shape=(2,3))
# Test for a found bug
check_init((),
(3,),
np.ones(3),
1,
ndim=1)
# Parent mu has more axes
check_init(
(2,),
(3,),
GaussianARD(np.zeros((2,3)),
np.ones((2,3)),
ndim=2),
np.ones((2,3)),
ndim=1
)
# DO NOT add axes if necessary
self.assertRaises(
ValueError,
GaussianARD,
GaussianARD(np.zeros((2,3)),
np.ones((2,3)),
ndim=2),
1,
ndim=3
)
#
# Errors
#
# Inconsistent shapes
self.assertRaises(ValueError,
GaussianARD,
GaussianARD(np.zeros((2,3)),
np.ones((2,3)),
ndim=1),
np.ones((4,3)),
ndim=2)
# Inconsistent dims of mu and alpha
self.assertRaises(ValueError,
GaussianARD,
np.zeros((2,3)),
np.ones((2,)))
# Inconsistent plates of mu and alpha
self.assertRaises(ValueError,
GaussianARD,
GaussianARD(np.zeros((3,2,3)),
np.ones((3,2,3)),
ndim=2),
np.ones((3,4,2,3)),
ndim=3)
# Inconsistent ndim and shape
self.assertRaises(ValueError,
GaussianARD,
np.zeros((2,3)),
np.ones((2,)),
shape=(2,3),
ndim=1)
# Incorrect shape
self.assertRaises(ValueError,
GaussianARD,
GaussianARD(np.zeros((2,3)),
np.ones((2,3)),
ndim=2),
np.ones((2,3)),
shape=(2,2))
pass
def test_message_to_child(self):
"""
Test moments of GaussianARD.
"""
# Check that moments have full shape when broadcasting
X = GaussianARD(np.zeros((2,)),
np.ones((3,2)),
shape=(4,3,2))
(u0, u1) = X._message_to_child()
self.assertEqual(np.shape(u0),
(4,3,2))
self.assertEqual(np.shape(u1),
(4,3,2,4,3,2))
# Check the formula
X = GaussianARD(2, 3)
(u0, u1) = X._message_to_child()
self.assertAllClose(u0, 2)
self.assertAllClose(u1, 2**2 + 1/3)
# Check the formula for multidimensional arrays
X = GaussianARD(2*np.ones((2,1,4)),
3*np.ones((2,3,1)),
ndim=3)
(u0, u1) = X._message_to_child()
self.assertAllClose(u0, 2*np.ones((2,3,4)))
self.assertAllClose(u1,
2**2 * np.ones((2,3,4,2,3,4))
+ 1/3 * misc.identity(2,3,4))
# Check the formula for dim-broadcasted mu
X = GaussianARD(2*np.ones((3,1)),
3*np.ones((2,3,4)),
ndim=3)
(u0, u1) = X._message_to_child()
self.assertAllClose(u0, 2*np.ones((2,3,4)))
self.assertAllClose(u1,
2**2 * np.ones((2,3,4,2,3,4))
+ 1/3 * misc.identity(2,3,4))
# Check the formula for dim-broadcasted alpha
X = GaussianARD(2*np.ones((2,3,4)),
3*np.ones((3,1)),
ndim=3)
(u0, u1) = X._message_to_child()
self.assertAllClose(u0, 2*np.ones((2,3,4)))
self.assertAllClose(u1,
2**2 * np.ones((2,3,4,2,3,4))
+ 1/3 * misc.identity(2,3,4))
# Check the formula for dim-broadcasted mu and alpha
X = GaussianARD(2*np.ones((3,1)),
3*np.ones((3,1)),
shape=(2,3,4))
(u0, u1) = X._message_to_child()
self.assertAllClose(u0, 2*np.ones((2,3,4)))
self.assertAllClose(u1,
2**2 * np.ones((2,3,4,2,3,4))
+ 1/3 * misc.identity(2,3,4))
# Check the formula for dim-broadcasted mu with plates
mu = GaussianARD(2*np.ones((5,1,3,4)),
np.ones((5,1,3,4)),
shape=(3,4),
plates=(5,1))
X = GaussianARD(mu,
3*np.ones((5,2,3,4)),
shape=(2,3,4),
plates=(5,))
(u0, u1) = X._message_to_child()
self.assertAllClose(u0, 2*np.ones((5,2,3,4)))
self.assertAllClose(u1,
2**2 * np.ones((5,2,3,4,2,3,4))
+ 1/3 * misc.identity(2,3,4))
# Check posterior
X = GaussianARD(2, 3)
Y = GaussianARD(X, 1)
Y.observe(10)
X.update()
(u0, u1) = X._message_to_child()
self.assertAllClose(u0,
1/(3+1) * (3*2 + 1*10))
self.assertAllClose(u1,
(1/(3+1) * (3*2 + 1*10))**2 + 1/(3+1))
pass
def test_message_to_parent_mu(self):
"""
Test that GaussianARD computes the message to the 1st parent correctly.
"""
# Check formula with uncertain parent alpha
mu = GaussianARD(0, 1)
alpha = Gamma(2,1)
X = GaussianARD(mu,
alpha)
X.observe(3)
(m0, m1) = mu._message_from_children()
#(m0, m1) = X._message_to_parent(0)
self.assertAllClose(m0,
2*3)
self.assertAllClose(m1,
-0.5*2)
# Check formula with uncertain node
mu = GaussianARD(1, 1e10)
X = GaussianARD(mu, 2)
Y = GaussianARD(X, 1)
Y.observe(5)
X.update()
(m0, m1) = mu._message_from_children()
self.assertAllClose(m0,
2 * 1/(2+1)*(2*1+1*5))
self.assertAllClose(m1,
-0.5*2)
# Check alpha larger than mu
mu = GaussianARD(np.zeros((2,3)), 1e10, shape=(2,3))
X = GaussianARD(mu,
2*np.ones((3,2,3)))
X.observe(3*np.ones((3,2,3)))
(m0, m1) = mu._message_from_children()
self.assertAllClose(m0,
2*3 * 3 * np.ones((2,3)))
self.assertAllClose(m1,
-0.5 * 3 * 2*misc.identity(2,3))
# Check mu larger than alpha
mu = GaussianARD(np.zeros((3,2,3)), 1e10, shape=(3,2,3))
X = GaussianARD(mu,
2*np.ones((2,3)))
X.observe(3*np.ones((3,2,3)))
(m0, m1) = mu._message_from_children()
self.assertAllClose(m0,
2 * 3 * np.ones((3,2,3)))
self.assertAllClose(m1,
-0.5 * 2*misc.identity(3,2,3))
# Check node larger than mu and alpha
mu = GaussianARD(np.zeros((2,3)), 1e10, shape=(2,3))
X = GaussianARD(mu,
2*np.ones((3,)),
shape=(3,2,3))
X.observe(3*np.ones((3,2,3)))
(m0, m1) = mu._message_from_children()
self.assertAllClose(m0,
2*3 * 3*np.ones((2,3)))
self.assertAllClose(m1,
-0.5 * 2 * 3*misc.identity(2,3))
# Check broadcasting of dimensions
mu = GaussianARD(np.zeros((2,1)), 1e10, shape=(2,1))
X = GaussianARD(mu,
2*np.ones((2,3)),
shape=(2,3))
X.observe(3*np.ones((2,3)))
(m0, m1) = mu._message_from_children()
self.assertAllClose(m0,
2*3 * 3*np.ones((2,1)))
self.assertAllClose(m1,
-0.5 * 2 * 3*misc.identity(2,1))
# Check plates for smaller mu than node
mu = GaussianARD(0,1,
shape=(3,),
plates=(4,1,1))
X = GaussianARD(mu,
2*np.ones((3,)),
shape=(2,3),
plates=(4,5))
X.observe(3*np.ones((4,5,2,3)))
(m0, m1) = mu._message_from_children()
self.assertAllClose(m0 * np.ones((4,1,1,3)),
2*3 * 5*2*np.ones((4,1,1,3)))
self.assertAllClose(m1 * np.ones((4,1,1,3,3)),
-0.5*2 * 5*2*misc.identity(3) * np.ones((4,1,1,3,3)))
# Check mask
mu = GaussianARD(np.zeros((2,1,3)), 1e10, shape=(3,))
X = GaussianARD(mu,
2*np.ones((2,4,3)),
shape=(3,),
plates=(2,4,))
X.observe(3*np.ones((2,4,3)), mask=[[True, True, True, False],
[False, True, False, True]])
(m0, m1) = mu._message_from_children()
self.assertAllClose(m0,
(2*3 * np.ones((2,1,3))
* np.array([[[3]], [[2]]])))
self.assertAllClose(m1,
(-0.5*2 * misc.identity(3)
* np.ones((2,1,1,1))
* np.array([[[[3]]], [[[2]]]])))
# Check mask with different shapes
mu = GaussianARD(np.zeros((2,1,3)), 1e10, shape=())
X = GaussianARD(mu,
2*np.ones((2,4,3)),
shape=(3,),
plates=(2,4,))
mask = np.array([[True, True, True, False],
[False, True, False, True]])
X.observe(3*np.ones((2,4,3)), mask=mask)
(m0, m1) = mu._message_from_children()
self.assertAllClose(m0,
2*3 * np.sum(np.ones((2,4,3))*mask[...,None],
axis=-2,
keepdims=True))
self.assertAllClose(m1,
(-0.5*2 * np.sum(np.ones((2,4,3))*mask[...,None],
axis=-2,
keepdims=True)))
# Check non-ARD Gaussian child
mu = np.array([1,2])
Mu = GaussianARD(mu, 1e10, shape=(2,))
alpha = np.array([3,4])
Lambda = np.array([[1, 0.5],
[0.5, 1]])
X = GaussianARD(Mu, alpha, ndim=1)
Y = Gaussian(X, Lambda)
y = np.array([5,6])
Y.observe(y)
X.update()
(m0, m1) = Mu._message_from_children()
mean = np.dot(np.linalg.inv(np.diag(alpha)+Lambda),
np.dot(np.diag(alpha), mu)
+ np.dot(Lambda, y))
self.assertAllClose(m0,
np.dot(np.diag(alpha), mean))
self.assertAllClose(m1,
-0.5*np.diag(alpha))
# Check broadcasted variable axes
mu = GaussianARD(np.zeros(1), 1e10, shape=(1,))
X = GaussianARD(mu,
2,
shape=(3,))
X.observe(3*np.ones(3))
(m0, m1) = mu._message_from_children()
self.assertAllClose(m0,
2*3 * np.sum(np.ones(3), axis=-1, keepdims=True))
self.assertAllClose(m1,
-0.5*2 * np.sum(np.identity(3),
axis=(-1,-2),
keepdims=True))
pass
def test_message_to_parent_alpha(self):
"""
Test the message from GaussianARD to the 2nd parent (alpha).
"""
# Check formula with uncertain parent mu
mu = GaussianARD(1,1)
tau = Gamma(0.5*1e10, 1e10)
X = GaussianARD(mu,
tau)
X.observe(3)
(m0, m1) = tau._message_from_children()
self.assertAllClose(m0,
-0.5*(3**2 - 2*3*1 + 1**2+1))
self.assertAllClose(m1,
0.5)
# Check formula with uncertain node
tau = Gamma(1e10, 1e10)
X = GaussianARD(2, tau)
Y = GaussianARD(X, 1)
Y.observe(5)
X.update()
(m0, m1) = tau._message_from_children()
self.assertAllClose(m0,
-0.5*(1/(1+1)+3.5**2 - 2*3.5*2 + 2**2))
self.assertAllClose(m1,
0.5)
# Check alpha larger than mu
alpha = Gamma(np.ones((3,2,3))*1e10, 1e10)
X = GaussianARD(np.ones((2,3)),
alpha,
ndim=3)
X.observe(2*np.ones((3,2,3)))
(m0, m1) = alpha._message_from_children()
self.assertAllClose(m0 * np.ones((3,2,3)),
-0.5*(2**2 - 2*2*1 + 1**2) * np.ones((3,2,3)))
self.assertAllClose(m1*np.ones((3,2,3)),
0.5*np.ones((3,2,3)))
# Check mu larger than alpha
tau = Gamma(np.ones((2,3))*1e10, 1e10)
X = GaussianARD(np.ones((3,2,3)),
tau,
ndim=3)
X.observe(2*np.ones((3,2,3)))
(m0, m1) = tau._message_from_children()
self.assertAllClose(m0,
-0.5*(2**2 - 2*2*1 + 1**2) * 3 * np.ones((2,3)))
self.assertAllClose(m1 * np.ones((2,3)),
0.5 * 3 * np.ones((2,3)))
# Check node larger than mu and alpha
tau = Gamma(np.ones((3,))*1e10, 1e10)
X = GaussianARD(np.ones((2,3)),
tau,
shape=(3,2,3))
X.observe(2*np.ones((3,2,3)))
(m0, m1) = tau._message_from_children()
self.assertAllClose(m0 * np.ones(3),
-0.5*(2**2 - 2*2*1 + 1**2) * 6 * np.ones((3,)))
self.assertAllClose(m1 * np.ones(3),
0.5 * 6 * np.ones(3))
# Check plates for smaller mu than node
tau = Gamma(np.ones((4,1,2,3))*1e10, 1e10)
X = GaussianARD(GaussianARD(1, 1,
shape=(3,),
plates=(4,1,1)),
tau,
shape=(2,3),
plates=(4,5))
X.observe(2*np.ones((4,5,2,3)))
(m0, m1) = tau._message_from_children()
self.assertAllClose(m0 * np.ones((4,1,2,3)),
(-0.5 * (2**2 - 2*2*1 + 1**2+1)
* 5*np.ones((4,1,2,3))))
self.assertAllClose(m1 * np.ones((4,1,2,3)),
5*0.5 * np.ones((4,1,2,3)))
# Check mask
tau = Gamma(np.ones((4,3))*1e10, 1e10)
X = GaussianARD(np.ones(3),
tau,
shape=(3,),
plates=(2,4,))
X.observe(2*np.ones((2,4,3)), mask=[[True, False, True, False],
[False, True, True, False]])
(m0, m1) = tau._message_from_children()
self.assertAllClose(m0 * np.ones((4,3)),
(-0.5 * (2**2 - 2*2*1 + 1**2)
* np.ones((4,3))
* np.array([[1], [1], [2], [0]])))
self.assertAllClose(m1 * np.ones((4,3)),
0.5 * np.array([[1], [1], [2], [0]]) * np.ones((4,3)))
# Check non-ARD Gaussian child
mu = np.array([1,2])
alpha = np.array([3,4])
Alpha = Gamma(alpha*1e10, 1e10)
Lambda = np.array([[1, 0.5],
[0.5, 1]])
X = GaussianARD(mu, Alpha, ndim=1)
Y = Gaussian(X, Lambda)
y = np.array([5,6])
Y.observe(y)
X.update()
(m0, m1) = Alpha._message_from_children()
Cov = np.linalg.inv(np.diag(alpha)+Lambda)
mean = np.dot(Cov, np.dot(np.diag(alpha), mu)
+ np.dot(Lambda, y))
self.assertAllClose(m0 * np.ones(2),
-0.5 * np.diag(
np.outer(mean, mean) + Cov
- np.outer(mean, mu)
- np.outer(mu, mean)
+ np.outer(mu, mu)))
self.assertAllClose(m1 * np.ones(2),
0.5 * np.ones(2))
pass
def test_message_to_parents(self):
""" Check gradient passed to inputs parent node """
D = 3
X = Gaussian(np.random.randn(D), random.covariance(D))
a = Gamma(np.random.rand(D), np.random.rand(D))
Y = GaussianARD(X, a)
Y.observe(np.random.randn(D))
self.assert_message_to_parent(Y, X)
self.assert_message_to_parent(Y, a)
pass
def test_lowerbound(self):
"""
Test the variational Bayesian lower bound term for GaussianARD.
"""
# Test vector formula with full noise covariance
m = np.random.randn(2)
alpha = np.random.rand(2)
y = np.random.randn(2)
X = GaussianARD(m, alpha, ndim=1)
V = np.array([[3,1],[1,3]])
Y = Gaussian(X, V)
Y.observe(y)
X.update()
Cov = np.linalg.inv(np.diag(alpha) + V)
mu = np.dot(Cov, np.dot(V, y) + alpha*m)
x2 = np.outer(mu, mu) + Cov
logH_X = (+ 2*0.5*(1+np.log(2*np.pi))
+ 0.5*np.log(np.linalg.det(Cov)))
logp_X = (- 2*0.5*np.log(2*np.pi)
+ 0.5*np.log(np.linalg.det(np.diag(alpha)))
- 0.5*np.sum(np.diag(alpha)
* (x2
- np.outer(mu,m)
- np.outer(m,mu)
+ np.outer(m,m))))
self.assertAllClose(logp_X + logH_X,
X.lower_bound_contribution())
def check_lower_bound(shape_mu, shape_alpha, plates_mu=(), **kwargs):
M = GaussianARD(np.ones(plates_mu + shape_mu),
np.ones(plates_mu + shape_mu),
shape=shape_mu,
plates=plates_mu)
if not ('ndim' in kwargs or 'shape' in kwargs):
kwargs['ndim'] = len(shape_mu)
X = GaussianARD(M,
2*np.ones(shape_alpha),
**kwargs)
Y = GaussianARD(X,
3*np.ones(X.get_shape(0)),
**kwargs)
Y.observe(4*np.ones(Y.get_shape(0)))
X.update()
Cov = 1/(2+3)
mu = Cov * (2*1 + 3*4)
x2 = mu**2 + Cov
logH_X = (+ 0.5*(1+np.log(2*np.pi))
+ 0.5*np.log(Cov))
logp_X = (- 0.5*np.log(2*np.pi)
+ 0.5*np.log(2)
- 0.5*2*(x2 - 2*mu*1 + 1**2+1))
r = np.prod(X.get_shape(0))
self.assertAllClose(r * (logp_X + logH_X),
X.lower_bound_contribution())
# Test scalar formula
check_lower_bound((), ())
# Test array formula
check_lower_bound((2,3), (2,3))
# Test dim-broadcasting of mu
check_lower_bound((3,1), (2,3,4))
# Test dim-broadcasting of alpha
check_lower_bound((2,3,4), (3,1))
# Test dim-broadcasting of mu and alpha
check_lower_bound((3,1), (3,1),
shape=(2,3,4))
# Test dim-broadcasting of mu with plates
check_lower_bound((), (),
plates_mu=(),
shape=(),
plates=(5,))
# BUG: Scalar parents for array variable caused einsum error
check_lower_bound((), (),
shape=(3,))
# BUG: Log-det was summed over plates
check_lower_bound((), (),
shape=(3,),
plates=(4,))
pass
def test_rotate(self):
"""
Test the rotation of Gaussian ARD arrays.
"""
def check(shape, plates, einsum_x, einsum_xx, axis=-1):
# TODO/FIXME: Improve by having non-diagonal precision/covariance
# parameter for the Gaussian X
D = shape[axis]
X = GaussianARD(np.random.randn(*(plates+shape)),
np.random.rand(*(plates+shape)),
shape=shape,
plates=plates)
(x, xx) = X.get_moments()
R = np.random.randn(D,D)
X.rotate(R, axis=axis)
(rx, rxxr) = X.get_moments()
self.assertAllClose(rx,
np.einsum(einsum_x, R, x))
self.assertAllClose(rxxr,
np.einsum(einsum_xx, R, xx, R))
pass
# Rotate vector
check((3,), (),
'...jk,...k->...j',
'...mk,...kl,...nl->...mn')
check((3,), (2,4),
'...jk,...k->...j',
'...mk,...kl,...nl->...mn')
# Rotate array
check((2,3,4), (),
'...jc,...abc->...abj',
'...mc,...abcdef,...nf->...abmden',
axis=-1)
check((2,3,4), (5,6),
'...jc,...abc->...abj',
'...mc,...abcdef,...nf->...abmden',
axis=-1)
check((2,3,4), (),
'...jb,...abc->...ajc',
'...mb,...abcdef,...ne->...amcdnf',
axis=-2)
check((2,3,4), (5,6),
'...jb,...abc->...ajc',
'...mb,...abcdef,...ne->...amcdnf',
axis=-2)
check((2,3,4), (),
'...ja,...abc->...jbc',
'...ma,...abcdef,...nd->...mbcnef',
axis=-3)
check((2,3,4), (5,6),
'...ja,...abc->...jbc',
'...ma,...abcdef,...nd->...mbcnef',
axis=-3)
pass
def test_rotate_plates(self):
# Basic test for Gaussian vectors
X = GaussianARD(np.random.randn(3,2),
np.random.rand(3,2),
shape=(2,),
plates=(3,))
(u0, u1) = X.get_moments()
Cov = u1 - linalg.outer(u0, u0, ndim=1)
Q = np.random.randn(3,3)
Qu0 = np.einsum('ik,kj->ij', Q, u0)
QCov = np.einsum('k,kij->kij', np.sum(Q, axis=0)**2, Cov)
Qu1 = QCov + linalg.outer(Qu0, Qu0, ndim=1)
X.rotate_plates(Q, plate_axis=-1)
(u0, u1) = X.get_moments()
self.assertAllClose(u0, Qu0)
self.assertAllClose(u1, Qu1)
# Test full covariance, that is, with observations
X = GaussianARD(np.random.randn(3,2),
np.random.rand(3,2),
shape=(2,),
plates=(3,))
Y = Gaussian(X, [[2.0, 1.5], [1.5, 3.0]],
plates=(3,))
Y.observe(np.random.randn(3,2))
X.update()
(u0, u1) = X.get_moments()
Cov = u1 - linalg.outer(u0, u0, ndim=1)
Q = np.random.randn(3,3)
Qu0 = np.einsum('ik,kj->ij', Q, u0)
QCov = np.einsum('k,kij->kij', np.sum(Q, axis=0)**2, Cov)
Qu1 = QCov + linalg.outer(Qu0, Qu0, ndim=1)
X.rotate_plates(Q, plate_axis=-1)
(u0, u1) = X.get_moments()
self.assertAllClose(u0, Qu0)
self.assertAllClose(u1, Qu1)
pass
def test_initialization(self):
"""
Test initialization methods of GaussianARD
"""
X = GaussianARD(1, 2, shape=(2,), plates=(3,))
# Prior initialization
mu = 1 * np.ones((3, 2))
alpha = 2 * np.ones((3, 2))
X.initialize_from_prior()
u = X._message_to_child()
self.assertAllClose(u[0]*np.ones((3,2)),
mu)
self.assertAllClose(u[1]*np.ones((3,2,2)),
linalg.outer(mu, mu, ndim=1) +
misc.diag(1/alpha, ndim=1))
# Parameter initialization
mu = np.random.randn(3, 2)
alpha = np.random.rand(3, 2)
X.initialize_from_parameters(mu, alpha)
u = X._message_to_child()
self.assertAllClose(u[0], mu)
self.assertAllClose(u[1], linalg.outer(mu, mu, ndim=1) +
misc.diag(1/alpha, ndim=1))
# Value initialization
x = np.random.randn(3, 2)
X.initialize_from_value(x)
u = X._message_to_child()
self.assertAllClose(u[0], x)
self.assertAllClose(u[1], linalg.outer(x, x, ndim=1))
# Random initialization
X.initialize_from_random()
pass
class TestGaussianGamma(TestCase):
"""
Unit tests for GaussianGamma node.
"""
def test_init(self):
"""
Test the creation of GaussianGamma node
"""
# Test 0-ndim Gaussian-Gamma
X_alpha = GaussianGamma([1,2], [0.1, 0.2], [0.02, 0.03], [0.03, 0.04], ndim=0)
# Simple construction
X_alpha = GaussianGamma([1,2,3], np.identity(3), 2, 10)
self.assertEqual(X_alpha.plates, ())
self.assertEqual(X_alpha.dims, ( (3,), (3,3), (), () ))
# Plates
X_alpha = GaussianGamma([1,2,3], np.identity(3), 2, 10, plates=(4,))
self.assertEqual(X_alpha.plates, (4,))
self.assertEqual(X_alpha.dims, ( (3,), (3,3), (), () ))
# Plates in mu
X_alpha = GaussianGamma(np.ones((4,3)), np.identity(3), 2, 10)
self.assertEqual(X_alpha.plates, (4,))
self.assertEqual(X_alpha.dims, ( (3,), (3,3), (), () ))
# Plates in Lambda
X_alpha = GaussianGamma(np.ones(3), np.ones((4,3,3))*np.identity(3), 2, 10)
self.assertEqual(X_alpha.plates, (4,))
self.assertEqual(X_alpha.dims, ( (3,), (3,3), (), () ))
# Plates in a
X_alpha = GaussianGamma(np.ones(3), np.identity(3), np.ones(4), 10)
self.assertEqual(X_alpha.plates, (4,))
self.assertEqual(X_alpha.dims, ( (3,), (3,3), (), () ))
# Plates in Lambda
X_alpha = GaussianGamma(np.ones(3), np.identity(3), 2, np.ones(4))
self.assertEqual(X_alpha.plates, (4,))
self.assertEqual(X_alpha.dims, ( (3,), (3,3), (), () ))
# Inconsistent plates
self.assertRaises(ValueError,
GaussianGamma,
np.ones((4,3)),
np.identity(3),
2,
10,
plates=())
# Inconsistent plates
self.assertRaises(ValueError,
GaussianGamma,
np.ones((4,3)),
np.identity(3),
2,
10,
plates=(5,))
# Unknown parameters
mu = Gaussian(np.zeros(3), np.identity(3))
Lambda = Wishart(10, np.identity(3))
b = Gamma(1, 1)
X_alpha = GaussianGamma(mu, Lambda, 2, b)
self.assertEqual(X_alpha.plates, ())
self.assertEqual(X_alpha.dims, ( (3,), (3,3), (), () ))
# mu is Gaussian-gamma
mu_tau = GaussianGamma(np.ones(3), np.identity(3), 5, 5)
X_alpha = GaussianGamma(mu_tau, np.identity(3), 5, 5)
self.assertEqual(X_alpha.plates, ())
self.assertEqual(X_alpha.dims, ( (3,), (3,3), (), () ))
pass
def test_message_to_child(self):
"""
Test the message to child of GaussianGamma node.
"""
# Simple test
mu = np.array([1,2,3])
Lambda = np.identity(3)
a = 2
b = 10
X_alpha = GaussianGamma(mu, Lambda, a, b)
u = X_alpha._message_to_child()
self.assertEqual(len(u), 4)
tau = np.array(a/b)
self.assertAllClose(u[0],
tau[...,None] * mu)
self.assertAllClose(u[1],
(linalg.inv(Lambda)
+ tau[...,None,None] * linalg.outer(mu, mu)))
self.assertAllClose(u[2],
tau)
self.assertAllClose(u[3],
-np.log(b) + special.psi(a))
# Test with unknown parents
mu = Gaussian(np.arange(3), 10*np.identity(3))
Lambda = Wishart(10, np.identity(3))
a = 2
b = Gamma(3, 15)
X_alpha = GaussianGamma(mu, Lambda, a, b)
u = X_alpha._message_to_child()
(mu, mumu) = mu._message_to_child()
Cov_mu = mumu - linalg.outer(mu, mu)
(Lambda, _) = Lambda._message_to_child()
(b, _) = b._message_to_child()
(tau, logtau) = Gamma(a, b + 0.5*np.sum(Lambda*Cov_mu))._message_to_child()
self.assertAllClose(u[0],
tau[...,None] * mu)
self.assertAllClose(u[1],
(linalg.inv(Lambda)
+ tau[...,None,None] * linalg.outer(mu, mu)))
self.assertAllClose(u[2],
tau)
self.assertAllClose(u[3],
logtau)
# Test with plates
mu = Gaussian(np.reshape(np.arange(3*4), (4,3)),
10*np.identity(3),
plates=(4,))
Lambda = Wishart(10, np.identity(3))
a = 2
b = Gamma(3, 15)
X_alpha = GaussianGamma(mu, Lambda, a, b, plates=(4,))
u = X_alpha._message_to_child()
(mu, mumu) = mu._message_to_child()
Cov_mu = mumu - linalg.outer(mu, mu)
(Lambda, _) = Lambda._message_to_child()
(b, _) = b._message_to_child()
(tau, logtau) = Gamma(a,
b + 0.5*np.sum(Lambda*Cov_mu,
axis=(-1,-2)))._message_to_child()
self.assertAllClose(u[0] * np.ones((4,1)),
np.ones((4,1)) * tau[...,None] * mu)
self.assertAllClose(u[1] * np.ones((4,1,1)),
np.ones((4,1,1)) * (linalg.inv(Lambda)
+ tau[...,None,None] * linalg.outer(mu, mu)))
self.assertAllClose(u[2] * np.ones(4),
np.ones(4) * tau)
self.assertAllClose(u[3] * np.ones(4),
np.ones(4) * logtau)
pass
def test_mask_to_parent(self):
"""
Test the mask handling in GaussianGamma node
"""
pass
def test_messages(self):
D = 2
M = 3
np.random.seed(42)
def check(mu, Lambda, alpha, beta, ndim):
X = GaussianGamma(
mu,
(
Lambda if isinstance(Lambda._moments, WishartMoments) else
Lambda.as_wishart(ndim=ndim)
),
alpha,
beta,
ndim=ndim
)
self.assert_moments(
X,
postprocess=lambda u: [
u[0],
u[1] + linalg.transpose(u[1], ndim=ndim),
u[2],
u[3]
],
rtol=1e-5,
atol=1e-6,
eps=1e-8
)
X.observe(
(
np.random.randn(*(X.plates + X.dims[0])),
np.random.rand(*X.plates)
)
)
self.assert_message_to_parent(X, mu)
self.assert_message_to_parent(
X,
Lambda,
postprocess=lambda m: [
m[0] + linalg.transpose(m[0], ndim=ndim),
m[1],
]
)
self.assert_message_to_parent(X, beta)
check(
Gaussian(np.random.randn(M, D), random.covariance(D), plates=(M,)),
Wishart(D + np.random.rand(M), random.covariance(D), plates=(M,)),
| np.random.rand(M) | numpy.random.rand |
from psana.event import Event
from psana import dgram
from psana.psexp.packet_footer import PacketFooter
import numpy as np
import os
class EventManager(object):
def __init__(self, smd_configs, dm, filter_fn=0):
self.smd_configs = smd_configs
self.dm = dm
self.n_smd_files = len(self.smd_configs)
self.filter_fn = filter_fn
def events(self, view):
pf = PacketFooter(view=view)
views = pf.split_packets()
# Keeps offset, size, & timestamp for all events in the batch
# for batch reading (if filter_fn is not given).
ofsz_batch =
| np.zeros((pf.n_packets, self.n_smd_files, 2), dtype=np.intp) | numpy.zeros |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Generic packages
import numpy as np
import hjson, json, os
from pathlib import Path
# For integration times
import astropy.units as u
# For keep outs and to convert decimal dates into readable dates
import EXOSIMS, EXOSIMS.MissionSim
from astropy.time import Time
from scripts.cgi_etc_star_accessibility import cgi_etc_star_accessibility
# (Optional) Plotting the results
import matplotlib.pyplot as plt
# IMD
import pandas as pd
# Updated specs for EXOSIMS
from scripts.cgi_etc_update_specs import cgi_etc_update_specs
# Linear interpolation
from scipy import interpolate
# CSV file
from scripts.store_csv_file import store_csv_file_rv
def cgi_etc_rv_shortest_integration_time(CGI_epoch0, CGI_epoch1, filterList, jsonFile, csvFileName, CGI_Observations):
# Path with the orbital data of the planets from IMD (https://plandb.sioslab.com/index.php)
pathIMD = './imd/'
# Meaning of table parameters from IMD (# https://plandb.sioslab.com/docs/html/index.html#planetorbits-table)
# t = time in days since 01/01/2026
# r = actual distance planet-host star in AU
# s = visual separation
# beta = orbital phase angle
# Remember (see above) that IMD data start at 01/01/2026 (not sure whether it is based on MJD, ISO, or else)
imdEpoch0 = 2026.00
# P.S. PName is used later on to read csv files from IMD
# P.P.S. Leave a blank space between the name of the star and the planet, e.g., 14 Her b
PName = CGI_Observations['PName']
nPlanets = len(PName)
# HIP identifiers
hipPName = CGI_Observations['hipPName']
# Derive the star's accessibility
accessibleDays = cgi_etc_star_accessibility(CGI_epoch0, CGI_epoch1,
jsonFile, csvFileName, PName, hipPName)
# Write star names
starName = [''] * nPlanets
starNameCommon = [''] * nPlanets
for i_p in np.arange(nPlanets):
starName[i_p] = hipPName[i_p][0:len(hipPName[i_p])-2]
starNameCommon[i_p] = PName[i_p][0:len(PName[i_p])-2]
# Values of the cloud fsed used by IMD
cloudFsed = [0.00, 0.01, 0.03, 0.10, 0.30, 1.00, 3.00, 6.00]
nFsed = len(cloudFsed)
# Table of weights used to average the DMag from IMD (table provided by <NAME> to <NAME>)
freqFsed = [0.099, 0.001, 0.005, 0.010, 0.025, 0.280, 0.300, 0.280]
# Make sure it is normalized (it is, just in case it gets changed in the future)
freqFsed = freqFsed / np.sum(freqFsed)
# Filters: Band 1, 3 and 4
# P.S. Technical note for developers: Use 'NF', 'Amici_Spec' and 'WF', respectively, because
# these substrings are used when assigning the actual value of
# the post-processing value for each mode. Also, EXOSIMS makes use of 'Amici' and 'Spec' in Nemati_2019.py
nFilters = len(filterList)
# Keeping track of the actual value of the post-processing factor used in the estimations.
# Notice that EXOSIMS's ppFact equals (1/kpp), where kpp is the post-processing factor in CGI Perf,
# which is the usual way to report it. For instance, for the NF, kpp=2, and then EXOSIMS ppFact is 0.5.
kppList = np.empty(nFilters)
kppList.fill(np.nan)
####################################################
# Deriving expected integration times given an SNR #
####################################################
# SNR list
SNRRefList = CGI_Observations['SNRList']
nSNRRef = len(SNRRefList)
# SNR list to derive the integration times (fast, no worries)
# Grid of values of SNR, instead of results for the values in SNRRefList only.
# P.S. Small SNR values are used to highlight cases that are not worth
# observing
SNRList = np.sort(np.concatenate([SNRRefList, np.arange(0.5,20,0.5),
np.arange(20,105,5)], axis=0))
nSNR = len(SNRList)
# Keeping track of the SNR actually found (in general, it should be the same as
# in SNRRefList but they are the values in SNRList closest to SNRRefList)
SNRRefFound = np.empty(len(SNRRefList))
SNRRefFound.fill(np.nan)
## First and last indices for the epochs under consideration
dayEpoch0 = np.round(365.25 * (CGI_epoch0 - imdEpoch0)).astype(int)
dayEpoch1 = np.round(365.25 * (CGI_epoch1 - imdEpoch0)).astype(int)
# Imaging Mission Database says that the orbits are computed every 30 days, but there are cases where this is not the case (02/10/21: https://plandb.sioslab.com/plandetail.php?name=47+UMa+d whose CSV table has steps of 141 days)
# I just assume it is 1 day, although in general it is larger. No problem. The rest of unused indices are filled with NaN
dayEpochArray = np.empty((nPlanets, dayEpoch1 - dayEpoch0 + 1))
dayEpochArray.fill(np.nan)
waArcsecArray = np.empty((nPlanets, dayEpoch1 - dayEpoch0 + 1))
waArcsecArray.fill(np.nan)
fRatioArray = np.empty((nPlanets, nFilters, dayEpoch1 - dayEpoch0 + 1))
fRatioArray.fill(np.nan)
intTimeFilterHours = np.empty((nPlanets, nFilters, nSNR, dayEpoch1 - dayEpoch0 + 1))
intTimeFilterHours.fill(np.nan)
sInds = np.empty(nPlanets, dtype=int)
sInds.fill(np.nan)
# Looping over filters
for i_flt in np.arange(nFilters):
# Updating the instrumental specs because of the different post-processing factor for each filter.
kppTmp, OSTmp, TLTmp, TKTmp = \
cgi_etc_update_specs(jsonFile, filterList[i_flt],
CGI_epoch0, CGI_epoch1)
kppList[i_flt] = kppTmp
mode = list(filter(lambda mode: mode['instName'] == filterList[i_flt], OSTmp.observingModes))[0]
# Local zodi
fZ = TLTmp.ZodiacalLight.fZ0
# Loop over planets
for i_pl in np.arange(nPlanets):
# Index where the host star is found in the target list
sInds[i_pl] = np.where(TLTmp.Name == starName[i_pl])[0]
# Reading the CSV file from IMD
PStr = PName[i_pl]
# P.S. From IMD: if no inclination available, orbit is assumed edge-on. If no eccentricity is available, orbit is assumed circular.
planetDataOrig = pd.read_csv(pathIMD + PStr.replace(' ', '_' ) + '_orbit_data.csv')
# IMD documentation (point 11 in https://plandb.sioslab.com/docs/html/index.html#planetorbits-table)
# say (sic) "NaN when period of time of periastron passage are undefined"
# If this is the case skip the planet
if np.isnan(planetDataOrig['t']).all() == True:
print('WARNING: Planet ' + PName[i_pl] + ' has undefined Ephemeris. Skipping it ...')
continue
# Creating a new pandas dataframe for each day using linear interpolation
dict_tmp = {}
dict_tmp['t'] = dayEpoch0 + np.arange(dayEpoch1-dayEpoch0+1)
for column in planetDataOrig.columns:
if column == 't': continue
if isinstance(planetDataOrig[column][0], float):
interpolant = interpolate.interp1d(planetDataOrig['t'],
planetDataOrig[column], kind='linear')
# IMD ephemeris may have more than 1 orbit
try:
orbital_period = np.where(planetDataOrig['t']==0)[0]
orbital_period_days = \
planetDataOrig['t'][orbital_period[1]-1] - \
planetDataOrig['t'][orbital_period[0]]
dict_tmp[column] = \
interpolant(dict_tmp['t'] % orbital_period_days)
except IndexError:
# Only a single orbit (no second zero epoch) in the table: interpolate directly
dict_tmp[column] = interpolant(dict_tmp['t'])
# Assemble the per-day ephemeris into a dataframe
planetDataCgi = pd.DataFrame.from_dict(dict_tmp)
dayEpochArray[i_pl,0:len(planetDataCgi)] = planetDataCgi['t']
# Angular visual separation of the planet
waArcsec = planetDataCgi['WA'].values / 1000 * u.arcsec
waArcsecArray[i_pl,0:len(waArcsec)]=waArcsec.value
# Actual planet-star distance (only used for exozodi)
r_au = planetDataCgi['r'].values * u.AU
# Fiducial visual inclination (only used for exozodi). The CSV files from IMD do not provide it.
inc_deg = [20] * u.deg
# Exozodi along the orbit
fEZ = TLTmp.ZodiacalLight.fEZ(np.array([TLTmp.MV[sInds[i_pl]]]), inc_deg, r_au)
fRatio = np.zeros(len(planetDataCgi['t']))
# Looping over cloud fsed to get the average flux ratio
for i_fsed in np.arange(nFsed):
# Using the center wavelength of each observing mode to select the corresponding data
# These values are stored in new columns pPhi_XXXC_YYYNM and dMag_XXXC_YYYNM
# where XXX is the cloud fsed scaled by 100 (000 representing no cloud) and
# YYY is the wavelength in nm.
keyPlanetDataCgi = 'dMag_' + str(format(np.round(cloudFsed[i_fsed] * 100).astype(int),'03d')) + 'C_' + str(mode['lam'].to_value().astype(int)) + 'NM'
fRatio = fRatio + freqFsed[i_fsed] * np.power(10,-0.4 * planetDataCgi[keyPlanetDataCgi])
fRatioArray[i_pl, i_flt,0:len(fRatio)]= np.array(fRatio)
dMags = -2.5 * np.log10(np.array(fRatio))
# Only consider days that are accessible
try:
dMags[accessibleDays == False] = np.nan
except Exception:
# Pass in case accessibility has not been computed
pass
# Looping over SNR
for i_snr in np.arange(nSNR):
mode['SNR'] = SNRList[i_snr]
intTimeTmp = OSTmp.calc_intTime(TLTmp, np.array([sInds[i_pl]]), fZ, fEZ, dMags, waArcsec, mode, TK=TKTmp).to('hour').value
intTimeTmp[np.where(intTimeTmp == 0)] = np.nan
intTimeFilterHours[i_pl, i_flt, i_snr, 0:len(fRatio)] = intTimeTmp
# Restoring the 'true_divide' error after EXOSIMS run
np.seterr(divide='warn', invalid='warn')
# Getting the maximum SNR achievable within the time the target is accessible, and the corresponding integration time
SNRPlanetMax = np.empty((nPlanets, nFilters))
SNRPlanetMax.fill(np.min(SNRList))
intTimeSNRMax = np.empty((nPlanets, nFilters))
intTimeSNRMax.fill(np.nan)
intTmpHours = np.empty((nSNR))
intTmpHours.fill(np.nan)
for i_pl in np.arange(nPlanets):
# Days that the target is accessible
if nPlanets == 1:
nDaysPlanet = np.sum(accessibleDays)
else:
nDaysPlanet = np.sum(accessibleDays[i_pl])
for i_flt in np.arange(nFilters):
for i_snr in np.arange(nSNR):
# Shortest integration time within accessible times
intTmpHours[i_snr] = \
np.nanmin(intTimeFilterHours[i_pl, i_flt, i_snr, :])
# The first time an SNR cannot be achieved, the previous step already held
# the largest achievable value
if not np.isnan(intTmpHours[i_snr]):
# If the integration time fits within the accessibility window
if intTmpHours[i_snr] <= (nDaysPlanet*24):
SNRPlanetMax[i_pl, i_flt] = SNRList[i_snr]
intTimeSNRMax[i_pl, i_flt] = intTmpHours[i_snr]
else:
SNRInterpolant = interpolate.interp1d(
intTmpHours[0:i_snr+1], SNRList[0:i_snr+1],
kind='linear')
# Round to 1 decimal place (it's SNR)
SNRPlanetMax[i_pl, i_flt] = \
np.round(SNRInterpolant(nDaysPlanet*24), decimals=1)
intTimeSNRMax[i_pl, i_flt] = nDaysPlanet*24
# Replace bad cases by NaN now
for i_pl in np.arange(nPlanets):
for i_flt in np.arange(nFilters):
if SNRPlanetMax[i_pl, i_flt] == np.min(SNRList):
SNRPlanetMax[i_pl, i_flt] = np.nan
intTimeSNRMax[i_pl, i_flt]= np.nan
# Summarize results
nSNRRef = len(SNRRefList)
# The Epoch of observation, WA, and flux ratio do not change with SNR
dayEpochBestTime = np.empty((nPlanets, nFilters, nSNR, 3))
dayEpochBestTime.fill(np.nan)
# Days that are necessary to get the integration time
# (e.g., according to some observing sequence, like OS11)
dayOperationalBestTime = np.empty((nPlanets, nFilters, nSNR, 3))
dayOperationalBestTime.fill(np.nan)
# In the case of OS11, 14 hours out of every 24 are dedicated to observing a target
fOperation = 14 / 24
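# Illustrative check (example numbers, not from any mission spec): with
# fOperation = 14/24, a 70-hour integration spans 70 / 14 = 5 operational days,
# i.e. (1 / fOperation) * 70 / 24 calendar days, which is how the factor is
# applied below.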
waMasBestTime = np.empty((nPlanets, nFilters, nSNR, 3))
waMasBestTime.fill(np.nan)
fRatioBestTime = np.empty((nPlanets, nFilters, nSNR, 3))
fRatioBestTime.fill(np.nan)
# The integration time depends on the SNR
intTimeBestHours = np.empty((nPlanets, nFilters, nSNR))
intTimeBestHours.fill(np.nan)
for i_pl in np.arange(nPlanets):
for i_flt in np.arange(nFilters):
i_snr_2 = 0
for snr in SNRRefList:
i_snr = int(np.argmin(np.abs(snr - SNRList)))
# Finding the shortest integration time
# If all are NaN, skip
if np.isnan(intTimeFilterHours[i_pl, i_flt, i_snr]).all():
continue
indBest = np.where(intTimeFilterHours[i_pl, i_flt, i_snr] == np.nanmin(intTimeFilterHours[i_pl, i_flt, i_snr]))
# Verify that the integration time is less than the maximum available
if (indBest[0].size != 0) and \
(intTimeFilterHours[i_pl, i_flt, i_snr, indBest] < intTimeSNRMax[i_pl, i_flt]):
dayEpochBestTime[i_pl, i_flt, i_snr_2, 1] = dayEpochArray[i_pl, indBest]
dayOperationalBestTime[i_pl, i_flt, i_snr_2, 1] = dayEpochArray[i_pl, indBest]
waMasBestTime[i_pl, i_flt, i_snr_2, 1] = waArcsecArray[i_pl, indBest] * 1000 # arcsec to milli-arcsec
fRatioBestTime[i_pl, i_flt, i_snr_2, 1] = fRatioArray[i_pl, i_flt, indBest]
intTimeBestHours[i_pl, i_flt, i_snr_2] = intTimeFilterHours[i_pl, i_flt, i_snr, indBest]
# Filling out the values before/after the best time
dayEpochBestTime[i_pl, i_flt, i_snr_2, 0] = dayEpochBestTime[i_pl, i_flt, i_snr_2, 1] - intTimeBestHours[i_pl, i_flt, i_snr_2] / 24 / 2
# In case the first date is before the mission start
if dayEpochBestTime[i_pl, i_flt, i_snr_2, 0] < 0:
dayEpochBestTime[i_pl, i_flt, i_snr_2, 0] = 0
dayEpochBestTime[i_pl, i_flt, i_snr_2, 2] = dayEpochBestTime[i_pl, i_flt, i_snr_2, 1] + intTimeBestHours[i_pl, i_flt, i_snr_2] / 24
else:
dayEpochBestTime[i_pl, i_flt, i_snr_2, 2] = dayEpochBestTime[i_pl, i_flt, i_snr_2, 1] + intTimeBestHours[i_pl, i_flt, i_snr_2] / 24 / 2
# Operational days have a fudge factor
dayOperationalBestTime[i_pl, i_flt, i_snr_2, 0] = dayEpochBestTime[i_pl, i_flt, i_snr_2, 1] - ( 1 / fOperation) * intTimeBestHours[i_pl, i_flt, i_snr_2] / 24 / 2
# In case the first date is before the mission start
if dayOperationalBestTime[i_pl, i_flt, i_snr_2, 0] < 0:
dayOperationalBestTime[i_pl, i_flt, i_snr_2, 0] = 0
dayOperationalBestTime[i_pl, i_flt, i_snr_2, 2] = dayEpochBestTime[i_pl, i_flt, i_snr_2, 1] + ( 1 / fOperation ) * intTimeBestHours[i_pl, i_flt, i_snr_2] / 24
else:
dayOperationalBestTime[i_pl, i_flt, i_snr_2, 2] = dayEpochBestTime[i_pl, i_flt, i_snr_2, 1] + ( 1 / fOperation ) * intTimeBestHours[i_pl, i_flt, i_snr_2] / 24 / 2
waMasBestTime[i_pl, i_flt, i_snr_2, 0] = np.interp(dayEpochBestTime[i_pl, i_flt, i_snr_2, 0],
dayEpochArray[i_pl,~np.isnan(dayEpochArray[i_pl])],
1000 * waArcsecArray[i_pl,~np.isnan(dayEpochArray[i_pl])])
waMasBestTime[i_pl, i_flt, i_snr_2, 2] = np.interp(dayEpochBestTime[i_pl, i_flt, i_snr_2, 2],
dayEpochArray[i_pl,~
|
np.isnan(dayEpochArray[i_pl])
|
numpy.isnan
|
# -*- implants -*-
"""
Functions for creating retinal implants
"""
import numpy as np
import logging
from pulse2percept import utils
SUPPORTED_IMPLANT_TYPES = ['epiretinal', 'subretinal']
class Electrode(object):
def __init__(self, etype, radius, x_center, y_center, height=0, name=None):
"""Create an electrode on the retina
This function creates a disk electrode of type `etype` and places it
on the retina at location (`xs`, `ys`) in microns. The electrode has
radius `radius` (microns) and sits a distance `height` away from the
retinal surface.
The coordinate system is anchored around the fovea at (0, 0).
Parameters
----------
etype : str
Electrode type, {'epiretinal', 'subretinal'}
radius : float
The radius of the electrode (in microns).
x_center : float
The x coordinate of the electrode center (in microns) from the
fovea.
y_center : float
The y coordinate of the electrode center (in microns) from the fovea.
height : float
The height of the electrode from the retinal surface:
- epiretinal array: distance to the ganglion layer
- subretinal array: distance to the bipolar layer
name : string
Electrode name
"""
assert radius >= 0
assert height >= 0
if etype.lower() not in SUPPORTED_IMPLANT_TYPES:
e_s = "Acceptable values for `etype` are: "
e_s += ", ".join(SUPPORTED_IMPLANT_TYPES) + "."
raise ValueError(e_s)
self.etype = etype.lower()
self.radius = radius
self.x_center = x_center
self.y_center = y_center
self.name = name
self.height = height
def __str__(self):
info_s = "Electrode(%s, r=%.2f um, " % (self.etype, self.radius)
info_s += "(x,y) = (%.2f, %.2f) um, " % (self.x_center, self.y_center)
info_s += "h=%.2f um, n=%s" % (self.height, self.name)
return info_s
def get_height(self):
"""Returns the electrode-retina distance
For epiretinal electrodes, this returns the distance to the ganglion
cell layer.
For subretinal electrodes, this returns the distance to the bipolar
layer.
"""
if self.etype == 'epiretinal':
return self.h_ofl
elif self.etype == 'subretinal':
return self.h_inl
else:
raise ValueError("Unknown `etype`: " + self.etype)
def set_height(self, height):
"""Sets the electrode-to-retina distance
This function sets the electrode-to-retina distance according to
`height`. For an epiretinal device, we calculate the distance to
the ganglion cell layer (layer thickness depends on retinal location).
For a subretinal device, we calculate the distance to the bipolar
layer (layer thickness again depends on retinal location).
Estimates of layer thickness based on:
LoDuca et al. Am J. Ophthalmology 2011
Thickness Mapping of Retinal Layers by Spectral Domain Optical
Coherence Tomography
Note that these values are for a normal retina, so they may overestimate thickness.
Thickness from their paper (averaged across quadrants):
0-600 um radius (from fovea):
- Layer 1. (Nerve fiber layer) = 4
- Layer 2. (Ganglion cell bodies + inner plexiform) = 56
- Layer 3. (Bipolar bodies, inner nuclear layer) = 23
600-1550 um radius:
- Layer 1. 34
- Layer 2. 87
- Layer 3. 37.5
1550-3000 um radius:
- Layer 1. 45.5
- Layer 2. 58.2
- Layer 3. 30.75
We place our ganglion axon surface on the inner side of the nerve fiber
layer.
We place our bipolar surface 1/2 way through the inner nuclear layer.
So for an epiretinal array the bipolar layer is L1 + L2 + 0.5 * L3.
"""
fovdist = np.sqrt(self.x_center ** 2 + self.y_center ** 2)
if fovdist <= 600:
# Layer thicknesses given for 0-600 um distance (from fovea)
th_ofl = 4.0 # nerve fiber layer
th_gc = 56.0 # ganglion cell bodies + inner plexiform layer
th_bp = 23.0 # bipolar bodies + inner nuclear layer
elif fovdist <= 1550:
# Layer thicknesses given for 600-1550 um distance (from fovea)
th_ofl = 34.0
th_gc = 87.0
th_bp = 37.5
else:
# Layer thicknesses given for 1550-3000 um distance (from fovea)
th_ofl = 45.5
th_gc = 58.2
th_bp = 30.75
if fovdist > 3000:
e_s = "Distance to fovea=%.0f > 3000 um, " % fovdist
e_s += "assuming same layer thicknesses as for 1550-3000 um "
e_s += "distance."
logging.getLogger(__name__).warning(e_s)
if self.etype == 'epiretinal':
# This is simply the electrode-retina distance
self.h_ofl = height
# All the way through the ganglion cell layer, inner plexiform
# layer, and halfway through the inner nuclear layer
self.h_inl = height + th_ofl + th_gc + 0.5 * th_bp
elif self.etype == 'subretinal':
# Starting from the outer plexiform layer, go halfway through the
# inner nuclear layer
self.h_inl = height + 0.5 * th_bp
# Starting from the outer plexiform layer, all the way through the
# inner nuclear layer, inner plexiform layer, and ganglion cell
# layer
self.h_ofl = height + th_bp + th_gc + th_ofl
else:
raise ValueError("Unknown `etype`: " + self.etype)
height = property(get_height, set_height)
def current_spread(self, xg, yg, layer, alpha=14000, n=1.69):
"""
The current spread due to a current pulse through an electrode,
reflecting the fall-off of the current as a function of distance from
the electrode center. This can be calculated for any layer in the
retina.
Based on equation 2 in Nanduri et al [1].
Parameters
----------
xg : array
x-coordinates of the retinal grid
yg : array
y-coordinates of the retinal grid
layer: str
Layer for which to calculate the current spread:
- 'OFL': optic fiber layer, ganglion axons
- 'INL': inner nuclear layer, containing the bipolars
alpha : float
A constant controlling the spatial fall-off (default: 14000).
n : float
A constant controlling the spatial fall-off (default: 1.69), based
on Ahuja et al. [2]: An In Vitro Model of a Retinal Prosthesis.
<NAME>, <NAME>, <NAME>, <NAME>.
Humayun, and <NAME> (2008). IEEE Trans Biomed Eng 55.
"""
r = np.sqrt((xg - self.x_center) ** 2 + (yg - self.y_center) ** 2)
# current values on the retina due to array being above the retinal
# surface
if 'OFL' in layer: # optic fiber layer, ganglion axons
h = np.ones(r.shape) * self.h_ofl
# actual distance from the electrode edge
d = ((r - self.radius)**2 + self.h_ofl**2)**.5
elif 'INL' in layer: # inner nuclear layer, containing the bipolars
h = np.ones(r.shape) * self.h_inl
d = ((r - self.radius)**2 + self.h_inl**2)**.5
else:
s = "Layer %s not found. Acceptable values for `layer` are " \
"'OFL' or 'INL'." % layer
raise ValueError(s)
cspread = (alpha / (alpha + h ** n))
cspread[r > self.radius] = (alpha
/ (alpha + d[r > self.radius] ** n))
return cspread
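# Numerical example (illustrative values, not defaults mandated by the model):
# directly under the electrode (r <= radius) with h = 50 um, alpha = 14000 and
# n = 1.69, cspread = alpha / (alpha + 50**1.69) ~= 14000 / 14744 ~= 0.95.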
def receptive_field(self, xg, yg, rftype='square', size=None):
"""An electrode's receptive field
Parameters
----------
xg : array_like
Array of all x coordinates
yg : array_like
Array of all y coordinates
rftype : {'square', 'gaussian'}
The type of receptive field.
- 'square': A simple square box receptive field with side length
`size`.
- 'gaussian': A Gaussian receptive field where the weight drops off
as a function of distance from the electrode center.
The standard deviation of the Gaussian is `size`.
size : float, optional
Parameter describing the size of the receptive field. For square
receptive fields, this corresponds to the side length of the
square.
For Gaussian receptive fields, this corresponds to the standard
deviation of the Gaussian.
Default: Twice the electrode radius.
"""
if size is None:
size = 2 * self.radius
if rftype == 'square':
# Create a map of the retina for each electrode
# where it's 1 under the electrode, 0 elsewhere
rf = np.zeros(xg.shape).astype(np.float32)
ind = np.where((xg > self.x_center - (size / 2.0))
& (xg < self.x_center + (size / 2.0))
& (yg > self.y_center - (size / 2.0))
& (yg < self.y_center + (size / 2.0)))
rf[ind] = 1.0
elif rftype == 'gaussian':
# Create a map of the retina where the weight drops off as a
# function of distance from the electrode center
dist = (xg - self.x_center) ** 2 + (yg - self.y_center) ** 2
rf = np.exp(-dist / (2 * size ** 2))
rf /= np.sum(rf)
else:
e_s = "Acceptable values for `rftype` are 'square' or 'gaussian'"
raise ValueError(e_s)
return rf
class ElectrodeArray(object):
def __init__(self, etype, radii, xs, ys, hs=0, names=None, eye='RE'):
"""Create an ElectrodeArray on the retina
This function creates an electrode array of type `etype` and places it
on the retina. Lists should specify, for each electrode, its size
(`radii`), location on the retina (`xs` and `ys`), distance to the
retina (height, `hs`), and a string identifier (`names`, optional).
Array location should be given in microns, where the fovea is located
at (0, 0).
Single electrodes in the array can be addressed by index (integer)
or name.
Parameters
----------
radii : array_like
List of electrode radii.
xs : array_like
List of x-coordinates for the center of the electrodes (microns).
ys : array_like
List of y-coordinates for the center of the electrodes (microns).
hs : float | array_like, optional, default: 0
List of electrode heights (distance from the retinal surface).
names : array_like, optional, default: None
List of names (string identifiers) for each electrode.
eye : {'LE', 'RE'}, optional, default: 'RE'
Eye in which array is implanted.
Examples
--------
A single epiretinal electrode called 'A1', with radius 100um, sitting
at retinal location (0, 0), 10um away from the retina:
>>> from pulse2percept import implants
>>> implant0 = implants.ElectrodeArray('epiretinal', 100, 0, 0, hs=10,
... names='A1')
Get access to the electrode with name 'A1' in the first array:
>>> my_electrode = implant0['A1']
An array with two electrodes of size 100um, one sitting at
(-100, -100), the other sitting at (0, 0), with 0 distance from the
retina, of type 'subretinal':
>>> implant1 = implants.ElectrodeArray('subretinal', [100, 100],
... [-100, 0], [-100, 0], hs=[0, 0])
"""
self.etype = etype
self.eye = eye
self.electrodes = []
self.num_electrodes = 0
self.add_electrodes(radii, xs, ys, hs, names)
def __str__(self):
return "ElectrodeArray(%s, num_electrodes=%d)" % (self.etype,
self.num_electrodes)
def add_electrode(self, electrode):
"""Adds an electrode to an ElectrodeArray object
This function adds a single electrode to an existing ElectrodeArray
object. The electrode must have the same type as the array
(see implants.SUPPORTED_IMPLANT_TYPES).
Parameters
----------
electrode : implants.Electrode
An electrode object specifying type, size, and location of the
electrode on the retina.
"""
if not isinstance(electrode, Electrode):
raise TypeError("`electrode` must be of type retina.Electrode.")
if electrode.etype != self.etype:
e_s = "Added electrode must be of same type as the existing"
e_s = "array (%s)." % self.etype
raise ValueError(e_s)
self.num_electrodes += 1
self.electrodes.append(electrode)
def add_electrodes(self, radii, xs, ys, hs=0, names=None):
"""Adds electrodes to an ElectrodeArray object
This function adds one or more electrodes to an existing ElectrodeArray
object. Lists should specify, for each electrode to be added, the size
(`radii`), location on the retina (`xs` and `ys`), distance to the
retina (height, `hs`), and a string identifier (`names`, optional).
Array location should be given in microns, where the fovea is located
at (0, 0).
Single electrodes in the array can be addressed by index (integer)
or name.
Parameters
----------
radii : array_like
List of electrode radii.
xs : array_like
List of x-coordinates for the center of the electrodes (microns).
ys : array_like
List of y-coordinates for the center of the electrodes (microns).
hs : float | array_like, optional, default: 0
List of electrode heights (distance from the retinal surface).
names : array_like, optional, default: None
List of names (string identifiers) for each electrode.
Examples
--------
Adding a single electrode of radius 50um sitting at (0, 0) to an
existing ElectrodeArray object:
>>> implant = ElectrodeArray('epiretinal', 100, 100, 100)
>>> implant.add_electrodes(50, 0, 0)
"""
# Make it so the method can accept either floats, lists, or
# numpy arrays, and `zip` works regardless.
radii = np.array([radii], dtype=np.float32).flatten()
xs = np.array([xs], dtype=np.float32).flatten()
ys = np.array([ys], dtype=np.float32).flatten()
names = np.array([names], dtype=str).flatten()  # np.str was removed in NumPy 1.24; use builtin str
if isinstance(hs, list):
hs = np.array(hs).flatten()
else:
# All electrodes have the same height
hs =
|
np.ones_like(radii)
|
numpy.ones_like
|
import numpy as np
import scipy.stats as stats
def geometric_brownian_motion(s, sigma, r, T, N, n):
''' Paths of the stock price
Partition the time interval [0, T] uniformly into N steps of size h = T / N
and generate n sample paths of the geometric Brownian motion.
'''
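# Discretization sketch (standard GBM result, stated here for reference):
# S_{k+1} = S_k * exp((r - 0.5 * sigma**2) * h + sigma * sqrt(h) * W_k),
# with W_k ~ N(0, 1) i.i.d.; each of the n rows of W drives one path.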
h = T / N
# print(f'h: {h}')
W = np.random.randn(n, N)
# print(f'W: {W}')
q =
|
np.ones((n, N))
|
numpy.ones
|
import unittest
import torch
import numpy as np
from unittest.mock import MagicMock
from sklearn import metrics
from pytorch_wrapper import evaluators
class GenericEvaluatorResultsTestCase(unittest.TestCase):
def test_max_is_better(self):
score_1 = 1.
score_2 = 2.
er1 = evaluators.GenericEvaluatorResults(score_1, is_max_better=True)
er2 = evaluators.GenericEvaluatorResults(score_2, is_max_better=True)
self.assertTrue(er2.is_better_than(er1))
self.assertFalse(er1.is_better_than(er2))
self.assertAlmostEqual(er2.compare_to(er1), 1)
self.assertAlmostEqual(er1.compare_to(er2), -1)
def test_min_is_better(self):
score_1 = 1.
score_2 = 2.
er1 = evaluators.GenericEvaluatorResults(score_1, is_max_better=False)
er2 = evaluators.GenericEvaluatorResults(score_2, is_max_better=False)
self.assertFalse(er2.is_better_than(er1))
self.assertTrue(er1.is_better_than(er2))
self.assertAlmostEqual(er2.compare_to(er1), 1)
self.assertAlmostEqual(er1.compare_to(er2), -1)
class GenericPointWiseLossEvaluatorTestCase(unittest.TestCase):
def test_correct_loss_calculation(self):
mocked_loss = MagicMock()
mocked_loss.item = MagicMock(return_value=10)
loss_wrapper = MagicMock()
loss_wrapper.calculate_loss = MagicMock(return_value=mocked_loss)
evaluator = evaluators.GenericPointWiseLossEvaluator(
loss_wrapper,
label='loss',
score_format='%f',
batch_target_key='target'
)
output = MagicMock()
batch = {'target': MagicMock()}
batch['target'].shape = [5]
evaluator.step(output, batch)
batch['target'].shape = [8]
evaluator.step(output, batch)
res = evaluator.calculate()
self.assertAlmostEqual(res.score, 10)
class AccuracyEvaluatorTestCase(unittest.TestCase):
def test_correct_score_calculation_binary(self):
evaluator = evaluators.AccuracyEvaluator(
threshold=0.5,
model_output_key=None,
batch_target_key='target'
)
output = torch.tensor([0.9, 0.2, 0.7, 0.4], dtype=torch.float32)
batch = {'target': torch.tensor([1, 1, 0, 0], dtype=torch.float32)}
evaluator.step(output, batch)
output = torch.tensor([0.3, 0.7, 0.9], dtype=torch.float32)
batch = {'target': torch.tensor([1, 1, 0], dtype=torch.float32)}
evaluator.step(output, batch)
res = evaluator.calculate()
self.assertAlmostEqual(res.score, 100 * 3. / 7)
def test_correct_score_calculation_multi_label(self):
evaluator = evaluators.AccuracyEvaluator(
threshold=0.5,
model_output_key=None,
batch_target_key='target'
)
output = torch.tensor([[0.7, 0.4], [0.8, 0.3], [0.7, 0.6], [0.2, 0.8]], dtype=torch.float32)
batch = {'target': torch.tensor([[1, 1], [0, 1], [1, 0], [0, 1]], dtype=torch.float32)}
evaluator.step(output, batch)
output = torch.tensor([[0.8, 0.3]], dtype=torch.float32)
batch = {'target': torch.tensor([[1, 1]], dtype=torch.float32)}
evaluator.step(output, batch)
res = evaluator.calculate()
self.assertAlmostEqual(res.score, 100 * 5. / 10)
class MultiClassAccuracyEvaluatorTestCase(unittest.TestCase):
def test_correct_score_calculation(self):
evaluator = evaluators.MultiClassAccuracyEvaluator(
model_output_key=None,
batch_target_key='target'
)
output = torch.tensor([[0.5, 0.1, 0.4], [0.3, 0.3, 0.4], [0.5, 0.5, 0.0]], dtype=torch.float32)
batch = {'target': torch.tensor([0, 2, 2], dtype=torch.float32)}
evaluator.step(output, batch)
output = torch.tensor([[0.1, 0.1, 0.8]], dtype=torch.float32)
batch = {'target': torch.tensor([2], dtype=torch.float32)}
evaluator.step(output, batch)
res = evaluator.calculate()
self.assertAlmostEqual(res.score, 100 * 3. / 4)
class AUROCEvaluatorTestCase(unittest.TestCase):
def test_correct_score_calculation_binary(self):
evaluator = evaluators.AUROCEvaluator(
model_output_key=None,
batch_target_key='target',
average='macro'
)
output = torch.tensor([0.9, 0.2, 0.8, 0.3], dtype=torch.float32)
batch = {'target': torch.tensor([1, 1, 0, 0], dtype=torch.float32)}
evaluator.step(output, batch)
output = torch.tensor([0.2, 0.98, 0.76], dtype=torch.float32)
batch = {'target': torch.tensor([1, 1, 0], dtype=torch.float32)}
evaluator.step(output, batch)
res = evaluator.calculate()
self.assertAlmostEqual(res.score, 0.5)
def test_correct_score_calculation_multi_label_macro(self):
evaluator = evaluators.AUROCEvaluator(
model_output_key=None,
batch_target_key='target',
average='macro'
)
output = torch.tensor([[0.6, 0.2], [0.7, 0.2], [0.6, 0.6], [0.3, 0.55]], dtype=torch.float32)
batch = {'target': torch.tensor([[1, 1], [0, 1], [1, 0], [0, 1]], dtype=torch.float32)}
evaluator.step(output, batch)
output = torch.tensor([[0.6, 0.4]], dtype=torch.float32)
batch = {'target': torch.tensor([[1, 1]], dtype=torch.float32)}
evaluator.step(output, batch)
res = evaluator.calculate()
label1_score = metrics.roc_auc_score(
y_score=np.array([0.6, 0.7, 0.6, 0.3, 0.6]),
y_true=np.array([1, 0, 1, 0, 1])
)
label2_score = metrics.roc_auc_score(
y_score=np.array([0.2, 0.2, 0.6, 0.55, 0.4]),
y_true=np.array([1, 1, 0, 1, 1])
)
correct = (label1_score + label2_score) / 2.
self.assertAlmostEqual(res.score, correct)
def test_correct_score_calculation_multi_label_micro(self):
evaluator = evaluators.AUROCEvaluator(
model_output_key=None,
batch_target_key='target',
average='micro'
)
output = torch.tensor([[0.6, 0.2], [0.7, 0.2], [0.6, 0.6], [0.3, 0.55]], dtype=torch.float32)
batch = {'target': torch.tensor([[1, 1], [0, 1], [1, 0], [0, 1]], dtype=torch.float32)}
evaluator.step(output, batch)
output = torch.tensor([[0.6, 0.4]], dtype=torch.float32)
batch = {'target': torch.tensor([[1, 1]], dtype=torch.float32)}
evaluator.step(output, batch)
res = evaluator.calculate()
correct = metrics.roc_auc_score(
y_score=np.array([0.6, 0.7, 0.6, 0.3, 0.6, 0.2, 0.2, 0.6, 0.55, 0.4]),
y_true=np.array([1, 0, 1, 0, 1, 1, 1, 0, 1, 1])
)
self.assertAlmostEqual(res.score, correct)
class PrecisionEvaluatorTestCase(unittest.TestCase):
def test_correct_score_calculation_binary(self):
evaluator = evaluators.PrecisionEvaluator(
model_output_key=None,
batch_target_key='target',
average='binary'
)
output = torch.tensor([0.9, 0.2, 0.8, 0.3], dtype=torch.float32)
batch = {'target': torch.tensor([1, 1, 0, 0], dtype=torch.float32)}
evaluator.step(output, batch)
output = torch.tensor([0.2, 0.98, 0.76], dtype=torch.float32)
batch = {'target': torch.tensor([1, 1, 0], dtype=torch.float32)}
evaluator.step(output, batch)
res = evaluator.calculate()
self.assertAlmostEqual(res.score, 2. / 4)
def test_correct_score_calculation_multi_label_macro(self):
evaluator = evaluators.PrecisionEvaluator(
model_output_key=None,
batch_target_key='target',
average='macro'
)
output = torch.tensor([[0.6, 0.2], [0.7, 0.2], [0.6, 0.6], [0.3, 0.55]], dtype=torch.float32)
batch = {'target': torch.tensor([[1, 1], [0, 1], [1, 0], [0, 1]], dtype=torch.float32)}
evaluator.step(output, batch)
output = torch.tensor([[0.6, 0.4]], dtype=torch.float32)
batch = {'target': torch.tensor([[1, 1]], dtype=torch.float32)}
evaluator.step(output, batch)
res = evaluator.calculate()
label1_score = metrics.precision_score(
y_pred=np.array([0.6, 0.7, 0.6, 0.3, 0.6]) > 0.5,
y_true=np.array([1, 0, 1, 0, 1])
)
label2_score = metrics.precision_score(
y_pred=np.array([0.2, 0.2, 0.6, 0.55, 0.4]) > 0.5,
y_true=np.array([1, 1, 0, 1, 1])
)
correct = (label1_score + label2_score) / 2.
self.assertAlmostEqual(res.score, correct)
def test_correct_score_calculation_multi_label_micro(self):
evaluator = evaluators.PrecisionEvaluator(
model_output_key=None,
batch_target_key='target',
average='micro'
)
output = torch.tensor([[0.6, 0.2], [0.7, 0.2], [0.6, 0.6], [0.3, 0.55]], dtype=torch.float32)
batch = {'target': torch.tensor([[1, 1], [0, 1], [1, 0], [0, 1]], dtype=torch.float32)}
evaluator.step(output, batch)
output = torch.tensor([[0.6, 0.4]], dtype=torch.float32)
batch = {'target': torch.tensor([[1, 1]], dtype=torch.float32)}
evaluator.step(output, batch)
res = evaluator.calculate()
correct = metrics.precision_score(
y_pred=np.array([0.6, 0.7, 0.6, 0.3, 0.6, 0.2, 0.2, 0.6, 0.55, 0.4]) > 0.5,
y_true=np.array([1, 0, 1, 0, 1, 1, 1, 0, 1, 1])
)
self.assertAlmostEqual(res.score, correct)
class MultiClassPrecisionEvaluatorTestCase(unittest.TestCase):
def test_correct_score_calculation_macro(self):
evaluator = evaluators.MultiClassPrecisionEvaluator(
model_output_key=None,
batch_target_key='target',
average='macro'
)
output = torch.tensor([[0.5, 0.1, 0.4], [0.3, 0.3, 0.4], [0.5, 0.5, 0.0]], dtype=torch.float32)
batch = {'target': torch.tensor([0, 2, 2], dtype=torch.float32)}
evaluator.step(output, batch)
output = torch.tensor([[0.1, 0.1, 0.8]], dtype=torch.float32)
batch = {'target': torch.tensor([2], dtype=torch.float32)}
evaluator.step(output, batch)
res = evaluator.calculate()
correct = metrics.precision_score(y_pred=np.array([0, 2, 0, 2]), y_true=
|
np.array([0, 2, 2, 2])
|
numpy.array
|
"""Core functions to perform interpolation by an arbitrary interpolation kernel
(such as in `.linear` or `.pchip`) in one dimension, with a variety of functions
to facilitate interpolating with one or two kernels, one or two dependent data
sets, and/or to datasets that are 1D or multi-dimensional.
The functions in this module are not intended to be called directly (though they
can be). Rather, they are used by factory functions that return a new function
with the interpolation kernel (the input `f`) enclosed, thereby accelerating the
code. See `.tools/make_interpolator`.
This subpackage is useful for interpolation when the evaluation site is known,
so that only one or a few interpolations are needed. However, when
interpolation must be repeated many times, such as when solving a nonlinear
equation involving the interpolants, the `ppinterp` subpackage is preferred, as
that subpackage pre-computes the piecewise polynomial coefficients once, and then
interpolation to a given evaluation site is fast.
Note: In theory, many of these functions could be collapsed down to a single
function. For example, `_interp_1` and `_interp_1_YZ` could be replaced by
a single function that accepts a tuple as its fourth `Y` parameter; then it
works like `_interp_1_YZ` over each element of `Y`. However, as of Numba v0.53,
that approach is considerably *slower* than the approach taken here.
"""
import numpy as np
import numba as nb
@nb.njit
def _interp_1(f, x, X, Y):
"""
Apply a given kernel of interpolation once.
Parameters
----------
f : function
The "kernel" of interpolation: A function that performs a single
interpolation within a known interval.
The parameters to `f` are
x : float
X : ndarray(float, 1d)
Y : ndarray(float, 1d)
i : int
and the return value of `f` is
y : float
which is `Y` as a function of `X` interpolated to the value `x`. Here,
the subinterval of `X` within which `x` falls is given to this
function by `i`. This function will only be called when the
following is guaranteed:
(a) `i == 1` and `X[0] <= x <= X[1]`, or
(b) `2 <= i <= len(X) - 1` and `X[i-1] < x <= X[i]`.
x : float
Evaluation site
X : ndarray(float, 1d)
The independent data.
Must be monotonically increasing.
NaN is treated as +inf, hence NaN's must go after any valid data.
If X[0] is NaN, it is assumed that all elements of X are NaN.
Y : ndarray(float, 1d)
The dependent data, with the same length as `X`.
Returns
-------
y : float
The value of `Y` interpolated to `X` at `x`.
"""
if np.isnan(x) or x < X[0] or X[-1] < x or np.isnan(X[0]):
return np.nan
# i = searchsorted(X,x) is such that:
# i = 0 if x <= X[0] or all(isnan(X))
# i = len(X) if X[-1] < x or isnan(x)
# X[i-1] < x <= X[i] otherwise
#
# Having guaranteed above that
# x is not nan, and X[0] is not nan hence not all(isnan(X)),
# and X[0] <= x <= X[-1],
# then either
# (a) i == 0 and x == X[0], or
# (b) 1 <= i <= len(X)-1 and X[i-1] < x <= X[i]
#
# Next, merge (a) and (b) cases so that
# 1 <= i <= len(X) - 1
# is guaranteed, and
# X[0 ] <= x <= X[1] when i == 1
# X[i-1] < x <= X[i] when i > 1
i = max(1, np.searchsorted(X, x))
# Interpolate within the given interval
return f(x, X, Y, i)
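# Example kernel (sketch, not part of this library): a minimal linear kernel with
# the (x, X, Y, i) signature documented in `_interp_1` above; the real kernels
# live in the `.linear` / `.pchip` modules.
@nb.njit
def _example_linear_kernel(x, X, Y, i):
    # Linear interpolation on the interval bounded by X[i-1] and X[i]
    return Y[i - 1] + (Y[i] - Y[i - 1]) * (x - X[i - 1]) / (X[i] - X[i - 1])
# e.g. _interp_1(_example_linear_kernel, 0.5, np.array([0.0, 1.0]), np.array([0.0, 2.0]))
# evaluates to 1.0.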
@nb.njit
def _interp_1_fg(f, g, x, X, Y):
"""As _interp_1 but applies two interpolation kernels."""
if np.isnan(x) or x < X[0] or X[-1] < x or np.isnan(X[0]):
return np.nan, np.nan
i = max(1, np.searchsorted(X, x))
return f(x, X, Y, i), g(x, X, Y, i)
@nb.njit
def _interp_1_YZ(f, x, X, Y, Z):
"""As _interp_1 but applies the interpolation kernel to two dependent data arrays."""
if np.isnan(x) or x < X[0] or X[-1] < x or np.isnan(X[0]):
return np.nan, np.nan
i = max(1, np.searchsorted(X, x))
return f(x, X, Y, i), f(x, X, Z, i)
@nb.njit
def _interp_1_fg_YZ(f, g, x, X, Y, Z):
"""As _interp_1 but applies two interpolation kernels to two dependent data arrays.
Parameters
----------
f, g : function
As in `_interp_1`
x, X : see `_interp_1`
Y, Z : ndarray(float, 1d)
As in `_interp_1`
Returns
-------
yf : float
The value of `Y` interpolated using `f` to `X` at `x`.
zf : float
The value of `Z` interpolated using `f` to `X` at `x`.
yg: float
The value of `Y` interpolated using `g` to `X` at `x`.
zg : float
The value of `Z` interpolated using `g` to `X` at `x`.
"""
if np.isnan(x) or x < X[0] or X[-1] < x or np.isnan(X[0]):
return np.nan, np.nan, np.nan, np.nan
i = max(1, np.searchsorted(X, x))
return f(x, X, Y, i), f(x, X, Z, i), g(x, X, Y, i), g(x, X, Z, i)
@nb.njit
def _interp_n(f, x, X, Y):
"""
As _interp_1 but applies interpolation kernel many times.
Parameters
----------
f, x, X : see `_interp_1`
Y: ndarray(float, nd)
Dependent data. The last dimension must be the same length as `X`.
Returns
-------
y : ndarray
The value `y[n]` is `Y[n]` (a 1D array) interpolated to `X` at `x`.
The shape of `y` is the shape of `Y` less its last dimension.
Note
----
This function is faster than a `numba.guvectorize`'d version of `_interp_1`
because `numpy.searchsorted` is only called once, here.
"""
if np.isnan(x) or x < X[0] or X[-1] < x or np.isnan(X[0]):
y = np.full(Y.shape[0:-1], np.nan, dtype=Y.dtype)
else:
y = np.empty(Y.shape[0:-1], dtype=Y.dtype)
i = max(1, np.searchsorted(X, x))
for n in np.ndindex(y.shape):
y[n] = f(x, X, Y[n], i)
return y
@nb.njit
def _interp_n_YZ(f, x, X, Y, Z):
"""As _interp_n but applies the interpolation kernel to two dependent data ndarrays.
Assumes Y and Z are the same shape.
"""
if np.isnan(x) or x < X[0] or X[-1] < x or np.isnan(X[0]):
y = np.full(Y.shape[0:-1], np.nan, dtype=Y.dtype)
z = np.full(Y.shape[0:-1], np.nan, dtype=Y.dtype)
else:
y = np.empty(Y.shape[0:-1], dtype=Y.dtype)  # np.f8 is not a valid dtype alias; match the NaN branch above
z = np.empty(Y.shape[0:-1], dtype=Y.dtype)
i = max(1, np.searchsorted(X, x))
for n in np.ndindex(y.shape):
y[n] = f(x, X, Y[n], i)
z[n] = f(x, X, Z[n], i)
return y, z
@nb.njit
def _interp_n_fg(f, g, x, X, Y):
"""As _interp_n but applies two interpolation kernels."""
if np.isnan(x) or x < X[0] or X[-1] < x or np.isnan(X[0]):
yf = np.full(Y.shape[0:-1], np.nan, dtype=Y.dtype)
yg = np.full(Y.shape[0:-1], np.nan, dtype=Y.dtype)
else:
yf = np.empty(Y.shape[0:-1], dtype=Y.dtype)
yg = np.empty(Y.shape[0:-1], dtype=Y.dtype)
i = max(1,
|
np.searchsorted(X, x)
|
numpy.searchsorted
|
from skimage import feature, transform
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
from IPython.display import IFrame
import warnings
import http.server
import socketserver
import asyncio
import websockets
def run_websocket_server(websocket_handler, port=1234):
print(f'Starting websocket server on port {port}')
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
start_server = websockets.serve(websocket_handler, "127.0.0.1", port)
loop.run_until_complete(start_server)
loop.run_forever()
def run_http_server(port = 8080):
Handler = http.server.SimpleHTTPRequestHandler
with socketserver.TCPServer(("", port), Handler) as httpd:
print("serving at port", port)
httpd.serve_forever()
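# Usage sketch (assumption, not part of the original code): both helpers block,
# so a caller would typically start them in daemon threads (requires `import threading`), e.g.
#   threading.Thread(target=run_http_server, kwargs={'port': 8080}, daemon=True).start()
#   threading.Thread(target=run_websocket_server, args=(my_handler,), daemon=True).start()
# where `my_handler` is a websocket handler coroutine defined by the caller.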
def plot(data, xi=None, cmap='RdBu_r', axis=plt, percentile=100, dilation=3.0, alpha=0.8):
dx, dy = 0.05, 0.05
xx = np.arange(0.0, data.shape[1], dx)
yy =
|
np.arange(0.0, data.shape[0], dy)
|
numpy.arange
|
from solvers.rigidity_solver import constraints_3d
from util.debugger import MyDebugger
from bricks_modeling.connections.conn_type import ConnType
import numpy as np
import scipy
import util.geometry_util as geo_util
import open3d as o3d
import copy
from typing import List
import itertools
from numpy import linalg as LA
from numpy.linalg import inv, matrix_rank
from visualization.model_visualizer import visualize_3D, visualize_2D
from scipy.linalg import null_space, cholesky
def rigidity_matrix(
points: np.ndarray,
edges: np.ndarray,
dim: int
) -> np.ndarray:
"""
points: (n, d) array, n points in a d-dimensional space
edges : (m, 2) array, m edges, store indices of the points they join
dim : int, dimension d of the ambient space
"""
assert len(points.shape) == 2 and points.shape[1] == dim
n, m = len(points), len(edges)
# constructing the rigidity matrix R
R = np.zeros((m, dim * n))
for i, (p_ind, q_ind) in enumerate(edges):
q_minus_p = points[q_ind, :] - points[p_ind, :]
R[i, q_ind * dim : (q_ind + 1) * dim] = q_minus_p
R[i, p_ind * dim : (p_ind + 1) * dim] = -q_minus_p
return R
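# Interpretation note (no new functionality): row i of R is the gradient of the
# squared-length constraint of edge i with respect to the stacked point
# coordinates, so infinitesimal flexes are the null-space vectors of R beyond
# the rigid-body motions.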
def _remove_fixed_edges(points: np.ndarray, edges: np.ndarray, fixed_points_idx):
"""
subroutine used by spring_energy_matrix. remove the fixed points and edges by deleting them from inputs
"""
if len(fixed_points_idx) == 0:
return points, edges
fixed_edges_idx = [
index
for index, edge in enumerate(edges)
if edge[0] in fixed_points_idx and edge[1] in fixed_points_idx
]
if len(fixed_edges_idx) > 0:
edges = np.delete(edges, fixed_edges_idx, axis=0)
return points, edges
def spring_energy_matrix(
points: np.ndarray,
edges: np.ndarray,
dim: int = 3,
matrices=False,
):
"""
matrices: return K, P, A if true
fix_stiffness: use constant for value K if true, use 1/norm(vec) if false
"""
n, m = len(points), len(edges)
K = np.zeros((m, m))
P = np.zeros((m, m * dim))
A = np.zeros((m * dim, n * dim))
normalized = lambda v: v / LA.norm(v)
# forming P and K
for idx, e in enumerate(edges):
if len(e) == 2:
edge_vec = points[e[0]] - points[e[1]]
else: # virtual edge
assert len(e) == 2 + dim
assert LA.norm(points[e[0]] - points[e[1]]) < 1e-6
edge_vec = np.array(e[2:])
edge_vec = normalized(edge_vec) / 1e-4 # make the virtual spring stiff by shortening the edge
P[idx, idx * dim : idx * dim + dim] = normalized(edge_vec).T
K[idx, idx] = 1 / LA.norm(edge_vec)
for d in range(dim):
A[dim * idx + d, dim * e[0] + d] = 1
A[dim * idx + d, dim * e[1] + d] = -1
if matrices:
return K, P, A
else:
return np.linalg.multi_dot([A.T, P.T, K, P, A])
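# Interpretation sketch: for a stacked displacement vector u, the assembled matrix
# gives the quadratic energy
#   u.T @ (A.T @ P.T @ K @ P @ A) @ u = sum_e K[e, e] * ((P @ A @ u)[e]) ** 2,
# i.e. each edge contributes its stiffness times the squared projection of the
# relative end-point displacement onto the edge direction.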
def spring_energy_matrix_accelerate_3D(
points: np.ndarray,
edges: np.ndarray,
abstract_edges=None,
dim: int = 3,
matrices=False,
virtual_edges=False,
) -> np.ndarray:
if abstract_edges is None:
abstract_edges = np.array([])
n, m = len(points), len(edges) + len(abstract_edges)
K = np.zeros((m, m))
P = np.zeros((m, m * dim))
A = np.zeros((m * dim, n * dim))
normalized = lambda v: v / LA.norm(v)
# forming P and K
if virtual_edges:
for idx, e in enumerate(edges):
if len(e) == 2:
edge_vec = points[e[0]] - points[e[1]]
else: # virtual edge
print(e)
assert len(e) == 2 + dim
assert LA.norm(points[e[0]] - points[e[1]]) < 1e-6
edge_vec = np.array(e[2:])
edge_vec = normalized(edge_vec) / 1e-4 # make the virtual spring stiff by shortening the edge
# if LA.norm(edge_vec) < 1e-4:
# print(LA.norm(edge_vec))
P[idx, idx * dim : idx * dim + dim] = normalized(edge_vec).T
# K[idx, idx] = 1 / LA.norm(edge_vec)
K[idx, idx] = 1 # set as the same material for debugging
for d in range(dim):
A[dim * idx + d, dim * e[0] + d] = 1
A[dim * idx + d, dim * e[1] + d] = -1
else:
abstract_edges = np.array(abstract_edges)
if abstract_edges.shape[0] != 0:
assert LA.norm(points[abstract_edges[:,0].astype("int32")] - points[abstract_edges[:,1].astype("int32")],axis=1).max() < 1e-6
edges = np.array(edges)
points = np.array(points)
edge_vecs = points[edges[:, 1]] - points[edges[:, 0]]
if abstract_edges.shape[0] !=0 :
edges = np.concatenate((edges,abstract_edges[:,:2]),axis=0).astype("int32")
edge_vecs = np.concatenate((edge_vecs,abstract_edges[:,2:]),axis=0)
edge_norms = np.linalg.norm(edge_vecs, axis=1)
non_zero_edge_index = np.where(edge_norms > 1e-4)
zero_edge_index = np.where(edge_norms < 1e-4)
edge_vecs[non_zero_edge_index] = edge_vecs[non_zero_edge_index] / edge_norms[non_zero_edge_index][:, np.newaxis]
edge_vecs[zero_edge_index] = 0
row_K, col_K = np.diag_indices_from(K)
K[row_K, col_K] = 1
P[np.arange(m), np.arange(m) * dim] = edge_vecs[:, 0]
P[np.arange(m), np.arange(m) * dim + 1] = edge_vecs[:, 1]
P[np.arange(m), np.arange(m) * dim + 2] = edge_vecs[:, 2]
A[np.arange(m) * 3, edges[:, 0] * 3] = 1
A[np.arange(m) * 3, edges[:, 1] * 3] = -1
A[np.arange(m) * 3 + 1, edges[:, 0] * 3 + 1] = 1
A[np.arange(m) * 3 + 1, edges[:, 1] * 3 + 1] = -1
A[np.arange(m) * 3 + 2, edges[:, 0] * 3 + 2] = 1
A[np.arange(m) * 3 + 2, edges[:, 1] * 3 + 2] = -1
# sK = scipy.sparse.csr_matrix(K)
sK = scipy.sparse.csr_matrix((1/edge_norms,(
|
np.arange(m)
|
numpy.arange
|
import sys
import os
import numpy as np
import h5py
import scipy.signal
import itertools
import pdb
sys.path.append('/packages/msutil')
import util_stimuli
from dataset_util import initialize_hdf5_file, write_example_to_hdf5
def generate_MistunedHarmonics_dataset(hdf5_filename,
fs=32000,
dur=0.150,
f0_ref_list=[100.0, 200.0, 400.0],
f0_ref_width=0.04,
step_size_in_octaves=1/(192*8),
phase_mode='sine',
low_harm=1,
upp_harm=12,
harmonic_dBSPL=60.0,
list_mistuned_pct=[-8, -6, -4, -3, -2, -1, 0, 1, 2, 3, 4, 6, 8],
list_mistuned_harm=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12],
noise_params={},
disp_step=100):
'''
Main routine for generating Moore et al. (1985, JASA) mistuned harmonics dataset.
Args
----
'''
# Define encoding / decoding dictionaries for phase_mode
phase_mode_encoding = {'sine':0, 'rand':1, 'sch':2, 'cos':3, 'alt':4}
phase_mode_decoding = {0:'sine', 1:'rand', 2:'sch', 3:'cos', 4:'alt'}
# Define stimulus-specific parameters
list_f0 = []
for f0_ref in f0_ref_list:
f0_min = f0_ref * (1-f0_ref_width)
f0_max = f0_ref * (1+f0_ref_width)
list_f0.extend(list(f0_min * (np.power(2, np.arange(0, np.log2(f0_max / f0_min), step_size_in_octaves)))))
list_mistuned_pct = np.array(list_mistuned_pct).astype(float)
list_mistuned_harm = np.array(list_mistuned_harm).astype(int)
N = len(list_f0) * len(list_mistuned_pct) * len(list_mistuned_harm)
# Define stimulus-shared parameters
phase = phase_mode_encoding[phase_mode]
harmonic_numbers = np.arange(low_harm, upp_harm+1, dtype=int)
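# Convert dB SPL (re 20 uPa) to a pressure amplitude:
# e.g. 60 dB SPL -> 20e-6 * 10**(60/20) = 0.02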
amplitudes = 20e-6 * np.power(10, (harmonic_dBSPL/20)) * np.ones_like(harmonic_numbers)
# Prepare config_dict with config values
config_dict = {
'sr': fs,
'config_tone/fs': fs,
'config_tone/dur': dur,
'config_tone/harmonic_dBSPL': harmonic_dBSPL,
'config_tone/phase_mode': phase,
'config_tone/low_harm': low_harm,
'config_tone/upp_harm': upp_harm,
}
config_key_pair_list = [(k, k) for k in config_dict.keys()]
# Iterate over all combinations of stimulus-specific parameters
itrN = 0
for mistuned_harm in list_mistuned_harm:
for mistuned_pct in list_mistuned_pct:
for f0 in list_f0:
# Build signal with specified harmonic mistuning and f0
harmonic_freqs = f0 * harmonic_numbers
mistuned_index = harmonic_numbers == mistuned_harm
harmonic_freqs[mistuned_index] = (1.0 + mistuned_pct/100.0) * harmonic_freqs[mistuned_index]
signal = util_stimuli.complex_tone(f0, fs, dur, harmonic_numbers=None,
frequencies=harmonic_freqs,
amplitudes=amplitudes,
phase_mode=phase_mode)
# Add signal and metadata to data_dict for hdf5 filewriting
data_dict = {
'stimuli/signal': signal.astype(np.float32),
'f0': np.float32(f0),
'mistuned_harm': int(mistuned_harm),
'mistuned_pct':
|
np.float32(mistuned_pct)
|
numpy.float32
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Defines unit tests for :mod:`colour.notation.munsell` module.
"""
from __future__ import division, unicode_literals
import numpy as np
import sys
if sys.version_info[:2] <= (2, 6):
import unittest2 as unittest
else:
import unittest
from colour.notation.munsell import (
parse_munsell_colour,
is_grey_munsell_colour,
normalize_munsell_specification)
from colour.notation.munsell import (
munsell_colour_to_munsell_specification,
munsell_specification_to_munsell_colour)
from colour.notation.munsell import (
xyY_from_renotation,
is_specification_in_renotation)
from colour.notation.munsell import bounding_hues_from_renotation
from colour.notation.munsell import hue_to_hue_angle, hue_angle_to_hue
from colour.notation.munsell import hue_to_ASTM_hue
from colour.notation.munsell import (
interpolation_method_from_renotation_ovoid,
xy_from_renotation_ovoid)
from colour.notation.munsell import LCHab_to_munsell_specification
from colour.notation.munsell import maximum_chroma_from_renotation
from colour.notation.munsell import munsell_specification_to_xy
from colour.notation.munsell import (
munsell_specification_to_xyY,
xyY_to_munsell_specification)
from colour.notation import (
munsell_value_priest1920,
munsell_value_munsell1933,
munsell_value_moon1943,
munsell_value_saunderson1944,
munsell_value_ladd1955,
munsell_value_mccamy1987,
munsell_value_ASTM_D1535_08)
__author__ = 'Colour Developers'
__copyright__ = 'Copyright (C) 2013 - 2014 - Colour Developers'
__license__ = 'New BSD License - http://opensource.org/licenses/BSD-3-Clause'
__maintainer__ = 'Colour Developers'
__email__ = '<EMAIL>'
__status__ = 'Production'
__all__ = ['MUNSELL_SPECIFICATIONS',
'MUNSELL_GREYS_SPECIFICATIONS',
'MUNSELL_EVEN_SPECIFICATIONS',
'MUNSELL_BOUNDING_HUES',
'MUNSELL_HUE_TO_ANGLE',
'MUNSELL_HUE_TO_ASTM_HUE',
'MUNSELL_INTERPOLATION_METHODS',
'MUNSELL_XY_FROM_RENOTATION_OVOID',
'MUNSELL_SPECIFICATIONS_TO_XY',
'MUNSELL_COLOURS_TO_XYY',
'MUNSELL_GREYS_TO_XYY',
'XYY_TO_MUNSELL_SPECIFICATIONS',
'XYY_TO_MUNSELL_GREYS_SPECIFICATIONS',
'NON_CONVERGING_XYY',
'TestMunsellValuePriest1920',
'TestMunsellValueMunsell1933',
'TestMunsellValueMoon1943',
'TestMunsellValueSaunderson1944',
'TestMunsellValueLadd1955',
'TestMunsellValueMcCamy1992',
'TestMunsellValueASTM_D1535_08',
'TestMunsellSpecification_to_xyY',
'TestMunsellColour_to_xyY',
'TestxyY_to_munsell_specification',
'TestxyY_to_munsell_colour',
'TestParseMunsellColour',
'TestIsGreyMunsellColour',
'TestNormalizeMunsellSpecification',
'TestMunsellColourToMunsellSpecification',
'TestMunsellSpecificationToMunsellColour',
'Test_xyY_fromRenotation',
'TestIsSpecificationInRenotation',
'TestBoundingHuesFromRenotation',
'TestHueToHueAngle',
'TestHueAngleToHue',
'TestHueTo_ASTM_hue',
'TestInterpolationMethodFromRenotationOvoid',
'Test_xy_fromRenotationOvoid',
'TestLCHabToMunsellSpecification',
'TestMaximumChromaFromRenotation',
'TestMunsellSpecification_to_xy']
# TODO: Investigate if tests can be simplified by using a common valid set of
# specifications.
MUNSELL_SPECIFICATIONS = (
(2.5, 7.9653798470827155, 11.928546308350969, 4),
(2.5, 6.197794822090879, 6.923610826208884, 4),
(2.5, 5.311956978256753, 2.0, 4),
(5.613007062442384, 8.402756538070792, 18.56590894044391, 4),
(5.845640071004907, 8.062638664520136, 5.782325614552295, 4),
(5.780794121059599, 3.174804081025836, 3.3492086825591487, 4),
(5.483684299639117, 3.8994120994080133, 5.761459062506715, 4),
(5.809580308813496, 5.816975143899512, 6.662613753958899, 4),
(5.209252955662903, 2.9770364483569107, 5.141472643810014, 4),
(7.706105853911573, 2.789942201654241, 11.396648897274897, 4),
(7.5675942867463615, 9.569378264154928, 16.714918860774414, 4),
(8.117640564564343, 2.7489429651492028, 3.1653563832640272, 4),
(7.8731203012311255, 2.6438472620092806, 13.241107969297714, 4),
(8.04983322214289, 2.4630649870973422, 7.501924679081063, 4),
(8.355307569391062, 2.703242274198649, 11.925441344336392, 4),
(8.342795760577609, 1.0627446691234035, 6.298818145909256, 4),
(7.5947244020062845, 1.5750745121803325, 4.626613135331287, 4),
(8.19517786608579, 8.732504313513864, 23.571122010181508, 4),
(7.754763634912469, 8.437206137825585, 21.00944901061068, 4),
(9.010231962978862, 6.1312711883866395, 6.803370568930175, 4),
(9.041566851651622, 6.4540531985593965, 17.010037203566448, 4),
(9.915652169827913, 8.56438797679146, 11.13108215988432, 4),
(10.0, 8.651470349341308, 27.322046186799103, 4),
(9.961336111598143, 8.039682739223524, 13.20009863344056, 4),
(9.887406551063181, 8.321342653987184, 2.0660963235598375, 4),
(10.0, 3.400787121787084, 2.5700932200974145, 4),
(10.0, 3.063915609453643, 13.514066607169514, 4),
(10.0, 5.461465491798149, 12.753899774963989, 4),
(10.0, 5.90081409486059, 15.244598276849418, 4),
(10.0, 5.4222087054147545, 27.929001019877095, 4),
(9.757039645743053, 5.653647411872443, 3.4112871270786895, 4),
(10.0, 5.790357134071424, 24.86360130658431, 4),
(9.862075817629322, 4.487864213671867, 7.67196809500038, 4),
(3.2140937198013564, 9.345163595199718, 3.4367939376082868, 3),
(3.484005759599379, 9.572118958552942, 14.905079424139613, 3),
(3.1967035260607033, 9.059573376604588, 24.78003138905329, 3),
(2.5, 9.479129956842218, 27.736581704977635, 3),
(2.7908763449337677, 8.166099921946278, 20.868304564027603, 3),
(3.221499566897477, 5.507741920664265, 5.467726257137659, 3),
(2.622512070432247, 5.989380652373817, 19.364472252973304, 3),
(3.2873061024849806, 5.439892524933965, 19.855724192587914, 3),
(5.727612405003367, 3.013295327457818, 10.746642552166502, 3),
(5.347955701149093, 3.003537709503816, 18.900471815194905, 3),
(5.7385751713204325, 3.987559993529851, 4.223160837759656, 3),
(5.720824103581511, 1.804037523043165, 4.878068159363519, 3),
(5.316780024484356, 1.0305080135789524, 8.043957606541364, 3),
(5.7623230008312385, 1.6541934959363132, 9.507411716255689, 3),
(5.985579505387595, 2.2109765673980277, 14.803434527189347, 3),
(5.461619603420755, 2.805568235937479, 6.6471547360970025, 3),
(7.838277926195208, 2.8050500161595604, 6.238528025218592, 3),
(8.2830613968175, 2.716343821673611, 10.350825174769154, 3),
(7.603155032355272, 6.1394212951580345, 29.139541165198704, 3),
(8.324115039527976, 6.971801555303874, 23.515778973195257, 3),
(8.44424273124686, 6.657492305333222, 2.4130843113046656, 3),
(8.309061774521076, 6.371190719454564, 17.507252134514488, 3),
(8.14037117068092, 2.6868573867536836, 14.649933295354042, 3),
(8.484903553213694, 2.2057045177976002, 11.879562262633948, 3),
(8.454109029623016, 2.3630506284708144, 4.606317173304252, 3),
(8.305262429168986, 5.460535517182709, 3.9045072719017924, 3),
(8.189730004579287, 5.069933398792441, 28.126992759236863, 3),
(7.54028778107475, 5.779995612547662, 6.635319193935916, 3),
(7.9629991342362985, 5.233597701388516, 20.293354805626866, 3),
(8.432959559038371, 5.797128354507666, 26.469970873757067, 3),
(10.0, 9.005161484782885, 6.0469956581432704, 3),
(9.771353946056914, 9.383759836829901, 20.82975271547889, 3),
(9.376380796522223, 9.46044820450894, 13.348522394106682, 3),
(9.912704179532229, 4.057804958576875, 25.778231770351923, 3),
(10.0, 4.853695964045051, 13.712247643370837, 3),
(10.0, 4.221211292509457, 28.587923360931033, 3),
(9.287535146732925, 4.404206868704275, 6.997389565284625, 3),
(10.0, 5.717897422867529, 30.932435068478792, 3),
(10.0, 5.121046242854478, 7.946854746461393, 3),
(10.0, 5.631186501571907, 26.172410297895773, 3),
(2.5, 6.822278767379375, 12.643410557057086, 2),
(2.5, 3.3435596434006034, 19.167537762557394, 2),
(3.284581774573411, 3.7457477655465423, 10.316761862277126, 2),
(3.0814075494281132, 3.302789020993419, 4.031683724514751, 2),
(2.5, 9.595267222759654, 9.136435041220121, 2),
(2.5899169115530087, 9.55055785508054, 8.263133397233354, 2),
(2.5342634625499727, 9.494299074607266, 14.863663104253218, 2),
(5.275920564662094, 9.02282018751374, 12.879135949769728, 2),
(5.522856449128964, 9.387711396347438, 17.412586595686815, 2),
(5.885914939777947, 9.191119089368966, 17.388086814072437, 2),
(5.4717401116974616, 9.868862187868638, 11.646848538821667, 2),
(5.956560321967156, 4.186123335197883, 4.31169020481439, 2),
(5.6279111948942635, 4.547202429787774, 16.56681914443115, 2),
(5.8534547245334565, 4.592599799739227, 18.83506980508535, 2),
(5.144720369630256, 5.318575486426688, 18.979172966805407, 2),
(5.2907074463880175, 6.000990946276877, 13.598520998056053, 2),
(5.415844403197766, 6.398031110922737, 15.178617464461626, 2),
(8.204144852288245, 5.902107978077237, 4.020177691372295, 2),
(9.366069953403018, 3.3728653869498273, 15.422766182794579, 2),
(10.0, 3.949081763597084, 9.192387616705815, 2),
(10.0, 3.187455579956449, 15.954247893607032, 2),
(9.260586271537607, 3.4545177339210404, 10.59517579170162, 2),
(9.571675864670619, 3.149737124891618, 17.398847531397934, 2),
(3.2387393821759787, 4.827650915864795, 3.7435106940988625, 1),
(2.5, 4.30220435408426, 7.399343614420917, 1),
(2.5, 4.329470943798639, 8.860840417367838, 1),
(2.5, 7.620094327678255, 10.887265616829124, 1),
(2.5, 7.1449996531857725, 10.10233537418591, 1),
(2.6104349455855846, 7.700939489093993, 4.236171515065992, 1),
(2.5, 8.524455647347406, 5.3613636980274295, 1),
(3.1731014606584806, 8.133658146416419, 15.199536235308303, 1),
(2.5, 7.129372162253073, 5.4202608625739925, 1),
(2.5, 7.70850985024877, 9.619364938403443, 1),
(3.252581509053177, 7.081532543557421, 6.2224060204343745, 1),
(2.5, 7.67557944940156, 12.261808397585057, 1),
(2.5, 3.4825807865537914, 7.768505546917617, 1),
(2.5, 3.020783157962588, 6.998840911724095, 1),
(3.020562119690717, 3.1223174909201346, 5.203087539105082, 1),
(5.2190911687613255, 3.0070655951585925, 13.573887550967275, 1),
(5.5962506280473505, 2.15728255216339, 5.165106850365733, 1),
(5.078574838897358, 2.9637552645895053, 6.8599427244043705, 1),
(5.1756171558445825, 2.772951703906637, 4.56080038214103, 1),
(5.497353020782844, 5.410551418942688, 2.0, 1),
(5.841773513544001, 5.686667624085427, 13.28936566781855, 1),
(5.580549185463668, 6.964187735662777, 16.1803201492634, 1),
(5.287772726922527, 6.865396694853934, 14.098946461580404, 1),
(8.358221285614269, 4.591594256415192, 17.271563597297103, 1),
(7.87724479635977, 4.744438140664897, 5.598934346859475, 1),
(8.323336953587479, 4.566800376285041, 7.0881523668119195, 1),
(7.845486096299681, 4.586270737017715, 16.23379517928239, 1),
(8.382569502344943, 4.562211644069123, 13.97512411087629, 1),
(7.855593749782354, 3.238350356301548, 5.360435825061775, 1),
(7.655501153733914, 3.903923881082662, 9.769593047963392, 1),
(7.653019158008493, 6.348396270933699, 11.704589766625281, 1),
(10.0, 2.7176353295329094, 5.415846167802247, 1),
(9.196648156004963, 8.15078293499349, 5.069223366759241, 1),
(10.0, 6.040694822625091, 7.76280231640685, 1),
(10.0, 6.719017792521678, 18.37437538640251, 1),
(2.8739501345809, 3.5100389001084373, 4.494521106912674, 10),
(2.979763831715893, 8.5642374861117, 6.426710793964199, 10),
(2.5, 8.924876646785982, 2.491252841450378, 10),
(2.5, 8.121352187119456, 8.82337986403619, 10),
(2.5, 4.643160393937538, 18.83933997786449, 10),
(2.5, 4.925443059836121, 5.417711811598947, 10),
(2.5, 8.509385882792433, 8.04535672534691, 10),
(2.5, 2.709647356385667, 16.195810159806815, 10),
(5.6678871626197305, 1.8444622064585485, 18.226010811743183, 10),
(5.759673840199206, 1.960972599684376, 30.42873152741525, 10),
(5.783634661463273, 1.5360819708237339, 21.480194214511137, 10),
(5.118173248862928, 1.5400563354602976, 41.86847335857883, 10),
(5.757349724389667, 1.6383453350505301, 13.609604267804956, 10),
(5.279304061296045, 4.900840641360432, 22.876127528048663, 10),
(5.715709801059808, 4.570357108788123, 30.360213488022158, 10),
(5.947947304520848, 4.273422536180247, 4.8966439066197935, 10),
(5.09899322481724, 4.947505227279317, 26.26875042475258, 10),
(5.53222949762985, 4.629910893964432, 7.756449262721482, 10),
(5.923584541768192, 4.593239396795306, 19.567605030849386, 10),
(5.950156387030171, 2.42463499343633, 4.953666946161412, 10),
(5.614158136535322, 2.4812727587161407, 20.644953904366893, 10),
(5.435908140730638, 2.7884847594702746, 21.585064332200393, 10),
(5.539908561343329, 2.9864344023506266, 44.90369903995316, 10),
(5.3792514320991325, 2.137036038265424, 25.88907455882873, 10),
(5.632909830682246, 5.9349482115124506, 21.384042506861697, 10),
(5.20332651493292, 5.825367195549048, 15.514467427422431, 10),
(5.927793692134072, 5.448079050348612, 3.7766395197414253, 10),
(5.817322396187511, 5.292185862716667, 11.31804158090752, 10),
(7.949960633591607, 2.873765731226449, 25.621368902089333, 10),
(8.382592436810759, 2.461570417216745, 40.54127195292601, 10),
(7.96379736332257, 2.200134671312228, 36.70731870996695, 10),
(8.373924456610474, 2.3066883154384743, 8.623846064990166, 10),
(8.151990686473388, 2.2622251239305577, 42.229127196458144, 10),
(8.25502764532606, 9.609182815192318, 7.080986046028279, 10),
(8.488384085232076, 8.098523111957578, 9.779628072315807, 10),
(8.438357068876163, 2.6893452283620705, 26.873452492074044, 10),
(8.309434906530441, 2.4623229011742396, 48.49966399344499, 10),
(7.7115794149655015, 2.724728645017314, 5.729859843354196, 10),
(7.6273740879401934, 2.2251923932068416, 26.724973070776922, 10),
(7.693923337226084, 2.6579274123978887, 48.407897505690485, 10),
(10.0, 6.197418391023862, 10.97195381591066, 10),
(9.113097274740381, 6.270996638245157, 2.7564645951736484, 10),
(10.0, 9.235232580795238, 6.003388325186025, 10),
(10.0, 5.050367446997329, 19.170756721559698, 10),
(9.380110088755156, 5.5356649305105154, 18.817507743754415, 10),
(9.001795946577033, 7.786061808916703, 4.453854563212078, 10),
(10.0, 7.692030956316567, 3.653159723688856, 10),
(9.046182896421445, 3.0439259875156295, 22.300946806849847, 10),
(9.459420796383784, 3.0372188559586917, 10.552556949414955, 10),
(10.0, 3.3229506269252425, 31.2476220198124, 10),
(10.0, 3.1004893435032645, 29.2734347311525, 10),
(2.5, 7.990213836555715, 8.375074375178261, 9),
(2.5, 7.298301069157875, 9.502846862649331, 9),
(2.8619005171223564, 7.275426002317967, 7.466126134628901, 9),
(3.0874221941355513, 8.485000561300847, 2.493857829360787, 9),
(2.5, 3.690667859366627, 19.77471678075617, 9),
(3.220553507003754, 3.281507210559706, 37.05938066272616, 9),
(2.5, 3.8989428412499203, 39.166418500944374, 9),
(2.7654037016841957, 3.1169069187360945, 29.726535569137937, 9),
(2.5, 3.703448940191029, 12.087654687250128, 9),
(2.5, 3.433194385943258, 3.382852759577178, 9),
(2.836612137080781, 3.9924265837199604, 2.0, 9),
(2.8888545547050946, 3.2474346036095905, 14.618307037832857, 9),
(5.164399331990519, 6.2346627424063925, 9.111465383743912, 9),
(5.500356903003388, 6.736841239972426, 13.154128131968298, 9),
(5.535810057742433, 6.970342536034459, 8.892716664134475, 9),
(5.590040966343994, 3.5668609688847175, 22.75661278689855, 9),
(5.282620261743346, 3.2367340323019573, 18.732823688754383, 9),
(5.172895640160181, 3.0043051231231956, 6.2292543458148515, 9),
(5.259721854731981, 3.3004333429874864, 35.890872110681414, 9),
(5.5536463415959245, 3.4948508349893386, 10.076683709549055, 9),
(5.730003972159145, 2.488034141173207, 15.985698390269977, 9),
(5.782381516990652, 2.4812045413951833, 28.774618518379302, 9),
(5.069379781665461, 6.741533325352479, 2.2194841714206595, 9),
(5.1346796709796605, 6.103139133682482, 27.726398643923417, 9),
(5.383260687864624, 5.56099784134289, 18.302295934127923, 9),
(5.869792088464701, 5.233311379347905, 32.55343216796663, 9),
(5.462451143540612, 5.746471808899983, 30.948864634440213, 9),
(5.357445269639698, 5.68852667194441, 5.261434469006405, 9),
(5.626373453003034, 5.771003693827525, 25.170846666445236, 9),
(8.284200895164993, 2.466049819474928, 17.238899804160177, 9),
(8.318102784124019, 2.2658035726687236, 22.596147383535918, 9),
(7.851936866242713, 7.45229335345878, 20.962374407911458, 9),
(8.146081336032703, 7.714405906472637, 13.533962918469337, 9),
(8.09720864316275, 7.247339841946607, 17.33899155052454, 9),
(7.830256291991797, 6.872416994269415, 10.706822163825924, 9),
(7.80065897068848, 6.330678323824742, 6.211375680877805, 9),
(8.044863647118635, 6.808226317611471, 15.557155261544228, 9),
(8.461774802909071, 4.745965252820717, 36.03729693977732, 9),
(7.612382882207284, 4.372367470892327, 14.168690780706225, 9),
(8.169633927695997, 4.48833473800357, 27.23584610386441, 9),
(9.602031136015775, 5.527970638413552, 20.5806356758181, 9),
(9.663686030178818, 5.516978463101205, 29.047658472982956, 9),
(9.75292854736471, 5.461162553197844, 34.11493160528129, 9),
(10.0, 5.650424904167431, 4.216215730437086, 9),
(10.0, 5.73654367766597, 34.72852675583916, 9),
(10.0, 5.4360854849263855, 14.779627294882367, 9),
(10.0, 5.79544155795279, 2.0, 9),
(9.49705091394873, 5.914105479148815, 10.80885478009873, 9),
(9.826635163465532, 1.9759992882300867, 7.06711443184985, 9),
(9.382502350301259, 4.709738717837755, 19.999476877446362, 9),
(9.115530591819274, 4.986025386567032, 5.883436488694818, 9),
(10.0, 4.813033015882831, 24.745870232952445, 9),
(9.378359588580793, 4.574376802251692, 26.295787257422923, 9),
(10.0, 2.1709322459501545, 21.57257635660235, 9),
(10.0, 2.5713046143569223, 26.039872491235577, 9),
(2.5, 2.6357605512712707, 4.712138166253982, 8),
(2.8874578666829285, 2.0337681540970594, 13.994896052145748, 8),
(3.435419560439465, 2.2299190864211247, 6.718989113532732, 8),
(2.9925336062737173, 1.928933557645075, 7.198014339866309, 8),
(2.5, 1.3726890604845965, 14.156726710024465, 8),
(2.6104579288975813, 1.2137704813997643, 3.3458156268951917, 8),
(5.1670653045538115, 7.761502840367845, 2.1409481568506346, 8),
(5.054434114346951, 7.011456904063963, 6.442157332603133, 8),
(5.803735682450612, 8.51299345440391, 10.443841773523394, 8),
(5.044877539779968, 6.342036003669621, 18.424428701407553, 8),
(5.484832402621484, 6.739510598555563, 5.474777491295647, 8),
(5.162300427200289, 6.57672216934989, 24.999056248525125, 8),
(5.877256360743413, 6.789776791182118, 15.450444143259661, 8),
(8.197449080109873, 2.2092984979309276, 2.0, 8),
(7.997237265754237, 2.060313094466323, 11.655829335806517, 8),
(7.973192560907184, 8.67128307488709, 4.272886886879181, 8),
(7.836498646186221, 8.168701526186094, 13.596658717999025, 8),
(7.782186965908517, 9.202193528464585, 13.902105524067945, 8),
(9.531795266771761, 5.037755377967032, 2.0, 8),
(10.0, 5.41661210331397, 11.055624912778937, 8),
(9.312270837393163, 7.466203120412419, 11.185222099189973, 8),
(10.0, 7.097905887270363, 13.895455902446677, 8),
(9.925669940032272, 4.692192166283825, 7.2040789887667955, 8),
(9.416740882402403, 4.697368796121149, 8.720116348180492, 8),
(10.0, 4.338509514756336, 16.469698910991372, 8),
(10.0, 6.402201264456283, 6.599237233947309, 8),
(10.0, 5.182208073338139, 4.550269784467781, 8),
(9.970332530519679, 5.903209540812212, 10.837022722087644, 8),
(2.962707587174585, 9.2513521634857, 9.999116931630539, 7),
(3.1672052728994915, 9.141134617154027, 7.383624729892915, 7),
(2.5, 5.049858089466979, 17.881593853007615, 7),
(2.7415018638966284, 5.680976628228491, 18.00290873780138, 7),
(2.5, 5.8481154189353175, 10.232668996271492, 7),
(2.877902226185231, 5.567414385297515, 3.5582034231201787, 7),
(2.5, 5.8534450733346, 27.77999592691697, 7),
(5.412821771284458, 2.5214549204115335, 7.258040020605607, 7),
(5.83754747605084, 2.530273181625722, 11.998261380615471, 7),
(5.9693975439749885, 4.3487706338488, 14.397906420283302, 7),
(5.004079000563381, 4.657273345320005, 22.736677614468775, 7),
(5.168438425945292, 4.24641271720769, 4.844860547907693, 7),
(5.863284315202094, 4.359153796629064, 23.489710023246513, 7),
(5.756333389411959, 8.952011225713635, 7.301135618422141, 7),
(5.108337403014788, 8.31154202432518, 11.359771531491097, 7),
(8.314898437378535, 9.185953513281046, 4.238233636005843, 7),
(8.201460399608226, 4.230965415446139, 11.589840844520428, 7),
(7.595604919273442, 4.88445113865134, 6.798265747221928, 7),
(8.378186361828917, 9.484819582257831, 8.022357890675561, 7),
(8.028236284464779, 9.757701617444052, 11.574198271062086, 7),
(8.229270762113973, 8.691786353579515, 6.350022396927342, 7),
(10.0, 3.3059509658558612, 3.1152259635487924, 7),
(9.756267998308681, 3.1863606517354883, 14.803384721914584, 7),
(10.0, 3.5046891678155427, 13.90160960971739, 7),
(10.0, 8.784136629159212, 6.218490965882184, 7),
(10.0, 8.37434528326138, 13.887493044276624, 7),
(10.0, 4.6140458786417, 14.68907159946693, 7),
(10.0, 8.03303730091703, 13.518172354943417, 7),
(2.7455640547144746, 1.6521001852026693, 5.569110673549164, 6),
(3.1452880891491906, 5.155515834056653, 8.595832717291, 6),
(2.5, 4.389047661368727, 4.950679151608691, 6),
(2.5, 4.394863837189541, 4.383231249423155, 6),
(2.5, 1.5580252510526358, 3.307282274836235, 6),
(5.045583268005572, 8.635334543903529, 9.59194524860244, 6),
(5.594284526041456, 8.6320252698003, 10.197201238166286, 6),
(5.988802467213943, 8.132531816914582, 12.30595195616923, 6),
(5.425850947396252, 5.185445600639579, 8.046156862703112, 6),
(5.369364240119585, 5.088077743168478, 7.340573827339962, 6),
(5.702045821590509, 5.271793984998375, 10.325652051724541, 6),
(5.411096326958829, 5.545898372969883, 5.292034843095026, 6),
(8.242968536635763, 9.082400742895011, 4.90020586532881, 6),
(8.050426422258862, 9.780537958506372, 18.978339720751418, 6),
(8.238754570485817, 8.602489911338367, 5.94133011037865, 6),
(8.39568424389748, 4.506736427736353, 9.461515968715135, 6),
(10.0, 5.138757136469953, 12.704963485646498, 6),
(10.0, 5.159912610631281, 15.6753707607594, 6),
(10.0, 5.549472965121217, 3.506573388368494, 6),
(10.0, 5.795090421330749, 14.063922879568509, 6),
(10.0, 6.983123234599715, 3.128443413944953, 6),
(10.0, 6.680204754366847, 11.632405914314647, 6),
(9.050263182466011, 6.721800647918977, 17.08367694275979, 6),
(10.0, 6.0634616201345715, 4.736966947326921, 6),
(9.409402543801862, 6.94420363069249, 6.28766021168659, 6),
(9.633394604006961, 7.505827554006868, 4.623044001702525, 6),
(9.020770192275748, 7.3138794160617016, 13.422245014577644, 6),
(9.26317609686154, 7.357994930871833, 15.233295182477667, 6),
(3.332782026387723, 7.225679089752617, 16.113419977677538, 5),
(2.5, 5.428663116358418, 6.5436496028361315, 5),
(2.5, 2.829072524106358, 2.0, 5),
(2.8285591842433737, 8.730390823623916, 21.473258817290873, 5),
(2.5, 8.17012010036135, 12.020108658634838, 5),
(2.5, 8.74354045618398, 14.42790441415372, 5),
(2.5, 4.638913962811717, 8.380243803410817, 5),
(3.363079416671538, 4.670651645625486, 2.7755096642090313, 5),
(5.339079962653624, 8.064094823108675, 16.611574939424255, 5),
(5.347356764781598, 8.43641762101464, 15.41216519823205, 5),
(5.368950609634622, 7.371653807185894, 7.038165919924306, 5),
(5.929552854535908, 6.895926920816455, 7.57281344704806, 5),
(5.72794655950891, 6.581660847859535, 10.668172633934036, 5),
(5.641782139668679, 6.458019104693064, 9.549016885745186, 5),
(5.344359642058747, 2.871097758194079, 5.430489560972486, 5),
(7.749909297802317, 4.328832721055091, 4.268933751175051, 5),
(8.145409228909998, 4.865021714408405, 7.545633529064384, 5),
(7.907253670159305, 5.688395096546548, 10.770986229289623, 5),
(7.592508492261312, 5.098997604455221, 4.933568344499713, 5),
(7.674872690410821, 5.441049019888879, 3.5502452884794837, 5),
(7.991979987062054, 6.616295483614106, 3.2837012487472252, 5),
(9.345599185286883, 7.224736586735167, 17.48852175788182, 5),
(9.659595218511388, 7.899577776723924, 3.3572177484844636, 5))
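# Achromatic (grey) Munsell values: 25 evenly spaced samples of the value
# scale in [0, 10] (presumably used to exercise the achromatic path).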
MUNSELL_GREYS_SPECIFICATIONS = np.linspace(0, 10, 25)
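# Presumably (hue, value, chroma, hue-letter code) quadruples restricted to
# the standard 2.5-step hues, integer values and even chromas.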
MUNSELL_EVEN_SPECIFICATIONS = (
(2.5, 5.0, 12.0, 4),
(2.5, 5.0, 32.0, 4),
(2.5, 5.0, 22.0, 4),
(2.5, 5.0, 32.0, 4),
(2.5, 6.0, 18.0, 4),
(2.5, 6.0, 32.0, 4),
(2.5, 6.0, 6.0, 4),
(2.5, 5.0, 42.0, 4),
(2.5, 5.0, 26.0, 4),
(2.5, 5.0, 48.0, 4),
(2.5, 2.0, 14.0, 4),
(2.5, 2.0, 14.0, 4),
(2.5, 0.0, 14.0, 4),
(2.5, 0.0, 2.0, 4),
(5.0, 1.0, 46.0, 4),
(5.0, 1.0, 38.0, 4),
(5.0, 1.0, 12.0, 4),
(5.0, 1.0, 10.0, 4),
(5.0, 4.0, 16.0, 4),
(5.0, 2.0, 44.0, 4),
(5.0, 7.0, 2.0, 4),
(5.0, 7.0, 8.0, 4),
(5.0, 7.0, 32.0, 4),
(7.5, 2.0, 28.0, 4),
(7.5, 2.0, 12.0, 4),
(7.5, 2.0, 34.0, 4),
(7.5, 4.0, 24.0, 4),
(7.5, 4.0, 10.0, 4),
(7.5, 4.0, 18.0, 4),
(7.5, 9.0, 44.0, 4),
(7.5, 5.0, 12.0, 4),
(7.5, 5.0, 40.0, 4),
(7.5, 5.0, 30.0, 4),
(7.5, 5.0, 12.0, 4),
(10.0, 3.0, 38.0, 4),
(10.0, 3.0, 16.0, 4),
(10.0, 3.0, 32.0, 4),
(10.0, 3.0, 44.0, 4),
(10.0, 3.0, 42.0, 4),
(10.0, 3.0, 34.0, 4),
(10.0, 3.0, 18.0, 4),
(10.0, 7.0, 10.0, 4),
(10.0, 7.0, 40.0, 4),
(10.0, 7.0, 12.0, 4),
(10.0, 6.0, 42.0, 4),
(10.0, 6.0, 6.0, 4),
(10.0, 4.0, 40.0, 4),
(2.5, 7.0, 28.0, 3),
(2.5, 7.0, 26.0, 3),
(2.5, 9.0, 44.0, 3),
(2.5, 9.0, 26.0, 3),
(2.5, 0.0, 32.0, 3),
(2.5, 0.0, 26.0, 3),
(2.5, 8.0, 30.0, 3),
(2.5, 8.0, 30.0, 3),
(2.5, 8.0, 6.0, 3),
(2.5, 6.0, 32.0, 3),
(2.5, 6.0, 12.0, 3),
(5.0, 7.0, 28.0, 3),
(5.0, 7.0, 26.0, 3),
(5.0, 7.0, 46.0, 3),
(5.0, 7.0, 10.0, 3),
(5.0, 6.0, 10.0, 3),
(5.0, 6.0, 44.0, 3),
(5.0, 1.0, 2.0, 3),
(5.0, 9.0, 34.0, 3),
(5.0, 9.0, 30.0, 3),
(7.5, 3.0, 12.0, 3),
(7.5, 7.0, 26.0, 3),
(7.5, 7.0, 18.0, 3),
(7.5, 7.0, 42.0, 3),
(7.5, 7.0, 20.0, 3),
(7.5, 7.0, 16.0, 3),
(7.5, 3.0, 36.0, 3),
(7.5, 3.0, 38.0, 3),
(7.5, 3.0, 14.0, 3),
(7.5, 2.0, 30.0, 3),
(7.5, 2.0, 12.0, 3),
(7.5, 2.0, 8.0, 3),
(7.5, 2.0, 6.0, 3),
(7.5, 6.0, 34.0, 3),
(7.5, 6.0, 12.0, 3),
(10.0, 4.0, 14.0, 3),
(10.0, 4.0, 40.0, 3),
(10.0, 5.0, 2.0, 3),
(10.0, 5.0, 26.0, 3),
(10.0, 6.0, 40.0, 3),
(10.0, 6.0, 46.0, 3),
(10.0, 6.0, 18.0, 3),
(10.0, 6.0, 38.0, 3),
(10.0, 3.0, 16.0, 3),
(10.0, 3.0, 32.0, 3),
(10.0, 3.0, 26.0, 3),
(10.0, 3.0, 22.0, 3),
(10.0, 8.0, 2.0, 3),
(10.0, 8.0, 10.0, 3),
(10.0, 8.0, 12.0, 3),
(10.0, 8.0, 18.0, 3),
(10.0, 8.0, 44.0, 3),
(2.5, 8.0, 2.0, 2),
(2.5, 8.0, 42.0, 2),
(2.5, 7.0, 34.0, 2),
(2.5, 4.0, 36.0, 2),
(2.5, 4.0, 34.0, 2),
(2.5, 4.0, 22.0, 2),
(2.5, 0.0, 42.0, 2),
(2.5, 0.0, 32.0, 2),
(2.5, 1.0, 28.0, 2),
(2.5, 1.0, 2.0, 2),
(2.5, 1.0, 24.0, 2),
(2.5, 1.0, 12.0, 2),
(5.0, 5.0, 22.0, 2),
(5.0, 5.0, 46.0, 2),
(5.0, 5.0, 24.0, 2),
(5.0, 1.0, 48.0, 2),
(5.0, 1.0, 12.0, 2),
(5.0, 1.0, 16.0, 2),
(5.0, 1.0, 2.0, 2),
(5.0, 1.0, 18.0, 2),
(5.0, 8.0, 28.0, 2),
(5.0, 8.0, 32.0, 2),
(5.0, 8.0, 24.0, 2),
(5.0, 8.0, 38.0, 2),
(5.0, 2.0, 24.0, 2),
(5.0, 2.0, 4.0, 2),
(5.0, 2.0, 32.0, 2),
(5.0, 2.0, 38.0, 2),
(5.0, 9.0, 36.0, 2),
(5.0, 9.0, 34.0, 2),
(5.0, 9.0, 4.0, 2),
(7.5, 7.0, 28.0, 2),
(7.5, 7.0, 10.0, 2),
(7.5, 7.0, 48.0, 2),
(7.5, 9.0, 48.0, 2),
(7.5, 9.0, 48.0, 2),
(7.5, 9.0, 30.0, 2),
(7.5, 5.0, 42.0, 2),
(7.5, 5.0, 46.0, 2),
(7.5, 6.0, 26.0, 2),
(7.5, 6.0, 28.0, 2),
(7.5, 6.0, 22.0, 2),
(7.5, 6.0, 10.0, 2),
(7.5, 6.0, 32.0, 2),
(7.5, 6.0, 32.0, 2),
(10.0, 7.0, 10.0, 2),
(10.0, 7.0, 30.0, 2),
(10.0, 7.0, 30.0, 2),
(10.0, 7.0, 14.0, 2),
(10.0, 7.0, 10.0, 2),
(10.0, 7.0, 12.0, 2),
(10.0, 8.0, 12.0, 2),
(10.0, 8.0, 28.0, 2),
(10.0, 8.0, 42.0, 2),
(10.0, 8.0, 4.0, 2),
(10.0, 8.0, 10.0, 2),
(10.0, 8.0, 22.0, 2),
(10.0, 9.0, 6.0, 2),
(10.0, 9.0, 38.0, 2),
(2.5, 2.0, 18.0, 1),
(2.5, 2.0, 24.0, 1),
(2.5, 9.0, 18.0, 1),
(2.5, 9.0, 28.0, 1),
(2.5, 9.0, 20.0, 1),
(2.5, 4.0, 14.0, 1),
(2.5, 4.0, 36.0, 1),
(2.5, 4.0, 26.0, 1),
(2.5, 3.0, 22.0, 1),
(2.5, 3.0, 42.0, 1),
(2.5, 3.0, 32.0, 1),
(2.5, 3.0, 16.0, 1),
(2.5, 3.0, 38.0, 1),
(2.5, 9.0, 2.0, 1),
(2.5, 9.0, 8.0, 1),
(2.5, 9.0, 26.0, 1),
(2.5, 9.0, 42.0, 1),
(5.0, 2.0, 2.0, 1),
(5.0, 2.0, 14.0, 1),
(5.0, 2.0, 8.0, 1),
(5.0, 1.0, 20.0, 1),
(5.0, 1.0, 32.0, 1),
(5.0, 3.0, 48.0, 1),
(5.0, 0.0, 42.0, 1),
(7.5, 3.0, 2.0, 1),
(7.5, 3.0, 36.0, 1),
(7.5, 5.0, 32.0, 1),
(7.5, 5.0, 20.0, 1),
(7.5, 5.0, 34.0, 1),
(7.5, 0.0, 6.0, 1),
(7.5, 0.0, 12.0, 1),
(7.5, 8.0, 48.0, 1),
(7.5, 8.0, 32.0, 1),
(7.5, 8.0, 4.0, 1),
(10.0, 5.0, 22.0, 1),
(10.0, 5.0, 18.0, 1),
(10.0, 5.0, 46.0, 1),
(10.0, 5.0, 12.0, 1),
(10.0, 5.0, 30.0, 1),
(10.0, 7.0, 36.0, 1),
(10.0, 7.0, 30.0, 1),
(10.0, 7.0, 20.0, 1),
(10.0, 7.0, 38.0, 1),
(10.0, 7.0, 20.0, 1),
(10.0, 1.0, 18.0, 1),
(10.0, 1.0, 10.0, 1),
(10.0, 1.0, 18.0, 1),
(10.0, 1.0, 20.0, 1),
(10.0, 0.0, 12.0, 1),
(10.0, 0.0, 46.0, 1),
(10.0, 0.0, 38.0, 1),
(2.5, 7.0, 40.0, 10),
(2.5, 7.0, 22.0, 10),
(2.5, 4.0, 12.0, 10),
(2.5, 4.0, 32.0, 10),
(2.5, 4.0, 36.0, 10),
(2.5, 0.0, 20.0, 10),
(2.5, 0.0, 30.0, 10),
(2.5, 3.0, 40.0, 10),
(2.5, 3.0, 10.0, 10),
(2.5, 8.0, 42.0, 10),
(2.5, 8.0, 4.0, 10),
(2.5, 8.0, 44.0, 10),
(2.5, 8.0, 32.0, 10),
(2.5, 8.0, 24.0, 10),
(5.0, 9.0, 42.0, 10),
(5.0, 9.0, 18.0, 10),
(5.0, 9.0, 2.0, 10),
(5.0, 7.0, 46.0, 10),
(5.0, 7.0, 42.0, 10),
(5.0, 7.0, 34.0, 10),
(5.0, 0.0, 46.0, 10),
(5.0, 0.0, 8.0, 10),
(5.0, 5.0, 28.0, 10),
(5.0, 1.0, 4.0, 10),
(5.0, 1.0, 10.0, 10),
(5.0, 1.0, 26.0, 10),
(7.5, 3.0, 26.0, 10),
(7.5, 3.0, 42.0, 10),
(7.5, 3.0, 36.0, 10),
(7.5, 0.0, 16.0, 10),
(7.5, 0.0, 40.0, 10),
(7.5, 2.0, 4.0, 10),
(7.5, 2.0, 14.0, 10),
(7.5, 2.0, 46.0, 10),
(7.5, 8.0, 38.0, 10),
(7.5, 8.0, 6.0, 10),
(7.5, 8.0, 24.0, 10),
(7.5, 8.0, 20.0, 10),
(7.5, 0.0, 48.0, 10),
(7.5, 0.0, 20.0, 10),
(7.5, 0.0, 46.0, 10),
(7.5, 0.0, 38.0, 10),
(10.0, 2.0, 32.0, 10),
(10.0, 2.0, 10.0, 10),
(10.0, 2.0, 30.0, 10),
(10.0, 8.0, 14.0, 10),
(10.0, 8.0, 24.0, 10),
(10.0, 8.0, 44.0, 10),
(10.0, 9.0, 28.0, 10),
(10.0, 9.0, 36.0, 10),
(10.0, 9.0, 12.0, 10),
(10.0, 6.0, 20.0, 10),
(10.0, 6.0, 46.0, 10),
(10.0, 6.0, 20.0, 10),
(10.0, 6.0, 28.0, 10),
(10.0, 6.0, 16.0, 10),
(10.0, 6.0, 44.0, 10),
(10.0, 6.0, 28.0, 10),
(2.5, 6.0, 6.0, 9),
(2.5, 5.0, 24.0, 9),
(2.5, 5.0, 6.0, 9),
(2.5, 2.0, 42.0, 9),
(2.5, 2.0, 24.0, 9),
(2.5, 2.0, 36.0, 9),
(2.5, 2.0, 42.0, 9),
(2.5, 2.0, 16.0, 9),
(2.5, 2.0, 22.0, 9),
(2.5, 2.0, 26.0, 9),
(2.5, 2.0, 36.0, 9),
(2.5, 8.0, 30.0, 9),
(2.5, 8.0, 6.0, 9),
(5.0, 9.0, 6.0, 9),
(5.0, 9.0, 22.0, 9),
(5.0, 9.0, 42.0, 9),
(5.0, 1.0, 10.0, 9),
(5.0, 2.0, 32.0, 9),
(5.0, 2.0, 28.0, 9),
(5.0, 0.0, 34.0, 9),
(5.0, 0.0, 22.0, 9),
(5.0, 4.0, 2.0, 9),
(5.0, 4.0, 2.0, 9),
(5.0, 4.0, 4.0, 9),
(7.5, 5.0, 6.0, 9),
(7.5, 5.0, 28.0, 9),
(7.5, 3.0, 2.0, 9),
(7.5, 3.0, 34.0, 9),
(7.5, 3.0, 8.0, 9),
(7.5, 7.0, 46.0, 9),
(7.5, 9.0, 34.0, 9),
(7.5, 9.0, 44.0, 9),
(7.5, 4.0, 10.0, 9),
(7.5, 4.0, 10.0, 9),
(10.0, 4.0, 16.0, 9),
(10.0, 4.0, 4.0, 9),
(10.0, 1.0, 44.0, 9),
(10.0, 1.0, 16.0, 9),
(10.0, 1.0, 30.0, 9),
(10.0, 1.0, 44.0, 9),
(10.0, 3.0, 4.0, 9),
(10.0, 3.0, 46.0, 9),
(10.0, 0.0, 14.0, 9),
(2.5, 1.0, 4.0, 8),
(2.5, 1.0, 18.0, 8),
(2.5, 1.0, 8.0, 8),
(2.5, 3.0, 32.0, 8),
(2.5, 3.0, 28.0, 8),
(2.5, 3.0, 46.0, 8),
(2.5, 3.0, 12.0, 8),
(2.5, 3.0, 18.0, 8),
(2.5, 0.0, 34.0, 8),
(2.5, 0.0, 22.0, 8),
(2.5, 2.0, 22.0, 8),
(2.5, 2.0, 14.0, 8),
(2.5, 2.0, 42.0, 8),
(2.5, 0.0, 16.0, 8),
(5.0, 4.0, 24.0, 8),
(5.0, 4.0, 26.0, 8),
(5.0, 0.0, 26.0, 8),
(5.0, 2.0, 44.0, 8),
(5.0, 2.0, 38.0, 8),
(5.0, 2.0, 48.0, 8),
(5.0, 2.0, 26.0, 8),
(5.0, 2.0, 6.0, 8),
(5.0, 4.0, 12.0, 8),
(5.0, 9.0, 36.0, 8),
(5.0, 9.0, 48.0, 8),
(5.0, 9.0, 16.0, 8),
(5.0, 9.0, 6.0, 8),
(7.5, 0.0, 34.0, 8),
(7.5, 6.0, 10.0, 8),
(7.5, 6.0, 12.0, 8),
(7.5, 6.0, 10.0, 8),
(7.5, 9.0, 26.0, 8),
(7.5, 9.0, 6.0, 8),
(7.5, 9.0, 40.0, 8),
(7.5, 9.0, 18.0, 8),
(7.5, 4.0, 20.0, 8),
(7.5, 4.0, 42.0, 8),
(7.5, 4.0, 16.0, 8),
(10.0, 5.0, 24.0, 8),
(10.0, 5.0, 44.0, 8),
(10.0, 5.0, 20.0, 8),
(10.0, 5.0, 22.0, 8),
(10.0, 3.0, 16.0, 8),
(10.0, 3.0, 14.0, 8),
(10.0, 3.0, 6.0, 8),
(10.0, 2.0, 24.0, 8),
(10.0, 2.0, 36.0, 8),
(10.0, 2.0, 46.0, 8),
(10.0, 0.0, 42.0, 8),
(10.0, 0.0, 26.0, 8),
(10.0, 0.0, 42.0, 8),
(10.0, 0.0, 10.0, 8),
(10.0, 9.0, 12.0, 8),
(10.0, 9.0, 8.0, 8),
(2.5, 4.0, 42.0, 7),
(2.5, 4.0, 14.0, 7),
(2.5, 4.0, 46.0, 7),
(2.5, 4.0, 18.0, 7),
(2.5, 2.0, 2.0, 7),
(2.5, 2.0, 38.0, 7),
(2.5, 2.0, 14.0, 7),
(2.5, 8.0, 26.0, 7),
(2.5, 7.0, 12.0, 7),
(2.5, 7.0, 46.0, 7),
(2.5, 1.0, 44.0, 7),
(5.0, 8.0, 26.0, 7),
(5.0, 0.0, 46.0, 7),
(5.0, 9.0, 44.0, 7),
(5.0, 9.0, 16.0, 7),
(5.0, 9.0, 40.0, 7),
(5.0, 5.0, 14.0, 7),
(5.0, 7.0, 6.0, 7),
(5.0, 7.0, 30.0, 7),
(7.5, 1.0, 16.0, 7),
(7.5, 1.0, 18.0, 7),
(7.5, 1.0, 30.0, 7),
(7.5, 1.0, 4.0, 7),
(7.5, 1.0, 10.0, 7),
(7.5, 1.0, 40.0, 7),
(7.5, 1.0, 18.0, 7),
(7.5, 3.0, 14.0, 7),
(7.5, 3.0, 48.0, 7),
(7.5, 3.0, 48.0, 7),
(7.5, 3.0, 6.0, 7),
(7.5, 0.0, 36.0, 7),
(7.5, 0.0, 42.0, 7),
(7.5, 0.0, 22.0, 7),
(10.0, 9.0, 8.0, 7),
(10.0, 9.0, 8.0, 7),
(10.0, 7.0, 36.0, 7),
(10.0, 7.0, 46.0, 7),
(10.0, 7.0, 20.0, 7),
(10.0, 7.0, 14.0, 7),
(10.0, 4.0, 32.0, 7),
(10.0, 4.0, 16.0, 7),
(10.0, 1.0, 24.0, 7),
(10.0, 1.0, 40.0, 7),
(10.0, 1.0, 18.0, 7),
(10.0, 1.0, 24.0, 7),
(10.0, 7.0, 30.0, 7),
(10.0, 7.0, 30.0, 7),
(2.5, 5.0, 24.0, 6),
(2.5, 5.0, 42.0, 6),
(2.5, 8.0, 34.0, 6),
(2.5, 8.0, 34.0, 6),
(2.5, 8.0, 48.0, 6),
(2.5, 8.0, 26.0, 6),
(2.5, 3.0, 28.0, 6),
(5.0, 4.0, 44.0, 6),
(5.0, 4.0, 46.0, 6),
(5.0, 6.0, 28.0, 6),
(5.0, 6.0, 46.0, 6),
(5.0, 2.0, 10.0, 6),
(5.0, 2.0, 4.0, 6),
(5.0, 2.0, 34.0, 6),
(5.0, 4.0, 46.0, 6),
(5.0, 4.0, 20.0, 6),
(7.5, 7.0, 2.0, 6),
(7.5, 6.0, 46.0, 6),
(7.5, 6.0, 48.0, 6),
(7.5, 6.0, 36.0, 6),
(7.5, 6.0, 42.0, 6),
(10.0, 7.0, 8.0, 6),
(10.0, 8.0, 18.0, 6),
(10.0, 8.0, 42.0, 6),
(10.0, 8.0, 32.0, 6),
(10.0, 2.0, 22.0, 6),
(10.0, 2.0, 28.0, 6),
(10.0, 2.0, 6.0, 6),
(10.0, 2.0, 36.0, 6),
(10.0, 2.0, 48.0, 6),
(10.0, 2.0, 28.0, 6),
(10.0, 2.0, 36.0, 6),
(10.0, 2.0, 6.0, 6),
(10.0, 9.0, 24.0, 6),
(2.5, 6.0, 42.0, 5),
(2.5, 6.0, 8.0, 5),
(2.5, 7.0, 20.0, 5),
(2.5, 7.0, 4.0, 5),
(2.5, 7.0, 16.0, 5),
(2.5, 5.0, 34.0, 5),
(2.5, 5.0, 22.0, 5),
(2.5, 2.0, 26.0, 5),
(2.5, 2.0, 38.0, 5),
(2.5, 2.0, 30.0, 5),
(5.0, 9.0, 42.0, 5),
(5.0, 9.0, 20.0, 5),
(5.0, 9.0, 32.0, 5),
(5.0, 6.0, 46.0, 5),
(5.0, 6.0, 14.0, 5),
(5.0, 0.0, 38.0, 5),
(5.0, 0.0, 16.0, 5),
(5.0, 4.0, 16.0, 5),
(5.0, 4.0, 42.0, 5),
(5.0, 4.0, 8.0, 5),
(5.0, 9.0, 2.0, 5),
(5.0, 9.0, 26.0, 5),
(7.5, 4.0, 20.0, 5),
(7.5, 4.0, 6.0, 5),
(7.5, 9.0, 6.0, 5),
(7.5, 9.0, 48.0, 5),
(7.5, 9.0, 46.0, 5),
(7.5, 9.0, 36.0, 5),
(7.5, 6.0, 40.0, 5),
(7.5, 3.0, 28.0, 5),
(7.5, 3.0, 24.0, 5),
(7.5, 7.0, 14.0, 5),
(7.5, 7.0, 26.0, 5),
(7.5, 7.0, 48.0, 5),
(7.5, 7.0, 16.0, 5),
(10.0, 4.0, 42.0, 5),
(10.0, 9.0, 42.0, 5),
(10.0, 9.0, 6.0, 5),
(10.0, 9.0, 12.0, 5),
(10.0, 0.0, 16.0, 5),
(10.0, 0.0, 14.0, 5),
(10.0, 8.0, 28.0, 5),
(10.0, 8.0, 12.0, 5),
(10.0, 8.0, 34.0, 5),
(10.0, 6.0, 22.0, 5),
(10.0, 6.0, 44.0, 5),
(10.0, 6.0, 32.0, 5))
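# Presumably, for each specification, the pair of standard renotation hues
# ((hue_cw, code_cw), (hue_ccw, code_ccw)) bounding it on the hue circle.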
MUNSELL_BOUNDING_HUES = (
((2.5, 4), (2.5, 4)),
((2.5, 4), (2.5, 4)),
((2.5, 4), (2.5, 4)),
((5.0, 4), (7.5, 4)),
((5.0, 4), (7.5, 4)),
((5.0, 4), (7.5, 4)),
((5.0, 4), (7.5, 4)),
((5.0, 4), (7.5, 4)),
((5.0, 4), (7.5, 4)),
((7.5, 4), (10, 4)),
((7.5, 4), (10, 4)),
((7.5, 4), (10, 4)),
((7.5, 4), (10, 4)),
((7.5, 4), (10, 4)),
((7.5, 4), (10, 4)),
((7.5, 4), (10, 4)),
((7.5, 4), (10, 4)),
((7.5, 4), (10, 4)),
((7.5, 4), (10, 4)),
((7.5, 4), (10, 4)),
((7.5, 4), (10, 4)),
((7.5, 4), (10, 4)),
((10.0, 4), (10.0, 4)),
((7.5, 4), (10, 4)),
((7.5, 4), (10, 4)),
((10.0, 4), (10.0, 4)),
((10.0, 4), (10.0, 4)),
((10.0, 4), (10.0, 4)),
((10.0, 4), (10.0, 4)),
((10.0, 4), (10.0, 4)),
((7.5, 4), (10, 4)),
((10.0, 4), (10.0, 4)),
((7.5, 4), (10, 4)),
((2.5, 3), (5.0, 3)),
((2.5, 3), (5.0, 3)),
((2.5, 3), (5.0, 3)),
((2.5, 3), (2.5, 3)),
((2.5, 3), (5.0, 3)),
((2.5, 3), (5.0, 3)),
((2.5, 3), (5.0, 3)),
((2.5, 3), (5.0, 3)),
((5.0, 3), (7.5, 3)),
((5.0, 3), (7.5, 3)),
((5.0, 3), (7.5, 3)),
((5.0, 3), (7.5, 3)),
((5.0, 3), (7.5, 3)),
((5.0, 3), (7.5, 3)),
((5.0, 3), (7.5, 3)),
((5.0, 3), (7.5, 3)),
((7.5, 3), (10, 3)),
((7.5, 3), (10, 3)),
((7.5, 3), (10, 3)),
((7.5, 3), (10, 3)),
((7.5, 3), (10, 3)),
((7.5, 3), (10, 3)),
((7.5, 3), (10, 3)),
((7.5, 3), (10, 3)),
((7.5, 3), (10, 3)),
((7.5, 3), (10, 3)),
((7.5, 3), (10, 3)),
((7.5, 3), (10, 3)),
((7.5, 3), (10, 3)),
((7.5, 3), (10, 3)),
((10.0, 3), (10.0, 3)),
((7.5, 3), (10, 3)),
((7.5, 3), (10, 3)),
((7.5, 3), (10, 3)),
((10.0, 3), (10.0, 3)),
((10.0, 3), (10.0, 3)),
((7.5, 3), (10, 3)),
((10.0, 3), (10.0, 3)),
((10.0, 3), (10.0, 3)),
((10.0, 3), (10.0, 3)),
((2.5, 2), (2.5, 2)),
((2.5, 2), (2.5, 2)),
((2.5, 2), (5.0, 2)),
((2.5, 2), (5.0, 2)),
((2.5, 2), (2.5, 2)),
((2.5, 2), (5.0, 2)),
((2.5, 2), (5.0, 2)),
((5.0, 2), (7.5, 2)),
((5.0, 2), (7.5, 2)),
((5.0, 2), (7.5, 2)),
((5.0, 2), (7.5, 2)),
((5.0, 2), (7.5, 2)),
((5.0, 2), (7.5, 2)),
((5.0, 2), (7.5, 2)),
((5.0, 2), (7.5, 2)),
((5.0, 2), (7.5, 2)),
((5.0, 2), (7.5, 2)),
((7.5, 2), (10, 2)),
((7.5, 2), (10, 2)),
((10.0, 2), (10.0, 2)),
((10.0, 2), (10.0, 2)),
((7.5, 2), (10, 2)),
((7.5, 2), (10, 2)),
((2.5, 1), (5.0, 1)),
((2.5, 1), (2.5, 1)),
((2.5, 1), (2.5, 1)),
((2.5, 1), (2.5, 1)),
((2.5, 1), (2.5, 1)),
((2.5, 1), (5.0, 1)),
((2.5, 1), (2.5, 1)),
((2.5, 1), (5.0, 1)),
((2.5, 1), (2.5, 1)),
((2.5, 1), (2.5, 1)),
((2.5, 1), (5.0, 1)),
((2.5, 1), (2.5, 1)),
((2.5, 1), (2.5, 1)),
((2.5, 1), (2.5, 1)),
((2.5, 1), (5.0, 1)),
((5.0, 1), (7.5, 1)),
((5.0, 1), (7.5, 1)),
((5.0, 1), (7.5, 1)),
((5.0, 1), (7.5, 1)),
((5.0, 1), (7.5, 1)),
((5.0, 1), (7.5, 1)),
((5.0, 1), (7.5, 1)),
((5.0, 1), (7.5, 1)),
((7.5, 1), (10, 1)),
((7.5, 1), (10, 1)),
((7.5, 1), (10, 1)),
((7.5, 1), (10, 1)),
((7.5, 1), (10, 1)),
((7.5, 1), (10, 1)),
((7.5, 1), (10, 1)),
((7.5, 1), (10, 1)),
((10.0, 1), (10.0, 1)),
((7.5, 1), (10, 1)),
((10.0, 1), (10.0, 1)),
((10.0, 1), (10.0, 1)),
((2.5, 10), (5.0, 10)),
((2.5, 10), (5.0, 10)),
((2.5, 10), (2.5, 10)),
((2.5, 10), (2.5, 10)),
((2.5, 10), (2.5, 10)),
((2.5, 10), (2.5, 10)),
((2.5, 10), (2.5, 10)),
((2.5, 10), (2.5, 10)),
((5.0, 10), (7.5, 10)),
((5.0, 10), (7.5, 10)),
((5.0, 10), (7.5, 10)),
((5.0, 10), (7.5, 10)),
((5.0, 10), (7.5, 10)),
((5.0, 10), (7.5, 10)),
((5.0, 10), (7.5, 10)),
((5.0, 10), (7.5, 10)),
((5.0, 10), (7.5, 10)),
((5.0, 10), (7.5, 10)),
((5.0, 10), (7.5, 10)),
((5.0, 10), (7.5, 10)),
((5.0, 10), (7.5, 10)),
((5.0, 10), (7.5, 10)),
((5.0, 10), (7.5, 10)),
((5.0, 10), (7.5, 10)),
((5.0, 10), (7.5, 10)),
((5.0, 10), (7.5, 10)),
((5.0, 10), (7.5, 10)),
((5.0, 10), (7.5, 10)),
((7.5, 10), (10, 10)),
((7.5, 10), (10, 10)),
((7.5, 10), (10, 10)),
((7.5, 10), (10, 10)),
((7.5, 10), (10, 10)),
((7.5, 10), (10, 10)),
((7.5, 10), (10, 10)),
((7.5, 10), (10, 10)),
((7.5, 10), (10, 10)),
((7.5, 10), (10, 10)),
((7.5, 10), (10, 10)),
((7.5, 10), (10, 10)),
((10.0, 10), (10.0, 10)),
((7.5, 10), (10, 10)),
((10.0, 10), (10.0, 10)),
((10.0, 10), (10.0, 10)),
((7.5, 10), (10, 10)),
((7.5, 10), (10, 10)),
((10.0, 10), (10.0, 10)),
((7.5, 10), (10, 10)),
((7.5, 10), (10, 10)),
((10.0, 10), (10.0, 10)),
((10.0, 10), (10.0, 10)),
((2.5, 9), (2.5, 9)),
((2.5, 9), (2.5, 9)),
((2.5, 9), (5.0, 9)),
((2.5, 9), (5.0, 9)),
((2.5, 9), (2.5, 9)),
((2.5, 9), (5.0, 9)),
((2.5, 9), (2.5, 9)),
((2.5, 9), (5.0, 9)),
((2.5, 9), (2.5, 9)),
((2.5, 9), (2.5, 9)),
((2.5, 9), (5.0, 9)),
((2.5, 9), (5.0, 9)),
((5.0, 9), (7.5, 9)),
((5.0, 9), (7.5, 9)),
((5.0, 9), (7.5, 9)),
((5.0, 9), (7.5, 9)),
((5.0, 9), (7.5, 9)),
((5.0, 9), (7.5, 9)),
((5.0, 9), (7.5, 9)),
((5.0, 9), (7.5, 9)),
((5.0, 9), (7.5, 9)),
((5.0, 9), (7.5, 9)),
((5.0, 9), (7.5, 9)),
((5.0, 9), (7.5, 9)),
((5.0, 9), (7.5, 9)),
((5.0, 9), (7.5, 9)),
((5.0, 9), (7.5, 9)),
((5.0, 9), (7.5, 9)),
((5.0, 9), (7.5, 9)),
((7.5, 9), (10, 9)),
((7.5, 9), (10, 9)),
((7.5, 9), (10, 9)),
((7.5, 9), (10, 9)),
((7.5, 9), (10, 9)),
((7.5, 9), (10, 9)),
((7.5, 9), (10, 9)),
((7.5, 9), (10, 9)),
((7.5, 9), (10, 9)),
((7.5, 9), (10, 9)),
((7.5, 9), (10, 9)),
((7.5, 9), (10, 9)),
((7.5, 9), (10, 9)),
((7.5, 9), (10, 9)),
((10.0, 9), (10.0, 9)),
((10.0, 9), (10.0, 9)),
((10.0, 9), (10.0, 9)),
((10.0, 9), (10.0, 9)),
((7.5, 9), (10, 9)),
((7.5, 9), (10, 9)),
((7.5, 9), (10, 9)),
((7.5, 9), (10, 9)),
((10.0, 9), (10.0, 9)),
((7.5, 9), (10, 9)),
((10.0, 9), (10.0, 9)),
((10.0, 9), (10.0, 9)),
((2.5, 8), (2.5, 8)),
((2.5, 8), (5.0, 8)),
((2.5, 8), (5.0, 8)),
((2.5, 8), (5.0, 8)),
((2.5, 8), (2.5, 8)),
((2.5, 8), (5.0, 8)),
((5.0, 8), (7.5, 8)),
((5.0, 8), (7.5, 8)),
((5.0, 8), (7.5, 8)),
((5.0, 8), (7.5, 8)),
((5.0, 8), (7.5, 8)),
((5.0, 8), (7.5, 8)),
((5.0, 8), (7.5, 8)),
((7.5, 8), (10, 8)),
((7.5, 8), (10, 8)),
((7.5, 8), (10, 8)),
((7.5, 8), (10, 8)),
((7.5, 8), (10, 8)),
((7.5, 8), (10, 8)),
((10.0, 8), (10.0, 8)),
((7.5, 8), (10, 8)),
((10.0, 8), (10.0, 8)),
((7.5, 8), (10, 8)),
((7.5, 8), (10, 8)),
((10.0, 8), (10.0, 8)),
((10.0, 8), (10.0, 8)),
((10.0, 8), (10.0, 8)),
((7.5, 8), (10, 8)),
((2.5, 7), (5.0, 7)),
((2.5, 7), (5.0, 7)),
((2.5, 7), (2.5, 7)),
((2.5, 7), (5.0, 7)),
((2.5, 7), (2.5, 7)),
((2.5, 7), (5.0, 7)),
((2.5, 7), (2.5, 7)),
((5.0, 7), (7.5, 7)),
((5.0, 7), (7.5, 7)),
((5.0, 7), (7.5, 7)),
((5.0, 7), (7.5, 7)),
((5.0, 7), (7.5, 7)),
((5.0, 7), (7.5, 7)),
((5.0, 7), (7.5, 7)),
((5.0, 7), (7.5, 7)),
((7.5, 7), (10, 7)),
((7.5, 7), (10, 7)),
((7.5, 7), (10, 7)),
((7.5, 7), (10, 7)),
((7.5, 7), (10, 7)),
((7.5, 7), (10, 7)),
((10.0, 7), (10.0, 7)),
((7.5, 7), (10, 7)),
((10.0, 7), (10.0, 7)),
((10.0, 7), (10.0, 7)),
((10.0, 7), (10.0, 7)),
((10.0, 7), (10.0, 7)),
((10.0, 7), (10.0, 7)),
((2.5, 6), (5.0, 6)),
((2.5, 6), (5.0, 6)),
((2.5, 6), (2.5, 6)),
((2.5, 6), (2.5, 6)),
((2.5, 6), (2.5, 6)),
((5.0, 6), (7.5, 6)),
((5.0, 6), (7.5, 6)),
((5.0, 6), (7.5, 6)),
((5.0, 6), (7.5, 6)),
((5.0, 6), (7.5, 6)),
((5.0, 6), (7.5, 6)),
((5.0, 6), (7.5, 6)),
((7.5, 6), (10, 6)),
((7.5, 6), (10, 6)),
((7.5, 6), (10, 6)),
((7.5, 6), (10, 6)),
((10.0, 6), (10.0, 6)),
((10.0, 6), (10.0, 6)),
((10.0, 6), (10.0, 6)),
((10.0, 6), (10.0, 6)),
((10.0, 6), (10.0, 6)),
((10.0, 6), (10.0, 6)),
((7.5, 6), (10, 6)),
((10.0, 6), (10.0, 6)),
((7.5, 6), (10, 6)),
((7.5, 6), (10, 6)),
((7.5, 6), (10, 6)),
((7.5, 6), (10, 6)),
((2.5, 5), (5.0, 5)),
((2.5, 5), (2.5, 5)),
((2.5, 5), (2.5, 5)),
((2.5, 5), (5.0, 5)),
((2.5, 5), (2.5, 5)),
((2.5, 5), (2.5, 5)),
((2.5, 5), (2.5, 5)),
((2.5, 5), (5.0, 5)),
((5.0, 5), (7.5, 5)),
((5.0, 5), (7.5, 5)),
((5.0, 5), (7.5, 5)),
((5.0, 5), (7.5, 5)),
((5.0, 5), (7.5, 5)),
((5.0, 5), (7.5, 5)),
((5.0, 5), (7.5, 5)),
((7.5, 5), (10, 5)),
((7.5, 5), (10, 5)),
((7.5, 5), (10, 5)),
((7.5, 5), (10, 5)),
((7.5, 5), (10, 5)),
((7.5, 5), (10, 5)),
((7.5, 5), (10, 5)),
((7.5, 5), (10, 5)))
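# Presumably (hue, hue-letter code, hue angle in degrees) triples.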
MUNSELL_HUE_TO_ANGLE = (
(2.5, 1, 208.75),
(2.5, 2, 153.75),
(2.5, 3, 118.75),
(2.5, 4, 63.75),
(2.5, 5, 39.375),
(2.5, 6, 16.875),
(2.5, 7, 348.75),
(2.5, 8, 300.0),
(2.5, 9, 251.25),
(2.5, 10, 236.25),
(5.0, 1, 225.0),
(5.0, 2, 160.0),
(5.0, 3, 135.0),
(5.0, 4, 70.0),
(5.0, 5, 45.0),
(5.0, 6, 22.5),
(5.0, 7, 0.0),
(5.0, 8, 315.0),
(5.0, 9, 255.0),
(5.0, 10, 240.0),
(7.5, 1, 228.75),
(7.5, 2, 176.25),
(7.5, 3, 141.25),
(7.5, 4, 86.25),
(7.5, 5, 51.25),
(7.5, 6, 28.125),
(7.5, 7, 5.625),
(7.5, 8, 326.25),
(7.5, 9, 270.0),
(7.5, 10, 243.75),
(10.0, 1, 232.5),
(10.0, 2, 192.5),
(10.0, 3, 147.5),
(10.0, 4, 102.5),
(10.0, 5, 57.5),
(10.0, 6, 33.75),
(10.0, 7, 11.25),
(10.0, 8, 337.5),
(10.0, 9, 285.0),
(10.0, 10, 247.5))
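
# The triples above are consistent with the ASTM D1535 hue circle: mapping
# (hue, code) onto a single hue in [0, 10) and interpolating linearly over
# fixed angle breakpoints reproduces every angle in the table. The helper
# below is a minimal illustrative sketch of that relation (an assumption,
# not part of the original data or of any particular library API).
def _hue_angle_from_hue_and_code(hue, code):
    """Return the hue angle in degrees implied by a Munsell hue and code."""
    single_hue = ((17 - code) % 10 + hue / 10 - 0.5) % 10
    breakpoints = (0, 2, 3, 4, 5, 6, 8, 9, 10)
    angles = (0.0, 45.0, 70.0, 135.0, 160.0, 225.0, 255.0, 315.0, 360.0)
    for i in range(len(breakpoints) - 1):
        if breakpoints[i] <= single_hue <= breakpoints[i + 1]:
            span = breakpoints[i + 1] - breakpoints[i]
            t = (single_hue - breakpoints[i]) / span
            return angles[i] + t * (angles[i + 1] - angles[i])


# Sanity check: the sketch reproduces every (hue, code, angle) triple above.
assert all(
    abs(_hue_angle_from_hue_and_code(hue, code) - angle) < 1e-10
    for hue, code, angle in MUNSELL_HUE_TO_ANGLE
)


# Presumably (hue, hue-letter code, ASTM D1535 hue) triples.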
MUNSELL_HUE_TO_ASTM_HUE = (
(2.5, 0, 72.5),
(2.5, 1, 62.5),
(2.5, 2, 52.5),
(2.5, 3, 42.5),
(2.5, 4, 32.5),
(2.5, 5, 22.5),
(2.5, 6, 12.5),
(2.5, 7, 2.5),
(2.5, 8, 92.5),
(2.5, 9, 82.5),
(2.5, 10, 72.5),
(5.0, 0, 75.0),
(5.0, 1, 65.0),
(5.0, 2, 55.0),
(5.0, 3, 45.0),
(5.0, 4, 35.0),
(5.0, 5, 25.0),
(5.0, 6, 15.0),
(5.0, 7, 5.0),
(5.0, 8, 95.0),
(5.0, 9, 85.0),
(5.0, 10, 75.0),
(7.5, 0, 77.5),
(7.5, 1, 67.5),
(7.5, 2, 57.5),
(7.5, 3, 47.5),
(7.5, 4, 37.5),
(7.5, 5, 27.5),
(7.5, 6, 17.5),
(7.5, 7, 7.5),
(7.5, 8, 97.5),
(7.5, 9, 87.5),
(7.5, 10, 77.5),
(10.0, 0, 80.0),
(10.0, 1, 70.0),
(10.0, 2, 60.0),
(10.0, 3, 50.0),
(10.0, 4, 40.0),
(10.0, 5, 30.0),
(10.0, 6, 20.0),
(10.0, 7, 10.0),
(10.0, 8, 100.0),
(10.0, 9, 90.0),
(10.0, 10, 80.0))
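
# The ASTM hue column above follows the simple closed-form relation
# ASTM_hue = 10 * ((7 - code) % 10) + hue; the sketch below (an illustrative
# assumption, not part of the original data) checks this against the table.
def _astm_hue_from_hue_and_code(hue, code):
    """Return the ASTM D1535 hue implied by a Munsell hue and hue-letter code."""
    return 10 * ((7 - code) % 10) + hue


assert all(
    abs(_astm_hue_from_hue_and_code(hue, code) - astm_hue) < 1e-10
    for hue, code, astm_hue in MUNSELL_HUE_TO_ASTM_HUE
)


# Interpolation method ('Linear', 'Radial' or None) used on the renotation
# ovoid; presumably one entry per specification in a sequence above.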
MUNSELL_INTERPOLATION_METHODS = (
'Radial',
'Radial',
'Radial',
'Radial',
'Radial',
'Radial',
'Radial',
'Radial',
'Radial',
'Radial',
'Linear',
'Linear',
None,
None,
'Linear',
'Linear',
'Linear',
'Linear',
'Radial',
'Linear',
'Radial',
'Radial',
'Radial',
'Linear',
'Linear',
'Linear',
'Radial',
'Radial',
'Radial',
'Radial',
'Radial',
'Radial',
'Radial',
'Radial',
'Radial',
'Radial',
'Radial',
'Radial',
'Radial',
'Radial',
'Radial',
'Radial',
'Linear',
'Radial',
'Radial',
'Radial',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
None,
None,
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
None,
None,
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Radial',
'Radial',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Radial',
'Radial',
'Radial',
'Radial',
'Radial',
'Radial',
'Radial',
'Radial',
'Radial',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Radial',
'Linear',
'Linear',
'Radial',
None,
'Radial',
'Radial',
'Radial',
'Radial',
'Radial',
None,
None,
'Radial',
'Radial',
'Radial',
'Radial',
'Radial',
'Radial',
'Radial',
'Radial',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
None,
None,
None,
'Linear',
'Linear',
'Radial',
'Radial',
'Radial',
None,
None,
'Radial',
'Radial',
'Radial',
'Radial',
'Radial',
'Radial',
'Radial',
'Linear',
'Linear',
'Radial',
'Linear',
'Linear',
'Linear',
None,
None,
'Radial',
'Radial',
'Radial',
'Radial',
'Radial',
'Radial',
'Radial',
None,
None,
'Radial',
'Linear',
'Linear',
'Radial',
'Radial',
'Radial',
'Radial',
None,
None,
None,
None,
'Linear',
'Linear',
'Linear',
'Radial',
'Radial',
'Radial',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Radial',
'Linear',
'Radial',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Radial',
'Radial',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
None,
None,
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
None,
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
None,
None,
'Linear',
'Linear',
'Linear',
None,
'Linear',
'Linear',
None,
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
None,
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
None,
None,
None,
None,
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
None,
'Linear',
'Linear',
'Linear',
'Radial',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
None,
None,
None,
'Radial',
'Radial',
'Radial',
'Radial',
'Radial',
'Radial',
'Radial',
'Radial',
'Linear',
'Linear',
'Linear',
'Linear',
'Radial',
'Radial',
'Radial',
'Radial',
'Radial',
'Radial',
'Radial',
'Radial',
'Radial',
'Radial',
'Radial',
'Radial',
'Radial',
'Linear',
'Radial',
'Linear',
'Radial',
'Radial',
'Radial',
'Radial',
'Radial',
'Radial',
'Radial',
'Radial',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Radial',
'Linear',
'Linear',
'Linear',
'Linear',
'Radial',
'Linear',
'Radial',
'Radial',
'Linear',
'Radial',
'Linear',
'Radial',
'Radial',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Radial',
'Radial',
None,
None,
'Radial',
'Radial',
'Radial',
'Radial',
'Linear',
'Radial',
'Radial',
'Radial',
'Linear',
'Linear',
'Linear',
'Radial',
'Radial',
'Radial',
'Linear',
'Linear',
'Linear',
'Linear',
'Radial',
'Linear',
'Radial',
'Radial',
None,
None,
'Linear',
'Radial',
'Linear',
'Radial',
'Radial',
'Radial')
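# Presumably CIE xy chromaticity coordinates interpolated from the renotation
# ovoid (None where no value is defined).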
MUNSELL_XY_FROM_RENOTATION_OVOID = (
(0.4333, 0.5602),
None,
None,
None,
None,
None,
(0.3799, 0.447),
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
(0.3284, 0.3559),
(0.3722, 0.4669),
None,
None,
(0.274, 0.879),
None,
None,
(0.3395, 0.5913),
(0.303, 0.809),
None,
(0.345, 0.5949),
None,
None,
(0.345, 0.5949),
None,
(0.202, 0.807),
None,
None,
None,
None,
(0.168, 0.88),
(0.3123, 0.4732),
None,
(0.3092, 0.5095),
None,
(0.3128, 0.4175),
None,
(0.149, 0.681),
(0.1689, 0.6549),
None,
(0.206, 0.619),
None,
None,
(0.163, 0.67),
(0.163, 0.67),
(0.2952, 0.3851),
(0.069, 0.764),
(0.2574, 0.4814),
(0.123, 0.546),
(0.1397, 0.5312),
None,
(0.2554, 0.4087),
(0.2466, 0.4181),
None,
(0.2833, 0.3564),
None,
None,
(0.1516, 0.4505),
(0.1303, 0.4858),
(0.1841, 0.4448),
None,
(0.1688, 0.457),
(0.1982, 0.433),
None,
None,
(0.1262, 0.4667),
None,
(0.1022, 0.4759),
(0.1842, 0.4244),
(0.22, 0.3983),
(0.034, 0.546),
(0.2171, 0.4138),
(0.1398, 0.4168),
None,
(0.291, 0.331),
(0.069, 0.4542),
None,
None,
(0.1551, 0.4208),
None,
(0.0925, 0.4275),
None,
None,
(0.0333, 0.4444),
(0.2957, 0.3293),
(0.243, 0.371),
(0.2282, 0.3811),
(0.1866, 0.4086),
None,
(0.294, 0.3268),
None,
None,
None,
None,
(0.0636, 0.3788),
None,
None,
None,
(0.26, 0.3289),
None,
None,
(0.0781, 0.3211),
None,
(0.067, 0.32),
None,
None,
None,
(0.25, 0.3141),
None,
None,
None,
(0.122, 0.351),
None,
None,
(0.2234, 0.315),
None,
None,
None,
None,
(0.2768, 0.3287),
None,
(0.2094, 0.3165),
None,
None,
None,
None,
None,
None,
(0.068, 0.283),
None,
(0.092, 0.29),
(0.1961, 0.311),
None,
None,
(0.2035, 0.2956),
None,
None,
(0.1671, 0.2832),
(0.2035, 0.2956),
(0.1841, 0.2892),
(0.1937, 0.2978),
None,
None,
(0.2686, 0.313),
(0.212, 0.3025),
(0.118, 0.273),
(0.2501, 0.3118),
None,
None,
None,
None,
None,
None,
(0.1027, 0.2057),
None,
None,
None,
None,
None,
(0.065, 0.17),
None,
(0.2909, 0.3125),
(0.228, 0.296),
None,
None,
(0.2559, 0.2874),
None,
(0.1245, 0.1827),
None,
None,
None,
None,
(0.2616, 0.2857),
None,
None,
(0.098, 0.146),
None,
None,
None,
None,
None,
(0.2688, 0.2956),
(0.096, 0.126),
(0.1203, 0.1505),
None,
(0.1666, 0.1964),
None,
None,
None,
(0.128, 0.162),
None,
(0.128, 0.162),
None,
(0.084, 0.094),
None,
None,
None,
None,
None,
None,
None,
(0.1634, 0.1698),
None,
None,
None,
None,
None,
(0.1576, 0.16),
None,
(0.2758, 0.2879),
None,
None,
None,
None,
None,
(0.2991, 0.3057),
None,
None,
None,
None,
None,
(0.109, 0.079),
(0.2012, 0.1867),
(0.1285, 0.087),
(0.095, 0.027),
(0.1642, 0.0655),
(0.157, 0.034),
(0.159, 0.044),
None,
None,
(0.242, 0.2148),
(0.1762, 0.0955),
(0.161, 0.016),
None,
(0.2702, 0.2648),
None,
None,
None,
None,
None,
None,
(0.1918, 0.0379),
(0.22, 0.133),
(0.1925, 0.042),
(0.24, 0.196),
None,
None,
None,
None,
None,
(0.214, 0.143),
None,
(0.214, 0.143),
(0.194, 0.101),
(0.2265, 0.1671),
None,
(0.194, 0.101),
(0.2842, 0.255),
(0.2372, 0.1223),
(0.2806, 0.2444),
(0.218, 0.022),
(0.2277, 0.0621),
(0.22, 0.031),
(0.218, 0.022),
(0.2372, 0.098),
(0.2298, 0.0696),
(0.226, 0.0555),
(0.22, 0.031),
None,
(0.2881, 0.2671),
(0.296, 0.271),
None,
None,
(0.2701, 0.1178),
(0.254, 0.039),
(0.2559, 0.0525),
None,
None,
(0.3022, 0.2825),
(0.3022, 0.2825),
(0.2958, 0.2565),
(0.3093, 0.2555),
(0.3018, 0.1253),
(0.3088, 0.274),
(0.291, 0.06),
(0.3037, 0.1981),
None,
None,
None,
(0.3056, 0.206),
(0.3056, 0.206),
(0.337, 0.1756),
(0.321, 0.2686),
None,
(0.3078, 0.0839),
None,
None,
(0.3214, 0.2517),
None,
None,
(0.329, 0.2095),
(0.337, 0.08),
(0.3342, 0.1551),
None,
(0.414, 0.102),
None,
(0.3754, 0.1898),
(0.3929, 0.1506),
None,
None,
(0.383, 0.096),
(0.3711, 0.1449),
None,
None,
(0.473, 0.172),
(0.482, 0.162),
None,
None,
None,
None,
None,
(0.3708, 0.238),
(0.4104, 0.2361),
None,
None,
(0.4, 0.263),
(0.3431, 0.2988),
None,
(0.396, 0.286),
(0.4125, 0.2784),
(0.396, 0.286),
None,
(0.3512, 0.3052),
None,
None,
(0.513, 0.2101),
None,
(0.4799, 0.2329),
(0.57, 0.24),
None,
(0.5396, 0.2535),
(0.554, 0.246),
(0.5628, 0.2241),
(0.538, 0.2369),
(0.4218, 0.2864),
None,
None,
None,
None,
None,
None,
None,
(0.414, 0.302),
(0.376, 0.31),
None,
(0.5369, 0.281),
None,
(0.5898, 0.2622),
(0.3614, 0.3033),
None,
(0.5734, 0.2083),
None,
(0.4435, 0.3119),
None,
None,
None,
None,
None,
None,
None,
(0.5341, 0.3158),
(0.3805, 0.3244),
None,
None,
None,
None,
(0.466, 0.2888),
(0.6111, 0.229),
None,
None,
(0.6492, 0.3012),
None,
None,
(0.4738, 0.3316),
None,
None,
None,
(0.416, 0.35),
(0.416, 0.35),
None,
None,
(0.592, 0.374),
(0.5234, 0.37),
None,
(0.6409, 0.3533),
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
(0.602, 0.405),
None,
None,
None,
None,
None,
(0.684, 0.415),
(0.4674, 0.3738),
None,
None,
None,
(0.3437, 0.3397),
None,
None,
None,
None,
(0.4399, 0.4164),
(0.5179, 0.467),
None,
None,
None,
None,
(0.545, 0.458),
None,
None,
None,
None,
(0.545, 0.458),
(0.532, 0.478),
None,
(0.4517, 0.4421),
(0.516, 0.492),
(0.3761, 0.38),
(0.5049, 0.4843),
None,
None,
None,
None,
None,
None,
(0.483, 0.5092),
None,
None,
(0.4905, 0.5038),
None,
None,
None,
None,
(0.4745, 0.481),
(0.3378, 0.3504),
None,
None,
(0.4331, 0.4688),
(0.3811, 0.4123),
None,
None,
None,
None,
None,
None,
(0.4652, 0.5128),
None,
None,
(0.4728, 0.5215),
None,
None,
(0.3761, 0.4155),
(0.4271, 0.492),
None,
None,
None,
(0.4341, 0.502),
None,
None,
None,
None)
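# Presumably ((hue, value, chroma, code) specification, (x, y)) pairs giving
# the expected CIE xy chromaticity coordinates for each specification.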
MUNSELL_SPECIFICATIONS_TO_XY = (
((2.5, 8.0, 11.928546308350969, 4),
(0.41492483295053395, 0.5123568112702328)),
((2.5, 6.0, 6.923610826208884, 4),
(0.38945937205126197, 0.46616492464383436)),
((3.0588107073010358, 6.0, 14.50196667770457, 4),
(0.42971427832243203, 0.5665812104155267)),
((2.5, 5.0, 2.0, 4), (0.3352, 0.3636)),
((5.613007062442384, 8.0, 18.56590894044391, 4),
(0.39909107942315, 0.5888839244592698)),
((5.845640071004907, 8.0, 5.782325614552295, 4),
(0.3476544320849577, 0.415451228906664)),
((5.780794121059599, 3.0, 3.3492086825591487, 4),
(0.3397413408138117, 0.4168925891450931)),
((5.483684299639117, 4.0, 5.761459062506715, 4),
(0.3624285934444264, 0.4768182836607108)),
((5.809580308813496, 6.0, 6.662613753958899, 4),
(0.35692510294957597, 0.4554043675955618)),
((5.209252955662903, 3.0, 5.141472643810014, 4),
(0.36331762907025356, 0.4808425142327246)),
((7.706105853911573, 3.0, 11.396648897274897, 4),
(0.3131610795605752, 0.693666965393307)),
((8.117640564564343, 3.0, 3.1653563832640272, 4),
(0.3191717580108967, 0.3988687571291857)),
((7.8731203012311255, 3.0, 13.241107969297714, 4),
(0.2945431122633574, 0.7584911897992045)),
((8.04983322214289, 2.0, 7.501924679081063, 4),
(0.3074690943666493, 0.6094306766573039)),
((8.355307569391062, 3.0, 11.925441344336392, 4),
(0.2925864520380243, 0.7013043294887223)),
((8.342795760577609, 1.0, 6.298818145909256, 4),
(0.2563315496261501, 0.7193319941337727)),
((7.5947244020062845, 2.0, 4.626613135331287, 4),
(0.32400993792893495, 0.47353910662213033)),
((8.19517786608579, 9.0, 23.571122010181508, 4),
(0.3393737554129363, 0.6530759138839242)),
((7.754763634912469, 8.0, 21.00944901061068, 4),
(0.3524522658493676, 0.6357582251092715)),
((9.010231962978862, 6.0, 6.803370568930175, 4),
(0.3215881688765898, 0.4404084623233154)),
((9.041566851651622, 6.0, 17.010037203566448, 4),
(0.3077087669482304, 0.6419582684061792)),
((9.915652169827913, 9.0, 11.13108215988432, 4),
(0.3156394420758696, 0.47190649633818943)),
((10.0, 9.0, 27.322046186799103, 4),
(0.27105079215940403, 0.6981866004283883)),
((9.961336111598143, 8.0, 13.20009863344056, 4),
(0.3110050190589885, 0.512285067166623)),
((9.887406551063181, 8.0, 2.0660963235598375, 4),
(0.3124556963039024, 0.3470190461737042)),
((10.0, 3.0, 2.5700932200974145, 4),
(0.30780233686482955, 0.37333504024765457)),
((10.0, 3.0, 13.514066607169514, 4),
(0.23432557407109803, 0.7247335078491779)),
((10.0, 5.0, 12.753899774963989, 4),
(0.29015511114768366, 0.5923266098579271)),
((10.0, 6.0, 15.244598276849418, 4),
(0.29059930775417764, 0.6049052757954609)),
((10.0, 5.0, 27.929001019877095, 4),
(0.1548874872515363, 0.9471895260068659)),
((9.757039645743053, 6.0, 3.4112871270786895, 4),
(0.31331400537253434, 0.37344423041130265)),
((10.0, 6.0, 24.86360130658431, 4),
(0.23238659150720198, 0.7984084248251019)),
((9.862075817629322, 4.0, 7.67196809500038, 4),
(0.3036210954390051, 0.5020744140554851)),
((3.2140937198013564, 9.0, 3.4367939376082868, 3),
(0.30085241356011383, 0.3526678514951116)),
((3.1967035260607033, 9.0, 24.78003138905329, 3),
(0.2002744068603639, 0.577845271609722)),
((2.5, 9.0, 27.736581704977635, 3),
(0.19384392806515655, 0.6398389804597316)),
((2.7908763449337677, 8.0, 20.868304564027603, 3),
(0.22423490392017478, 0.5571230146385515)),
((3.221499566897477, 6.0, 5.467726257137659, 3),
(0.2873858207778558, 0.3848413427300203)),
((2.622512070432247, 6.0, 19.364472252973304, 3),
(0.19637966981582017, 0.5896085735184569)),
((3.2873061024849806, 5.0, 19.855724192587914, 3),
(0.15107222663217493, 0.6036632631469063)),
((5.727612405003367, 3.0, 10.746642552166502, 3),
(0.17920676997174168, 0.4665320350934196)),
((5.347955701149093, 3.0, 18.900471815194905, 3),
(0.07543521561825134, 0.5598764933852344)),
((5.7385751713204325, 4.0, 4.223160837759656, 3),
(0.27341911934324464, 0.3703759727313385)),
((5.720824103581511, 2.0, 4.878068159363519, 3),
(0.2467519504501329, 0.396042965876929)),
((5.316780024484356, 1.0, 8.043957606541364, 3),
(0.05387242044052574, 0.5623155400405534)),
((5.7623230008312385, 2.0, 9.507411716255689, 3),
(0.16257886565663734, 0.4748117643951303)),
((5.569555328848931, 2.0, 17.594491934810442, 3),
(-0.005438870615513399, 0.5989078963083921)),
((5.985579505387595, 2.0, 14.803434527189347, 3),
(0.05088199721408976, 0.5508235640922985)),
((5.461619603420755, 3.0, 6.6471547360970025, 3),
(0.2368393501455289, 0.4149076913619224)),
((8.479050960745253, 3.0, 19.932170607445244, 3),
(0.055960429731629305, 0.4808828600574318)),
((7.838277926195208, 3.0, 6.238528025218592, 3),
(0.23006932476384195, 0.3896583956403046)),
((8.2830613968175, 3.0, 10.350825174769154, 3),
(0.17154862793173067, 0.42348961815670966)),
((7.603155032355272, 6.0, 29.139541165198704, 3),
(0.07552585363483849, 0.5167802829864803)),
((8.324115039527976, 7.0, 23.515778973195257, 3),
(0.1410408872001719, 0.4624726907273586)),
((8.44424273124686, 7.0, 2.4130843113046656, 3),
(0.2935043828079583, 0.3347759364340525)),
((8.309061774521076, 6.0, 17.507252134514488, 3),
(0.16639630860705243, 0.44095922688344563)),
((7.698664797242625, 3.0, 18.828802660207376, 3),
(0.06976476518639417, 0.495578148736382)),
((8.14037117068092, 3.0, 14.649933295354042, 3),
(0.11587116950368091, 0.45887391411549944)),
((8.484903553213694, 2.0, 11.879562262633948, 3),
(0.10118404307076279, 0.4519866737136369)),
((8.454109029623016, 2.0, 4.606317173304252, 3),
(0.23983680211973246, 0.37222196630379506)),
((8.305262429168986, 5.0, 3.9045072719017924, 3),
(0.27629481645975607, 0.3507630813736354)),
((8.189730004579287, 5.0, 28.126992759236863, 3),
(0.05705292734056282, 0.5054724651556731)),
((7.54028778107475, 6.0, 6.635319193935916, 3),
(0.2612474304996075, 0.3719851145582656)),
((7.9629991342362985, 5.0, 20.293354805626866, 3),
(0.11711998926043284, 0.47473573591944374)),
((8.432959559038371, 6.0, 26.469970873757067, 3),
(0.0950374077805397, 0.48534709230317313)),
((10.0, 9.0, 6.0469956581432704, 3),
(0.2699968780049759, 0.35154672720525215)),
((9.771353946056914, 9.0, 20.82975271547889, 3),
(0.1703271759874031, 0.4176338874276043)),
((9.376380796522223, 9.0, 13.348522394106682, 3),
(0.22483106117061774, 0.3904997531995366)),
((9.912704179532229, 4.0, 25.778231770351923, 3),
(0.041682531090741895, 0.45638108279644746)),
((10.0, 5.0, 13.712247643370837, 3),
(0.16970415882749393, 0.4075044010703485)),
((10.0, 4.0, 28.587923360931033, 3),
(0.018590574793017255, 0.4614698084023276)),
((9.287535146732925, 4.0, 6.997389565284625, 3),
(0.22779618119117986, 0.3782363477013876)),
((10.0, 6.0, 30.932435068478792, 3),
(0.059472954520648456, 0.4677973052054364)),
((10.0, 5.0, 7.946854746461393, 3),
(0.23028991231427853, 0.372620011437199)),
((10.0, 6.0, 26.172410297895773, 3),
(0.09297071254878268, 0.45251723089368734)),
((2.5, 7.0, 12.643410557057086, 2),
(0.20473101026501478, 0.3654658906154655)),
((2.5, 3.0, 19.167537762557394, 2),
(0.05510943657077363, 0.3689588995456623)),
((3.284581774573411, 4.0, 10.316761862277126, 2),
(0.16617263015780218, 0.34847110450044866)),
((3.0814075494281132, 3.0, 4.031683724514751, 2),
(0.24102409919891465, 0.3343695993283068)),
((2.5342634625499727, 9.0, 14.863663104253218, 2),
(0.20177805681461625, 0.3722383864015372)),
((5.275920564662094, 9.0, 12.879135949769728, 2),
(0.20397359836905862, 0.342464374531538)),
((5.522856449128964, 9.0, 17.412586595686815, 2),
(0.16690331344259282, 0.3431822968019505)),
((5.885914939777947, 9.0, 17.388086814072437, 2),
(0.16584353646682692, 0.3393430032707385)),
((5.956560321967156, 4.0, 4.31169020481439, 2),
(0.24129709871769092, 0.3181985042671494)),
((5.6279111948942635, 5.0, 16.56681914443115, 2),
(0.11616623038399936, 0.3161181105644866)),
((5.8534547245334565, 5.0, 18.83506980508535, 2),
(0.09538175760437877, 0.3095289993435369)),
((5.445581146699364, 5.0, 25.690737024023207, 2),
(0.05808395140333093, 0.3095707855049915)),
((5.144720369630256, 5.0, 18.979172966805407, 2),
(0.09701692047501353, 0.32135192146478)),
((5.2907074463880175, 6.0, 13.598520998056053, 2),
(0.16894640868927147, 0.3308774206782637)),
((5.415844403197766, 6.0, 15.178617464461626, 2),
(0.15478320533987108, 0.32921474300090464)),
((8.204144852288245, 6.0, 4.020177691372295, 2),
(0.259276270369745, 0.3144147576406639)),
((9.366069953403018, 3.0, 15.422766182794579, 2),
(0.06961435752241517, 0.22151452459065538)),
((10.0, 4.0, 9.192387616705815, 2),
(0.1593065733661186, 0.2652494804914122)),
((10.0, 3.0, 15.954247893607032, 2),
(0.06533856558730797, 0.20620817208408798)),
((9.260586271537607, 3.0, 10.59517579170162, 2),
(0.1194483796728095, 0.2491723584774583)),
((9.571675864670619, 3.0, 17.398847531397934, 2),
(0.056120220665006354, 0.20987427459193636)),
((3.2387393821759787, 5.0, 3.7435106940988625, 1),
(0.2529831434272229, 0.29484575887521186)),
((2.5, 4.0, 7.399343614420917, 1),
(0.18304020679575475, 0.25792603874732756)),
((2.5, 4.0, 8.860840417367838, 1),
(0.1619064862820606, 0.24508285645237338)),
((2.5, 8.0, 10.887265616829124, 1),
(0.1982153399209648, 0.2800403945667933)),
((2.5, 7.0, 10.10233537418591, 1),
(0.19839199656426879, 0.2769729728229426)),
((2.6104349455855846, 8.0, 4.236171515065992, 1),
(0.26435154821318096, 0.30558752020931723)),
((2.5, 9.0, 5.3613636980274295, 1),
(0.253705681170712, 0.3036923862002273)),
((3.1731014606584806, 8.0, 15.199536235308303, 1),
(0.1580590485886202, 0.25147475798106617)),
((2.5, 7.0, 5.4202608625739925, 1),
(0.24791624789984437, 0.2982609826359614)),
((2.5, 8.0, 9.619364938403443, 1),
(0.21036828710980593, 0.28549866725870554)),
((3.252581509053177, 7.0, 6.2224060204343745, 1),
(0.2390630935153159, 0.29177058812605583)),
((2.5, 8.0, 12.261808397585057, 1),
(0.1856448040789573, 0.27399568137110875)),
((2.5, 3.0, 7.768505546917617, 1),
(0.15474603763604755, 0.23547281814409443)),
((2.5, 3.0, 6.998840911724095, 1),
(0.16686825564034552, 0.24336188065482803)),
((3.020562119690717, 3.0, 5.203087539105082, 1),
(0.19673848562818472, 0.2591147093725996)),
((5.2190911687613255, 3.0, 13.573887550967275, 1),
(0.0906282824426043, 0.15261288582990243)),
((5.5962506280473505, 2.0, 5.165106850365733, 1),
(0.18029987042327314, 0.22800579756695252)),
((5.078574838897358, 3.0, 6.8599427244043705, 1),
(0.17038770476932538, 0.22610259207324904)),
((5.1756171558445825, 3.0, 4.56080038214103, 1),
(0.20820572971121643, 0.25527794433698137)),
((5.497353020782844, 5.0, 2.0, 1),
(0.279552235480715, 0.30256072076105706)),
((5.841773513544001, 6.0, 13.28936566781855, 1),
(0.15805095852659043, 0.219578147164172)),
((5.580549185463668, 7.0, 16.1803201492634, 1),
(0.14439615742800296, 0.21383831068232395)),
((5.287772726922527, 7.0, 14.098946461580404, 1),
(0.16108942091896006, 0.22841596148263876)),
((8.358221285614269, 5.0, 17.271563597297103, 1),
(0.11773949049199511, 0.15965371392065625)),
((7.87724479635977, 5.0, 5.598934346859475, 1),
(0.23076678125893227, 0.2641771899347817)),
((8.323336953587479, 5.0, 7.0881523668119195, 1),
(0.21347348637221167, 0.24827181533645154)),
((7.845486096299681, 5.0, 16.23379517928239, 1),
(0.122698773315744, 0.168346491930928)),
((8.020564429287921, 5.0, 18.390260797283936, 1),
(0.10913570150779599, 0.15334234592862114)),
((8.382569502344943, 5.0, 13.97512411087629, 1),
(0.14368321354779087, 0.18508081572797264)),
((7.855593749782354, 3.0, 5.360435825061775, 1),
(0.19862762162502848, 0.23353318934066486)),
((7.655501153733914, 4.0, 9.769593047963392, 1),
(0.1631153136408173, 0.20467585514331496)),
((7.653019158008493, 6.0, 11.704589766625281, 1),
(0.17675687521357686, 0.22227909812729865)),
((9.843286146335094, 3.0, 15.473317400474043, 1),
(0.09727706414766314, 0.11902807291267123)),
((10.0, 3.0, 5.415846167802247, 1),
(0.20244200747389485, 0.22588706133330697)),
((9.474422358368296, 3.0, 15.178161395592507, 1),
(0.09722523557671268, 0.12212902007311457)),
((9.196648156004963, 8.0, 5.069223366759241, 1),
(0.259590110606016, 0.2847030494945458)),
((9.59661128432634, 8.0, 11.180193797198104, 1),
(0.19725272984671793, 0.23273775140666494)),
((10.0, 6.0, 7.76280231640685, 1),
(0.22139057567772807, 0.24895849892069766)),
((10.0, 7.0, 18.37437538640251, 1),
(0.1385665599883837, 0.17337937229518244)),
((2.8739501345809, 4.0, 4.494521106912674, 10),
(0.24363463440447483, 0.2527864036169822)),
((2.979763831715893, 9.0, 6.426710793964199, 10),
(0.2536062796096411, 0.2679911028334137)),
((2.5, 9.0, 2.491252841450378, 10),
(0.29221903195440846, 0.3020506629214542)),
((2.5, 8.0, 8.82337986403619, 10),
(0.2291778912916562, 0.2455895812236743)),
((2.5, 5.0, 18.83933997786449, 10),
(0.13113805913613338, 0.1355442901438808)),
((2.5, 5.0, 5.417711811598947, 10),
(0.24334188621371236, 0.2555545429854522)),
((2.5, 9.0, 8.04535672534691, 10),
(0.23865982455989818, 0.25668250292257166)),
((2.5, 3.0, 16.195810159806815, 10),
(0.11192304412106252, 0.10562932888135229)),
((5.6678871626197305, 2.0, 18.226010811743183, 10),
(0.12710395272558006, 0.06481848713426154)),
((5.759673840199206, 2.0, 30.42873152741525, 10),
(0.11709221638289552, 0.036741995906091535)),
((5.783634661463273, 2.0, 21.480194214511137, 10),
(0.12525468287616728, 0.054567458290390045)),
((5.118173248862928, 2.0, 41.86847335857883, 10),
(0.09079570140926681, 0.021796051692063684)),
((5.757349724389667, 2.0, 13.609604267804956, 10),
(0.14301420657653752, 0.09199523693485535)),
((5.279304061296045, 5.0, 22.876127528048663, 10),
(0.13458758534346954, 0.10713223523452234)),
((5.715709801059808, 5.0, 30.360213488022158, 10),
(0.11886049487302211, 0.06981616463630416)),
((5.947947304520848, 4.0, 4.8966439066197935, 10),
(0.2500490236976596, 0.24288778972563252)),
((5.09899322481724, 5.0, 26.26875042475258, 10),
(0.11709613281836206, 0.08769066848517765)),
((5.53222949762985, 5.0, 7.756449262721482, 10),
(0.2311862502859707, 0.22551387883678542)),
((5.923584541768192, 5.0, 19.567605030849386, 10),
(0.15795418059599556, 0.1254715070218464)),
((5.950156387030171, 2.0, 4.953666946161412, 10),
(0.218650721667177, 0.1994067474023979)),
((5.614158136535322, 2.0, 20.644953904366893, 10),
(0.12233990218571343, 0.05689997242979489)),
((5.435908140730638, 3.0, 21.585064332200393, 10),
(0.12301577743313093, 0.07397033989544444)),
((5.539908561343329, 3.0, 44.90369903995316, 10),
(0.1037616438284486, 0.027945682306968186)),
((5.3792514320991325, 2.0, 25.88907455882873, 10),
(0.10873545894297332, 0.04236234760922594)),
((5.632909830682246, 6.0, 21.384042506861697, 10),
(0.15459252917395186, 0.1315791146519223)),
((5.20332651493292, 6.0, 15.514467427422431, 10),
(0.17721170525528537, 0.168150476585649)),
((5.927793692134072, 5.0, 3.7766395197414253, 10),
(0.27134805483093305, 0.27045711680975104)),
((5.817322396187511, 5.0, 11.31804158090752, 10),
(0.20484725163482398, 0.1908000868612774)),
((7.887786042250045, 1.0, 12.574240714561657, 10),
(0.18031303615086874, 0.07804201835995273)),
((7.949960633591607, 3.0, 25.621368902089333, 10),
(0.17044491341806606, 0.06725474122671778)),
((8.382592436810759, 2.0, 40.54127195292601, 10),
(0.17178949078589845, 0.024188092070610984)),
((7.96379736332257, 2.0, 36.70731870996695, 10),
(0.16770209089072902, 0.02993902193504958)),
((8.373924456610474, 2.0, 8.623846064990166, 10),
(0.20728444801820906, 0.14388370971167386)),
((8.151990686473388, 2.0, 42.229127196458144, 10),
(0.16904149941391258, 0.021656309205312783)),
((8.488384085232076, 8.0, 9.779628072315807, 10),
(0.2488450360455433, 0.22627788517996875)),
((8.438357068876163, 3.0, 26.873452492074044, 10),
(0.17599559283280108, 0.0648395414079383)),
((8.309434906530441, 2.0, 48.49966399344499, 10),
(0.16957295408937242, 0.012250504009832514)),
((7.7115794149655015, 3.0, 5.729859843354196, 10),
(0.23559517277851352, 0.20522005774679514)),
((7.6273740879401934, 2.0, 26.724973070776922, 10),
(0.16657953614593823, 0.047841689338012895)),
((7.693923337226084, 3.0, 48.407897505690485, 10),
(0.15852007773506502, 0.024265876491785094)),
((10.0, 6.0, 10.97195381591066, 10),
(0.24914023092044668, 0.2089496110383951)),
((9.113097274740381, 6.0, 2.7564645951736484, 10),
(0.2924932485533538, 0.28804004921141624)),
((10.0, 9.0, 6.003388325186025, 10),
(0.2779813642114769, 0.26596442258554676)),
((10.0, 5.0, 19.170756721559698, 10),
(0.2142974946878668, 0.13766814885103174)),
((9.380110088755156, 6.0, 18.817507743754415, 10),
(0.21117041381027712, 0.14914374281863527)),
((9.001795946577033, 8.0, 4.453854563212078, 10),
(0.2859321611242305, 0.28009650987573115)),
((10.0, 8.0, 3.653159723688856, 10),
(0.29311167360260465, 0.2880429565835092)),
((9.046182896421445, 3.0, 22.300946806849847, 10),
(0.1878970718888041, 0.08063250752355927)),
((9.459420796383784, 3.0, 10.552556949414955, 10),
(0.21991607033340996, 0.1509982577104353)),
((10.0, 3.0, 31.2476220198124, 10),
(0.19305142678811255, 0.056344277243534656)),
((10.0, 3.0, 29.2734347311525, 10),
(0.1942359391613085, 0.06175274143556113)),
((2.5, 8.0, 8.375074375178261, 9),
(0.27887477687446527, 0.24546183806091346)),
((2.5, 7.0, 9.502846862649331, 9),
(0.27464003598072734, 0.23312580166748068)),
((2.8619005171223564, 7.0, 7.466126134628901, 9),
(0.28349638659529314, 0.2511139999686381)),
((3.0874221941355513, 8.0, 2.493857829360787, 9),
(0.30329446073589955, 0.29950346826291635)),
((2.5, 4.0, 19.77471678075617, 9),
(0.2398055097946389, 0.12335032186680327)),
((2.5, 3.0, 45.299844868071496, 9),
(0.21835007756596425, 0.029400310263857008)),
((3.220553507003754, 3.0, 37.05938066272616, 9),
(0.23069339455828514, 0.046593234792596326)),
((2.5, 4.0, 39.166418500944374, 9),
(0.21883358149905563, 0.05350074449716688)),
((2.7654037016841957, 3.0, 29.726535569137937, 9),
(0.22876621648675485, 0.06517243299208932)),
((2.5, 4.0, 12.087654687250128, 9),
(0.25568086328187467, 0.17236450351743657)),
((2.5, 3.0, 3.382852759577178, 9),
(0.28321145706274836, 0.2446297883631457)),
((2.836612137080781, 4.0, 2.0, 9),
(0.29704964788098587, 0.2808823856404053)),
((2.8888545547050946, 3.0, 14.618307037832857, 9),
(0.2477649838426708, 0.12970326868762677)),
((5.164399331990519, 6.0, 9.111465383743912, 9),
(0.28951764964116206, 0.23371824784650205)),
((5.954180129965368, 6.0, 34.844915916827865, 9),
(0.2714161513182527, 0.09857200508129484)),
((5.500356903003388, 7.0, 13.154128131968298, 9),
(0.28721219988452035, 0.2147291790311368)),
((5.777207914079591, 6.0, 29.94398353538339, 9),
(0.27203705163012554, 0.11592402886317606)),
((5.535810057742433, 7.0, 8.892716664134475, 9),
(0.2942708344242646, 0.2451099871340891)),
((5.590040966343994, 4.0, 22.75661278689855, 9),
(0.27287420887848585, 0.12066815247186011)),
((5.282620261743346, 3.0, 18.732823688754383, 9),
(0.26860898171122366, 0.11390224239719735)),
((5.172895640160181, 3.0, 6.2292543458148515, 9),
(0.2877332357942354, 0.2114241583957755)),
((5.259721854731981, 3.0, 35.890872110681414, 9),
(0.2577945586528, 0.05159015452853822)),
((5.5536463415959245, 3.0, 10.076683709549055, 9),
(0.28257922939755636, 0.1719757532037015)),
((5.730003972159145, 2.0, 15.985698390269977, 9),
(0.273099534393889, 0.10636788604049591)),
((5.782381516990652, 2.0, 28.774618518379302, 9),
(0.26483796474476695, 0.050195747909618664)),
((5.069379781665461, 7.0, 2.2194841714206595, 9),
(0.3055058860092272, 0.2991172470460963)),
((5.903716333756614, 6.0, 35.50557429199497, 9),
(0.27002853761437307, 0.09626706166005734)),
((5.1346796709796605, 6.0, 27.726398643923417, 9),
(0.2627660248271106, 0.12221521725840637)),
((5.383260687864624, 6.0, 18.302295934127923, 9),
(0.2779886327084519, 0.17403664791365248)),
((5.869792088464701, 5.0, 32.55343216796663, 9),
(0.27112789885723526, 0.09796127335276403)),
((5.462451143540612, 6.0, 30.948864634440213, 9),
(0.2654630166563432, 0.10997924832348281)),
((5.357445269639698, 6.0, 5.261434469006405, 9),
(0.2990173572087526, 0.26625184527715584)),
((5.626373453003034, 6.0, 25.170846666445236, 9),
(0.27422758103121975, 0.13772616835371584)),
((8.284200895164993, 2.0, 17.238899804160177, 9),
(0.3015541927190563, 0.10594069228544409)),
((8.318102784124019, 2.0, 22.596147383535918, 9),
(0.29986823051730344, 0.07933946724011191)),
((7.851936866242713, 7.0, 20.962374407911458, 9),
(0.31278104464771295, 0.18168350176147405)),
((8.146081336032703, 8.0, 13.533962918469337, 9),
(0.3169359353603149, 0.2297919580402022)),
((8.09720864316275, 7.0, 17.33899155052454, 9),
(0.31638806878498915, 0.20284841728512604)),
((7.830256291991797, 7.0, 10.706822163825924, 9),
(0.3131765403497294, 0.24112945829089102)),
((7.80065897068848, 6.0, 6.211375680877805, 9),
(0.31162664368160353, 0.2642498962824071)),
((8.044863647118635, 7.0, 15.557155261544228, 9),
(0.31566706911724673, 0.21257488877131692)),
((8.461774802909071, 5.0, 36.03729693977732, 9),
(0.3193853258545449, 0.09829269654684747)),
((7.612382882207284, 4.0, 14.168690780706225, 9),
(0.30487133599504435, 0.1749148969907045)),
((8.169633927695997, 4.0, 27.23584610386441, 9),
(0.31018864589103556, 0.11196625320524731)),
((9.602031136015775, 6.0, 20.5806356758181, 9),
(0.3358702478125188, 0.18315683261358986)),
((9.663686030178818, 6.0, 29.047658472982956, 9),
(0.3420516528840192, 0.14510390621759314)),
((9.75292854736471, 5.0, 34.11493160528129, 9),
(0.3460058496414126, 0.11306352412679725)),
((10.0, 6.0, 4.216215730437086, 9),
(0.31858648539348344, 0.2854243280891126)),
((10.0, 6.0, 34.72852675583916, 9),
(0.35136426337791954, 0.1268144197324825)),
((10.0, 5.0, 14.779627294882367, 9),
(0.3368575900243706, 0.20211714305442638)),
((10.0, 6.0, 2.0, 9), (0.3146, 0.3018)),
((9.49705091394873, 6.0, 10.80885478009873, 9),
(0.32614459837684673, 0.23803767844806178)),
((9.826635163465532, 2.0, 7.06711443184985, 9),
(0.31994127256104854, 0.19825489523721926)),
((9.382502350301259, 5.0, 19.999476877446362, 9),
(0.3328136867226051, 0.17031645852096106)),
((9.115530591819274, 5.0, 5.883436488694818, 9),
(0.3188381363847329, 0.2614152937200581)),
((10.0, 5.0, 24.745870232952445, 9),
(0.3456712832096572, 0.1519571163934759)),
((9.378359588580793, 5.0, 26.295787257422923, 9),
(0.33580634416152166, 0.14170543655286671)),
((10.0, 2.0, 21.57257635660235, 9),
(0.3230213711821699, 0.08825848939915812)),
((10.0, 3.0, 26.039872491235577, 9),
(0.3343139553719324, 0.09760462479294567)),
((2.5, 3.0, 4.712138166253982, 8),
(0.3435962977395826, 0.2553142252457729)),
((2.8874578666829285, 2.0, 13.994896052145748, 8),
(0.37835548166727323, 0.14725225859091387)),
((3.435419560439465, 2.0, 6.718989113532732, 8),
(0.35983522137260177, 0.22143958809308129)),
((2.9925336062737173, 2.0, 7.198014339866309, 8),
(0.3575475618962941, 0.21313809904749426)),
((2.5, 1.0, 14.156726710024465, 8), (0.3368, 0.10107531241085566)),
((2.6104579288975813, 1.0, 3.3458156268951917, 8),
(0.3281972508449899, 0.2218263920769044)),
((5.1670653045538115, 8.0, 2.1409481568506346, 8),
(0.31905079087740745, 0.31163900088406254)),
((5.054434114346951, 7.0, 6.442157332603133, 8),
(0.35015366076172083, 0.2933004270294575)),
((5.803735682450612, 9.0, 10.443841773523394, 8),
(0.37537800934530957, 0.2859037624969863)),
((5.044877539779968, 6.0, 18.424428701407553, 8),
(0.4277251999841766, 0.23640419314222352)),
((5.484832402621484, 7.0, 5.474777491295647, 8),
(0.3449818965080228, 0.2983629686835587)),
((5.162300427200289, 7.0, 24.999056248525125, 8),
(0.4499951490629939, 0.22267274868897197)),
((5.877256360743413, 7.0, 15.450444143259661, 8),
(0.4135138022364607, 0.2615018632684154)),
((8.197449080109873, 2.0, 2.0, 8),
(0.3479365513139208, 0.2910133676082857)),
((7.997237265754237, 2.0, 11.655829335806517, 8),
(0.45364925931660116, 0.19649613209426764)),
((7.973192560907184, 9.0, 4.272886886879181, 8),
(0.3382290806106464, 0.3100993156381419)),
((7.510355740108461, 8.0, 14.320141317950995, 8),
(0.41602573142890514, 0.2791113211989992)),
((7.836498646186221, 8.0, 13.596658717999025, 8),
(0.41449506838703515, 0.28339932777466953)),
((7.782186965908517, 9.0, 13.902105524067945, 8),
(0.4117119567839591, 0.28581945828988925)),
((9.531795266771761, 5.0, 2.0, 8),
(0.3325257851841513, 0.3124819697521387)),
((10.0, 5.0, 11.055624912778937, 8),
(0.44623696767281984, 0.2877358440858011)),
((9.312270837393163, 7.0, 11.185222099189973, 8),
(0.4115569799140528, 0.2962671589127124)),
((10.0, 7.0, 13.895455902446677, 8),
(0.44457546784397745, 0.29335613303900565)),
((9.925669940032272, 5.0, 7.2040789887667955, 8),
(0.39993423356459634, 0.3000028935899999)),
((9.416740882402403, 5.0, 8.720116348180492, 8),
(0.41420875274374974, 0.2926385477337663)),
((10.0, 4.0, 16.469698910991372, 8),
(0.5288485073674999, 0.2505105957717457)),
((10.0, 6.0, 6.599237233947309, 8),
(0.37969275372249944, 0.30632137297889483)),
((10.0, 5.0, 4.550269784467781, 8),
(0.36647096673041096, 0.30759681204960715)),
((9.970332530519679, 6.0, 10.837022722087644, 8),
(0.4235409167024141, 0.2965173827647608)),
((2.962707587174585, 9.0, 9.999116931630539, 7),
(0.4082112009458871, 0.317851549976094)),
((3.1672052728994915, 9.0, 7.383624729892915, 7),
(0.38120519554647453, 0.3201699383275524)),
((2.5, 5.0, 17.881593853007615, 7),
(0.5525791262360914, 0.28084994335857105)),
((2.7415018638966284, 6.0, 18.00290873780138, 7),
(0.5290289042496541, 0.2948216346860084)),
((2.5, 6.0, 10.232668996271492, 7),
(0.4348850955537665, 0.3113811958067113)),
((2.877902226185231, 6.0, 3.5582034231201787, 7),
(0.3519353725942859, 0.31712947263253355)),
((2.5, 6.0, 27.77999592691697, 7),
(0.5960099816711264, 0.2656600122192491)),
((5.412821771284458, 3.0, 7.258040020605607, 7),
(0.49172648760450915, 0.31621073924712606)),
((5.83754747605084, 3.0, 11.998261380615471, 7),
(0.5975448574102102, 0.29794809688903484)),
((5.9693975439749885, 4.0, 14.397906420283302, 7),
(0.5881617924445709, 0.31236418999305104)),
((5.004079000563381, 5.0, 22.736677614468775, 7),
(0.63920549388473, 0.28783077224442344)),
((5.168438425945292, 4.0, 4.844860547907693, 7),
(0.40839719509713, 0.32305082659991396)),
((5.863284315202094, 4.0, 23.489710023246513, 7),
(0.7027892157150165, 0.2695458294720114)),
((5.756333389411959, 9.0, 7.301135618422141, 7),
(0.39078475628981396, 0.32893785909586065)),
((5.108337403014788, 8.0, 11.359771531491097, 7),
(0.44539182306302255, 0.32701253286287824)),
((8.314898437378535, 9.0, 4.238233636005843, 7),
(0.3599697419936892, 0.33117366775661333)),
((7.729982986777109, 5.0, 24.923686571499648, 7),
(0.6880520401649296, 0.31336218352271883)),
((8.201460399608226, 4.0, 11.589840844520428, 7),
(0.5584840865313621, 0.33959721735842735)),
((7.595604919273442, 5.0, 6.798265747221928, 7),
(0.4338251159286378, 0.3369357659148767)),
((8.378186361828917, 9.0, 8.022357890675561, 7),
(0.4105838750780343, 0.34204796349468947)),
((8.300135000740797, 8.0, 14.433553547681656, 7),
(0.5019186699595175, 0.35172447128424394)),
((8.229270762113973, 9.0, 6.350022396927342, 7),
(0.38792000675351795, 0.33801569665443554)),
((10.0, 3.0, 3.1152259635487924, 7),
(0.405141552942915, 0.33686460722138906)),
((9.756267998308681, 3.0, 14.803384721914584, 7),
(0.6814476281796187, 0.3179618099654896)),
((10.0, 4.0, 13.90160960971739, 7),
(0.6136634096115119, 0.3568983903902826)),
((10.0, 8.0, 19.365358380679876, 7), (0.5632882254261189, 0.374)),
((10.0, 9.0, 6.218490965882184, 7),
(0.3910588735223506, 0.34456639744594064)),
((10.0, 8.0, 13.887493044276624, 7),
(0.5055374095755961, 0.36877498608855325)),
((10.0, 5.0, 14.68907159946693, 7),
(0.5862646522729101, 0.3661588249401866)),
((10.0, 5.0, 24.263442351912005, 7),
(0.704502702343164, 0.353473115296176)),
((10.0, 8.0, 13.518172354943417, 7),
(0.5007362406142645, 0.36803634470988683)),
((2.7455640547144746, 2.0, 5.569110673549164, 6),
(0.5147965927948814, 0.35947427079052985)),
((3.1452880891491906, 5.0, 8.595832717291, 6),
(0.49156559782999765, 0.3836139842529673)),
((2.5, 4.0, 4.950679151608691, 6),
(0.4364884940203847, 0.3603170842733587)),
((2.5, 4.0, 4.383231249423155, 6),
(0.42312509592391534, 0.3564868109336063)),
((2.5, 2.0, 3.307282274836235, 6),
(0.43396162885139156, 0.3458470682650791)),
((5.045583268005572, 9.0, 9.59194524860244, 6),
(0.4426976823901471, 0.3880493030502878)),
((5.594284526041456, 9.0, 10.197201238166286, 6),
(0.45008651645515263, 0.39500104997859575)),
((5.988802467213943, 8.0, 12.30595195616923, 6),
(0.48725749804679613, 0.4136999258156572)),
((5.425850947396252, 5.0, 8.046156862703112, 6),
(0.48370601248767936, 0.39930129085649035)),
((5.405852543210212, 6.0, 16.635714109554605, 6),
(0.5613340460279886, 0.4289299103499902)),
((5.369364240119585, 5.0, 7.340573827339962, 6),
(0.46958736593496786, 0.39345497811572305)),
((5.702045821590509, 5.0, 10.325652051724541, 6),
(0.5189311698950891, 0.41373924250477145)),
((5.411096326958829, 6.0, 5.292034843095026, 6),
(0.40946256871366055, 0.37022550255078585)),
((8.242968536635763, 9.0, 4.90020586532881, 6),
(0.38006673083868486, 0.3693561101855342)),
((8.238754570485817, 9.0, 5.94133011037865, 6),
(0.3940851904797918, 0.379080970224506)),
((8.39568424389748, 5.0, 9.461515968715135, 6),
(0.5006508183439433, 0.43147844246085765)),
((10.0, 5.0, 12.704963485646498, 6),
(0.5274094231965362, 0.462819853942586)),
((10.0, 5.0, 15.6753707607594, 6),
(0.5498899099449361, 0.47553916842341726)),
((10.0, 6.0, 3.506573388368494, 6),
(0.37697160768481713, 0.3696933421148326)),
((10.0, 6.0, 14.063922879568509, 6),
(0.5203515758376268, 0.46251414164655447)),
((10.0, 7.0, 3.128443413944953, 6),
(0.36320142718357795, 0.36035187523477064)),
((10.0, 7.0, 11.632405914314647, 6),
(0.4857175289017656, 0.4453349428787812)),
((9.050263182466011, 7.0, 17.08367694275979, 6),
(0.5287506410501914, 0.459929219239207)),
((10.0, 6.0, 4.736966947326921, 6),
(0.40006552365184517, 0.386391115357349)),
((9.409402543801862, 7.0, 6.28766021168659, 6),
(0.41478835014788323, 0.39548485637022385)),
((9.633394604006961, 8.0, 4.623044001702525, 6),
(0.37931316473145726, 0.3728258686141236)),
((9.020770192275748, 7.0, 13.422245014577644, 6),
(0.5060230048016112, 0.44738008068068885)),
((9.26317609686154, 7.0, 15.233295182477667, 6),
(0.517940485402123, 0.4564421027270388)),
((3.332782026387723, 7.0, 16.113419977677538, 5),
(0.49943843353399675, 0.4913166627882885)),
((2.5, 5.0, 6.5436496028361315, 5),
(0.446290656443251, 0.4355063353928991)),
((2.5, 6.0, 15.572129740854304, 5),
(0.5138820422172288, 0.4876949957096056)),
((2.5, 3.0, 2.0, 5), (0.3703, 0.37)),
((2.8285591842433737, 9.0, 21.473258817290873, 5),
(0.5043337880565358, 0.4951865962256489)),
((2.5, 8.0, 12.020108658634838, 5),
(0.4679648910008057, 0.4590236682506042)),
((2.5, 9.0, 14.42790441415372, 5),
(0.47578137869199916, 0.46735347427784546)),
((2.5, 5.0, 8.380243803410817, 5), (0.472682681837519, 0.455422938237116)),
((3.363079416671538, 5.0, 2.7755096642090313, 5),
(0.36889260512263344, 0.3743534718948429)),
((5.9271524261020545, 9.0, 20.603131952471927, 5),
(0.4802472251615271, 0.5159774928137729)),
((5.339079962653624, 8.0, 16.611574939424255, 5),
(0.4789877671483087, 0.5049439528400183)),
((5.347356764781598, 8.0, 15.41216519823205, 5),
(0.4745806920664243, 0.5005081883465945)),
((5.368950609634622, 7.0, 7.038165919924306, 5),
(0.41341154716192496, 0.4348136096758073)),
((5.063316239211655, 7.0, 16.01331933482103, 5),
(0.487164109418344, 0.5051543240966131)),
((5.929552854535908, 7.0, 7.57281344704806, 5),
(0.41853653124143764, 0.444243976557503)),
((5.72794655950891, 7.0, 10.668172633934036, 5),
(0.45318357111848423, 0.47904872268084375)),
((5.641782139668679, 6.0, 9.549016885745186, 5),
(0.4561067615040472, 0.4783274892995489)),
((5.344359642058747, 3.0, 5.430489560972486, 5),
(0.4516333592896905, 0.461109580193912)),
((7.749909297802317, 4.0, 4.268933751175051, 5),
(0.40175883449950806, 0.4334105720840665)),
((8.145409228909998, 5.0, 7.545633529064384, 5),
(0.435789245569801, 0.4810623452292749)),
((7.907253670159305, 6.0, 10.770986229289623, 5),
(0.4538016350466874, 0.5021167554370949)),
((7.592508492261312, 5.0, 4.933568344499713, 5),
(0.4009033326671016, 0.4323309706149007)),
((7.674872690410821, 5.0, 3.5502452884794837, 5),
(0.37590596292111755, 0.4014473524868083)),
((7.991979987062054, 7.0, 3.2837012487472252, 5),
(0.35678303301647424, 0.37978502351744886)),
((9.345599185286883, 7.0, 17.48852175788182, 5),
(0.46492781928598614, 0.537405269620011)),
((9.659595218511388, 8.0, 3.3572177484844636, 5),
(0.35143609296322403, 0.377417525766746)))
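# Note (added comment, not part of the original fixtures): each entry below is a
# CIE xyY colourspace value stored as np.array([x, y, Y]).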
MUNSELL_COLOURS_TO_XYY = (
np.array([0.41515095, 0.51288165, 0.5702441]),
np.array([0.38804358, 0.46299149, 0.31592072]),
np.array([0.33491518, 0.36277402, 0.22128409]),
np.array([0.39936353, 0.58547238, 0.64852094]),
np.array([0.34767896, 0.4152922, 0.58706989]),
np.array([0.33966055, 0.41527226, 0.07167165]),
np.array([0.36265912, 0.47966922, 0.11068168]),
np.array([0.35748002, 0.45915987, 0.2727359]),
np.array([0.36348032, 0.48213512, 0.06293782]),
np.array([0.30330033, 0.73038471, 0.05538644]),
np.array([0.33159302, 0.43388935, 0.89380734]),
np.array([0.31838794, 0.40167814, 0.05382145]),
np.array([0.27202005, 0.83522048, 0.04995375]),
np.array([0.31425413, 0.58372544, 0.04377268]),
np.array([0.27634942, 0.75063178, 0.05211431]),
np.array([0.258837, 0.71096717, 0.01266934]),
np.array([0.31405111, 0.53120144, 0.02111891]),
np.array([0.33914454, 0.6563647, 0.71217401]),
np.array([0.35328989, 0.63157007, 0.65497851]),
np.array([0.32167873, 0.43862617, 0.3080991]),
np.array([0.31168045, 0.6270064, 0.34717087]),
np.array([0.31496017, 0.47530248, 0.67920304]),
np.array([0.26882355, 0.70549119, 0.69614462]),
np.array([0.31107787, 0.51188895, 0.58306925]),
np.array([0.31254722, 0.34686238, 0.6334334]),
np.array([0.30880402, 0.37157402, 0.08263161]),
np.array([0.23582365, 0.72197618, 0.06667783]),
np.array([0.29476305, 0.57521949, 0.23583791]),
np.array([0.28891056, 0.61005165, 0.28191444]),
np.array([0.17590584, 0.91365, 0.23196178]),
np.array([0.31292041, 0.3752074, 0.25538037]),
np.array([0.22307972, 0.8153644, 0.2698602]),
np.array([0.30648167, 0.48754769, 0.15098549]),
np.array([0.30382174, 0.34089453, 0.84210967]),
np.array([0.28517207, 0.38369148, 0.89445395]),
np.array([0.20621151, 0.56369357, 0.77955867]),
np.array([0.2465848, 0.49294784, 0.87271533]),
np.array([0.22538285, 0.5564611, 0.60532773]),
np.array([0.28500017, 0.38833563, 0.24045742]),
np.array([0.19598037, 0.59002914, 0.29181101]),
np.array([0.16437784, 0.59069112, 0.23370301]),
np.array([0.17940333, 0.4663929, 0.06448045]),
np.array([0.07553293, 0.55981543, 0.06406275]),
np.array([0.27330162, 0.37048932, 0.11621278]),
np.array([0.23251367, 0.40832841, 0.02585745]),
np.array([0.05704598, 0.55990299, 0.01221862]),
np.array([0.09405428, 0.51916421, 0.02268015]),
np.array([0.06306305, 0.54336526, 0.0361037]),
np.array([0.23250342, 0.41833342, 0.0559913]),
np.array([0.22630523, 0.39163204, 0.05597116]),
np.array([0.15858055, 0.42916814, 0.05259972]),
np.array([0.07933408, 0.51474312, 0.30905098]),
np.array([0.14028772, 0.46282023, 0.41589047]),
np.array([0.29271668, 0.33531051, 0.37326792]),
np.array([0.17253811, 0.43786778, 0.33686994]),
np.array([0.09180367, 0.46823752, 0.05151176]),
np.array([0.10903846, 0.44893518, 0.03595462]),
np.array([0.2428693, 0.37094376, 0.04060119]),
np.array([0.27771166, 0.34994832, 0.23574564]),
np.array([0.05867972, 0.50502648, 0.19891229]),
np.array([0.25930387, 0.37349411, 0.26874577]),
np.array([0.12284826, 0.47211684, 0.21388094]),
np.array([0.0890682, 0.48703791, 0.27058998]),
np.array([0.27018357, 0.35138182, 0.76804186]),
np.array([0.22062535, 0.38110738, 0.85084234]),
np.array([0.26193025, 0.3581405, 0.86839733]),
np.array([0.0431053, 0.45634623, 0.12074655]),
np.array([0.16522669, 0.40881359, 0.18014875]),
np.array([0.02517244, 0.46138968, 0.1317301]),
np.array([0.23349872, 0.37536989, 0.14476492]),
np.array([0.05119965, 0.46839242, 0.26212526]),
np.array([0.2315995, 0.37207726, 0.20351563]),
np.array([0.08301372, 0.45335265, 0.25304755]),
np.array([0.20183026, 0.36561544, 0.39526058]),
np.array([0.06340759, 0.37121187, 0.07975536]),
np.array([0.16044634, 0.34707426, 0.10145605]),
np.array([0.24416648, 0.33434737, 0.07774819]),
np.array([0.28155768, 0.33248001, 0.89992977]),
np.array([0.28105936, 0.3327088, 0.88937678]),
np.array([0.25255297, 0.34594245, 0.87623351]),
np.array([0.20616318, 0.34192146, 0.77176579]),
np.array([0.21898553, 0.33335124, 0.85174026]),
np.array([0.19119679, 0.33526743, 0.80792502]),
np.array([0.29624596, 0.31950269, 0.96665647]),
np.array([0.24328961, 0.31868567, 0.12931978]),
np.array([0.10471116, 0.30938022, 0.15549815]),
np.array([0.0862452, 0.30268915, 0.15900713]),
np.array([0.10497041, 0.32451898, 0.22191645]),
np.array([0.16894641, 0.33087742, 0.29312371]),
np.array([0.16144965, 0.33133829, 0.34018592]),
np.array([0.25864013, 0.31415379, 0.28205753]),
np.array([0.07732853, 0.22846579, 0.08121964]),
np.array([0.15795868, 0.26417318, 0.11377678]),
np.array([0.06907834, 0.20994435, 0.0722573]),
np.array([0.12862477, 0.25616557, 0.08539517]),
np.array([0.05881481, 0.21256736, 0.07052095]),
np.array([0.25058288, 0.29329096, 0.17796585]),
np.array([0.18830894, 0.26192867, 0.13740285]),
np.array([0.1684076, 0.25029878, 0.13934697]),
np.array([0.1951648, 0.27716957, 0.51306785]),
np.array([0.19935306, 0.27783329, 0.44060477]),
np.array([0.26308512, 0.3046212, 0.52610451]),
np.array([0.2532416, 0.30291555, 0.67153139]),
np.array([0.15890128, 0.2532598, 0.59956247]),
np.array([0.24841933, 0.2986962, 0.43833832]),
np.array([0.2082133, 0.28356991, 0.52733609]),
np.array([0.23939654, 0.2920611, 0.43144538]),
np.array([0.18279859, 0.27122662, 0.52199238]),
np.array([0.16449512, 0.24371038, 0.08686299]),
np.array([0.16724393, 0.24366794, 0.06480227]),
np.array([0.19881487, 0.26071106, 0.06927689]),
np.array([0.09076654, 0.15277497, 0.06421355]),
np.array([0.18253778, 0.23018215, 0.03460635]),
np.array([0.16926303, 0.22496873, 0.06237928]),
np.array([0.20398493, 0.2513471, 0.05473403]),
np.array([0.28140041, 0.30378091, 0.23081828]),
np.array([0.15231331, 0.21384066, 0.25883348]),
np.array([0.14386953, 0.21327677, 0.41482428]),
np.array([0.1593506, 0.22670722, 0.40114326]),
np.array([0.10949743, 0.15034868, 0.15892888]),
np.array([0.22674934, 0.26033997, 0.17110185]),
np.array([0.20569472, 0.2404847, 0.15700695]),
np.array([0.11359218, 0.15851929, 0.15851498]),
np.array([0.13446868, 0.17456223, 0.15665285]),
np.array([0.20295637, 0.23758918, 0.07464645]),
np.array([0.16020908, 0.20160833, 0.11096053]),
np.array([0.17946292, 0.22546056, 0.3340693]),
np.array([0.19584886, 0.21874231, 0.05264774]),
np.array([0.25950493, 0.28494406, 0.60260113]),
np.array([0.22170777, 0.24928491, 0.29763974]),
np.array([0.13564759, 0.16991066, 0.38138893]),
np.array([0.23373145, 0.24171207, 0.08831548]),
np.array([0.25339824, 0.26720506, 0.67917402]),
np.array([0.29210338, 0.30192924, 0.75127547]),
np.array([0.22958296, 0.2462168, 0.59738522]),
np.array([0.1258535, 0.12764109, 0.16297312]),
np.array([0.24227309, 0.25436998, 0.18624748]),
np.array([0.23758242, 0.25457444, 0.66865194]),
np.array([0.10476265, 0.09497701, 0.05235122]),
np.array([0.12612865, 0.06066443, 0.02676646]),
np.array([0.11705747, 0.03587748, 0.02951591]),
np.array([0.1232905, 0.0441543, 0.02037758]),
np.array([0.09139852, 0.01529466, 0.02045231]),
np.array([0.13833192, 0.07953813, 0.02236117]),
np.array([0.13361693, 0.10504399, 0.18414205]),
np.array([0.1210474, 0.06862453, 0.15728175]),
np.array([0.25249867, 0.24628189, 0.1353695]),
np.array([0.11706407, 0.08706468, 0.18814811]),
np.array([0.22549284, 0.2180621, 0.16192792]),
np.array([0.1534495, 0.11674072, 0.15905692]),
np.array([0.2235872, 0.20668864, 0.04253357]),
np.array([0.12515256, 0.06568452, 0.04436879]),
np.array([0.12125722, 0.0687482, 0.05533026]),
np.array([0.10373316, 0.0277414, 0.06333516]),
np.array([0.10925991, 0.04419045, 0.03405371]),
np.array([0.15402461, 0.13042053, 0.28570417]),
np.array([0.17573216, 0.16578146, 0.27364637]),
np.array([0.27401103, 0.27401935, 0.23451177]),
np.array([0.2075913, 0.19464274, 0.21940166]),
np.array([0.17049737, 0.06465369, 0.05868583]),
np.array([0.17064728, 0.0288915, 0.04372401]),
np.array([0.1672038, 0.03196773, 0.03579761]),
np.array([0.21031018, 0.15034168, 0.03888934]),
np.array([0.16827351, 0.02413193, 0.03757647]),
np.array([0.29178046, 0.29061931, 0.90323404]),
np.array([0.24910224, 0.22648966, 0.59336016]),
np.array([0.17601554, 0.0587606, 0.05160293]),
np.array([0.16834537, 0.01686511, 0.04374851]),
np.array([0.23182863, 0.19825806, 0.05291206]),
np.array([0.16638758, 0.05075245, 0.03650792]),
np.array([0.16028497, 0.01948654, 0.05046003]),
np.array([0.24957235, 0.21006823, 0.31587613]),
np.array([0.29306654, 0.28917618, 0.32466527]),
np.array([0.28495343, 0.27687408, 0.81760638]),
np.array([0.21441304, 0.13814375, 0.19716723]),
np.array([0.20941829, 0.14321541, 0.24327119]),
np.array([0.28541299, 0.27913907, 0.54006024]),
np.array([0.29230469, 0.28656219, 0.52465762]),
np.array([0.18804124, 0.08137467, 0.06580398]),
np.array([0.22025958, 0.15180899, 0.06551257]),
np.array([0.19309397, 0.06115047, 0.07873642]),
np.array([0.19437258, 0.06326427, 0.06829742]),
np.array([0.27887167, 0.24543217, 0.57450962]),
np.array([0.27487624, 0.23376357, 0.46322748]),
np.array([0.28356864, 0.2519005, 0.45980664]),
np.array([0.30333596, 0.30005216, 0.66401066]),
np.array([0.23835467, 0.11558036, 0.09827669]),
np.array([0.23067198, 0.05028062, 0.07671426]),
np.array([0.21902307, 0.05208443, 0.11065271]),
np.array([0.22907253, 0.06719948, 0.06903321]),
np.array([0.2536145, 0.16387485, 0.0990085]),
np.array([0.28535713, 0.25114971, 0.08429109]),
np.array([0.29701504, 0.28076672, 0.11652327]),
np.array([0.24894294, 0.13513311, 0.0750785]),
np.array([0.28976435, 0.23551078, 0.3203068]),
np.array([0.28699217, 0.2122739, 0.38376156]),
np.array([0.2942318, 0.24483482, 0.41568603]),
np.array([0.27112866, 0.10892559, 0.09137276]),
np.array([0.26932562, 0.11871922, 0.07456975]),
np.array([0.28774446, 0.21149857, 0.06409553]),
np.array([0.25815891, 0.05632389, 0.07763328]),
np.array([0.28438514, 0.18361032, 0.08751006]),
np.array([0.27466364, 0.11623324, 0.04459164]),
np.array([0.26635689, 0.0603288, 0.04436654]),
np.array([0.30526917, 0.29787617, 0.38438766]),
np.array([0.26275899, 0.12295408, 0.3048271]),
np.array([0.27733084, 0.16764806, 0.24584118]),
np.array([0.27121622, 0.0996767, 0.21385417]),
np.array([0.26547923, 0.10802713, 0.26515926]),
np.array([0.29841781, 0.26325636, 0.25902873]),
np.array([0.27412192, 0.13541072, 0.26778091]),
np.array([0.3042953, 0.11611832, 0.04387]),
np.array([0.30157505, 0.08506396, 0.03768091]),
np.array([0.31391169, 0.1856442, 0.48667459]),
np.array([0.3167079, 0.22835511, 0.52829657]),
np.array([0.31664956, 0.20454265, 0.45562827]),
np.array([0.31300137, 0.23982828, 0.40210613]),
np.array([0.31187872, 0.26667157, 0.33190218]),
np.array([0.31537904, 0.21052765, 0.39335492]),
np.array([0.31803143, 0.09273886, 0.1712263]),
np.array([0.30594132, 0.18152717, 0.14244072]),
np.array([0.31195968, 0.12089229, 0.15102095]),
np.array([0.33618672, 0.17589268, 0.24249386]),
np.array([0.34207627, 0.13875616, 0.24138597]),
np.array([0.34605075, 0.11899797, 0.23580785]),
np.array([0.31923003, 0.28291153, 0.25504488]),
np.array([0.35136426, 0.12256902, 0.2641027]),
np.array([0.33639641, 0.20777481, 0.23332748]),
np.array([0.31464507, 0.3010788, 0.27040807]),
np.array([0.32622786, 0.23679153, 0.28338647]),
np.array([0.31964789, 0.19702337, 0.02988488]),
np.array([0.33202416, 0.16293316, 0.16828902]),
np.array([0.3188341, 0.26119414, 0.19149517]),
np.array([0.34497302, 0.14740581, 0.17674791]),
np.array([0.33396066, 0.13204228, 0.15759269]),
np.array([0.32447663, 0.09207588, 0.03498261]),
np.array([0.32823298, 0.08288658, 0.04740281]),
np.array([0.34263192, 0.2492826, 0.04966462]),
np.array([0.37863885, 0.1480557, 0.03133476]),
np.array([0.36067287, 0.22508694, 0.03664306]),
np.array([0.35583972, 0.20890369, 0.0287403]),
np.array([0.34728299, 0.11402692, 0.01746108]),
np.array([0.32940771, 0.22789278, 0.01489395]),
np.array([0.31972567, 0.31122932, 0.53600948]),
np.array([0.35012172, 0.29333067, 0.42147094]),
np.array([0.37589661, 0.2850717, 0.66934047]),
np.array([0.42549932, 0.23904177, 0.33329037]),
np.array([0.34641765, 0.2972505, 0.38411768]),
np.array([0.45441652, 0.21797623, 0.36276856]),
np.array([0.41521602, 0.25989123, 0.39086156]),
np.array([0.34780042, 0.2928404, 0.0360562]),
np.array([0.4544551, 0.19822245, 0.03201793]),
np.array([0.33858745, 0.3098545, 0.70004006]),
np.array([0.41381262, 0.2839371, 0.60579167]),
np.array([0.39278492, 0.2914687, 0.81034741]),
np.array([0.33239612, 0.31251827, 0.19604738]),
np.array([0.43846181, 0.29096381, 0.23141236]),
np.array([0.40958022, 0.29719222, 0.48882871]),
np.array([0.44399899, 0.29369509, 0.43379687]),
np.array([0.40554919, 0.29723013, 0.16687769]),
np.array([0.42007003, 0.28930815, 0.1672933]),
np.array([0.52108329, 0.25574146, 0.13999526]),
np.array([0.3763801, 0.30728007, 0.34070289]),
np.array([0.36495307, 0.30801481, 0.20910915]),
np.array([0.42566912, 0.29564012, 0.28217939]),
np.array([0.38537971, 0.31745807, 0.82116554]),
np.array([0.37201534, 0.31965197, 0.79705828]),
np.array([0.55136347, 0.28138892, 0.19712193]),
np.array([0.53899416, 0.29048788, 0.25823634]),
np.array([0.43854811, 0.3103317, 0.27612362]),
np.array([0.35589069, 0.3165537, 0.24649473]),
np.array([0.6015019, 0.26287828, 0.27670596]),
np.array([0.49631592, 0.30111191, 0.04570504]),
np.array([0.60338354, 0.2746834, 0.04600213]),
np.array([0.57619776, 0.31554717, 0.14073356]),
np.array([0.65681487, 0.27970869, 0.16409107]),
np.array([0.40414547, 0.32310724, 0.13347887]),
np.array([0.68743116, 0.27762719, 0.14148314]),
np.array([0.39097754, 0.32893313, 0.75691217]),
np.array([0.44274163, 0.32668916, 0.63163346]),
np.array([0.3514372, 0.32860694, 0.80679695]),
np.array([0.55200335, 0.34090583, 0.13240521]),
np.array([0.43719237, 0.33673056, 0.18274766]),
np.array([0.36439573, 0.33015311, 0.87403366]),
np.array([0.34753957, 0.32343836, 0.93909237]),
np.array([0.38880059, 0.33783693, 0.7040874]),
np.array([0.40006019, 0.33663147, 0.07790261]),
np.array([0.67248369, 0.32330365, 0.07220649]),
np.array([0.64354918, 0.33973639, 0.08803122]),
np.array([0.39181364, 0.3446948, 0.72252254]),
np.array([0.49958346, 0.36703778, 0.64322822]),
np.array([0.60231065, 0.36168845, 0.16068191]),
np.array([0.50023387, 0.36789369, 0.5819145]),
np.array([0.5479115, 0.34892913, 0.02263783]),
np.array([0.48794187, 0.38293202, 0.2066566]),
np.array([0.42836733, 0.35891726, 0.14365536]),
np.array([0.4151889, 0.35495306, 0.14408043]),
np.array([0.46542405, 0.34082576, 0.02079253]),
np.array([0.44637245, 0.38945422, 0.69298338]),
np.array([0.45404134, 0.39647154, 0.69233629]),
np.array([0.48546253, 0.4130592, 0.59936297]),
np.array([0.47978428, 0.39825981, 0.20940783]),
np.array([0.46785973, 0.39297126, 0.20053901]),
np.array([0.51331905, 0.41265964, 0.21747039]),
np.array([0.41770486, 0.37303747, 0.24430738]),
np.array([0.37481816, 0.36536787, 0.78442772]),
np.array([0.35992953, 0.34813048, 0.94470358]),
np.array([0.39620152, 0.37951049, 0.68657974]),
np.array([0.51299224, 0.43528921, 0.15241166]),
np.array([0.52529122, 0.46205863, 0.20512581]),
np.array([0.54693353, 0.47444863, 0.20705936]),
np.array([0.38249101, 0.37271251, 0.24466996]),
np.array([0.52465528, 0.46381752, 0.27037022]),
np.array([0.36332827, 0.36042625, 0.417479]),
np.array([0.49103786, 0.44741782, 0.37625337]),
np.array([0.53260672, 0.46085797, 0.38175879]),
np.array([0.39947636, 0.38607101, 0.30024818]),
np.array([0.41566751, 0.39590836, 0.41203391]),
np.array([0.38379951, 0.37533829, 0.49499836]),
np.array([0.50184813, 0.44587964, 0.46556628]),
np.array([0.51381974, 0.45512612, 0.47222976]),
np.array([0.49758967, 0.49049612, 0.45242214]),
np.array([0.43935016, 0.43103452, 0.23259639]),
np.array([0.37285524, 0.37178029, 0.05690988]),
np.array([0.50597795, 0.49533702, 0.71175266]),
np.array([0.46625888, 0.45805354, 0.60604474]),
np.array([0.47877996, 0.46893263, 0.71437706]),
np.array([0.48009865, 0.45955664, 0.16263768]),
np.array([0.37334852, 0.37797455, 0.16515524]),
np.array([0.47856178, 0.5047532, 0.58732427]),
np.array([0.47119689, 0.49876821, 0.65483021]),
np.array([0.40983957, 0.43186708, 0.474305]),
np.array([0.42018308, 0.44575131, 0.405341]),
np.array([0.45841191, 0.4830471, 0.3634053]),
np.array([0.45040867, 0.47371048, 0.34766863]),
np.array([0.45990386, 0.46807933, 0.05857873]),
np.array([0.39793427, 0.428948, 0.13930127]),
np.array([0.43789903, 0.48337683, 0.18110313]),
np.array([0.4578588, 0.50623924, 0.25901491]),
np.array([0.39985008, 0.43116457, 0.20152195]),
np.array([0.37226961, 0.3973812, 0.23381716]),
np.array([0.3589998, 0.38250951, 0.36788985]),
np.array([0.46379107, 0.5360814, 0.45228296]),
np.array([0.35180708, 0.37798088, 0.55904288]))
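# Note (added comment, not part of the original fixtures): the achromatic series
# below shares a single chromaticity, (x, y) = (0.31006, 0.31616) (the CIE
# illuminant C white point); only the luminance Y varies from 0 to 1.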
MUNSELL_GREYS_TO_XYY = (
np.array([0.31006, 0.31616, 0.]),
np.array([0.31006, 0.31616, 0.00473582]),
np.array([0.31006, 0.31616, 0.00961944]),
np.array([0.31006, 0.31616, 0.01545756]),
np.array([0.31006, 0.31616, 0.02293343]),
np.array([0.31006, 0.31616, 0.03261914]),
np.array([0.31006, 0.31616, 0.044988]),
np.array([0.31006, 0.31616, 0.0604269]),
np.array([0.31006, 0.31616, 0.07924864]),
np.array([0.31006, 0.31616, 0.10170428]),
np.array([0.31006, 0.31616, 0.12799549]),
np.array([0.31006, 0.31616, 0.15828689]),
np.array([0.31006, 0.31616, 0.19271844]),
np.array([0.31006, 0.31616, 0.23141772]),
np.array([0.31006, 0.31616, 0.27451233]),
np.array([0.31006, 0.31616, 0.32214224]),
np.array([0.31006, 0.31616, 0.3744721]),
np.array([0.31006, 0.31616, 0.43170362]),
np.array([0.31006, 0.31616, 0.4940879]),
np.array([0.31006, 0.31616, 0.56193781]),
np.array([0.31006, 0.31616, 0.6356403]),
np.array([0.31006, 0.31616, 0.71566876]),
np.array([0.31006, 0.31616, 0.80259539]),
np.array([0.31006, 0.31616, 0.89710353]),
np.array([0.31006, 0.31616, 1.]))
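# Note (added comment, not part of the original fixtures): each entry below appears
# to pair a CIE xyY value with a four-component Munsell specification, read here as
# (hue, value, chroma, hue-family code).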
XYY_TO_MUNSELL_SPECIFICATIONS = (
(np.array([0.41515095, 0.51288165, 0.5702441]),
(2.4974254984450397, 7.9653798278107182, 11.928549858473941, 4)),
(np.array([0.38804358, 0.46299149, 0.31592072]),
(2.5006439954892556, 6.1977947932238182, 6.9236106970679092, 4)),
(np.array([0.33491518, 0.36277402, 0.22128409]),
(2.4903091620270921, 5.3119569245226996, 1.9986380341015466, 4)),
(np.array([0.39936353, 0.58547238, 0.64852094]),
(5.611784843667591, 8.4027565252844596, 18.567138539062917, 4)),
(np.array([0.34767896, 0.4152922, 0.58706989]),
(5.8509402259358456, 8.0626386670645687, 5.7841425045791395, 4)),
(np.array([0.33966055, 0.41527226, 0.07167165]),
(5.7816682069236824, 3.174803944794176, 3.349385637554362, 4)),
(np.array([0.36265912, 0.47966922, 0.11068168]),
(5.4883266507471262, 3.899412030844517, 5.7627095549416296, 4)),
(np.array([0.35748002, 0.45915987, 0.2727359]),
(5.8091607027319458, 5.8169751521608619, 6.6660161915407334, 4)),
(np.array([0.36348032, 0.48213512, 0.06293782]),
(5.2106889846935189, 2.9770363655668297, 5.1418366196999559, 4)),
(np.array([0.31838794, 0.40167814, 0.05382145]),
(8.112670582764574, 2.7489429215052237, 3.1644657849632849, 4)),
(np.array([0.31425413, 0.58372544, 0.04377268]),
(8.0454960027236311, 2.4630649861246052, 7.5000612073563522, 4)),
(np.array([0.31405111, 0.53120144, 0.02111891]),
(7.5930163631603786, 1.5750743888437724, 4.6261748431094443, 4)),
(np.array([0.32167873, 0.43862617, 0.3080991]),
(9.0152103567385531, 6.1312711912840276, 6.8052936649435196, 4)),
(np.array([0.31168045, 0.6270064, 0.34717087]),
(9.0429810192754712, 6.4540531798090557, 17.010229922329884, 4)),
(np.array([0.31496017, 0.47530248, 0.67920304]),
(9.9147922785103972, 8.564387948380265, 11.130516599293573, 4)),
(np.array([0.31107787, 0.51188895, 0.58306925]),
(9.9600028417318498, 8.0396827436851854, 13.199185802941964, 4)),
(np.array([0.31254722, 0.34686238, 0.6334334]),
(9.8852069946025658, 8.3213426295629134, 2.0708474520194438, 4)),
(np.array([0.30880402, 0.37157402, 0.08263161]),
(9.9880916197181513, 3.4007871110002572, 2.5684536428873437, 4)),
(np.array([0.23582365, 0.72197618, 0.06667783]),
(9.9984874163608151, 3.063915653085683, 13.513388270694881, 4)),
(np.array([0.29476305, 0.57521949, 0.23583791]),
(9.9976375658465031, 5.461465412699126, 12.753028500364776, 4)),
(np.array([0.28891056, 0.61005165, 0.28191444]),
(9.9978537677920656, 5.9008140943398812, 15.244070244743728, 4)),
(np.array([0.31292041, 0.3752074, 0.25538037]),
(9.7548278524788188, 5.6536473977778643, 3.4107844965142462, 4)),
(np.array([0.30648167, 0.48754769, 0.15098549]),
(9.8616202679251828, 4.4878641370082102, 7.6757055354477961, 4)),
(np.array([0.30382174, 0.34089453, 0.84210967]),
(3.2303908627800304, 9.3451636022528515, 3.4403440367216769, 3)),
(np.array([0.22538285, 0.5564611, 0.60532773]),
(2.7893896011049968, 8.1660999142444801, 20.865361708231859, 3)),
(np.array([0.28500017, 0.38833563, 0.24045742]),
(3.2217895159709276, 5.5077419322120686, 5.4630728253870444, 3)),
(np.array([0.19598037, 0.59002914, 0.29181101]),
(2.6212805877659342, 5.9893806530207669, 19.362394859916822, 3)),
(np.array([0.16437784, 0.59069112, 0.23370301]),
(3.2866319103339858, 5.4398925329719443, 19.854410847201201, 3)),
(np.array([0.17940333, 0.4663929, 0.06448045]),
(5.7282498562431972, 3.0132952817775913, 10.742779986622125, 3)),
(np.array([0.07553293, 0.55981543, 0.06406275]),
(5.3447124424617432, 3.003537623832734, 18.896804785539025, 3)),
(np.array([0.27330162, 0.37048932, 0.11621278]),
(5.7450040846850658, 3.9875599357896836, 4.2238304339640553, 3)),
(np.array([0.23251367, 0.40832841, 0.02585745]),
(5.7167484363405752, 1.8040377069834592, 4.8775349629391567, 3)),
(np.array([0.05704598, 0.55990299, 0.01221862]),
(5.3134014009438513, 1.0305079836154281, 8.0423751566497952, 3)),
(np.array([0.09405428, 0.51916421, 0.02268015]),
(5.7582243025990465, 1.6541936507188186, 9.5055870640895321, 3)),
(np.array([0.06306305, 0.54336526, 0.0361037]),
(5.982440185520268, 2.2109766905487875, 14.80076750240695, 3)),
(np.array([0.23250342, 0.41833342, 0.0559913]),
(5.4561315302616542, 2.8055682870311611, 6.6458910422254247, 3)),
(np.array([0.22630523, 0.39163204, 0.05597116]),
(7.8355832971785766, 2.8050499614961404, 6.2381161041005031, 3)),
(np.array([0.15858055, 0.42916814, 0.05259972]),
(8.2773608986748037, 2.7163438063692538, 10.349070921928153, 3)),
(np.array([0.14028772, 0.46282023, 0.41589047]),
(8.319398373380178, 6.971801563527217, 23.511305166143138, 3)),
(np.array([0.29271668, 0.33531051, 0.37326792]),
(8.4446073967845603, 6.6574922739334328, 2.4100016954494836, 3)),
(np.array([0.17253811, 0.43786778, 0.33686994]),
(8.3062536616909277, 6.3711907135696917, 17.50573930025228, 3)),
(np.array([0.09180367, 0.46823752, 0.05151176]),
(8.1366760619167078, 2.6868572712647723, 14.647241441839769, 3)),
(np.array([0.10903846, 0.44893518, 0.03595462]),
(8.4804731414469181, 2.2057044721731964, 11.877741549341193, 3)),
(np.array([0.2428693, 0.37094376, 0.04060119]),
(8.4528136469348514, 2.3630505444499383, 4.6062410883921121, 3)),
(np.array([0.27771166, 0.34994832, 0.23574564]),
(8.3053924744903362, 5.4605354972582942, 3.9020883808495852, 3)),
(np.array([0.05867972, 0.50502648, 0.19891229]),
(8.1854332748360292, 5.0699333724644822, 28.120377018384666, 3)),
(np.array([0.25930387, 0.37349411, 0.26874577]),
(7.5395384410319366, 5.7799955833061949, 6.6412975765309845, 3)),
(np.array([0.12284826, 0.47211684, 0.21388094]),
(7.9597279338124061, 5.2335977016179758, 20.290421956393594, 3)),
(np.array([0.0890682, 0.48703791, 0.27058998]),
(8.429197168007434, 5.7971282967167381, 26.464916551909962, 3)),
(np.array([0.27018357, 0.35138182, 0.76804186]),
(0.0029116335013412709, 9.0051614807808651, 6.0521479959935158, 2)),
(np.array([0.0431053, 0.45634623, 0.12074655]),
(9.9090940657999482, 4.0578050043303753, 25.772345770419662, 3)),
(np.array([0.16522669, 0.40881359, 0.18014875]),
(9.9976284265517368, 4.8536959511881248, 13.711432917514353, 3)),
(np.array([0.23349872, 0.37536989, 0.14476492]),
(9.2805000536689342, 4.4042068716055978, 6.9966797139394705, 3)),
(np.array([0.2315995, 0.37207726, 0.20351563]),
(9.9968489688298856, 5.1210462040554292, 7.9466011879104901, 3)),
(np.array([0.08301372, 0.45335265, 0.25304755]),
(9.9995652694576354, 5.6311864544796251, 26.172410781131642, 3)),
(np.array([0.20183026, 0.36561544, 0.39526058]),
(2.4927553349648868, 6.8222787267061182, 12.637144808932389, 2)),
(np.array([0.06340759, 0.37121187, 0.07975536]),
(2.4983577898662457, 3.3435595770060069, 19.16688155401506, 2)),
(np.array([0.16044634, 0.34707426, 0.10145605]),
(3.2889502832179485, 3.7457477699374353, 10.316221955381685, 2)),
(np.array([0.24416648, 0.33434737, 0.07774819]),
(3.0834483697271065, 3.3027889692997099, 4.0314789302131331, 2)),
(np.array([0.28105936, 0.3327088, 0.88937678]),
(2.5569047038313286, 9.5505578383904854, 8.2592949619273135, 2)),
(np.array([0.24328961, 0.31868567, 0.12931978]),
(5.9613749867002497, 4.1861233500163211, 4.3110624347372388, 2)),
(np.array([0.10471116, 0.30938022, 0.15549815]),
(5.6283551180725855, 4.5472024424749877, 16.561283650587431, 2)),
(np.array([0.0862452, 0.30268915, 0.15900713]),
(5.8537976763848487, 4.5925997102794662, 18.829597868809778, 2)),
(np.array([0.10497041, 0.32451898, 0.22191645]),
(5.145089894686512, 5.3185755017128393, 18.972215318595008, 2)),
(np.array([0.16894641, 0.33087742, 0.29312371]),
(5.2908544585019168, 6.0, 13.596495394774379, 2)),
(np.array([0.16144965, 0.33133829, 0.34018592]),
(5.4160474347058862, 6.3980311319848546, 15.175835231610055, 2)),
(np.array([0.25864013, 0.31415379, 0.28205753]),
(8.2025559167724538, 5.9021079413020505, 4.0260977652628069, 2)),
(np.array([0.15795868, 0.26417318, 0.11377678]),
(0.012536235321443101, 3.9490817702416789, 9.1888977386924644, 1)),
(np.array([0.12862477, 0.25616557, 0.08539517]),
(9.2612367834642395, 3.4545177235566826, 10.589381636411931, 2)),
(np.array([0.25058288, 0.29329096, 0.17796585]),
(3.241079303134331, 4.827650836237364, 3.743365882969607, 1)),
(np.array([0.18830894, 0.26192867, 0.13740285]),
(2.5158920949660768, 4.3022043533741527, 7.3971111785249475, 1)),
(np.array([0.1684076, 0.25029878, 0.13934697]),
(2.5131427837361731, 4.329470861478244, 8.858704782948033, 1)),
(np.array([0.1951648, 0.27716957, 0.51306785]),
(2.5013789364411032, 7.6200943072565934, 10.879242856664604, 1)),
(np.array([0.19935306, 0.27783329, 0.44060477]),
(2.5013473966576427, 7.1449996623226202, 10.096343120964026, 1)),
(np.array([0.26308512, 0.3046212, 0.52610451]),
(2.6086704821311635, 7.7009394844971908, 4.2409228690438132, 1)),
(np.array([0.2532416, 0.30291555, 0.67153139]),
(2.4970799555123779, 8.524455640257985, 5.3618112705721508, 1)),
(np.array([0.24841933, 0.2986962, 0.43833832]),
(2.5045629814751091, 7.1293721373614156, 5.4196706970655724, 1)),
(np.array([0.2082133, 0.28356991, 0.52733609]),
(2.5131771577412465, 7.7085098474609053, 9.6149141336841648, 1)),
(np.array([0.23939654, 0.2920611, 0.43144538]),
(3.2651896190197771, 7.0815325065875827, 6.2204042252646667, 1)),
(np.array([0.18279859, 0.27122662, 0.52199238]),
(2.5035978550267846, 7.6755794342432786, 12.259534701163053, 1)),
(np.array([0.16449512, 0.24371038, 0.08686299]),
(2.5134247452410996, 3.4825807614595732, 7.7661876537009666, 1)),
(np.array([0.16724393, 0.24366794, 0.06480227]),
(2.5011108303709673, 3.0207831644481167, 6.9952041725206957, 1)),
(np.array([0.19881487, 0.26071106, 0.06927689]),
(3.0402893156428767, 3.1223174742573971, 5.2006395603475859, 1)),
(np.array([0.18253778, 0.23018215, 0.03460635]),
(5.597657625685617, 2.1572823771224261, 5.1627644612166348, 1)),
(np.array([0.16926303, 0.22496873, 0.06237928]),
(5.0963427942773887, 2.9637551635524759, 6.8587937787896989, 1)),
(np.array([0.20398493, 0.2513471, 0.05473403]),
(5.1969692716096816, 2.7729516729961743, 4.5594222945860832, 1)),
(np.array([0.28140041, 0.30378091, 0.23081828]),
(5.496561269157576, 5.4105513508464149, 2.0008136754401193, 1)),
(np.array([0.15231331, 0.21384066, 0.25883348]),
(5.8564961151865536, 5.6866675850563144, 13.28811457163558, 1)),
(np.array([0.1593506, 0.22670722, 0.40114326]),
(5.2738491300453916, 6.8653966593629132, 14.101257686332218, 1)),
(np.array([0.22674934, 0.26033997, 0.17110185]),
(7.8734843119339804, 4.7444381618873681, 5.5988944477630973, 1)),
(np.array([0.20569472, 0.2404847, 0.15700695]),
(8.3151048023527618, 4.5668004089848395, 7.0880114815101756, 1)),
(np.array([0.11359218, 0.15851929, 0.15851498]),
(7.8472605398133766, 4.5862706767212797, 16.22461017448477, 1)),
(np.array([0.13446868, 0.17456223, 0.15665285]),
(8.3830368679291833, 4.5622116144353626, 13.972942153711159, 1)),
(np.array([0.20295637, 0.23758918, 0.07464645]),
(7.856435533177164, 3.2383503193149088, 5.3584916284061572, 1)),
(np.array([0.16020908, 0.20160833, 0.11096053]),
(7.6347078681738623, 3.9039238069235207, 9.7687367916502765, 1)),
(np.array([0.17946292, 0.22546056, 0.3340693]),
(7.6477212875072365, 6.3483962769765867, 11.704570140652267, 1)),
(np.array([0.19584886, 0.21874231, 0.05264774]),
(9.9898352731700779, 2.7176352217817406, 5.4160154998040086, 1)),
(np.array([0.25950493, 0.28494406, 0.60260113]),
(9.1958881934475922, 8.1507829370037523, 5.0725149953944122, 1)),
(np.array([0.22170777, 0.24928491, 0.29763974]),
(9.9993390910882241, 6.0406947699165405, 7.7621615159154409, 1)),
(np.array([0.23373145, 0.24171207, 0.08831548]),
(2.8725008274335195, 3.5100389169220145, 4.4944374759441068, 10)),
(np.array([0.29210338, 0.30192924, 0.75127547]),
(2.5573566809324166, 8.9248766535028565, 2.4932404586928576, 10)),
(np.array([0.1258535, 0.12764109, 0.16297312]),
(2.5007360838933224, 4.6431603215843316, 18.831399277234546, 10)),
(np.array([0.24227309, 0.25436998, 0.18624748]),
(2.493358398116623, 4.9254430182491289, 5.4174887497658339, 10)),
(np.array([0.25249867, 0.24628189, 0.1353695]),
(5.9478312821937074, 4.2734224989493885, 4.8966308718341258, 10)),
(np.array([0.22549284, 0.2180621, 0.16192792]),
(5.5322841596577277, 4.6299109349866532, 7.7522529293268203, 10)),
(np.array([0.1534495, 0.11674072, 0.15905692]),
(5.9220754173118184, 4.5932393336579818, 19.565654953046174, 10)),
(np.array([0.2235872, 0.20668864, 0.04253357]),
(5.9517759003047477, 2.4246350396441594, 4.9539113586948025, 10)),
(np.array([0.17573216, 0.16578146, 0.27364637]),
(5.2008216960854803, 5.8253671682452843, 15.513073935981227, 10)),
(np.array([0.27401103, 0.27401935, 0.23451177]),
(5.8906638458196703, 5.4480790029813448, 3.7740354308969817, 10)),
(np.array([0.2075913, 0.19464274, 0.21940166]),
(5.8176239302630783, 5.2921858536043249, 11.318160969256796, 10)),
(np.array([0.21031018, 0.15034168, 0.03888934]),
(8.37336964793856, 2.3066883689858764, 8.6310377953949686, 10)),
(np.array([0.23182863, 0.19825806, 0.05291206]),
(7.7121590219520009, 2.7247287040518557, 5.7300160185960349, 10)),
(np.array([0.24957235, 0.21006823, 0.31587613]),
(9.9948213055847646, 6.1974183872008091, 10.969746297783992, 10)),
(np.array([0.29306654, 0.28917618, 0.32466527]),
(9.1126758970844079, 6.2709966101935857, 2.7508294514108931, 10)),
(np.array([0.21441304, 0.13814375, 0.19716723]),
(9.998713332858582, 5.050367418553515, 19.168834467026883, 10)),
(np.array([0.20941829, 0.14321541, 0.24327119]),
(9.3829287452992816, 5.535664936769356, 18.821027597932279, 10)),
(np.array([0.28541299, 0.27913907, 0.54006024]),
(9.002080407348938, 7.7860618086454938, 4.4538886914437397, 10)),
(np.array([0.29230469, 0.28656219, 0.52465762]),
(9.991235613354732, 7.6920309739694908, 3.652324068875179, 10)),
(np.array([0.18804124, 0.08137467, 0.06580398]),
(9.0425654223037455, 3.0439259368754286, 22.292781603620266, 10)),
(np.array([0.22025958, 0.15180899, 0.06551257]),
(9.4618219859335095, 3.0372188057325871, 10.554387963579108, 10)),
(np.array([0.27887167, 0.24543217, 0.57450962]),
(2.5015728390355108, 7.9902138330733292, 8.3830608857310125, 9)),
(np.array([0.27487624, 0.23376357, 0.46322748]),
(2.4996651744668252, 7.2983010268318242, 9.5028467299290504, 9)),
(np.array([0.28356864, 0.2519005, 0.45980664]),
(2.8618699795852098, 7.2754260032575946, 7.46611567875307, 9)),
(np.array([0.30333596, 0.30005216, 0.66401066]),
(3.0821290942108526, 8.4850005681408049, 2.4935002898711431, 9)),
(np.array([0.23835467, 0.11558036, 0.09827669]),
(2.5037499210886072, 3.6906677571198614, 19.779266412450269, 9)),
(np.array([0.2536145, 0.16387485, 0.0990085]),
(2.4992857563119486, 3.703448918491747, 12.087654980113633, 9)),
(np.array([0.28535713, 0.25114971, 0.08429109]),
(2.4890318095296315, 3.4331943344980584, 3.3814929201024877, 9)),
(np.array([0.29701504, 0.28076672, 0.11652327]),
(2.8365261300922739, 3.992426600758435, 2.0008049439042344, 9)),
(np.array([0.24894294, 0.13513311, 0.0750785]),
(2.8801166239961695, 3.2474344484707478, 14.610390101910371, 9)),
(np.array([0.28976435, 0.23551078, 0.3203068]),
(5.1644159196302653, 6.2346627202329179, 9.1133260523497661, 9)),
(np.array([0.28699217, 0.2122739, 0.38376156]),
(5.5001367381271749, 6.7368412342989172, 13.153893654633693, 9)),
(np.array([0.2942318, 0.24483482, 0.41568603]),
(5.5355820929681876, 6.97034254644461, 8.8925671354071021, 9)),
(np.array([0.27112866, 0.10892559, 0.09137276]),
(5.5896397325517455, 3.5668608875999919, 22.756075736806583, 9)),
(np.array([0.26932562, 0.11871922, 0.07456975]),
(5.2816804395644468, 3.2367339016487255, 18.731686204369552, 9)),
(np.array([0.28774446, 0.21149857, 0.06409553]),
(5.1732976532712982, 3.0043049868755096, 6.2294092968441577, 9)),
(np.array([0.28438514, 0.18361032, 0.08751006]),
(5.5527847263028818, 3.4948508627605213, 10.076128044498653, 9)),
(np.array([0.27466364, 0.11623324, 0.04459164]),
(5.7295438817735267, 2.4880342757163407, 15.986078477134843, 9)),
(np.array([0.30526917, 0.29787617, 0.38438766]),
(5.0697134888906703, 6.7415332853801191, 2.219542488732086, 9)),
(np.array([0.27733084, 0.16764806, 0.24584118]),
(5.3830683593123041, 5.5609978860251674, 18.302040299981691, 9)),
(np.array([0.29841781, 0.26325636, 0.25902873]),
(5.3573489710687028, 5.6885266519601112, 5.2553309577287299, 9)),
(np.array([0.3042953, 0.11611832, 0.04387]),
(8.2848827562071747, 2.4660497719128411, 17.239324057538756, 9)),
(np.array([0.3167079, 0.22835511, 0.52829657]),
(8.1437019218335216, 7.7144058793057653, 13.532589823660205, 9)),
(np.array([0.31664956, 0.20454265, 0.45562827]),
(8.0950673003541418, 7.2473398239288285, 17.337282390950396, 9)),
(np.array([0.31300137, 0.23982828, 0.40210613]),
(7.8286696950237911, 6.872416979415612, 10.705903656469591, 9)),
(np.array([0.31187872, 0.26667157, 0.33190218]),
(7.7997180017353251, 6.3306783023663034, 6.2110658750956116, 9)),
(np.array([0.31537904, 0.21052765, 0.39335492]),
(8.044182832558544, 6.8082262964881988, 15.55667343253385, 9)),
(np.array([0.30594132, 0.18152717, 0.14244072]),
(7.61253523767909, 4.3723674448096084, 14.16881179143731, 9)),
(np.array([0.31195968, 0.12089229, 0.15102095]),
(8.1692313405838135, 4.4883347140084675, 27.235344441294185, 9)),
(np.array([0.33618672, 0.17589268, 0.24249386]),
(9.6020077678142357, 5.527970670885737, 20.580420190036016, 9)),
(np.array([0.34207627, 0.13875616, 0.24138597]),
(9.6633397378583119, 5.51697844390592, 29.047422373609326, 9)),
(np.array([0.31923003, 0.28291153, 0.25504488]),
(9.9976373285878672, 5.6504248984821048, 4.2159521394352462, 9)),
(np.array([0.33639641, 0.20777481, 0.23332748]),
(9.9973290603118059, 5.4360854689605906, 14.778819186976174, 9)),
(np.array([0.31464507, 0.3010788, 0.27040807]),
(9.9981812895483202, 5.7954414845493361, 1.989415064944841, 9)),
(np.array([0.32622786, 0.23679153, 0.28338647]),
(9.4957863462098757, 5.9141054746428159, 10.808393081380505, 9)),
(np.array([0.31964789, 0.19702337, 0.02988488]),
(9.8255286116794061, 1.9759993085792666, 7.0669157653548789, 9)),
(np.array([0.33202416, 0.16293316, 0.16828902]),
(9.3833430362731995, 4.7097386903129328, 20.001567691188573, 9)),
(np.array([0.3188341, 0.26119414, 0.19149517]),
(9.1151365491745828, 4.9860254232389654, 5.8833516462242006, 9)),
(np.array([0.34497302, 0.14740581, 0.17674791]),
(9.9993661799172529, 4.8130329637008931, 24.745869339266417, 9)),
(np.array([0.33396066, 0.13204228, 0.15759269]),
(9.3788217006559904, 4.5743767927679624, 26.296103195549776, 9)),
(np.array([0.34263192, 0.2492826, 0.04966462]),
(2.5000378111202792, 2.6357603664122133, 4.7116011462627716, 8)),
(np.array([0.37863885, 0.1480557, 0.03133476]),
(2.8899890484960267, 2.0337680336115653, 13.996029826620704, 8)),
(np.array([0.36067287, 0.22508694, 0.03664306]),
(3.4356425026035353, 2.2299191499236484, 6.7219022055024089, 8)),
(np.array([0.35583972, 0.20890369, 0.0287403]),
(2.9923928706346814, 1.9289333829363795, 7.1957475402702213, 8)),
(np.array([0.34728299, 0.11402692, 0.01746108]),
(2.500584368220391, 1.3726888338846817, 14.170766991414974, 8)),
(np.array([0.32940771, 0.22789278, 0.01489395]),
(2.6107190169889272, 1.2137703916290237, 3.3488976554537588, 8)),
(np.array([0.31972567, 0.31122932, 0.53600948]),
(5.1656963312103521, 7.7615027982277569, 2.1410423395557343, 8)),
(np.array([0.35012172, 0.29333067, 0.42147094]),
(5.0476033618933336, 7.0114568961277142, 6.451158237816637, 8)),
(np.array([0.37589661, 0.2850717, 0.66934047]),
(5.8078232482982983, 8.512993428798092, 10.441995222524335, 8)),
(np.array([0.42549932, 0.23904177, 0.33329037]),
(5.0441061851453028, 6.3420359675892799, 18.425239270239359, 8)),
(np.array([0.34641765, 0.2972505, 0.38411768]),
(5.4855084167238033, 6.7395106003336087, 5.4746289633709742, 8)),
(np.array([0.41521602, 0.25989123, 0.39086156]),
(5.8766492669350967, 6.7897767720171638, 15.4400719961405, 8)),
(np.array([0.34780042, 0.2928404, 0.0360562]),
(8.1980634796784635, 2.2092985443242368, 1.9999688315566178, 8)),
(np.array([0.4544551, 0.19822245, 0.03201793]),
(7.9964595957086892, 2.0603132172126144, 11.656390753035632, 8)),
(np.array([0.33858745, 0.3098545, 0.70004006]),
(7.9849282958267054, 8.6712830581905536, 4.2705189024026593, 8)),
(np.array([0.33239612, 0.31251827, 0.19604738]),
(9.5354740556904183, 5.0377553290689132, 1.9992885406730441, 8)),
(np.array([0.43846181, 0.29096381, 0.23141236]),
(9.9958803631122173, 5.4166120275490517, 11.058404052106438, 8)),
(np.array([0.40958022, 0.29719222, 0.48882871]),
(9.3095533384450668, 7.4662030728919255, 11.186885768515552, 8)),
(np.array([0.44399899, 0.29369509, 0.43379687]),
(9.9949745495327491, 7.0979059067934074, 13.900038696652489, 8)),
(np.array([0.40554919, 0.29723013, 0.16687769]),
(9.9247647943179373, 4.6921921147267351, 7.1970576398528765, 8)),
(np.array([0.42007003, 0.28930815, 0.1672933]),
(9.41497757686939, 4.6973688173207524, 8.7211281744258624, 8)),
(np.array([0.52108329, 0.25574146, 0.13999526]),
(0.0019220181363621691, 4.3385094379603402, 16.467729969190529, 7)),
(np.array([0.3763801, 0.30728007, 0.34070289]),
(0.013622323425028782, 6.4022012583755865, 6.5944639522864303, 7)),
(np.array([0.36495307, 0.30801481, 0.20910915]),
(0.01085925621531203, 5.1822080765947414, 4.5480549505954455, 7)),
(np.array([0.42566912, 0.29564012, 0.28217939]),
(9.9664422617946791, 5.9032094955646706, 10.839641908347604, 8)),
(np.array([0.55136347, 0.28138892, 0.19712193]),
(2.497658061438166, 5.0498581073710413, 17.884020184861189, 7)),
(np.array([0.53899416, 0.29048788, 0.25823634]),
(2.7485546629604585, 5.6809766685439209, 17.995724599746378, 7)),
(np.array([0.43854811, 0.3103317, 0.27612362]),
(2.4943078709206112, 5.8481154233099364, 10.235754640176427, 7)),
(np.array([0.35589069, 0.3165537, 0.24649473]),
(2.8983979131373161, 5.5674143643020697, 3.5545236839116567, 7)),
(np.array([0.49631592, 0.30111191, 0.04570504]),
(5.4103686174523524, 2.5214548835517885, 7.258854844117816, 7)),
(np.array([0.60338354, 0.2746834, 0.04600213]),
(5.8342027901144569, 2.5302731702838765, 12.000615206905717, 7)),
(np.array([0.57619776, 0.31554717, 0.14073356]),
(5.9788280212108607, 4.3487706615362089, 14.392102528163205, 7)),
(np.array([0.40414547, 0.32310724, 0.13347887]),
(5.1783763049447264, 4.2464126669886673, 4.8428138961508438, 7)),
(np.array([0.3514372, 0.32860694, 0.80679695]),
(8.3238088961026691, 9.1859534871888915, 4.2364443377740981, 7)),
(np.array([0.55200335, 0.34090583, 0.13240521]),
(8.1957437659127912, 4.2309654368964633, 11.592666113548891, 7)),
(np.array([0.43719237, 0.33673056, 0.18274766]),
(7.6095498405871478, 4.8844510705007274, 6.7936327467178108, 7)),
(np.array([0.38880059, 0.33783693, 0.7040874]),
(8.254754870479271, 8.6917863577621315, 6.3425646390065618, 7)),
(np.array([0.40006019, 0.33663147, 0.07790261]),
(9.9982747461203623, 3.3059508982469912, 3.1154069559449877, 7)),
(np.array([0.67248369, 0.32330365, 0.07220649]),
(9.7627074021020093, 3.1863606829924529, 14.800047578330393, 7)),
(np.array([0.64354918, 0.33973639, 0.08803122]),
(9.9982818667900677, 3.5046891565568421, 13.900125103004392, 7)),
(np.array([0.39181364, 0.3446948, 0.72252254]),
(0.012146453319250128, 8.7841366100324851, 6.2155473412367117, 6)),
(np.array([0.60231065, 0.36168845, 0.16068191]),
(9.9987262089904458, 4.6140459204878024, 14.686472574413189, 7)),
(np.array([0.5479115, 0.34892913, 0.02263783]),
(2.7484465676051997, 1.6521000438004581, 5.5680030289264844, 6)),
(np.array([0.48794187, 0.38293202, 0.2066566]),
(3.1414206652130154, 5.15551583761708, 8.5966568435236024, 6)),
(np.array([0.42836733, 0.35891726, 0.14365536]),
(2.4978474308952627, 4.389047620589019, 4.9510475190736321, 6)),
(np.array([0.4151889, 0.35495306, 0.14408043]),
(2.4977653679963083, 4.3948638180430448, 4.383585180612334, 6)),
(np.array([0.46542405, 0.34082576, 0.02079253]),
(2.4915569441282575, 1.5580251224422779, 3.3089610305380077, 6)),
(
|
np.array([0.44637245, 0.38945422, 0.69298338])
|
numpy.array
|
import os
import pickle
import numpy as np
import torch
from tqdm import tqdm
from SyncNetDist import SyncNet
from dataLoader import loadWAV
def cosine_distance(x, y):
    """Cosine distance between two vectors: 1 - cosine similarity."""
    x_norm = np.linalg.norm(x)
    y_norm = np.linalg.norm(y)
    if x_norm * y_norm == 0:
        # Degenerate case: a zero vector has no direction; treat similarity as 0.
        similarity = 0
        print(x, y)
    else:
        similarity = np.dot(x, y.T) / (x_norm * y_norm)
    dist = 1 - similarity
    return dist
def cosine_similarity(x, y):
    """Cosine similarity between two vectors."""
    x_norm = np.linalg.norm(x)
    y_norm = np.linalg.norm(y)
    if x_norm * y_norm == 0:
        # Degenerate case: a zero vector has no direction; treat similarity as 0.
        similarity = 0
        print(x, y)
    else:
        similarity = np.dot(x, y.T) / (x_norm * y_norm)
    return similarity
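# Illustrative sanity check (added; not part of the original script):
# >>> a, b = np.array([1.0, 0.0]), np.array([0.0, 1.0])
# >>> cosine_similarity(a, b)   # -> 0.0 (orthogonal vectors)
# >>> cosine_distance(a, b)     # -> 1.0
# >>> cosine_distance(a, a)     # -> 0.0 (identical vectors)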
S = SyncNet(model="models.SyncNetModelFBank", maxFrames=30, learning_rate=0.001, temporal_stride=2)
modelpath = "data/exp09.model"
print('Loading model from \'%s\''%modelpath)
S.loadParameters(modelpath)
S.eval()
root_dir = '/data2/Downloads/wav'
wavdata = pickle.load(open("./dataset_1000_pretrain_voice.pkl", 'rb'))
train_list = wavdata['train']
valid_list = wavdata['valid']
train_dict = dict()
valid_dict = dict()
for data_pair in train_list:
wavpath, index = data_pair
if index not in train_dict.keys():
train_dict[index] = [wavpath]
else:
train_dict[index].append(wavpath)
for data_pair in valid_list:
wavpath, index = data_pair
if index not in valid_dict.keys():
valid_dict[index] = [wavpath]
else:
valid_dict[index].append(wavpath)
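# Equivalent construction of the wavpath-per-identity dicts (added sketch, assuming
# the same (wavpath, index) pair layout); collections.defaultdict avoids the
# explicit key checks above:
# from collections import defaultdict
# train_dict = defaultdict(list)
# for wavpath, index in train_list:
#     train_dict[index].append(wavpath)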
if os.path.exists('npy/register_dict.npy'):
register_dict = np.load('npy/register_dict.npy', allow_pickle=True).item()
else:
register_dict = dict()
for people in tqdm(train_dict.keys()):
id_features = []
for wavpath in train_dict[people]:
filename = os.path.join(root_dir, wavpath)
data_aud = loadWAV(filename, 160)
out_content, out_id = S.__S__.forward_aud(data_aud.cuda())
out_id = out_id.cpu().detach().numpy()[0]
magnitude = np.linalg.norm(out_id, axis=0)
out_id = out_id/magnitude
out_id = np.average(out_id, axis=1)
id_features.append(out_id)
register_dict[people] =
|
np.average(id_features, axis=0)
|
numpy.average
|
# Multiple landmark detection in 3D ultrasound images of fetal head
# Network training
#
# Reference
# Fast Multiple Landmark Localisation Using a Patch-based Iterative Network
# https://arxiv.org/abs/1806.06987
#
# Code EDITED BY: <NAME>
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
import numpy as np
import torch
import torch.nn as nn
from utils import input_data_torch, shape_model_func, network_torch, patch
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
from torch.optim.lr_scheduler import ExponentialLR
from network_torch import VisitNet
class Config(object):
"""Training configurations."""
# File paths
data_dir = '../landmark_detection_PIN/data_2d/Images'
label_dir = '../landmark_detection_PIN/data_2d/landmarks'
train_list_file = '../landmark_detection_PIN/data_2d/train_280/list_train.txt'
test_list_file = '../landmark_detection_PIN/data_2d/train_280/list_test.txt'
log_dir = '../landmark_detection_PIN/logs'
model_dir = '../landmark_detection_PIN/cnn_model'
model_file = ''
# Shape model parameters
shape_model_file = '../landmark_detection_PIN/shape_model/shape_model_280_4/ShapeModel.mat'
eigvec_per = 0.995 # Percentage of eigenvectors to keep
sd = 3.0 # Standard deviation of shape parameters
landmark_count = 4 # Number of landmarks
landmark_unwant = [] # list of unwanted landmark indices
# Training parameters
resume = False # Whether to train from scratch or resume previous training
start_iter = 0 # Iteration to resume from (referenced below when resume is True)
box_size = 121 # patch size (odd number)
alpha = 0.5 # Weighting given to the loss (0<=alpha<=1). loss = alpha*loss_c + (1-alpha)*loss_r
learning_rate = 0.0005
max_steps = 100000 # Number of steps to train
save_interval = 25000 # Number of steps in between saving each model
batch_size = 64 # Training batch size
dropout = 0.8
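# Minimal CPU-only sketch of the combined loss described for `alpha` above,
# loss = alpha*loss_c + (1-alpha)*loss_r, using made-up toy tensors
# (illustrative only; the real inputs come from get_train_pairs below).
_alpha = Config.alpha
_toy_logits = torch.randn(4, 8)
_toy_labels = torch.randint(0, 8, (4,))
_toy_pred = torch.randn(4, 3)
_toy_target = torch.randn(4, 3)
_toy_loss = _alpha * nn.CrossEntropyLoss()(_toy_logits, _toy_labels) + (1 - _alpha) * nn.MSELoss()(_toy_pred, _toy_target)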
def main():
config = Config()
# Load shape model
shape_model = shape_model_func.load_shape_model(config.shape_model_file, config.eigvec_per)
num_cnn_output_c = 2 * shape_model['Evectors'].shape[1]
num_cnn_output_r = shape_model['Evectors'].shape[1]
# Load images and landmarks
train_data, test_data = input_data_torch.read_data_sets(config.data_dir,
config.label_dir,
config.train_list_file,
config.test_list_file,
config.landmark_count,
config.landmark_unwant,
shape_model)
# initialize weights and biases
# w = network_torch.network_weights(shape)
# b = network_torch.bias_variable(shape)
# Define CNN model
# net = VisitNet(num_cnn_output_c,num_cnn_output_r,config.dropout)
net = VisitNet(num_cnn_output_c,num_cnn_output_r,Config.dropout).cuda()
# loss_c = nn.CrossEntropyLoss()
# loss_r = nn.MSELoss()
loss_c = nn.CrossEntropyLoss().cuda()
loss_r = nn.MSELoss().cuda()
optimizer = torch.optim.Adam(net.parameters(), lr=config.learning_rate, betas=(0.9, 0.999))
# scheduler = ExponentialLR(optimizer, gamma=0.999)
if config.resume:
net.load_state_dict(torch.load(config.model_dir+"/"+config.model_file))
ite_start = config.start_iter
ite_end = ite_start + config.max_steps
else:
ite_start = 0
ite_end = config.max_steps
print('Currently Using Cuda mode:',torch.cuda.get_device_name(0))
for i in range(ite_start, ite_end):
patches_train, actions_train, dbs_train, _ = get_train_pairs(config.batch_size,
train_data.images,
train_data.shape_params,
config.box_size,
num_cnn_output_c,
num_cnn_output_r,
shape_model,
config.sd)
optimizer.zero_grad()
net.train()
patches_train_torch = torch.from_numpy(patches_train).permute(0, 3, 1, 2)
actions_train_torch = torch.argmax(torch.from_numpy(actions_train).to(torch.long), dim=1)
dbs_train_torch = torch.from_numpy(dbs_train).to(torch.float32)
patches_train_torch = patches_train_torch.cuda()
actions_train_torch = actions_train_torch.cuda()
dbs_train_torch = dbs_train_torch.cuda()
y_c, y_r, _ = net(patches_train_torch)
loss_c_val = torch.mean(config.alpha * loss_c(y_c, actions_train_torch))
loss_r_val = torch.mean((1 - config.alpha) * loss_r(y_r, dbs_train_torch))
# loss_r_val = torch.mean((1 - config.alpha) * torch.pow(dbs_train_torch-y_r,2))
loss = loss_c_val + loss_r_val
# print(loss_r_val)
# loss = config.alpha * loss_c_val + (1 - config.alpha) * loss_r_val
loss.backward()
optimizer.step()
# scheduler.step()
pred_idx = torch.argmax(y_c, dim=1)
# print(pred_idx)
# print()
# print(actions_train_torch)
# print()
# print(pred_idx.eq(actions_train_torch).sum())
# print()
# print(pred_idx.eq(actions_train_torch).sum().item())
# print()
# print((pred_idx.size()[0]))
# print()
# print((pred_idx.size()[0]))
# if i == 1:
# break
accuracy = pred_idx.eq(actions_train_torch).sum().item() / (pred_idx.size()[0])
print("[%d/%d] class loss: %f reg loss: %f train loss: %f accuracy: %f" % (i, ite_end, loss_c_val.item(), loss_r_val.item(), loss.item(), accuracy))
if ((i+1) % config.save_interval) == 0:
torch.save(net.state_dict(), config.model_dir+"/model2_meanstd_norm_step"+str(i)+".pt")
if (i+1) % 100 == 0:
patches_test, actions_test, dbs_test, _ = get_train_pairs(config.batch_size,
test_data.images,
test_data.shape_params,
config.box_size,
num_cnn_output_c,
num_cnn_output_r,
shape_model,
config.sd)
net.eval()
patches_test_torch = torch.from_numpy(patches_test).permute(0, 3, 1, 2).cuda()
actions_test_torch = torch.argmax(torch.from_numpy(actions_test).to(torch.long), dim=1).cuda()
# dbs_test_torch = torch.from_numpy(dbs_test).to(torch.float32)
y_c_test, _, _ = net(patches_test_torch)
pred_idx_test = torch.argmax(y_c_test, dim=1)
# print(pred_idx)
# print(actions_train_torch)
# print(pred_idx.eq(actions_train_torch).sum())
accuracy_test = pred_idx_test.eq(actions_test_torch).sum().item() / (pred_idx_test.size()[0])
print("[%d/%d] test accuracy: %f" % (i, ite_end, accuracy_test))
def get_train_pairs(batch_size, images, bs_gt, box_size, num_actions, num_regression_output, shape_model, sd):
"""Randomly sample image patches and corresponding ground truth classification and regression outputs.
Args:
batch_size: mini batch size
images: list of img_count images. Each image is [width, height, depth, channel], [x,y,z,channel]
bs_gt: Ground truth shape parameters. [img_count, num_shape_params]
box_size: size of image patch. Scalar.
num_actions: number of classification outputs
num_regression_output: number of regression outputs
shape_model: structure containing shape models
sd: standard deviation of shape model. Bounds from which to sample bs.
Returns:
patches: 2D image patches, [batch_size, box_size, box_size, 3*num_landmarks]
actions: Ground truth classification output. [batch_size, num_actions], each row is a one hot vector [positive or negative for each shape parameter]
dbs: Ground truth regression output. [batch_size, num_regression_output]. dbs = bs - bs_gt.
bs: sampled shape parameters [batch_size, num_regression_output]
"""
img_count = len(images)
num_landmarks = 4
box_r = int((box_size - 1) / 2)
patches = np.zeros((batch_size, box_size, box_size, int(3*num_landmarks)), np.float32)
actions_ind = np.zeros(batch_size, dtype=np.uint16)
actions =
|
np.zeros((batch_size, num_actions), np.float32)
|
numpy.zeros
|
import base64
import io
import cv2
from character_detector import detect
from character_classifier import train_classifier, load_dataset, get_label_for_integer
from math_solver import make_equation, solve
import tensorflow as tf
import numpy as np
from flask import Flask, render_template, request, Response
from PIL import Image
classifier = tf.keras.models.load_model('model')
app = Flask(__name__, instance_relative_config=True)
@app.route('/')
def start():
"""
Initial mapping just for displaying page
:return: render basic template
"""
return render_template("base.html")
@app.route('/script', methods=['POST'])
def solve_equation():
"""
Mapping for receiving image and sending back result
:return: dict
e : classified equation from image
r : result of solving equation
"""
# get string base64 image
encoded_data = request.form['img']
# decode it to bytes
img_data = base64.b64decode(encoded_data)
# get opencv image from bytes
image = Image.open(io.BytesIO(img_data))
img = cv2.cvtColor(np.array(image), cv2.COLOR_BGR2RGB)
# get coordinates and cropped images
coordinates, cropped_images = detect(img)
# classify all characters in cropped images
characters = list()
for img in cropped_images:
int_prediction = np.argmax(classifier.predict(
|
np.expand_dims(img, axis=(0, 3))
|
numpy.expand_dims
|
# -*- coding: utf-8 -*-
"""
Created on Thu Sep 19 10:54:00 2019
@author: Jonathan
"""
# =============================================================================
# Import libraries
# =============================================================================
import numpy as np
import scipy
import scipy.interpolate as interp
import scipy.signal
from scipy.cluster.vq import kmeans2
import matplotlib.pyplot as plt
import matplotlib
import matplotlib.patches as patches
import copy
# =============================================================================
# # ToDo
# =============================================================================
# General stuff
#- Check all the functions help text (type, description etc)
# Functions (Interpolate)
# - Can we use my implementation? not steffen
# Functions (CLUSTERING)
#- twoClusterWeighting
# - Downsampling
# - missing values handling?
# - Edges and going back for the latest samples
# Questions
# When getting the median gaze position (fixation), the interpolated values are ignored.
# Is this intended? If so, what is the point of the interpolation, besides the clustering?
# Not all results are identical to the MATLAB version
# for instance, trial 1 pp2, fix5 and 6 have a slight overlap
# pp2 trials 3, 4 and 5 have a fixation that's too short compared to the MATLAB version (almost the last fixation)
# =============================================================================
# Helper functions
# =============================================================================
def isNumber(s):
try:
np.array(s,dtype=float)
return True
except ValueError:
return False
def checkNumeric(k,v):
assert isNumber(v), 'The value of "{}" is invalid. Expected input to be one of these types:\n\ndouble, single, uint8, uint16, uint32, uint64, int8, int16, int32, int64\n\nInstead its type was {}.'.format(k, type(v))
def checkScalar(k,v):
assert np.ndim(v) == 0, 'The value of "{}" is invalid. Expected input to be a scalar.'.format(k)
def checkNumel2(k,v):
assert np.shape(v) == (2,), 'The value of "{}" is invalid. Expected input to be an array with number of elements equal to 2.'.format(k)
def checkInt(k,v):
assert np.sum(np.array(v)%1) == 0, 'The value of "{}" is invalid. Expected input to be integer-valued.'.format(k)
def checkFun(k, d, s):
assert k in d.keys(), 'I2MCfunc: "{}" must be specified using the "{}" option'.format(s, k)
assert isNumber(d[k]), 'I2MCfunc: "{}" must be set as a number using the "{}" option'.format(s, k)
def angleToPixels(angle, screenDist, screenW, screenXY):
"""
Calculate the number of pixels which equals a specified angle in visual
degrees, given parameters. Calculates the pixels based on the width of
the screen. If the pixels are not square, a separate conversion needs
to be done with the height of the screen.\n
"angleToPixelsWH" returns pixels for width and height.
Parameters
----------
angle : float or int
The angle to convert in visual degrees
screenDist : float or int
Viewing distance in cm
screenW : float or int
The width of the screen in cm
screenXY : tuple, ints
The resolution of the screen (width - x, height - y), pixels
Returns
-------
pix : float
The number of pixels which corresponds to the visual degree in angle,
horizontally
Examples
--------
>>> pix = angleToPixels(1, 75, 47.5, (1920,1080))
>>> pix
52.912377341863817
"""
pixSize = screenW / float(screenXY[0])
angle = np.radians(angle / 2.0)
cmOnScreen = np.tan(angle) * float(screenDist)
pix = (cmOnScreen / pixSize) * 2
return pix
def getMissing(L_X, R_X, missingx, L_Y, R_Y, missingy):
"""
Gets missing data and returns missing data for left, right and average
Parameters
----------
L_X : np.array
Left eye X gaze position data
R_X : np.array
Right eye X gaze position data
missingx : Not defined
The values reflecting missing values for X coordinates in the dataset
L_Y : np.array
Left eye Y gaze position data
R_Y : np.array
Right eye Y gaze position data
missingy : Not defined
The values reflecting missing values for Y coordinates in the dataset
Returns
-------
qLMiss : np.array - Boolean
Boolean with missing values for the left eye
qRMiss : np.array - Boolean
Boolean with missing values for the right eye
qBMiss : np.array - Boolean
Boolean with missing values for both eyes
Examples
--------
>>>
"""
# Get where the missing is
# Left eye
qLMissX = np.logical_or(L_X == missingx, np.isnan(L_X))
qLMissY = np.logical_or(L_Y == missingy, np.isnan(L_Y))
qLMiss = np.logical_and(qLMissX, qLMissY)
# Right
qRMissX = np.logical_or(R_X == missingx, np.isnan(R_X))
qRMissY = np.logical_or(R_Y == missingy, np.isnan(R_Y))
qRMiss = np.logical_and(qRMissX, qRMissY)
# Both eyes
qBMiss = np.logical_and(qLMiss, qRMiss)
return qLMiss, qRMiss, qBMiss
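# Tiny illustrative check of getMissing with made-up toy data (missing flagged
# as -1 or NaN); assumptions: three samples, left eye lost on samples 1 and 2.
_toy_lx = np.array([100., -1., np.nan]); _toy_ly = np.array([200., -1., np.nan])
_toy_rx = np.array([101., 102., np.nan]); _toy_ry = np.array([201., 202., np.nan])
_toy_qL, _toy_qR, _toy_qB = getMissing(_toy_lx, _toy_rx, -1., _toy_ly, _toy_ry, -1.)
# _toy_qL -> [False, True, True]; _toy_qR -> [False, False, True]; _toy_qB -> [False, False, True]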
def averageEyes(L_X, R_X, missingx, L_Y, R_Y, missingy):
"""
Averages data from two eyes. Take one eye if only one was found.
Parameters
----------
L_X : np.array
Left eye X gaze position data
R_X : np.array
Right eye X gaze position data
missingx : Not defined
The values reflecting missing values for X coordinates in the dataset
L_Y : np.array
Left eye Y gaze position data
R_Y : np.array
Right eye Y gaze position data
missingy : Not defined
The values reflecting missing values for Y coordinates in the dataset
Returns
-------
xpos : np.array
The average X gaze position
ypos : np.array
The average Y gaze position
qBMiss : np.array - Boolean
Boolean with missing values for both eyes
qLMiss : np.array - Boolean
Boolean with missing values for the left eye
qRMiss : np.array - Boolean
Boolean with missing values for the right eye
Examples
--------
>>>
"""
xpos = np.zeros(len(L_X))
ypos = np.zeros(len(L_Y))
# get missing
qLMiss, qRMiss, qBMiss = getMissing(L_X, R_X, missingx, L_Y, R_Y, missingy)
q = np.logical_and(np.invert(qLMiss), np.invert(qRMiss))
xpos[q] = (L_X[q] + R_X[q]) / 2.
ypos[q] = (L_Y[q] + R_Y[q]) / 2.
q = np.logical_and(qLMiss, np.invert(qRMiss))
xpos[q] = R_X[q]
ypos[q] = R_Y[q]
q = np.logical_and(np.invert(qLMiss), qRMiss)
xpos[q] = L_X[q]
ypos[q] = L_Y[q]
xpos[qBMiss] = np.nan
ypos[qBMiss] = np.nan
return xpos, ypos, qBMiss, qLMiss, qRMiss
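# Tiny illustrative check of averageEyes with made-up toy data: sample 0 has
# both eyes, sample 1 only the right eye, sample 2 neither (missing = NaN).
_toy_lx = np.array([100., np.nan, np.nan]); _toy_ly = np.array([200., np.nan, np.nan])
_toy_rx = np.array([102., 104., np.nan]); _toy_ry = np.array([202., 204., np.nan])
_toy_xp, _toy_yp, _, _, _ = averageEyes(_toy_lx, _toy_rx, np.nan, _toy_ly, _toy_ry, np.nan)
# _toy_xp -> [101., 104., nan]: average where both eyes exist, single eye otherwise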
def bool2bounds(b):
"""
Finds all contiguous sections of true in a boolean
Parameters
----------
b : np.array
A 1d np.array containing True, False values.
Returns
-------
on : np.array
The array contains the indexes of the first value = True
off : np.array
The array contains the indexes of the last value = True in a sequence
Example
--------
>>> import numpy as np
>>> b = np.array([1, 0, 0, 0, 1, 1, 1, 0, 1, 1, 0])
>>> on, off = bool2bounds(b)
>>> print(on)
[0 4 8]
>>> print(off)
[0 6 9]
"""
b = np.array(np.array(b, dtype=bool), dtype=int)
b = np.pad(b, (1, 1), 'constant', constant_values=(0, 0))
D = np.diff(b)
on = np.array(np.where(D == 1)[0], dtype=int)
off = np.array(np.where(D == -1)[0] -1, dtype=int)
return on, off
def getCluster(b):
'''
Splits a np.array with True, False values into clusters. A cluster is
defined as adjacent points with the same value, e.g. True or False.
The output from this function is used to determine cluster sizes when
running cluster statistics.
Parameters
----------
b : np.array
A 1d np.array containing True, False values.
Returns
-------
clusters : list of np.arrays
The list contains the clusters split up. Each cluster in its own
np.array.
indx : list of np.arrays
The list contains the indexes for each time point in the clusters.
Example
--------
>>>
'''
if b.dtype != 'bool':
b = np.array(b, dtype=bool)
clusters = np.split(b, np.where(np.diff(b.astype(int)) != 0)[0]+1)
indx = np.split(np.arange(len(b)), np.where(np.diff(b.astype(int)) != 0)[0]+1)
size = np.array([len(c) for c in indx])
offC = np.array([np.sum(c) > 0 for c in clusters])
onC = np.invert(offC)
offCluster = [indx[i] for i in range(len(offC)) if offC[i]]
onCluster = [indx[i] for i in range(len(onC)) if onC[i]]
offSize = size[offC]
onSize = size[onC]
missStart = np.array([c[0] for c in offCluster], dtype=int)
missEnd = np.array([c[-1] for c in offCluster], dtype=int)
dataStart = np.array([c[0] for c in onCluster], dtype=int)
dataEnd = np.array([c[-1] for c in onCluster], dtype=int)
return missStart, missEnd, dataStart, dataEnd, onSize, offSize, onCluster, offCluster
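# Tiny illustrative check of getCluster on a made-up missing-data boolean
# (True = missing); assumptions: 5 samples, missing at indices 0-1 and 4.
_toy_miss = np.array([True, True, False, False, True])
_mS, _mE, _dS, _dE, _onSz, _offSz, _onCl, _offCl = getCluster(_toy_miss)
# _mS -> [0, 4], _mE -> [1, 4] (missing clusters); _dS -> [2], _dE -> [3] (data cluster)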
def plotResults(data,fix,res=[1920,1080]):
'''
Plots the results of the I2MC function
'''
time = data['time']
Xdat = np.array([])
Ydat = np.array([])
klr = []
if 'L_X' in data.keys():
Xdat = data['L_X']
Ydat = data['L_Y']
klr.append('g')
if 'R_X' in data.keys():
if len(Xdat) == 0:
Xdat = data['R_X']
Ydat = data['R_Y']
else:
Xdat = np.vstack([Xdat, data['R_X']])
Ydat = np.vstack([Ydat, data['R_Y']])
klr.append('r')
if 'average_X' in data.keys() and not 'L_X' in data.keys() and not 'R_X' in data.keys():
if len(Xdat) == 0:
Xdat = data['average_X']
Ydat = data['average_Y']
else:
Xdat = np.vstack([Xdat, data['average_X']])
Ydat = np.vstack([Ydat, data['average_Y']])
klr.append('b')
# Plot settings
myfontsize = 10
myLabelSize = 12
traceLW = 0.5
fixLWax1 = res[0]/100
fixLWax2 = res[1]/100
font = {'size': myfontsize}
matplotlib.rc('font', **font)
## plot layout
f = plt.figure(figsize=(10, 6), dpi=300)
ax1 = plt.subplot(2,1,1)
ax1.set_ylabel('Horizontal position (pixels)', size = myLabelSize)
ax1.set_xlim([0, time[-1]])
ax1.set_ylim([0, res[0]])
### Plot x position
if len(Xdat.shape) > 1:
for p in range(Xdat.shape[0]):
ax1.plot(time,Xdat[p,:],klr[p]+'-', linewidth = traceLW)
else:
ax1.plot(time,Xdat,klr[0]+'-', linewidth = traceLW)
### Plot Y position
ax2 = plt.subplot(2,1,2,sharex=ax1)
ax2.set_xlabel('Time (ms)')
ax2.set_ylabel('Vertical position (pixels)', size = myLabelSize)
ax2.set_ylim([0, res[1]])
if len(Xdat.shape) > 1:
for p in range(Ydat.shape[0]):
ax2.plot(time,Ydat[p,:],klr[p]+'-', linewidth = traceLW)
else:
ax2.plot(time,Ydat,klr[0]+'-', linewidth = traceLW)
# add fixations, but adds a shaded area instead of line
for b in range(len(fix['startT'])):
ax1.add_patch(patches.Rectangle((fix['startT'][b], fix['xpos'][b] - (fixLWax1/2)),
fix['endT'][b] - fix['startT'][b],
abs(fixLWax1), fill=True, alpha = 0.8, color = 'k',
linewidth = 0, zorder=3))
ax2.add_patch(patches.Rectangle((fix['startT'][b], fix['ypos'][b] - (fixLWax2/2)),
fix['endT'][b] - fix['startT'][b],
abs(fixLWax2), fill=True, alpha = 0.8, color = 'k',
linewidth = 0, zorder=3))
return f
# =============================================================================
# Interpolation functions
# =============================================================================
def findInterpWins(xpos, ypos, missing, windowtime, edgesamples, freq, maxdisp):
"""
Description
Parameters
----------
xpos : np.array
X gaze position
ypos : type
Y gaze position
missing : type
Description
windowtime : float
Time of window to interpolate over in ms
edgesamples : int
Number of samples at window edge used for interpolating in ms
freq : float
Frequency of measurement
maxdisp : float
maximum dispersion in position signal (i.e. if signal is in pixels, provide maxdisp in n pixels)
Returns
-------
missStart : np.array
Indices where missing intervals that can be interpolated start
missEnd : np.array
Indices where missing intervals that can be interpolated end
Examples
--------
>>>
"""
# get indices of where missing intervals start and end
missStart, missEnd = bool2bounds(missing)
dataStart, dataEnd = bool2bounds(np.invert(missing))
#missStart, missEnd, dataStart, dataEnd, onSize, offSize, onCluster, offCluster = getCluster(missing)
# Determine windowsamples
windowsamples = round(windowtime/(1./freq))
# for each candidate, check if have enough valid data at edges to execute
# interpolation. If not, see if merging with adjacent missing is possible
# we don't throw out anything we can't deal with yet, we do that below.
# this is just some preprocessing
k=0 #was K=1 in matlab
while k<len(missStart):
# skip if too long
if missEnd[k]-missStart[k]+1 > windowsamples:
k = k+1
continue
# skip if not enough data at left edge
if np.sum(dataEnd == missStart[k]-1) > 0:
datk = int(np.argwhere(dataEnd==missStart[k]-1))
if dataEnd[datk]-dataStart[datk]+1 < edgesamples:
k = k+1
continue
# if not enough data at right edge, merge with next. Having not enough
# on right edge of this one, means not having enough at left edge of
# next. So both will be excluded always if we don't do anything. So we
# can just merge without further checks. Its ok if it then grows too
# long, as we'll just end up excluding that too below, which is what
# would have happened if we didn't do anything here
datk = np.argwhere(dataStart==missEnd[k]+1)
if len(datk) > 0:
datk = int(datk)
if dataEnd[datk]-dataStart[datk]+1 < edgesamples:
missEnd = np.delete(missEnd, k)
missStart = np.delete(missStart, k)
# don't advance k so we check this one again and grow it further if
# needed
continue
# nothing left to do, continue to next
k = k+1
# mark intervals that are too long to be deleted (only delete later so that
# below checks can use all missing on and offsets)
missDur = missEnd-missStart+1
qRemove = missDur>windowsamples
# for each candidate, check if have enough valid data at edges to execute
# interpolation and check displacement during missing wasn't too large.
# Mark for later removal as multiple missing close together may otherwise
# be wrongly allowed
for p in range(len(missStart)):
# check enough valid data at edges
# missing too close to beginning of data
# previous missing too close
# missing too close to end of data
# next missing too close
if p != (len(missStart)-1):
if missStart[p]<edgesamples+1 or \
(p>0 and missEnd[p-1] > missStart[p]-edgesamples-1) or \
missEnd[p]>len(xpos)-edgesamples or \
(p<len(missStart) and missStart[p+1] < missEnd[p]+edgesamples+1):
qRemove[p] = True
continue
# check displacement, per missing interval
# we want to check per bit of missing, even if multiple bits got merged
# this as single data points can still anchor where the interpolation
# goes and we thus need to check distance per bit, not over the whole
# merged bit
idx = np.arange(missStart[p],missEnd[p]+1, dtype = int)
on,off = bool2bounds(np.isnan(xpos[idx]))
for q in range(len(on)):
lesamps = np.array(on[q]-np.arange(edgesamples)+missStart[p]-1, dtype=int)
resamps = np.array(off[q]+np.arange(edgesamples)+missStart[p]-1, dtype=int)
displacement = np.hypot(np.nanmedian(xpos[resamps])-np.nanmedian(xpos[lesamps]), np.nanmedian(ypos[resamps])-np.nanmedian(ypos[lesamps]))
if displacement > maxdisp:
qRemove[p] = True
break
if qRemove[p]:
continue
# Remove the missing clusters which cannot be interpolated
qRemove = np.where(qRemove)[0]
missStart = np.delete(missStart, qRemove)
missEnd = np.delete(missEnd, qRemove)
# update missing vector
notAllowed = missing.copy()
for s, e in zip(missStart, missEnd):
notAllowed[range(s,e+1)] = False
return missStart,missEnd
def windowedInterpolate(xpos, ypos, missing, missStart, missEnd, edgesamples, dev=False):
"""
Interpolates the missing data, and removes areas which are not allowed
to be interpolated
Parameters
----------
xpos : np.array
X gaze positions
ypos : type
Y gaze positions
missing : np.array
Boolean vector indicating missing values
missStart : np.array
Indices where the missing intervals to interpolate start
missEnd : np.array
Indices where the missing intervals to interpolate end
edgesamples : int
Number of samples at the window edges used to anchor the interpolation
Returns
-------
xpos : np.array
X gaze positions with missing intervals interpolated
ypos : np.array
Y gaze positions with missing intervals interpolated
missingn : np.array
Updated boolean vector of missing values (interpolated samples set to False)
Examples
--------
>>>
"""
missingn = copy.deepcopy(missing)
# Do the interpolating
for p in range(len(missStart)):
# make vector of all samples in this window
outWin = np.arange(missStart[p],missEnd[p]+1)
# get edge samples: where no missing data was observed
# also get samples in window where data was observed
outWinNotMissing = np.invert(missingn[outWin])
validsamps = np.concatenate((outWin[0]+np.arange(-edgesamples,0), outWin[outWinNotMissing], outWin[-1]+np.arange(1,edgesamples+1)))
# get valid values: where no missing data was observed
validx = xpos[validsamps];
validy = ypos[validsamps];
# do Steffen interpolation, update xpos, ypos
xpos[outWin]= steffenInterp(validsamps,validx,outWin)
ypos[outWin]= steffenInterp(validsamps,validy,outWin)
# update missing: hole is now plugged
missingn[outWin] = False
# plot interpolated data before (TODO, we didn't update this...)
if dev:
f, [ax1, ax2] = plt.subplots(2,1)
ax1.plot(newX,xi, 'k-')
ax1.scatter(newX[notMissing], xpos[notMissing], s = 2, color = 'r')
ax1.scatter(newX[missing], xi[missing], s = 25, color = 'b')
ax2.plot(newX,yi, 'k-')
ax2.scatter(newX[notMissing], ypos[notMissing], s = 2, color = 'r')
ax2.scatter(newX[missing], yi[missing], s = 25, color = 'b')
return xpos, ypos, missingn
# =============================================================================
# interpolator
# =============================================================================
def steffenInterp(x, y, xi):
# STEFFEN 1-D Steffen interpolation
# steffenInterp[X,Y,XI] interpolates to find YI, the values of the
# underlying function Y at the points in the array XI, using
# the method of Steffen. X and Y must be vectors of length N.
#
# Steffen's method is based on a third-order polynomial. The
# slope at each grid point is calculated in a way to guarantee
# a monotonic behavior of the interpolating function. The
# curve is smooth up to the first derivative.
# <NAME> - Summer 2014
# edited DC Niehorster - Summer 2015
# <NAME>
# A Simple Method for Monotonic Interpolation in One Dimension
# Astron. Astrophys. 239, 443-450 [1990]
n = len(x)
# calculate slopes
yp = np.zeros(n)
# first point
h1 = x[1] - x[0]
h2 = x[2] - x[1]
s1 = (y[1] - y[0])/h1
s2 = (y[2] - y[1])/h2
p1 = s1*(1 + h1/(h1 + h2)) - s2*h1/(h1 + h2)
if p1*s1 <= 0:
yp[0] = 0
elif np.abs(p1) > 2*np.abs(s1):
yp[0] = 2*s1
else:
yp[0] = p1
# inner points
for i in range(1,n-1):
hi = x[i+1] - x[i]
him1 = x[i] - x[i-1]
si = (y[i+1] - y[i])/hi
sim1 = (y[i] - y[i-1])/him1
pi = (sim1*hi + si*him1)/(him1 + hi)
if sim1*si <= 0:
yp[i] = 0
elif (np.abs(pi) > 2*np.abs(sim1)) or (np.abs(pi) > 2*np.abs(si)):
a = np.sign(sim1)
yp[i] = 2*a*np.min([np.abs(sim1),np.abs(si)])
else:
yp[i] = pi
# last point
hnm1 = x[n-1] - x[n-2]
hnm2 = x[n-2] - x[n-3]
snm1 = (y[n-1] - y[n-2])/hnm1
snm2 = (y[n-2] - y[n-3])/hnm2
pn = snm1*(1 + hnm1/(hnm1 + hnm2)) - snm2*hnm1/(hnm1 + hnm2)
if pn*snm1 <= 0:
yp[n-1] = 0
elif np.abs(pn) > 2*np.abs(snm1):
yp[n-1] = 2*snm1
else:
yp[n-1] = pn
yi = np.zeros(xi.size)
for i in range(len(xi)):
# Find the right place in the table by means of a bisection.
# do this instead of search with find as the below now somehow gets
# better optimized by matlab's JIT [runs twice as fast].
klo = 0 # 0-based indices (klo=1, khi=n in the original MATLAB)
khi = n-1
while khi-klo > 1:
k = int(np.fix((khi+klo)/2.0))
if x[k] > xi[i]:
khi = k
else:
klo = k
# check if requested output is in input, so we can just copy
if xi[i]==x[klo]:
yi[i] = y[klo]
continue
elif xi[i]==x[khi]:
yi[i] = y[khi]
continue
h = x[khi] - x[klo]
s = (y[khi] - y[klo])/h
a = (yp[klo] + yp[khi] - 2*s)/h/h
b = (3*s - 2*yp[klo] - yp[khi])/h
c = yp[klo]
d = y[klo]
t = xi[i] - x[klo]
# Use Horner's scheme for efficient evaluation of polynomials
# y = a*t*t*t + b*t*t + c*t + d
yi[i] = d + t*(c + t*(b + t*a))
return yi
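# Minimal sketch of steffenInterp on made-up toy data: the monotone scheme
# keeps each interpolated value between the two samples that bracket it.
_toy_x = np.arange(6, dtype=float)
_toy_y = np.array([0., 1., 1., 2., 4., 8.])
_toy_yi = steffenInterp(_toy_x, _toy_y, np.array([1.5, 2.5, 4.5]))
# e.g. _toy_yi[0] stays between y[1]=1 and y[2]=1, _toy_yi[1] between 1 and 2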
# =============================================================================
# Clustering functions
# =============================================================================
def twoClusterWeighting(xpos, ypos, missing, downsamples, downsampFilter, chebyOrder, windowtime, steptime, freq, maxerrors, dev=False):
"""
Description
Parameters
----------
xpos : type
Description
ypos : type
Description
missing : type
Description
downsamples : type
Description
downsampFilter : type
Description
chebyOrder : type
Description
windowtime : type
Description
steptime : type
Description
freq : type
Description
maxerrors : type
Description
Returns
-------
finalweights : np.array
Vector of 2-means clustering weights (one weight for each sample), the higher, the more likely a saccade happened
Examples
--------
>>>
"""
# calculate number of samples of the moving window
nrsamples = int(windowtime/(1./freq))
stepsize = np.max([1,int(steptime/(1./freq))])
# create empty weights vector
totalweights = np.zeros(len(xpos))
totalweights[missing] = np.nan
nrtests = np.zeros(len(xpos))
# stopped is always zero, unless maxiterations is exceeded. this
# indicates that file could not be analysed after trying for x iterations
stopped = False
counterrors = 0
# Number of downsamples
nd = len(downsamples)
# Downsample
if downsampFilter:
# filter signal. Follow the lead of decimate(), which first runs a
# Chebychev filter as specified below
rp = .05 # passband ripple in dB
b = [[] for i in range(nd)]
a = [[] for i in range(nd)]
for p in range(nd):
b[p],a[p] = scipy.signal.cheby1(chebyOrder, rp, .8/downsamples[p])
# idx for downsamples
idxs = []
for i in range(nd):
idxs.append(np.arange(nrsamples,0,-downsamples[i],dtype=int)[::-1] - 1)
# see where are missing in this data, for better running over the data
# below.
on,off = bool2bounds(missing)
if on.size > 0:
# merge intervals smaller than nrsamples long
merge = np.argwhere((on[1:] - off[:-1])-1 < nrsamples).flatten()
for p in merge[::-1]:
off[p] = off[p+1]
off = np.delete(off, p+1)
on = np.delete(on, p+1)
# check if intervals at data start and end are large enough
if on[0]<nrsamples+1:
# not enough data point before first missing, so exclude them all
on[0]=0
if off[-1]>(len(xpos)-nrsamples):
# not enough data points after last missing, so exclude them all
off[-1]=len(xpos)-1
# start at first non-missing sample if trial starts with missing (or
# excluded because too short) data
if on[0]==0:
i=off[0]+1 # start at first non-missing
else:
i=0
else:
i=0
eind = i+nrsamples
while eind<=(len(xpos)):
# check if max errors is crossed
if counterrors > maxerrors:
print('Too many empty clusters encountered, aborting file. \n')
stopped = True
finalweights = np.nan
return finalweights, stopped
# select data portion of nrsamples
idx = range(i,eind)
ll_d = [[] for p in range(nd+1)]
IDL_d = [[] for p in range(nd+1)]
ll_d[0] = np.vstack([xpos[idx], ypos[idx]])
# Filter the bit of data we're about to downsample. Then we simply need
# to select each nth sample where n is the integer factor by which
# number of samples is reduced. select samples such that they are till
# end of window
for p in range(nd):
if downsampFilter:
ll_d[p+1] = scipy.signal.filtfilt(b[p],a[p],ll_d[0])
ll_d[p+1] = ll_d[p+1][:,idxs[p]]
else:
ll_d[p+1] = ll_d[0][:,idxs[p]]
# do 2-means clustering
for p in range(nd+1):
IDL_d[p] = kmeans2(ll_d[p].T,2, 10, minit='points')
# If an empty cluster error is encountered, try again next
# iteration. This can occur particularly in long
# fixations, as the number of clusters there should be 1,
# but we try to fit 2 to detect a saccade (i.e. 2 fixations)
# visual explanation of empty cluster errors:
# http://www.ceng.metu.edu.tr/~tcan/ceng465_s1011/Schedule/KMeansEmpty.html
if len(np.unique(IDL_d[p][-1])) != 2:
print('\t\tEmpty cluster error encountered (n={}/100). Trying again on next iteration.'.format(counterrors))
counterrors += 1
continue
# detect switches and weight of switch (= 1/number of switches in
# portion)
switches = [[] for p in range(nd+1)]
switchesw = [[] for p in range(nd+1)]
for p in range(nd+1):
switches[p] = np.abs(np.diff(IDL_d[p][1]))
switchesw[p] = 1./np.sum(switches[p])
# get nearest samples of switch and add weight
weighted = np.hstack([switches[0]*switchesw[0],0])
for p in range(nd):
j = np.array((np.argwhere(switches[p+1]).flatten()+1)*downsamples[p],dtype=int)-1
for o in range(-1,int(downsamples[p])-1):
weighted[j+o] = weighted[j+o] + switchesw[p+1]
# add to totalweights
totalweights[idx] = totalweights[idx] + weighted
# record how many times each sample was tested
nrtests[idx] = nrtests[idx] + 1
# update i
i += stepsize
eind += stepsize
missingOn = np.logical_and(on>=i, on<=eind)
missingOff = np.logical_and(off>=i, off<=eind)
qWhichMiss = np.logical_or(missingOn, missingOff)
if np.sum(qWhichMiss) > 0:
# we have some missing in this window. we don't process windows
# with missing. Move back if we just skipped some samples, or else
# skip whole missing and place start of window and first next
# non-missing.
if on[qWhichMiss][0] == (eind-stepsize):
# continue at first non-missing
i = off[qWhichMiss][0]+1
else:
# we skipped some points, move window back so that we analyze
# up to first next missing point
i = on[qWhichMiss][0]-nrsamples
eind = i+nrsamples
if eind>len(xpos) and eind-stepsize<len(xpos):
# we just exceeded data bound, but previous eind was before end of
# data: we have some unprocessed samples. retreat just enough so we
# process those end samples once
d = eind-len(xpos)
eind = eind-d
i = i-d
# create final weights
finalweights = totalweights/nrtests
return finalweights, stopped
# =============================================================================
# Fixation detection functions
# =============================================================================
def getFixations(finalweights, timestamp, xpos, ypos, missing, par):
"""
Description
Parameters
----------
finalweights : type
2-means clustering weighting
timestamp : np.array
Timestamp from Eyetracker (should be in ms!)
xpos : np.array
Horizontal coordinates from Eyetracker
ypos : np.array
Vertical coordinates from Eyetracker
missing : np.array
Vector containing the booleans for missing values
par : Dictionary containing the following keys and values
cutoffstd : float
Number of std above mean clustering-weight to use as fixation cutoff
onoffsetThresh : float
Threshold (x*MAD of fixation) for walking forward/back for saccade off- and onsets
maxMergeDist : float
Maximum Euclidean distance in pixels between fixations for merging
maxMergeTime : float
Maximum time in ms between fixations for merging
minFixDur : Float
Minimum duration allowed for a fixation
Returns
-------
fix : Dictionary containing the following keys and values
cutoff : float
Cutoff used for fixation detection
start : np.array
Vector with fixation start indices
end : np.array
Vector with fixation end indices
startT : np.array
Vector with fixation start times
endT : np.array
Vector with fixation end times
dur : type
Vector with fixation durations
xpos : np.array
Vector with fixation median horizontal position (one value for each fixation in trial)
ypos : np.array
Vector with fixation median vertical position (one value for each fixation in trial)
flankdataloss : bool
Boolean with 1 for when fixation is flanked by data loss, 0 if not flanked by data loss
fracinterped : float
Fraction of data loss/interpolated data
Examples
--------
>>> fix = getFixations(finalweights,data['time'],xpos,ypos,missing,par)
>>> fix
{'cutoff': 0.1355980099309374,
'dur': array([366.599, 773.2 , 239.964, 236.608, 299.877, 126.637]),
'end': array([111, 349, 433, 508, 600, 643]),
'endT': array([ 369.919, 1163.169, 1443.106, 1693.062, 1999.738, 2142.977]),
'flankdataloss': array([1., 0., 0., 0., 0., 0.]),
'fracinterped': array([0.06363636, 0. , 0. , 0. , 0. ,
0. ]),
'start': array([ 2, 118, 362, 438, 511, 606]),
'startT': array([ 6.685, 393.325, 1206.498, 1459.79 , 1703.116, 2019.669]),
'xpos': array([ 945.936, 781.056, 1349.184, 1243.92 , 1290.048, 1522.176]),
'ypos': array([486.216, 404.838, 416.664, 373.005, 383.562, 311.904])}
"""
### Extract the required parameters
cutoffstd = par['cutoffstd']
onoffsetThresh = par['onoffsetThresh']
maxMergeDist = par['maxMergeDist']
maxMergeTime = par['maxMergeTime']
minFixDur = par['minFixDur']
### first determine cutoff for finalweights
cutoff = np.nanmean(finalweights) + cutoffstd*np.nanstd(finalweights)
### get boolean of fixations
fixbool = finalweights < cutoff
### get indices of where fixations start and end
fixstart, fixend = bool2bounds(fixbool)
### for each fixation start, walk forward until recorded position is below
# a threshold of lambda*MAD away from median fixation position.
# same for each fixation end, but walk backward
for p in range(len(fixstart)):
xFix = xpos[fixstart[p]:fixend[p]]
yFix = ypos[fixstart[p]:fixend[p]]
xmedThis = np.nanmedian(xFix)
ymedThis = np.nanmedian(yFix)
# MAD = median(abs(x_i-median({x}))). For the 2D version, I'm using
# median 2D distance of a point from the median fixation position. Not
# exactly MAD, but makes more sense to me for 2D than city block,
# especially given that we use 2D distance in our walk here
MAD = np.nanmedian(np.hypot(xFix-xmedThis, yFix-ymedThis))
thresh = MAD*onoffsetThresh
# walk until distance less than threshold away from median fixation
# position. No walking occurs when we're already below threshold.
i = fixstart[p]
if i>0: # don't walk when fixation starting at start of data
while np.hypot(xpos[i]-xmedThis,ypos[i]-ymedThis)>thresh:
i = i+1
fixstart[p] = i
# and now fixation end.
i = fixend[p]
if i<len(xpos): # don't walk when fixation ending at end of data
while np.hypot(xpos[i]-xmedThis,ypos[i]-ymedThis)>thresh:
i = i-1;
fixend[p] = i
### get start time, end time,
starttime = timestamp[fixstart]
endtime = timestamp[fixend]
### loop over all fixation candidates in trial, see if should be merged
for p in range(1,len(starttime))[::-1]:
# get median coordinates of fixation
xmedThis = np.median(xpos[fixstart[p]:fixend[p]])
ymedThis = np.median(ypos[fixstart[p]:fixend[p]])
xmedPrev = np.median(xpos[fixstart[p-1]:fixend[p-1]]);
ymedPrev = np.median(ypos[fixstart[p-1]:fixend[p-1]]);
# check if fixations close enough in time and space and thus qualify
# for merging
# The interval between the two fixations is calculated correctly (see
# notes about fixation duration below), I checked this carefully. (Both
# start and end of the interval are shifted by one sample in time, but
# assuming a practically constant sample interval, that's not an issue.)
if starttime[p]- endtime[p-1] < maxMergeTime and \
np.hypot(xmedThis-xmedPrev,ymedThis-ymedPrev) < maxMergeDist:
# merge
fixend[p-1] = fixend[p];
endtime[p-1]= endtime[p];
# delete merged fixation
fixstart = np.delete(fixstart, p)
fixend = np.delete(fixend, p)
starttime = np.delete(starttime, p)
endtime = np.delete(endtime, p)
### beginning and end of fixation must be real data, not interpolated.
# If interpolated, those bit(s) at the edge(s) are excluded from the
# fixation. First throw out fixations that are all missing/interpolated
for p in range(len(starttime))[::-1]:
miss = missing[fixstart[p]:fixend[p]]
if np.sum(miss) == len(miss):
fixstart = np.delete(fixstart, p)
fixend = np.delete(fixend, p)
starttime = np.delete(starttime, p)
endtime = np.delete(endtime, p)
# then check edges and shrink if needed
for p in range(len(starttime)):
if missing[fixstart[p]]:
fixstart[p] = fixstart[p] + np.argmax(np.invert(missing[fixstart[p]:fixend[p]]))
starttime[p]= timestamp[fixstart[p]]
if missing[fixend[p]]:
fixend[p] = fixend[p] - (np.argmax(np.invert(missing[fixstart[p]:fixend[p]][::-1]))+1)
endtime[p] = timestamp[fixend[p]]
### calculate fixation duration
# if you calculate fixation duration by means of time of last sample during
# fixation minus time of first sample during fixation (our fixation markers
# are inclusive), then you always underestimate fixation duration by one
# sample because you're in practice counting to the beginning of the
# sample, not the end of it. To solve this, as end time we need to take the
# timestamp of the sample that is one past the last sample of the fixation.
# so, first calculate fixation duration by simple timestamp subtraction.
fixdur = endtime-starttime
# then determine what duration of this last sample was
nextSamp = np.min(np.vstack([fixend+1,np.zeros(len(fixend),dtype=int)+len(timestamp)-1]),axis=0) # make sure we don't run off the end of the data
extratime = timestamp[nextSamp]-timestamp[fixend]
# if last fixation ends at end of data, we need to determine how long that
# sample is and add that to the end time. Here we simply guess it as the
# duration of previous sample
if not len(fixend)==0 and fixend[-1]==len(timestamp)-1: # first check if there are fixations in the first place, or we'll index into non-existing data
extratime[-1] = np.diff(timestamp[-3:-1])[0]
# now add the duration of the end sample to fixation durations, so we have
# correct fixation durations
fixdur = fixdur+extratime
### check if any fixations are too short
qTooShort = np.argwhere(fixdur<minFixDur)
if len(qTooShort) > 0:
fixstart = np.delete(fixstart, qTooShort)
fixend = np.delete(fixend, qTooShort)
starttime = np.delete(starttime, qTooShort)
endtime = np.delete(endtime, qTooShort)
fixdur = np.delete(fixdur, qTooShort)
### process fixations, get other info about them
xmedian = np.zeros(fixstart.shape) # vector for median
ymedian = np.zeros(fixstart.shape) # vector for median
flankdataloss = np.zeros(fixstart.shape) # vector for whether fixation is flanked by data loss
fracinterped = np.zeros(fixstart.shape) # vector for fraction interpolated
for a in range(len(fixstart)):
idxs = range(fixstart[a],fixend[a])
# get data during fixation
xposf = xpos[idxs]
yposf = ypos[idxs]
# for all calculations below we'll only use data that is not
# interpolated, so only real data
qMiss = missing[idxs]
# get median coordinates of fixation
xmedian[a] = np.median(xposf[np.invert(qMiss)])
ymedian[a] = np.median(yposf[np.invert(qMiss)])
# determine whether fixation is flanked by period of data loss
flankdataloss[a] = (fixstart[a]>0 and missing[fixstart[a]-1]) or (fixend[a]<len(xpos)-1 and missing[fixend[a]+1])
# fraction of data loss during fixation that has been interpolated (does
# not count data that is still lost)
fracinterped[a] = np.sum(np.invert(np.isnan(xposf[qMiss])))/(fixend[a]-fixstart[a]+1)
# store all the results in a dictionary
fix = {}
fix['cutoff'] = cutoff
fix['start'] = fixstart
fix['end'] = fixend
fix['startT'] = starttime
fix['endT'] = endtime
fix['dur'] = fixdur
fix['xpos'] = xmedian
fix['ypos'] = ymedian
fix['flankdataloss'] = flankdataloss
fix['fracinterped'] = fracinterped
return fix
def getFixStats(xpos, ypos, missing, pixperdeg = None, fix = {}):
"""
Description
Parameters
----------
xpos : np.array
X gaze positions
ypos : np.array
Y gaze positions
missing : np.array - Boolean
Vector containing the booleans for missing values (originally, before interpolation!)
pixperdeg : float
Number of pixels per visual degree
fix : Dictionary containing the following keys and values
fstart : np.array
fixation start indices
fend : np.array
fixation end indices
Returns
-------
fix : the fix input dictionary with the following added keys and values
RMSxy : float
RMS of fixation (precision)
BCEA : float
BCEA of fixation (precision)
rangeX : float
max(xpos) - min(xpos) of fixation
rangeY : float
max(ypos) - min(ypos) of fixation
Examples
--------
>>> fix = getFixStats(xpos,ypos,missing,fix,pixperdeg)
>>> fix
{'BCEA': array([0.23148877, 0.23681681, 0.24498942, 0.1571361 , 0.20109245,
0.23703843]),
'RMSxy': array([0.2979522 , 0.23306149, 0.27712236, 0.26264146, 0.28913117,
0.23147076]),
'cutoff': 0.1355980099309374,
'dur': array([366.599, 773.2 , 239.964, 236.608, 299.877, 126.637]),
'end': array([111, 349, 433, 508, 600, 643]),
'endT': array([ 369.919, 1163.169, 1443.106, 1693.062, 1999.738, 2142.977]),
'fixRangeX': array([0.41066299, 0.99860672, 0.66199772, 0.49593727, 0.64628929,
0.81010568]),
'fixRangeY': array([1.58921528, 1.03885955, 1.10576059, 0.94040142, 1.21936613,
0.91263117]),
'flankdataloss': array([1., 0., 0., 0., 0., 0.]),
'fracinterped': array([0.06363636, 0. , 0. , 0. , 0. ,
0. ]),
'start': array([ 2, 118, 362, 438, 511, 606]),
'startT': array([ 6.685, 393.325, 1206.498, 1459.79 , 1703.116, 2019.669]),
'xpos': array([ 945.936, 781.056, 1349.184, 1243.92 , 1290.048, 1522.176]),
'ypos': array([486.216, 404.838, 416.664, 373.005, 383.562, 311.904])}
"""
### Extract the required parameters
fstart = fix['start']
fend = fix['end']
# vectors for precision measures
RMSxy = np.zeros(fstart.shape)
BCEA = np.zeros(fstart.shape)
rangeX = np.zeros(fstart.shape)
rangeY = np.zeros(fstart.shape)
for a in range(len(fstart)):
idxs = range(fstart[a],fend[a])
# get data during fixation
xposf = xpos[idxs]
yposf = ypos[idxs]
# for all calculations below we'll only use data that is not
# interpolated, so only real data
qMiss = missing[idxs]
### calculate RMS
# since its done with diff, don't just exclude missing and treat
# resulting as one continuous vector. replace missing with nan first,
# use left-over values
# Difference x position
xdif = xposf.copy()
xdif[qMiss] = np.nan
xdif = np.diff(xdif)**2;
xdif = xdif[np.invert(np.isnan(xdif))]
# Difference y position
ydif = yposf.copy()
ydif[qMiss] = np.nan
ydif = np.diff(ydif)**2;
ydif = ydif[np.invert(
|
np.isnan(ydif)
|
numpy.isnan
|
"""Methods and classes for validation of the registration procedures"""
from typing import NamedTuple
import numpy as np
from ..._utils import check_is_univariate, _to_grid
class RegistrationScorer():
r"""Cross validation scoring for registration procedures.
It calculates the score of a registration procedure, used to perform
model validation or parameter selection.
Attributes:
eval_points (array_like, optional): Set of points where the
functions are evaluated to obtain a discrete representation and
perform the calculation.
Args:
estimator (Estimator): Registration method estimator. The estimator
should be fitted.
X (:class:`FData <skfda.FData>`): Functional data to be registered.
y (:class:`FData <skfda.FData>`, optional): Functional data target.
If provided should be the same as `X` in general.
Returns:
float: Cross validation score.
Note:
The scorer passes the warpings generated in the registration procedure
to the `score_function` when necessary.
See also:
:class:`~AmplitudePhaseDecomposition`
:class:`~LeastSquares`
:class:`~SobolevLeastSquares`
:class:`~PairwiseCorrelation`
"""
def __init__(self, eval_points=None):
"""Initialize the transformer"""
self.eval_points = eval_points
def __call__(self, estimator, X, y=None):
"""Compute the score of the transformation.
Args:
estimator (Estimator): Registration method estimator. The estimator
should be fitted.
X (:class:`FData <skfda.FData>`): Functional data to be registered.
y (:class:`FData <skfda.FData>`, optional): Functional data target.
If provided should be the same as `X` in general.
Returns:
float: Cross validation score.
"""
if y is None:
y = X
# Register the data
X_reg = estimator.transform(X)
return self.score_function(y, X_reg)
class AmplitudePhaseDecompositionStats(NamedTuple):
r"""Named tuple to store the values of the amplitude-phase decomposition.
Values of the amplitude phase decomposition computed in
:func:`mse_r_squared`, returned when `return_stats` is `True`.
Args:
r_square (float): Squared correlation index :math:`R^2`.
mse_amp (float): Mean square error of amplitude
:math:`\text{MSE}_{amp}`.
mse_pha (float): Mean square error of phase :math:`\text{MSE}_{pha}`.
c_r (float): Constant :math:`C_R`.
"""
r_squared: float
mse_amp: float
mse_pha: float
c_r: float
class AmplitudePhaseDecomposition(RegistrationScorer):
r"""Compute mean square error measures for amplitude and phase variation.
Once the registration has taken place, this function computes two mean
squared error measures, one for amplitude variation, and the other for
phase variation, and returns a squared multiple correlation index
of the amount of variation in the unregistered functions that is due to phase.
Let :math:`x_i(t),y_i(t)` be the unregistered and registered functions
respectively. The total mean square error measure (see [RGS09-8-5]_) is
defined as
.. math::
\text{MSE}_{total}=
\frac{1}{N}\sum_{i=1}^{N}\int[x_i(t)-\overline x(t)]^2dt
The measures of amplitude and phase mean square error are
.. math::
\text{MSE}_{amp} = C_R \frac{1}{N}
\sum_{i=1}^{N} \int \left [ y_i(t) - \overline{y}(t) \right ]^2 dt
.. math::
\text{MSE}_{phase}=
\int \left [C_R \overline{y}^2(t) - \overline{x}^2(t) \right]dt
where the constant :math:`C_R` is defined as
.. math::
C_R = 1 + \frac{\frac{1}{N}\sum_{i}^{N}\int [Dh_i(t)-\overline{Dh}(t)]
[ y_i^2(t)- \overline{y^2}(t) ]dt}
{\frac{1}{N} \sum_{i}^{N} \int y_i^2(t)dt}
whose structure is related to the covariation between the deformation
functions :math:`Dh_i(t)` and the squared registered functions
:math:`y_i^2(t)`. When these two sets of functions are independent
:math:`C_R=1`, as in the case of shift registration.
The total mean square error is decomposed into the two sources of
variability.
.. math::
\text{MSE}_{total} = \text{MSE}_{amp} + \text{MSE}_{phase}
The squared multiple correlation index of the proportion of the total
variation due to phase is defined as:
.. math::
R^2 = \frac{\text{MSE}_{phase}}{\text{MSE}_{total}}
See [KR08-3]_ for a detailed explanation.
Attributes:
return_stats (boolean, optional): If `true` returns a named tuple
with four values: :math:`R^2`, :math:`MSE_{amp}`, :math:`MSE_{pha}`
and :math:`C_R`. Otherwise the squared correlation index
:math:`R^2` is returned. Default `False`.
eval_points (array_like, optional): Set of points where the
functions are evaluated to obtain a discrete representation and
perform the calculation.
Args:
estimator (RegistrationTransformer): Registration transformer.
X (:class:`FData`): Unregistered functions.
y (:class:`FData`, optional): Target data, generally the same as X. By
default 'None', which uses `X` as target.
Returns:
(float or :class:`NamedTuple <typing.NamedTuple>`): squared correlation
index :math:`R^2` if `return_stats` is `False`. Otherwise a named
tuple containing:
* `r_squared`: Squared correlation index :math:`R^2`.
* `mse_amp`: Mean square error of amplitude
:math:`\text{MSE}_{amp}`.
* `mse_pha`: Mean square error of phase :math:`\text{MSE}_{pha}`.
* `c_r`: Constant :math:`C_R`.
Raises:
ValueError: If the functional data is not univariate.
References:
.. [KR08-3] <NAME> & <NAME>. (2008). Quantifying
amplitude and phase variation. In *Combining Registration and
Fitting for Functional Models* (pp. 14-15). Journal of the American
Statistical Association.
.. [RGS09-8-5] <NAME>., <NAME> & <NAME> (2009). In
*Functional Data Analysis with R and Matlab* (pp. 125-126).
Springer.
Examples:
Calculate the score of the shift registration of a sinusoidal process
synthetically generated.
>>> from skfda.preprocessing.registration.validation import \
... AmplitudePhaseDecomposition
>>> from skfda.preprocessing.registration import ShiftRegistration
>>> from skfda.datasets import make_sinusoidal_process
>>> X = make_sinusoidal_process(error_std=0, random_state=0)
Fit the registration procedure.
>>> shift_registration = ShiftRegistration()
>>> shift_registration.fit(X)
ShiftRegistration(...)
Compute the :math:`R^2` correlation index
>>> scorer = AmplitudePhaseDecomposition()
>>> score = scorer(shift_registration, X)
>>> round(score, 3)
0.972
It is also possible to get all the values of the decomposition.
>>> scorer = AmplitudePhaseDecomposition(return_stats=True)
>>> stats = scorer(shift_registration, X)
>>> round(stats.r_squared, 3)
0.972
>>> round(stats.mse_amp, 3)
0.007
>>> round(stats.mse_pha, 3)
0.227
>>> round(stats.c_r, 3)
1.0
See also:
:class:`~LeastSquares`
:class:`~SobolevLeastSquares`
:class:`~PairwiseCorrelation`
"""
def __init__(self, return_stats=False, eval_points=None):
"""Initialize the transformer"""
super().__init__(eval_points)
self.return_stats = return_stats
def __call__(self, estimator, X, y=None):
"""Compute the score of the transformation.
Args:
estimator (Estimator): Registration method estimator. The estimator
should be fitted.
X (:class:`FData <skfda.FData>`): Functional data to be registered.
y (:class:`FData <skfda.FData>`, optional): Functional data target.
If provided should be the same as `X` in general.
Returns:
float: Cross validation score.
"""
if y is None:
y = X
# Register the data
X_reg = estimator.transform(X)
# Pass the warpings if are generated in the transformer
if hasattr(estimator, 'warping_'):
return self.score_function(y, X_reg, warping=estimator.warping_)
else:
return self.score_function(y, X_reg)
def score_function(self, X, y, *, warping=None):
"""Compute the score of the transformation performed.
Args:
X (FData): Original functional data.
y (FData): Functional data registered.
Returns:
float: Score of the transformation.
"""
from scipy.integrate import simps
check_is_univariate(X)
check_is_univariate(y)
if len(y) != len(X):
raise ValueError(f"the registered and unregistered curves must have "
f"the same number of samples ({len(y)})!=({len(X)})")
if warping is not None and len(warping) != len(X):
raise ValueError(f"The registered curves and the warping functions "
f"must have the same number of samples "
f"({len(X)})!=({len(warping)})")
# Creates the mesh to discretize the functions
if self.eval_points is None:
try:
eval_points = y.grid_points[0]
except AttributeError:
nfine = max(y.basis.n_basis * 10 + 1, 201)
eval_points = np.linspace(*y.domain_range[0], nfine)
else:
eval_points = np.asarray(self.eval_points)
x_fine = X.evaluate(eval_points)[..., 0]
y_fine = y.evaluate(eval_points)[..., 0]
mu_fine = x_fine.mean(axis=0) # Mean unregistered function
eta_fine = y_fine.mean(axis=0) # Mean registered function
mu_fine_sq = np.square(mu_fine)
eta_fine_sq = np.square(eta_fine)
# Total mean square error of the original functions
# mse_total = scipy.integrate.simps(
# np.mean(np.square(x_fine - mu_fine), axis=0),
# eval_points)
cr = 1. # Constant related to the covariation between the deformation
# functions and y^2
# If the warping functions are not provided, they are assumed independent
if warping is not None:
# Derivatives of the warping functions
warping_deriv = warping.derivative()
dh_fine = warping_deriv(eval_points)[..., 0]
dh_fine_mean = dh_fine.mean(axis=0)
dh_fine_center = dh_fine - dh_fine_mean
y_fine_sq = np.square(y_fine) # y^2
y_fine_sq_center = np.subtract(y_fine_sq, eta_fine_sq) # y^2-E[y2]
covariate = np.inner(dh_fine_center.T, y_fine_sq_center.T)
covariate = covariate.mean(axis=0)
cr += np.divide(simps(covariate, eval_points),
simps(eta_fine_sq, eval_points))
# mse due to phase variation
mse_pha = simps(cr * eta_fine_sq - mu_fine_sq, eval_points)
# mse due to amplitude variation
# mse_amp = mse_total - mse_pha
y_fine_center = np.subtract(y_fine, eta_fine)
y_fine_center_sq = np.square(y_fine_center, out=y_fine_center)
y_fine_center_sq_mean = y_fine_center_sq.mean(axis=0)
mse_amp = simps(y_fine_center_sq_mean, eval_points)
# Total mean square error of the original functions
mse_total = mse_pha + mse_amp
# squared correlation measure of proportion of phase variation
rsq = mse_pha / (mse_total)
if self.return_stats is True:
stats = AmplitudePhaseDecompositionStats(rsq, mse_amp, mse_pha, cr)
return stats
return rsq
class LeastSquares(AmplitudePhaseDecomposition):
r"""Cross-validated measure of the registration procedure.
Computes a cross-validated measure of the level of synchronization
[James07]_:
.. math::
ls=1 - \frac{1}{N} \sum_{i=1}^{N} \frac{\int\left(\tilde{f}_{i}(t)-
\frac{1}{N-1} \sum_{j \neq i} \tilde{f}_{j}(t)\right)^{2} dt}{\int
\left(f_{i}(t)-\frac{1}{N-1} \sum_{j \neq i} f_{j}(t)\right)^{2} dt}
where :math:`f_i` and :math:`\tilde f_i` are the original and the
registered data respectively.
The :math:`ls` measures the total cross-sectional variance of the aligned
functions, relative to the original value.
A value of :math:`1` would indicate an identical shape for all registered
curves, while zero corresponds to no improvement in the synchronization. It
can be negative because the model can be arbitrarily worse.
Attributes:
eval_points (array_like, optional): Set of points where the
functions are evaluated to obtain a discrete representation and
perform the calculation.
Args:
estimator (RegistrationTransformer): Registration transformer.
X (:class:`FData <skfda.FData>`): Original functional data.
y (:class:`FData <skfda.FData>`): Registered functional data.
Note:
The original least square measure used in [S11-5-2-1]_ is defined as
:math:`1 - ls`, but has been modified according to the scikit-learn
scorers, where higher values correspond to better cross-validated
measures.
References:
.. [James07] <NAME>. Curve alignments by moments. Annals of Applied
Statistics, 1(2):480–501, 2007.
.. [S11-5-2-1] <NAME> et. al. Registration of Functional Data
Using Fisher-Rao Metric (2011). In *Comparisons with other Methods*
(p. 18). arXiv:1103.3817v2.
Examples:
Calculate the score of the shift registration of a sinusoidal process
synthetically generated.
>>> from skfda.preprocessing.registration.validation import \
... LeastSquares
>>> from skfda.preprocessing.registration import ShiftRegistration
>>> from skfda.datasets import make_sinusoidal_process
>>> X = make_sinusoidal_process(error_std=0, random_state=0)
Fit the registration procedure.
>>> shift_registration = ShiftRegistration()
>>> shift_registration.fit(X)
ShiftRegistration(...)
Compute the least squares score.
>>> scorer = LeastSquares()
>>> score = scorer(shift_registration, X)
>>> round(score, 3)
0.796
See also:
:class:`~AmplitudePhaseDecomposition`
:class:`~SobolevLeastSquares`
:class:`~PairwiseCorrelation`
"""
def score_function(self, X, y):
"""Compute the score of the transformation performed.
Args:
X (FData): Original functional data.
y (FData): Functional data registered.
Returns:
float: Score of the transformation.
"""
from ...misc.metrics import pairwise_distance, lp_distance
check_is_univariate(X)
check_is_univariate(y)
X, y = _to_grid(X, y, eval_points=self.eval_points)
        # Instead of computing f_i - 1/(N-1) sum_{j != i} f_j for each i = 1 ... N,
        # we use (1 + 1/(N-1)) f_i - 1/(N-1) sum_{j=1...N} f_j =
        # (1 + 1/(N-1)) f_i - N/(N-1) mean(f) =
        # C1 * f_i - C2 * mean(f) for each i = 1 ... N
        # (a numeric check of this identity is sketched after this method)
N = len(X)
C1 = 1 + 1 / (N - 1)
C2 = N / (N - 1)
X = C1 * X
y = C1 * y
mean_X = C2 * X.mean()
mean_y = C2 * y.mean()
# Compute distance to mean
distance = pairwise_distance(lp_distance)
ls_x = distance(X, mean_X).flatten()
ls_y = distance(y, mean_y).flatten()
# Quotient of distance
quotient = ls_y / ls_x
return 1 - 1. / N * quotient.sum()
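# Hypothetical numeric check (not used by the class) of the identity applied
# in ``score_function`` above: for each i,
#     f_i - 1/(N-1) * sum_{j != i} f_j  ==  C1 * f_i - C2 * mean(f)
# with C1 = 1 + 1/(N-1) and C2 = N/(N-1).
def _leave_one_out_identity_check():
    import numpy as np
    rng = np.random.default_rng(0)
    f = rng.normal(size=(5, 7))  # N = 5 toy "curves" sampled at 7 points
    N = len(f)
    C1 = 1 + 1 / (N - 1)
    C2 = N / (N - 1)
    lhs = np.array([f[i] - (f.sum(axis=0) - f[i]) / (N - 1) for i in range(N)])
    rhs = C1 * f - C2 * f.mean(axis=0)
    assert np.allclose(lhs, rhs)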
class SobolevLeastSquares(RegistrationScorer):
r"""Cross-validated measure of the registration procedure.
Computes a cross-validated measure of the level of synchronization
[S11-5-2-3]_:
.. math::
sls=1 - \frac{\sum_{i=1}^{N} \int\left(\dot{\tilde{f}}_{i}(t)-
\frac{1}{N} \sum_{j=1}^{N} \dot{\tilde{f}}_{j}\right)^{2} dt}
{\sum_{i=1}^{N} \int\left(\dot{f}_{i}(t)-\frac{1}{N} \sum_{j=1}^{N}
\dot{f}_{j}\right)^{2} dt}
    where :math:`\dot f_i` and :math:`\dot{\tilde f}_i` are the derivatives of
the original and the registered data respectively.
This criterion measures the total cross-sectional variance of the
derivatives of the aligned functions, relative to the original value.
A value of :math:`1` would indicate an identical shape for all registered
curves, while zero corresponds to no improvement in the registration. It
can be negative because the model can be arbitrarily worse.
Attributes:
eval_points (array_like, optional): Set of points where the
functions are evaluated to obtain a discrete representation and
perform the calculation.
Args:
estimator (RegistrationTransformer): Registration transformer.
X (:class:`FData <skfda.FData>`): Original functional data.
y (:class:`FData <skfda.FData>`): Registered functional data.
Note:
        The original Sobolev least squares measure used in [S11-5-2-3]_ is
defined as :math:`1 - sls`, but has been modified according to the
scikit-learn scorers, where higher values correspond to better
cross-validated measures.
References:
        .. [S11-5-2-3] Srivastava, Anuj et al. Registration of Functional Data
Using Fisher-Rao Metric (2011). In *Comparisons with other Methods*
(p. 18). arXiv:1103.3817v2.
Examples:
Calculate the score of the shift registration of a sinusoidal process
synthetically generated.
>>> from skfda.preprocessing.registration.validation import \
... SobolevLeastSquares
>>> from skfda.preprocessing.registration import ShiftRegistration
>>> from skfda.datasets import make_sinusoidal_process
>>> X = make_sinusoidal_process(error_std=0, random_state=0)
Fit the registration procedure.
>>> shift_registration = ShiftRegistration()
>>> shift_registration.fit(X)
ShiftRegistration(...)
        Compute the Sobolev least squares score.
>>> scorer = SobolevLeastSquares()
>>> score = scorer(shift_registration, X)
>>> round(score, 3)
0.761
See also:
:class:`~AmplitudePhaseDecomposition`
:class:`~LeastSquares`
:class:`~PairwiseCorrelation`
"""
def score_function(self, X, y):
"""Compute the score of the transformation performed.
Args:
X (FData): Original functional data.
y (FData): Functional data registered.
Returns:
float: Score of the transformation.
"""
from ...misc.metrics import pairwise_distance, lp_distance
check_is_univariate(X)
check_is_univariate(y)
# Compute derivative
X = X.derivative()
y = y.derivative()
# Discretize if needed
X, y = _to_grid(X, y, eval_points=self.eval_points)
# L2 distance to mean
distance = pairwise_distance(lp_distance)
sls_x = distance(X, X.mean())
sls_y = distance(y, y.mean())
return 1 - sls_y.sum() / sls_x.sum()
class PairwiseCorrelation(RegistrationScorer):
r"""Cross-validated measure of pairwise correlation between functions.
Computes a cross-validated pairwise correlation between functions
to compare registration methods [S11-5-2-2]_ :
.. math::
pc=\frac{\sum_{i \neq j} \operatorname{cc}\left(\tilde{f}_{i}(t),
\tilde{f}_{j}(t)\right)}{\sum_{i \neq j}
\operatorname{cc}\left(f_{i}(t), f_{j}(t)\right)}
where :math:`f_i` and :math:`\tilde f_i` are the original and registered
data respectively and :math:`cc(f, g)` is the pairwise Pearson’s
correlation between functions.
The larger the value of :math:`pc`, the better the alignment between
functions in general.
Attributes:
eval_points (array_like, optional): Set of points where the
functions are evaluated to obtain a discrete representation and
perform the calculation.
Args:
estimator (RegistrationTransformer): Registration transformer.
X (:class:`FData <skfda.FData>`): Original functional data.
y (:class:`FData <skfda.FData>`): Registered functional data.
Note:
        Pearson’s correlation between functions is calculated assuming
        the samples are equispaced.
References:
        .. [S11-5-2-2] Srivastava, Anuj et al. Registration of Functional Data
Using Fisher-Rao Metric (2011). In *Comparisons with other Methods*
(p. 18). arXiv:1103.3817v2.
Examples:
Calculate the score of the shift registration of a sinusoidal process
synthetically generated.
>>> from skfda.preprocessing.registration.validation import \
... PairwiseCorrelation
>>> from skfda.preprocessing.registration import ShiftRegistration
>>> from skfda.datasets import make_sinusoidal_process
>>> X = make_sinusoidal_process(error_std=0, random_state=0)
Fit the registration procedure.
>>> shift_registration = ShiftRegistration()
>>> shift_registration.fit(X)
ShiftRegistration(...)
Compute the pairwise correlation score.
>>> scorer = PairwiseCorrelation()
>>> score = scorer(shift_registration, X)
>>> round(score, 3)
1.816
See also:
:class:`~AmplitudePhaseDecomposition`
:class:`~LeastSquares`
:class:`~SobolevLeastSquares`
"""
def score_function(self, X, y):
"""Compute the score of the transformation performed.
Args:
X (FData): Original functional data.
y (FData): Functional data registered.
Returns:
float: Score of the transformation.
"""
check_is_univariate(X)
check_is_univariate(y)
# Discretize functional data if needed
X, y = _to_grid(X, y, eval_points=self.eval_points)
        # Compute correlation matrices with zeros in the diagonal
        # np.corrcoef computes the correlation between vectors, without
        # weighting by the sample points
        # (a standalone sketch of the full ratio follows this snippet)
X_corr = np.corrcoef(X.data_matrix[..., 0])
|
np.fill_diagonal(X_corr, 0.)
|
numpy.fill_diagonal
|
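# Standalone sketch (hypothetical data, not part of the class above) of the
# pairwise-correlation ratio: correlation matrices of the registered and the
# original samples with their diagonals zeroed via np.fill_diagonal, followed
# by the ratio of their sums. ``x_orig`` and ``x_reg`` are assumed to be
# (n_samples, n_points) arrays sharing a common underlying signal.
import numpy as np

rng = np.random.default_rng(0)
t = np.linspace(0, 1, 50)
x_orig = np.sin(2 * np.pi * t) + 0.5 * rng.normal(size=(4, 50))  # noisy originals
x_reg = np.sin(2 * np.pi * t) + 0.1 * rng.normal(size=(4, 50))   # "registered" stand-in

X_corr = np.corrcoef(x_orig)
np.fill_diagonal(X_corr, 0.)
y_corr = np.corrcoef(x_reg)
np.fill_diagonal(y_corr, 0.)

pc = y_corr.sum() / X_corr.sum()  # larger values suggest better alignment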
import unittest
import scipy
import pysal.lib
import numpy as np
from pysal.model.spreg import error_sp_regimes as SP
from pysal.model.spreg.error_sp import GM_Error, GM_Endog_Error, GM_Combo
from pysal.lib.common import RTOL
class TestGM_Error_Regimes(unittest.TestCase):
def setUp(self):
db=pysal.lib.io.open(pysal.lib.examples.get_path("columbus.dbf"),"r")
y = np.array(db.by_col("CRIME"))
self.y = np.reshape(y, (49,1))
X = []
X.append(db.by_col("HOVAL"))
X.append(db.by_col("INC"))
self.X = np.array(X).T
self.w = pysal.lib.weights.Queen.from_shapefile(pysal.lib.examples.get_path("columbus.shp"))
self.w.transform = 'r'
self.r_var = 'NSA'
self.regimes = db.by_col(self.r_var)
X1 = []
X1.append(db.by_col("INC"))
self.X1 = np.array(X1).T
yd = []
yd.append(db.by_col("HOVAL"))
self.yd = np.array(yd).T
q = []
q.append(db.by_col("DISCBD"))
self.q = np.array(q).T
        # Artificial:
n = 256
self.n2 = n//2
self.x_a1 = np.random.uniform(-10,10,(n,1))
self.x_a2 = np.random.uniform(1,5,(n,1))
self.q_a = self.x_a2 + np.random.normal(0,1,(n,1))
self.x_a = np.hstack((self.x_a1,self.x_a2))
self.y_a = np.dot(np.hstack((np.ones((n,1)),self.x_a)),np.array([[1],[0.5],[2]])) + np.random.normal(0,1,(n,1))
latt = int(np.sqrt(n))
self.w_a = pysal.lib.weights.util.lat2W(latt,latt)
self.w_a.transform='r'
        self.regi_a = [0]*(n//2) + [1]*(n//2) ## lengths must be integers (floor division)!
self.w_a1 = pysal.lib.weights.util.lat2W(latt//2,latt)
self.w_a1.transform='r'
def test_model(self):
reg = SP.GM_Error_Regimes(self.y, self.X, self.regimes, self.w)
betas = np.array([[ 63.3443073 ],
[ -0.15468 ],
[ -1.52186509],
[ 61.40071412],
[ -0.33550084],
[ -0.85076108],
[ 0.38671608]])
np.testing.assert_allclose(reg.betas,betas,RTOL)
u = np.array([-2.06177251])
np.testing.assert_allclose(reg.u[0],u,RTOL)
predy = np.array([ 17.78775251])
np.testing.assert_allclose(reg.predy[0],predy,RTOL)
n = 49
np.testing.assert_allclose(reg.n,n,RTOL)
k = 6
np.testing.assert_allclose(reg.k,k,RTOL)
y = np.array([ 15.72598])
np.testing.assert_allclose(reg.y[0],y,RTOL)
x = np.array([[ 0. , 0. , 0. , 1. , 80.467003, 19.531 ]])
np.testing.assert_allclose(reg.x[0].toarray(),x,RTOL)
e = np.array([ 1.40747232])
np.testing.assert_allclose(reg.e_filtered[0],e,RTOL)
my = 35.128823897959187
np.testing.assert_allclose(reg.mean_y,my,RTOL)
sy = 16.732092091229699
np.testing.assert_allclose(reg.std_y,sy,RTOL)
vm = np.array([ 50.55875289, -0.14444487, -2.05735489, 0. ,
0. , 0. ])
np.testing.assert_allclose(reg.vm[0],vm,RTOL)
sig2 = 102.13050615267227
np.testing.assert_allclose(reg.sig2,sig2,RTOL)
pr2 = 0.5525102200608539
np.testing.assert_allclose(reg.pr2,pr2,RTOL)
std_err = np.array([ 7.11046784, 0.21879293, 0.58477864, 7.50596504, 0.10800686,
0.57365981])
|
np.testing.assert_allclose(reg.std_err,std_err,RTOL)
|
numpy.testing.assert_allclose
|
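# Brief, hypothetical illustration (separate from the test case above) of the
# assertion used throughout it: np.testing.assert_allclose compares two arrays
# element-wise within a relative tolerance and raises AssertionError otherwise.
import numpy as np

expected = np.array([1.0, 2.0, 3.0])
computed = expected * (1 + 1e-9)
np.testing.assert_allclose(computed, expected, rtol=1e-7)  # passes silently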
"""
control method for one patient
to retrieve the fitness value from
the k number of features and pre-ictal period
"""
import os
import numpy as np
from Utils import getLabelsForSeizure
from Utils import removeConstantFeatures
from Utils import removeRedundantFeatures
from Utils import filterFeatureSelectionCorr
from Utils import filterFeatureSelectionAUC
from sklearn import preprocessing
from sklearn.feature_selection import RFE
from sklearn.svm import SVR
from sklearn.ensemble import RandomForestClassifier
from sklearn.datasets import make_classification
from sklearn.metrics import confusion_matrix
from Utils import specificity
from Utils import sensitivity
import time
# silence warnings raised by the np.delete function
import warnings
warnings.filterwarnings("ignore")
# where the data is
# go back to data folder
os.chdir("..")
os.chdir("..")
os.chdir("Data")
os.chdir("Processed_data")
path=os.getcwd()
# go to where the data is
os.chdir(path)
patient_id=1321803
pre_ictal=35
k_features=10
def calculateFitness(patient_id,k_features,pre_ictal):
t = time.process_time()
    contador = 0  # counter
# load training seizures
seizure_1_data=np.load("pat"+str(patient_id)+"_seizure1_featureMatrix.npy");
seizure_2_data=np.load("pat"+str(patient_id)+"_seizure2_featureMatrix.npy");
seizure_3_data=np.load("pat"+str(patient_id)+"_seizure3_featureMatrix.npy");
    # remove ictal data using the last row (class label):
    # ictal samples are labeled 2
    # inter-ictal samples are labeled 0
seizure_1_data=seizure_1_data[:,(np.where(seizure_1_data[-1,:]==0)[0])]
seizure_2_data=seizure_2_data[:,(np.where(seizure_2_data[-1,:]==0)[0])]
seizure_3_data=seizure_3_data[:,(np.where(seizure_3_data[-1,:]==0)[0])]
performance_values=0
    # 3-fold cross-validation over seizures: each seizure is used once for testing
for k in range(0,3):
#seizure_1 for testing, seizure_2 and seizure_3 for training
if k==0:
# removing the class label for the feature vector
testing_features=seizure_1_data[0:-1,:]
            # retrieving the testing labels
testing_labels=getLabelsForSeizure(testing_features,pre_ictal)
# removing the class label for the feature vector, for one seizure
training_features_1=seizure_2_data[0:-1,:]
            # retrieving the training labels for one seizure
training_labels_1=getLabelsForSeizure(training_features_1,pre_ictal)
# removing the class label for the feature vector, for one seizure
training_features_2=seizure_3_data[0:-1,:]
            # retrieving the training labels for one seizure
training_labels_2=getLabelsForSeizure(training_features_2,pre_ictal)
            # concatenate the training features and training labels from both seizures
training_features=np.concatenate([training_features_1, training_features_2], axis=1)
training_labels=np.concatenate([training_labels_1, training_labels_2], axis=1)
del training_features_1
del training_features_2
del training_labels_1
del training_labels_2
#seizure_2 for testing, seizure_1 and seizure_3 for training
elif k==1:
# removing the class label for the feature vector
testing_features=seizure_2_data[0:-1,:]
            # retrieving the testing labels
testing_labels=getLabelsForSeizure(testing_features,pre_ictal)
# removing the class label for the feature vector, for one seizure
training_features_1=seizure_1_data[0:-1,:]
            # retrieving the training labels for one seizure
training_labels_1=getLabelsForSeizure(training_features_1,pre_ictal)
# removing the class label for the feature vector, for one seizure
training_features_2=seizure_3_data[0:-1,:]
            # retrieving the training labels for one seizure
training_labels_2=getLabelsForSeizure(training_features_2,pre_ictal)
            # concatenate the training features and training labels from both seizures
training_features=np.concatenate([training_features_1, training_features_2], axis=1)
training_labels=np.concatenate([training_labels_1, training_labels_2], axis=1)
del training_features_1
del training_features_2
del training_labels_1
del training_labels_2
#seizure_3 for testing, seizure_1 and seizure_2 for training
elif k==2:
# removing the class label for the feature vector
testing_features=seizure_3_data[0:-1,:]
            # retrieving the testing labels
testing_labels=getLabelsForSeizure(testing_features,pre_ictal)
# removing the class label for the feature vector, for one seizure
training_features_1=seizure_1_data[0:-1,:]
            # retrieving the training labels for one seizure
training_labels_1=getLabelsForSeizure(training_features_1,pre_ictal)
# removing the class label for the feature vector, for one seizure
training_features_2=seizure_2_data[0:-1,:]
            # retrieving the training labels for one seizure
training_labels_2=getLabelsForSeizure(training_features_2,pre_ictal)
            # concatenate the training features and training labels from both seizures
training_features=np.concatenate([training_features_1, training_features_2], axis=1)
training_labels=np.concatenate([training_labels_1, training_labels_2], axis=1)
del training_features_1
del training_features_2
del training_labels_1
del training_labels_2
        # we transpose the feature matrices to shape (samples, features)
        # (a toy shape illustration follows this snippet)
training_features=np.transpose(training_features)
testing_features=
|
np.transpose(testing_features)
|
numpy.transpose
|
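# Hypothetical toy illustration of the fold construction used above: feature
# matrices are stored as (features, samples), the two training seizures are
# concatenated along axis=1, and the result is transposed to
# (samples, features) before being handed to the classifier.
import numpy as np

seizure_a = np.arange(12).reshape(3, 4)      # 3 features, 4 samples
seizure_b = np.arange(12, 24).reshape(3, 4)  # 3 features, 4 samples

training_features = np.concatenate([seizure_a, seizure_b], axis=1)  # (3, 8)
training_features = np.transpose(training_features)                 # (8, 3)
assert training_features.shape == (8, 3)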
from __future__ import print_function
import torch.utils.data as data
from PIL import Image
import os, time
import os.path
from multiprocessing import Pool
from functools import partial
from gibson.core.render.profiler import Profiler
import errno
import torch
import json
import codecs
import cv2
import numpy as np
import ctypes as ct
import sys
from tqdm import *
import torchvision.transforms as transforms
import argparse
import json
from numpy.linalg import inv
import pickle
from gibson import assets
IMG_EXTENSIONS = [
'.jpg', '.JPG', '.jpeg', '.JPEG',
'.png', '.PNG', '.ppm', '.PPM', '.bmp', '.BMP',
]
def is_image_file(filename):
return any(filename.endswith(extension) for extension in IMG_EXTENSIONS)
def default_loader(path):
## Heavy usage
img = cv2.cvtColor(cv2.imread(path), cv2.COLOR_BGR2RGB)#.convert('RGB')
#img = Image.open(path)
return img
def depth_loader(path):
## Heavy usage
## TODO: Image.open for depth image is main data loading bottleneck
#img = cv2.imread(path, cv2.IMREAD_GRAYSCALE)#.convert('I')
img = Image.open(path)
return img
def get_model_path(model_id):
data_path = os.path.join(os.path.dirname(os.path.abspath(assets.__file__)), 'dataset')
assert (model_id in os.listdir(data_path)) or model_id == 'stadium', "Model {} does not exist".format(model_id)
return os.path.join(data_path, model_id)
def get_item_fn(inds, select, root, loader, transform, off_3d, target_transform, depth_trans, off_pc_render, dll, train, require_rgb):
""" Functional programming version of Dataset.__getitem__
The advantage is that it is pickle-friendly and supports python multiprocessing
Argument:
inds: tuple of scene index and output index
"""
index, out_i = inds
scene = select[index][0][0]
uuids = [item[1] for item in select[index]]
paths = ([os.path.join(root, scene, 'pano', 'rgb', "point_" + item + "_view_equirectangular_domain_rgb.png") for item in uuids])
mist_paths = ([os.path.join(root, scene, 'pano', 'mist', "point_" + item + "_view_equirectangular_domain_mist.png") for item in uuids])
normal_paths = ([os.path.join(root, scene, 'pano', 'normal', "point_" + item + "_view_equirectangular_domain_normal.png") for item in uuids])
pose_paths = ([os.path.join(root, scene, 'pano', 'points', "point_" + item + ".json") for item in uuids])
semantic_paths = ([os.path.join(root, scene, 'pano', 'semantic', "point_" + item + "_view_equirectangular_domain_semantic.png") for item in uuids])
poses = []
for i, item in enumerate(pose_paths):
f = open(item)
pose_dict = json.load(f)
p = np.concatenate(np.array(pose_dict[1][u'camera_rt_matrix'])).astype(np.float32).reshape((4, 4))
rotation = np.array([[0, 1, 0, 0], [0, 0, 1, 0], [-1, 0, 0, 0], [0, 0, 0, 1]])
p = np.dot(p, rotation)
poses.append(p)
f.close()
img_paths = paths[1:]
target_path = paths[0]
img_poses = poses[1:]
target_pose = poses[0]
mist_img_paths = mist_paths[1:]
mist_target_path = mist_paths[0]
normal_img_paths = normal_paths[1:]
normal_target_path = normal_paths[0]
semantic_img_paths = semantic_paths[1:]
semantic_target_path = semantic_paths[0]
poses_relative = []
semantic_imgs = None
semantic_target = None
normal_imgs = None
normal_target = None
mist_imgs = None
mist_target = None
imgs = None
target = None
for pose_i, item in enumerate(img_poses):
pose_i = pose_i + 1
relative = np.dot(inv(target_pose), item)
poses_relative.append(torch.from_numpy(relative))
if require_rgb:
imgs = [loader(item) for item in img_paths]
target = loader(target_path)
org_img = imgs[0].copy()
if not off_3d and require_rgb:
mist_imgs = [depth_loader(item) for item in mist_img_paths]
mist_target = depth_loader(mist_target_path)
if train:
normal_imgs = [loader(item) for item in normal_img_paths]
normal_target = loader(normal_target_path)
if not transform is None and require_rgb:
imgs = [transform(item) for item in imgs]
if not target_transform is None and require_rgb:
target = target_transform(target)
if not off_3d and require_rgb:
mist_imgs = [np.expand_dims(np.array(item).astype(np.float32) / 65536.0, 2) for item in mist_imgs]
org_mist = mist_imgs[0][:, :, 0].copy()
mist_target = np.expand_dims(np.array(mist_target).astype(np.float32) / 65536.0, 2)
if not depth_trans is None:
mist_imgs = [depth_trans(item) for item in mist_imgs]
if not depth_trans is None:
mist_target = depth_trans(mist_target)
if train:
if not transform is None:
normal_imgs = [transform(item) for item in normal_imgs]
if not target_transform is None:
normal_target = target_transform(normal_target)
if not off_pc_render and require_rgb:
img = np.array(org_img)
h, w, _ = img.shape
render = np.zeros((h, w, 3), dtype='uint8')
target_depth = np.zeros((h, w)).astype(np.float32)
depth = org_mist
pose = poses_relative[0].numpy()
dll.render(ct.c_int(img.shape[0]),
ct.c_int(img.shape[1]),
img.ctypes.data_as(ct.c_void_p),
depth.ctypes.data_as(ct.c_void_p),
pose.ctypes.data_as(ct.c_void_p),
render.ctypes.data_as(ct.c_void_p),
target_depth.ctypes.data_as(ct.c_void_p)
)
if not transform is None:
render = transform(Image.fromarray(render))
if not depth_trans is None:
target_depth = depth_trans(np.expand_dims(target_depth, 2))
if off_3d:
out = (imgs, target, poses_relative)
elif off_pc_render:
out = (imgs, target, mist_imgs, mist_target, normal_imgs, normal_target, poses_relative)
else:
out = (imgs, target, mist_imgs, mist_target, normal_imgs, normal_target, poses_relative, render, target_depth)
return (out_i, out)
class ViewDataSet3D(data.Dataset):
def __init__(self, root=None, train=False, transform=None, mist_transform=None, loader=default_loader, seqlen=5,
debug=False, dist_filter=None, off_3d=True, off_pc_render=True, overwrite_fofn=False,
semantic_transform=np.array, env = None, only_load = None):
print('Processing the data:')
if not root:
self.root = os.path.join(os.path.dirname(os.path.abspath(assets.__file__)), "dataset")
else:
self.root = root
self.train = train
self.env = env
self.loader = loader
self.seqlen = seqlen
self.transform = transform
self.target_transform = transform
self.depth_trans = mist_transform
self.semantic_trans = semantic_transform
self._require_semantics = "SEMANTICS" in self.env.config["ui_components"]
self._require_rgb = "RGB_FILLED" in self.env.config["ui_components"] or "RGB_PREFILLED" in self.env.config["ui_components"] or "rgb_filled" in self.env.config["output"] or "rgb_prefill" in self.env.config["output"]
self.off_3d = off_3d
self.select = []
self.fofn = self.root + '_fofn' + str(int(train)) + '.pkl'
self.off_pc_render = off_pc_render
self.dll = None
if not self.off_pc_render:
self.dll = np.ctypeslib.load_library('render', '.')
if overwrite_fofn or not os.path.isfile(self.fofn):
if only_load is None:
self.scenes = sorted([d for d in (os.listdir(self.root)) if
os.path.isdir(os.path.join(self.root, d)) and os.path.isfile(
os.path.join(self.root, d, 'camera_poses.csv')) and os.path.isdir(
os.path.join(self.root, d, 'pano'))])
num_scenes = len(self.scenes)
num_train = int(num_scenes * 0.9)
else:
self.scenes = sorted([only_load])
num_scenes = 1
num_train = 0
print("Total %d scenes %d train %d test" % (num_scenes, num_train, num_scenes - num_train))
if train:
self.scenes = self.scenes[:num_train]
self.meta = {}
last = len(self.scenes)
for scene in self.scenes[:last]:
posefile = os.path.join(self.root, scene, 'camera_poses.csv')
with open(posefile) as f:
for line in f:
l = line.strip().split(',')
uuid = l[0]
xyz = list(map(float, l[1:4]))
quat = list(map(float, l[4:8]))
if not scene in self.meta:
self.meta[scene] = {}
metadata = (uuid, xyz, quat)
# print(uuid, xyz)
if os.path.isfile(os.path.join(self.root, scene, 'pano', 'points', 'point_' + uuid + '.json')):
if np.linalg.norm( np.array(xyz) - np.array([0,0,0])) > 1e-5: #remove scans that are not registered
self.meta[scene][uuid] = metadata
print("Indexing")
for scene, meta in tqdm(list(self.meta.items())):
if len(meta) < self.seqlen:
continue
for uuid, v in list(meta.items()):
dist_list = [(uuid2, np.linalg.norm(np.array(v2[1]) - np.array(v[1]))) for uuid2, v2 in list(meta.items())]
dist_list = sorted(dist_list, key=lambda x: x[-1])
if not dist_filter is None:
if dist_list[1][-1] < dist_filter:
self.select.append([[scene, dist_list[i][0], dist_list[i][1]] for i in range(self.seqlen)])
else:
self.select.append([[scene, dist_list[i][0], dist_list[i][1]] for i in range(self.seqlen)])
with open(self.fofn, 'wb') as fp:
pickle.dump([self.scenes, self.meta, self.select, num_scenes, num_train], fp)
else:
with open(self.fofn, 'rb') as fp:
self.scenes, self.meta, self.select, num_scenes, num_train = pickle.load(fp)
print("Total %d scenes %d train %d test" % (num_scenes, num_train, num_scenes - num_train))
def get_scene_info(self, index):
scene = self.scenes[index]
data = [(i, item) for i, item in enumerate(self.select) if item[0][0] == scene]
uuids = ([(item[1][0][1], item[0]) for item in data])
pose_paths = (
[os.path.join(self.root, scene, 'pano', 'points', "point_" + item[0] + ".json") for item in uuids])
poses = []
for item in pose_paths:
f = open(item)
pose_dict = json.load(f)
p = np.concatenate(np.array(pose_dict[1][u'camera_rt_matrix'])).astype(np.float32).reshape((4, 4))
rotation = np.array([[0, 1, 0, 0], [0, 0, 1, 0], [-1, 0, 0, 0], [0, 0, 0, 1]])
p = np.dot(p, rotation)
poses.append(p)
f.close()
return uuids, poses
def __getitem__(self, index):
scene = self.select[index][0][0]
uuids = [item[1] for item in self.select[index]]
paths = (
[os.path.join(self.root, scene, 'pano', 'rgb', "point_" + item + "_view_equirectangular_domain_rgb.png") for
item in uuids])
mist_paths = (
[os.path.join(self.root, scene, 'pano', 'mist', "point_" + item + "_view_equirectangular_domain_mist.png") for
item in uuids])
normal_paths = (
[os.path.join(self.root, scene, 'pano', 'normal', "point_" + item + "_view_equirectangular_domain_normal.png")
for item in uuids])
pose_paths = ([os.path.join(self.root, scene, 'pano', 'points', "point_" + item + ".json") for item in uuids])
semantic_paths = ([os.path.join(self.root, scene, 'pano', 'semantic',
"point_" + item + "_view_equirectangular_domain_semantic.png") for item in
uuids])
poses = []
for i, item in enumerate(pose_paths):
f = open(item)
pose_dict = json.load(f)
p = np.concatenate(np.array(pose_dict[1][u'camera_rt_matrix'])).astype(np.float32).reshape((4, 4))
rotation = np.array([[0, 1, 0, 0], [0, 0, 1, 0], [-1, 0, 0, 0], [0, 0, 0, 1]])
p = np.dot(p, rotation)
poses.append(p)
f.close()
img_paths = paths[1:]
target_path = paths[0]
img_poses = poses[1:]
target_pose = poses[0]
mist_img_paths = mist_paths[1:]
mist_target_path = mist_paths[0]
normal_img_paths = normal_paths[1:]
normal_target_path = normal_paths[0]
semantic_img_paths = semantic_paths[1:]
semantic_target_path = semantic_paths[0]
poses_relative = []
semantic_imgs = None
semantic_target = None
normal_imgs = None
normal_target = None
mist_imgs = None
mist_target = None
imgs, target = None, None
for pose_i, item in enumerate(img_poses):
pose_i = pose_i + 1
relative = np.dot(inv(target_pose), item)
poses_relative.append(torch.from_numpy(relative))
if self._require_rgb:
imgs = [self.loader(item) for item in img_paths]
target = self.loader(target_path)
if not self.off_3d and self._require_rgb:
mist_imgs = [depth_loader(item) for item in mist_img_paths]
mist_target = depth_loader(mist_target_path)
if self.train: # Optimize
normal_imgs = [self.loader(item) for item in normal_img_paths]
normal_target = self.loader(normal_target_path)
if not self.off_pc_render and self._require_rgb:
org_img = imgs[0].copy()
if not self.transform is None:
imgs = [self.transform(item) for item in imgs]
if not self.target_transform is None:
target = self.target_transform(target)
if not self.off_3d and self._require_rgb:
mist_imgs = [np.expand_dims(np.array(item).astype(np.float32) / 65536.0, 2) for item in mist_imgs]
if not self.off_pc_render:
org_mist = mist_imgs[0][:, :, 0].copy()
mist_target = np.expand_dims(np.array(mist_target).astype(np.float32) / 65536.0, 2)
if not self.depth_trans is None:
mist_imgs = [self.depth_trans(item) for item in mist_imgs]
if not self.depth_trans is None:
mist_target = self.depth_trans(mist_target)
if self.train:
if not self.transform is None:
normal_imgs = [self.transform(item) for item in normal_imgs]
if not self.target_transform is None:
normal_target = self.target_transform(normal_target)
if not self.off_pc_render and self._require_rgb:
img = np.array(org_img)
h, w, _ = img.shape
render = np.zeros((h, w, 3), dtype='uint8')
target_depth = np.zeros((h, w)).astype(np.float32)
depth = org_mist
pose = poses_relative[0].numpy()
self.dll.render(ct.c_int(img.shape[0]),
ct.c_int(img.shape[1]),
img.ctypes.data_as(ct.c_void_p),
depth.ctypes.data_as(ct.c_void_p),
pose.ctypes.data_as(ct.c_void_p),
render.ctypes.data_as(ct.c_void_p),
target_depth.ctypes.data_as(ct.c_void_p)
)
if not self.transform is None:
render = self.transform(Image.fromarray(render))
if not self.depth_trans is None:
target_depth = self.depth_trans(np.expand_dims(target_depth, 2))
if self.off_3d:
return imgs, target, poses_relative
elif self.off_pc_render:
return imgs, target, mist_imgs, mist_target, normal_imgs, normal_target, poses_relative
else:
return imgs, target, mist_imgs, mist_target, normal_imgs, normal_target, poses_relative, render, target_depth
def get_multi_index(self, uuids):
indices = range(len(uuids))
p = Pool(16)
partial_fn = partial(get_item_fn, select=self.select, root=self.root, loader=self.loader, transform=self.transform, off_3d=self.off_3d, target_transform=self.target_transform, depth_trans=self.depth_trans, off_pc_render=self.off_pc_render, dll=self.dll, train=self.train, require_rgb=self._require_rgb)
mapped_pairs = list(tqdm(p.imap(partial_fn, list(zip(uuids, indices))), total=len(uuids)))
sorted_pairs = sorted(mapped_pairs, key=lambda x: x[0])
out_data = [key_pair[1] for key_pair in sorted_pairs]
p.close()
p.join()
return out_data
def __len__(self):
return len(self.select)
########### BELOW THIS POINT: Legacy code #################
########### KEEPING ONLY FOR REFERENCE ####################
class Places365Dataset(data.Dataset):
def __init__(self, root, train=True, transform=None, loader=default_loader):
self.root = root.rstrip('/')
self.train = train
self.fns = []
self.fofn = os.path.basename(root) + '_fofn' + str(int(train)) + '.pkl'
self.loader = loader
self.transform = transform
if not os.path.isfile(self.fofn):
for subdir, dirs, files in os.walk(self.root):
if self.train:
                    files = files[:len(files) // 10 * 9]  # floor division (Python 3 safe)
else:
                    files = files[len(files) // 10 * 9:]  # floor division (Python 3 safe)
print(subdir)
for file in files:
self.fns.append(os.path.join(subdir, file))
with open(self.fofn, 'wb') as fp:
pickle.dump(self.fns, fp)
else:
with open(self.fofn, 'rb') as fp:
self.fns = pickle.load(fp)
def __len__(self):
return len(self.fns)
def __getitem__(self, index):
path = self.fns[index]
img = self.loader(path)
if not self.transform is None:
img = self.transform(img)
return img
class PairDataset(data.Dataset):
def __init__(self, root, train=True, transform=None, mist_transform=None, loader=np.load):
self.root = root.rstrip('/')
self.train = train
self.fns = []
self.fofn = os.path.basename(root) + '_fofn' + str(int(train)) + '.pkl'
self.loader = loader
self.transform = transform
self.mist_transform = mist_transform
if not os.path.isfile(self.fofn):
for subdir, dirs, files in os.walk(self.root):
if self.train:
                    files = files[:len(files) // 10 * 9]  # floor division (Python 3 safe)
else:
                    files = files[len(files) // 10 * 9:]  # floor division (Python 3 safe)
print(subdir)
for file in files:
if file[-3:] == 'npz':
self.fns.append(os.path.join(subdir, file))
with open(self.fofn, 'wb') as fp:
pickle.dump(self.fns, fp)
else:
with open(self.fofn, 'rb') as fp:
self.fns = pickle.load(fp)
def __len__(self):
return len(self.fns)
def __getitem__(self, index):
path = self.fns[index]
data = self.loader(path)
try:
source, depth, target = data['source'], data['depth'], data['target']
# print(source.shape, depth.shape, target.shape)
except:
source = np.zeros((1024, 2048, 3)).astype(np.uint8)
target =
|
np.zeros((1024, 2048, 3))
|
numpy.zeros
|
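# Minimal sketch (independent of the Gibson dataset, all names hypothetical)
# of the pattern used by ViewDataSet3D.get_multi_index: a module-level
# function is bound to its fixed arguments with functools.partial so it stays
# picklable, mapped over a worker pool, and the results are re-sorted by the
# output index carried through each task.
from functools import partial
from multiprocessing import Pool

def _load_one(inds, scale):
    index, out_i = inds
    return (out_i, index * scale)  # stand-in for the real loading work

if __name__ == "__main__":
    fn = partial(_load_one, scale=10)
    with Pool(2) as p:
        pairs = list(p.imap(fn, [(3, 0), (1, 1), (2, 2)]))
    ordered = [value for _, value in sorted(pairs, key=lambda x: x[0])]
    print(ordered)  # [30, 10, 20]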
#!/usr/bin/env python
#
# Created by: <NAME>, March 2002
#
""" Test functions for linalg.basic module
"""
from __future__ import division, print_function, absolute_import
"""
Bugs:
1) solve.check_random_sym_complex fails if a is complex
and transpose(a) = conjugate(a) (a is Hermitian).
"""
__usage__ = """
Build linalg:
python setup_linalg.py build
Run tests if scipy is installed:
python -c 'import scipy;scipy.linalg.test()'
Run tests if linalg is not installed:
python tests/test_basic.py
"""
import numpy as np
from numpy import arange, array, dot, zeros, identity, conjugate, transpose, \
float32
import numpy.linalg as linalg
from numpy.testing import TestCase, rand, run_module_suite, assert_raises, \
assert_equal, assert_almost_equal, assert_array_almost_equal, assert_, \
assert_allclose
from scipy.linalg import solve, inv, det, lstsq, pinv, pinv2, pinvh, norm,\
solve_banded, solveh_banded, solve_triangular
from scipy.linalg._testutils import assert_no_overwrite
def random(size):
return rand(*size)
class TestSolveBanded(TestCase):
def test_real(self):
a = array([[1.0, 20, 0, 0],
[-30, 4, 6, 0],
[2, 1, 20, 2],
[0, -1, 7, 14]])
ab = array([[0.0, 20, 6, 2],
[1, 4, 20, 14],
[-30, 1, 7, 0],
[2, -1, 0, 0]])
l,u = 2,1
b4 = array([10.0, 0.0, 2.0, 14.0])
b4by1 = b4.reshape(-1,1)
b4by2 = array([[2, 1],
[-30, 4],
[2, 3],
[1, 3]])
b4by4 = array([[1, 0, 0, 0],
[0, 0, 0, 1],
[0, 1, 0, 0],
[0, 1, 0, 0]])
for b in [b4, b4by1, b4by2, b4by4]:
x = solve_banded((l, u), ab, b)
assert_array_almost_equal(dot(a, x), b)
def test_complex(self):
a = array([[1.0, 20, 0, 0],
[-30, 4, 6, 0],
[2j, 1, 20, 2j],
[0, -1, 7, 14]])
ab = array([[0.0, 20, 6, 2j],
[1, 4, 20, 14],
[-30, 1, 7, 0],
[2j, -1, 0, 0]])
l,u = 2,1
b4 = array([10.0, 0.0, 2.0, 14.0j])
b4by1 = b4.reshape(-1,1)
b4by2 = array([[2, 1],
[-30, 4],
[2, 3],
[1, 3]])
b4by4 = array([[1, 0, 0, 0],
[0, 0, 0,1j],
[0, 1, 0, 0],
[0, 1, 0, 0]])
for b in [b4, b4by1, b4by2, b4by4]:
x = solve_banded((l, u), ab, b)
assert_array_almost_equal(dot(a, x), b)
def test_tridiag_real(self):
ab = array([[0.0, 20, 6, 2],
[1, 4, 20, 14],
[-30, 1, 7, 0]])
a = np.diag(ab[0,1:], 1) + np.diag(ab[1,:], 0) + np.diag(ab[2,:-1], -1)
b4 = array([10.0, 0.0, 2.0, 14.0])
b4by1 = b4.reshape(-1,1)
b4by2 = array([[2, 1],
[-30, 4],
[2, 3],
[1, 3]])
b4by4 = array([[1, 0, 0, 0],
[0, 0, 0, 1],
[0, 1, 0, 0],
[0, 1, 0, 0]])
for b in [b4, b4by1, b4by2, b4by4]:
x = solve_banded((1, 1), ab, b)
assert_array_almost_equal(dot(a, x), b)
def test_tridiag_complex(self):
ab = array([[0.0, 20, 6, 2j],
[1, 4, 20, 14],
[-30, 1, 7, 0]])
a = np.diag(ab[0,1:], 1) + np.diag(ab[1,:], 0) + np.diag(ab[2,:-1], -1)
b4 = array([10.0, 0.0, 2.0, 14.0j])
b4by1 = b4.reshape(-1,1)
b4by2 = array([[2, 1],
[-30, 4],
[2, 3],
[1, 3]])
b4by4 = array([[1, 0, 0, 0],
[0, 0, 0, 1],
[0, 1, 0, 0],
[0, 1, 0, 0]])
for b in [b4, b4by1, b4by2, b4by4]:
x = solve_banded((1, 1), ab, b)
assert_array_almost_equal(dot(a, x), b)
def test_check_finite(self):
a = array([[1.0, 20, 0, 0],
[-30, 4, 6, 0],
[2, 1, 20, 2],
[0, -1, 7, 14]])
ab = array([[0.0, 20, 6, 2],
[1, 4, 20, 14],
[-30, 1, 7, 0],
[2, -1, 0, 0]])
l,u = 2,1
b4 = array([10.0, 0.0, 2.0, 14.0])
x = solve_banded((l, u), ab, b4, check_finite=False)
assert_array_almost_equal(dot(a, x), b4)
def test_bad_shape(self):
ab = array([[0.0, 20, 6, 2],
[1, 4, 20, 14],
[-30, 1, 7, 0],
[2, -1, 0, 0]])
l,u = 2,1
bad = array([1.0, 2.0, 3.0, 4.0]).reshape(-1,4)
assert_raises(ValueError, solve_banded, (l, u), ab, bad)
assert_raises(ValueError, solve_banded, (l, u), ab, [1.0, 2.0])
# Values of (l,u) are not compatible with ab.
assert_raises(ValueError, solve_banded, (1, 1), ab, [1.0, 2.0])
class TestSolveHBanded(TestCase):
def test_01_upper(self):
# Solve
# [ 4 1 2 0] [1]
# [ 1 4 1 2] X = [4]
# [ 2 1 4 1] [1]
# [ 0 2 1 4] [2]
# with the RHS as a 1D array.
ab = array([[0.0, 0.0, 2.0, 2.0],
[-99, 1.0, 1.0, 1.0],
[4.0, 4.0, 4.0, 4.0]])
b = array([1.0, 4.0, 1.0, 2.0])
x = solveh_banded(ab, b)
assert_array_almost_equal(x, [0.0, 1.0, 0.0, 0.0])
def test_02_upper(self):
# Solve
# [ 4 1 2 0] [1 6]
# [ 1 4 1 2] X = [4 2]
# [ 2 1 4 1] [1 6]
# [ 0 2 1 4] [2 1]
#
ab = array([[0.0, 0.0, 2.0, 2.0],
[-99, 1.0, 1.0, 1.0],
[4.0, 4.0, 4.0, 4.0]])
b = array([[1.0, 6.0],
[4.0, 2.0],
[1.0, 6.0],
[2.0, 1.0]])
x = solveh_banded(ab, b)
expected = array([[0.0, 1.0],
[1.0, 0.0],
[0.0, 1.0],
[0.0, 0.0]])
assert_array_almost_equal(x, expected)
def test_03_upper(self):
# Solve
# [ 4 1 2 0] [1]
# [ 1 4 1 2] X = [4]
# [ 2 1 4 1] [1]
# [ 0 2 1 4] [2]
# with the RHS as a 2D array with shape (3,1).
ab = array([[0.0, 0.0, 2.0, 2.0],
[-99, 1.0, 1.0, 1.0],
[4.0, 4.0, 4.0, 4.0]])
b = array([1.0, 4.0, 1.0, 2.0]).reshape(-1,1)
x = solveh_banded(ab, b)
assert_array_almost_equal(x, array([0.0, 1.0, 0.0, 0.0]).reshape(-1,1))
def test_01_lower(self):
# Solve
# [ 4 1 2 0] [1]
# [ 1 4 1 2] X = [4]
# [ 2 1 4 1] [1]
# [ 0 2 1 4] [2]
#
ab = array([[4.0, 4.0, 4.0, 4.0],
[1.0, 1.0, 1.0, -99],
[2.0, 2.0, 0.0, 0.0]])
b = array([1.0, 4.0, 1.0, 2.0])
x = solveh_banded(ab, b, lower=True)
assert_array_almost_equal(x, [0.0, 1.0, 0.0, 0.0])
def test_02_lower(self):
# Solve
# [ 4 1 2 0] [1 6]
# [ 1 4 1 2] X = [4 2]
# [ 2 1 4 1] [1 6]
# [ 0 2 1 4] [2 1]
#
ab = array([[4.0, 4.0, 4.0, 4.0],
[1.0, 1.0, 1.0, -99],
[2.0, 2.0, 0.0, 0.0]])
b = array([[1.0, 6.0],
[4.0, 2.0],
[1.0, 6.0],
[2.0, 1.0]])
x = solveh_banded(ab, b, lower=True)
expected = array([[0.0, 1.0],
[1.0, 0.0],
[0.0, 1.0],
[0.0, 0.0]])
assert_array_almost_equal(x, expected)
def test_01_float32(self):
# Solve
# [ 4 1 2 0] [1]
# [ 1 4 1 2] X = [4]
# [ 2 1 4 1] [1]
# [ 0 2 1 4] [2]
#
ab = array([[0.0, 0.0, 2.0, 2.0],
[-99, 1.0, 1.0, 1.0],
[4.0, 4.0, 4.0, 4.0]], dtype=float32)
b = array([1.0, 4.0, 1.0, 2.0], dtype=float32)
x = solveh_banded(ab, b)
assert_array_almost_equal(x, [0.0, 1.0, 0.0, 0.0])
def test_02_float32(self):
# Solve
# [ 4 1 2 0] [1 6]
# [ 1 4 1 2] X = [4 2]
# [ 2 1 4 1] [1 6]
# [ 0 2 1 4] [2 1]
#
ab = array([[0.0, 0.0, 2.0, 2.0],
[-99, 1.0, 1.0, 1.0],
[4.0, 4.0, 4.0, 4.0]], dtype=float32)
b = array([[1.0, 6.0],
[4.0, 2.0],
[1.0, 6.0],
[2.0, 1.0]], dtype=float32)
x = solveh_banded(ab, b)
expected = array([[0.0, 1.0],
[1.0, 0.0],
[0.0, 1.0],
[0.0, 0.0]])
assert_array_almost_equal(x, expected)
def test_01_complex(self):
# Solve
# [ 4 -j 2 0] [2-j]
# [ j 4 -j 2] X = [4-j]
# [ 2 j 4 -j] [4+j]
# [ 0 2 j 4] [2+j]
#
ab = array([[0.0, 0.0, 2.0, 2.0],
[-99, -1.0j, -1.0j, -1.0j],
[4.0, 4.0, 4.0, 4.0]])
b = array([2-1.0j, 4.0-1j, 4+1j, 2+1j])
x = solveh_banded(ab, b)
assert_array_almost_equal(x, [0.0, 1.0, 1.0, 0.0])
def test_02_complex(self):
# Solve
# [ 4 -j 2 0] [2-j 2+4j]
# [ j 4 -j 2] X = [4-j -1-j]
# [ 2 j 4 -j] [4+j 4+2j]
# [ 0 2 j 4] [2+j j]
#
ab = array([[0.0, 0.0, 2.0, 2.0],
[-99, -1.0j, -1.0j, -1.0j],
[4.0, 4.0, 4.0, 4.0]])
b = array([[2-1j, 2+4j],
[4.0-1j, -1-1j],
[4.0+1j, 4+2j],
[2+1j, 1j]])
x = solveh_banded(ab, b)
expected = array([[0.0, 1.0j],
[1.0, 0.0],
[1.0, 1.0],
[0.0, 0.0]])
assert_array_almost_equal(x, expected)
def test_tridiag_01_upper(self):
# Solve
# [ 4 1 0] [1]
# [ 1 4 1] X = [4]
# [ 0 1 4] [1]
# with the RHS as a 1D array.
ab = array([[-99, 1.0, 1.0], [4.0, 4.0, 4.0]])
b = array([1.0, 4.0, 1.0])
x = solveh_banded(ab, b)
assert_array_almost_equal(x, [0.0, 1.0, 0.0])
def test_tridiag_02_upper(self):
# Solve
# [ 4 1 0] [1 4]
# [ 1 4 1] X = [4 2]
# [ 0 1 4] [1 4]
#
ab = array([[-99, 1.0, 1.0],
[4.0, 4.0, 4.0]])
b = array([[1.0, 4.0],
[4.0, 2.0],
[1.0, 4.0]])
x = solveh_banded(ab, b)
expected = array([[0.0, 1.0],
[1.0, 0.0],
[0.0, 1.0]])
assert_array_almost_equal(x, expected)
def test_tridiag_03_upper(self):
# Solve
# [ 4 1 0] [1]
# [ 1 4 1] X = [4]
# [ 0 1 4] [1]
# with the RHS as a 2D array with shape (3,1).
ab = array([[-99, 1.0, 1.0], [4.0, 4.0, 4.0]])
b = array([1.0, 4.0, 1.0]).reshape(-1,1)
x = solveh_banded(ab, b)
assert_array_almost_equal(x, array([0.0, 1.0, 0.0]).reshape(-1,1))
def test_tridiag_01_lower(self):
# Solve
# [ 4 1 0] [1]
# [ 1 4 1] X = [4]
# [ 0 1 4] [1]
#
ab = array([[4.0, 4.0, 4.0],
[1.0, 1.0, -99]])
b = array([1.0, 4.0, 1.0])
x = solveh_banded(ab, b, lower=True)
assert_array_almost_equal(x, [0.0, 1.0, 0.0])
def test_tridiag_02_lower(self):
# Solve
# [ 4 1 0] [1 4]
# [ 1 4 1] X = [4 2]
# [ 0 1 4] [1 4]
#
ab = array([[4.0, 4.0, 4.0],
[1.0, 1.0, -99]])
b = array([[1.0, 4.0],
[4.0, 2.0],
[1.0, 4.0]])
x = solveh_banded(ab, b, lower=True)
expected = array([[0.0, 1.0],
[1.0, 0.0],
[0.0, 1.0]])
assert_array_almost_equal(x, expected)
def test_tridiag_01_float32(self):
# Solve
# [ 4 1 0] [1]
# [ 1 4 1] X = [4]
# [ 0 1 4] [1]
#
ab = array([[-99, 1.0, 1.0], [4.0, 4.0, 4.0]], dtype=float32)
b = array([1.0, 4.0, 1.0], dtype=float32)
x = solveh_banded(ab, b)
assert_array_almost_equal(x, [0.0, 1.0, 0.0])
def test_tridiag_02_float32(self):
# Solve
# [ 4 1 0] [1 4]
# [ 1 4 1] X = [4 2]
# [ 0 1 4] [1 4]
#
ab = array([[-99, 1.0, 1.0],
[4.0, 4.0, 4.0]], dtype=float32)
b = array([[1.0, 4.0],
[4.0, 2.0],
[1.0, 4.0]], dtype=float32)
x = solveh_banded(ab, b)
expected = array([[0.0, 1.0],
[1.0, 0.0],
[0.0, 1.0]])
assert_array_almost_equal(x, expected)
def test_tridiag_01_complex(self):
# Solve
# [ 4 -j 0] [ -j]
# [ j 4 -j] X = [4-j]
# [ 0 j 4] [4+j]
#
ab = array([[-99, -1.0j, -1.0j], [4.0, 4.0, 4.0]])
b = array([-1.0j, 4.0-1j, 4+1j])
x = solveh_banded(ab, b)
assert_array_almost_equal(x, [0.0, 1.0, 1.0])
def test_tridiag_02_complex(self):
# Solve
# [ 4 -j 0] [ -j 4j]
# [ j 4 -j] X = [4-j -1-j]
# [ 0 j 4] [4+j 4 ]
#
ab = array([[-99, -1.0j, -1.0j],
[4.0, 4.0, 4.0]])
b = array([[-1j, 4.0j],
[4.0-1j, -1.0-1j],
[4.0+1j, 4.0]])
x = solveh_banded(ab, b)
expected = array([[0.0, 1.0j],
[1.0, 0.0],
[1.0, 1.0]])
assert_array_almost_equal(x, expected)
def test_check_finite(self):
# Solve
# [ 4 1 0] [1]
# [ 1 4 1] X = [4]
# [ 0 1 4] [1]
# with the RHS as a 1D array.
ab = array([[-99, 1.0, 1.0], [4.0, 4.0, 4.0]])
b = array([1.0, 4.0, 1.0])
x = solveh_banded(ab, b, check_finite=False)
assert_array_almost_equal(x, [0.0, 1.0, 0.0])
def test_bad_shapes(self):
ab = array([[-99, 1.0, 1.0],
[4.0, 4.0, 4.0]])
b = array([[1.0, 4.0],
[4.0, 2.0]])
assert_raises(ValueError, solveh_banded, ab, b)
assert_raises(ValueError, solveh_banded, ab, [1.0, 2.0])
assert_raises(ValueError, solveh_banded, ab, [1.0])
class TestSolve(TestCase):
def setUp(self):
np.random.seed(1234)
def test_20Feb04_bug(self):
a = [[1,1],[1.0,0]] # ok
x0 = solve(a,[1,0j])
assert_array_almost_equal(dot(a,x0),[1,0])
a = [[1,1],[1.2,0]] # gives failure with clapack.zgesv(..,rowmajor=0)
b = [1,0j]
x0 = solve(a,b)
assert_array_almost_equal(dot(a,x0),[1,0])
def test_simple(self):
a = [[1,20],[-30,4]]
for b in ([[1,0],[0,1]],[1,0],
[[2,1],[-30,4]]):
x = solve(a,b)
assert_array_almost_equal(dot(a,x),b)
def test_simple_sym(self):
a = [[2,3],[3,5]]
for lower in [0,1]:
for b in ([[1,0],[0,1]],[1,0]):
x = solve(a,b,sym_pos=1,lower=lower)
assert_array_almost_equal(dot(a,x),b)
def test_simple_sym_complex(self):
a = [[5,2],[2,4]]
for b in [[1j,0],
[[1j,1j],
[0,2]],
]:
x = solve(a,b,sym_pos=1)
assert_array_almost_equal(dot(a,x),b)
def test_simple_complex(self):
a = array([[5,2],[2j,4]],'D')
for b in [[1j,0],
[[1j,1j],
[0,2]],
[1,0j],
array([1,0],'D'),
]:
x = solve(a,b)
assert_array_almost_equal(dot(a,x),b)
def test_nils_20Feb04(self):
n = 2
A = random([n,n])+random([n,n])*1j
X = zeros((n,n),'D')
Ainv = inv(A)
R = identity(n)+identity(n)*0j
for i in arange(0,n):
r = R[:,i]
X[:,i] = solve(A,r)
assert_array_almost_equal(X,Ainv)
def test_random(self):
n = 20
a = random([n,n])
for i in range(n):
a[i,i] = 20*(.1+a[i,i])
for i in range(4):
b = random([n,3])
x = solve(a,b)
assert_array_almost_equal(dot(a,x),b)
def test_random_complex(self):
n = 20
a = random([n,n]) + 1j * random([n,n])
for i in range(n):
a[i,i] = 20*(.1+a[i,i])
for i in range(2):
b = random([n,3])
x = solve(a,b)
assert_array_almost_equal(dot(a,x),b)
def test_random_sym(self):
n = 20
a = random([n,n])
for i in range(n):
a[i,i] = abs(20*(.1+a[i,i]))
for j in range(i):
a[i,j] = a[j,i]
for i in range(4):
b = random([n])
x = solve(a,b,sym_pos=1)
assert_array_almost_equal(dot(a,x),b)
def test_random_sym_complex(self):
n = 20
a = random([n,n])
# a = a + 1j*random([n,n]) # XXX: with this the accuracy will be very low
for i in range(n):
a[i,i] = abs(20*(.1+a[i,i]))
for j in range(i):
a[i,j] = conjugate(a[j,i])
b = random([n])+2j*random([n])
for i in range(2):
x = solve(a,b,sym_pos=1)
assert_array_almost_equal(dot(a,x),b)
def test_check_finite(self):
a = [[1,20],[-30,4]]
for b in ([[1,0],[0,1]],[1,0],
[[2,1],[-30,4]]):
x = solve(a,b, check_finite=False)
assert_array_almost_equal(dot(a,x),b)
class TestSolveTriangular(TestCase):
def test_simple(self):
"""
solve_triangular on a simple 2x2 matrix.
"""
A = array([[1,0], [1,2]])
b = [1, 1]
sol = solve_triangular(A, b, lower=True)
assert_array_almost_equal(sol, [1, 0])
# check that it works also for non-contiguous matrices
sol = solve_triangular(A.T, b, lower=False)
assert_array_almost_equal(sol, [.5, .5])
# and that it gives the same result as trans=1
sol = solve_triangular(A, b, lower=True, trans=1)
assert_array_almost_equal(sol, [.5, .5])
b = identity(2)
sol = solve_triangular(A, b, lower=True, trans=1)
assert_array_almost_equal(sol, [[1., -.5], [0, 0.5]])
def test_simple_complex(self):
"""
solve_triangular on a simple 2x2 complex matrix
"""
A = array([[1+1j, 0], [1j, 2]])
b = identity(2)
sol = solve_triangular(A, b, lower=True, trans=1)
assert_array_almost_equal(sol, [[.5-.5j, -.25-.25j], [0, 0.5]])
def test_check_finite(self):
"""
solve_triangular on a simple 2x2 matrix.
"""
A = array([[1,0], [1,2]])
b = [1, 1]
sol = solve_triangular(A, b, lower=True, check_finite=False)
assert_array_almost_equal(sol, [1, 0])
class TestInv(TestCase):
def setUp(self):
np.random.seed(1234)
def test_simple(self):
a = [[1,2],[3,4]]
a_inv = inv(a)
assert_array_almost_equal(dot(a,a_inv),
[[1,0],[0,1]])
a = [[1,2,3],[4,5,6],[7,8,10]]
a_inv = inv(a)
assert_array_almost_equal(dot(a,a_inv),
[[1,0,0],[0,1,0],[0,0,1]])
def test_random(self):
n = 20
for i in range(4):
a = random([n,n])
for i in range(n):
a[i,i] = 20*(.1+a[i,i])
a_inv = inv(a)
assert_array_almost_equal(dot(a,a_inv),
identity(n))
def test_simple_complex(self):
a = [[1,2],[3,4j]]
a_inv = inv(a)
assert_array_almost_equal(dot(a,a_inv),
[[1,0],[0,1]])
def test_random_complex(self):
n = 20
for i in range(4):
a = random([n,n])+2j*random([n,n])
for i in range(n):
a[i,i] = 20*(.1+a[i,i])
a_inv = inv(a)
assert_array_almost_equal(dot(a,a_inv),
identity(n))
def test_check_finite(self):
a = [[1,2],[3,4]]
a_inv = inv(a, check_finite=False)
assert_array_almost_equal(dot(a,a_inv),
[[1,0],[0,1]])
class TestDet(TestCase):
def setUp(self):
np.random.seed(1234)
def test_simple(self):
a = [[1,2],[3,4]]
a_det = det(a)
assert_almost_equal(a_det,-2.0)
def test_simple_complex(self):
a = [[1,2],[3,4j]]
a_det = det(a)
assert_almost_equal(a_det,-6+4j)
def test_random(self):
basic_det = linalg.det
n = 20
for i in range(4):
a = random([n,n])
d1 = det(a)
d2 = basic_det(a)
assert_almost_equal(d1,d2)
def test_random_complex(self):
basic_det = linalg.det
n = 20
for i in range(4):
a = random([n,n]) + 2j*random([n,n])
d1 = det(a)
d2 = basic_det(a)
assert_allclose(d1, d2, rtol=1e-13)
def test_check_finite(self):
a = [[1,2],[3,4]]
a_det = det(a, check_finite=False)
assert_almost_equal(a_det,-2.0)
def direct_lstsq(a,b,cmplx=0):
at = transpose(a)
if cmplx:
at = conjugate(at)
a1 = dot(at, a)
b1 = dot(at, b)
return solve(a1, b1)
class TestLstsq(TestCase):
def setUp(self):
np.random.seed(1234)
def test_random_overdet_large(self):
# bug report: <NAME>
n = 200
a = random([n,2])
for i in range(2):
a[i,i] = 20*(.1+a[i,i])
b = random([n,3])
x = lstsq(a,b)[0]
assert_array_almost_equal(x,direct_lstsq(a,b))
def test_simple_exact(self):
a = [[1,20],[-30,4]]
for b in ([[1,0],[0,1]],[1,0],
[[2,1],[-30,4]]):
x = lstsq(a,b)[0]
assert_array_almost_equal(dot(a,x),b)
def test_simple_overdet(self):
a = [[1,2],[4,5],[3,4]]
b = [1,2,3]
x,res,r,s = lstsq(a,b)
assert_array_almost_equal(x,direct_lstsq(a,b))
assert_almost_equal((abs(dot(a,x) - b)**2).sum(axis=0), res)
def test_simple_overdet_complex(self):
a = [[1+2j,2],[4,5],[3,4]]
b = [1,2+4j,3]
x,res,r,s = lstsq(a,b)
assert_array_almost_equal(x,direct_lstsq(a,b,cmplx=1))
assert_almost_equal(res, (abs(dot(a,x) - b)**2).sum(axis=0))
def test_simple_underdet(self):
a = [[1,2,3],[4,5,6]]
b = [1,2]
x,res,r,s = lstsq(a,b)
# XXX: need independent check
assert_array_almost_equal(x,[-0.05555556, 0.11111111, 0.27777778])
def test_random_exact(self):
n = 20
a = random([n,n])
for i in range(n):
a[i,i] = 20*(.1+a[i,i])
for i in range(4):
b = random([n,3])
x = lstsq(a,b)[0]
assert_array_almost_equal(dot(a,x),b)
def test_random_complex_exact(self):
n = 20
a = random([n,n]) + 1j * random([n,n])
for i in range(n):
a[i,i] = 20*(.1+a[i,i])
for i in range(2):
b = random([n,3])
x = lstsq(a,b)[0]
assert_array_almost_equal(dot(a,x),b)
def test_random_overdet(self):
n = 20
m = 15
a = random([n,m])
for i in range(m):
a[i,i] = 20*(.1+a[i,i])
for i in range(4):
b = random([n,3])
x,res,r,s = lstsq(a,b)
assert_(r == m, 'unexpected efficient rank')
# XXX: check definition of res
assert_array_almost_equal(x,direct_lstsq(a,b))
def test_random_complex_overdet(self):
n = 20
m = 15
a = random([n,m]) + 1j * random([n,m])
for i in range(m):
a[i,i] = 20*(.1+a[i,i])
for i in range(2):
b = random([n,3])
x,res,r,s = lstsq(a,b)
assert_(r == m, 'unexpected efficient rank')
# XXX: check definition of res
assert_array_almost_equal(x,direct_lstsq(a,b,1))
def test_check_finite(self):
a = [[1,20],[-30,4]]
for b in ([[1,0],[0,1]],[1,0],
[[2,1],[-30,4]]):
x = lstsq(a,b, check_finite=False)[0]
assert_array_almost_equal(dot(a,x),b)
class TestPinv(TestCase):
def test_simple_real(self):
a = array([[1, 2, 3], [4, 5, 6], [7, 8, 10]], dtype=float)
a_pinv = pinv(a)
assert_array_almost_equal(dot(a,a_pinv), np.eye(3))
a_pinv = pinv2(a)
assert_array_almost_equal(dot(a,a_pinv), np.eye(3))
def test_simple_complex(self):
a = (array([[1, 2, 3], [4, 5, 6], [7, 8, 10]], dtype=float)
+ 1j * array([[10, 8, 7], [6, 5, 4], [3, 2, 1]], dtype=float))
a_pinv = pinv(a)
assert_array_almost_equal(dot(a, a_pinv), np.eye(3))
a_pinv = pinv2(a)
assert_array_almost_equal(dot(a, a_pinv), np.eye(3))
def test_simple_singular(self):
a = array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=float)
a_pinv = pinv(a)
a_pinv2 = pinv2(a)
assert_array_almost_equal(a_pinv,a_pinv2)
def test_simple_cols(self):
a = array([[1, 2, 3], [4, 5, 6]], dtype=float)
a_pinv = pinv(a)
a_pinv2 = pinv2(a)
assert_array_almost_equal(a_pinv,a_pinv2)
def test_simple_rows(self):
a = array([[1, 2], [3, 4], [5, 6]], dtype=float)
a_pinv = pinv(a)
a_pinv2 = pinv2(a)
assert_array_almost_equal(a_pinv,a_pinv2)
def test_check_finite(self):
a = array([[1,2,3],[4,5,6.],[7,8,10]])
a_pinv = pinv(a, check_finite=False)
assert_array_almost_equal(dot(a,a_pinv),[[1,0,0],[0,1,0],[0,0,1]])
a_pinv = pinv2(a, check_finite=False)
assert_array_almost_equal(dot(a,a_pinv),[[1,0,0],[0,1,0],[0,0,1]])
class TestPinvSymmetric(TestCase):
def test_simple_real(self):
a = array([[1, 2, 3], [4, 5, 6], [7, 8, 10]], dtype=float)
a = np.dot(a, a.T)
a_pinv = pinvh(a)
assert_array_almost_equal(np.dot(a, a_pinv), np.eye(3))
def test_nonpositive(self):
a = array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=float)
a = np.dot(a, a.T)
u, s, vt = np.linalg.svd(a)
s[0] *= -1
a = np.dot(u * s, vt) # a is now symmetric non-positive and singular
a_pinv = pinv2(a)
a_pinvh = pinvh(a)
assert_array_almost_equal(a_pinv, a_pinvh)
def test_simple_complex(self):
a = (array([[1, 2, 3], [4, 5, 6], [7, 8, 10]], dtype=float)
+ 1j * array([[10, 8, 7], [6, 5, 4], [3, 2, 1]], dtype=float))
a = np.dot(a, a.conj().T)
a_pinv = pinvh(a)
assert_array_almost_equal(np.dot(a, a_pinv), np.eye(3))
class TestNorm(object):
def test_types(self):
for dtype in np.typecodes['AllFloat']:
x = np.array([1,2,3], dtype=dtype)
tol = max(1e-15, np.finfo(dtype).eps.real * 20)
assert_allclose(norm(x), np.sqrt(14), rtol=tol)
assert_allclose(norm(x, 2), np.sqrt(14), rtol=tol)
for dtype in np.typecodes['Complex']:
x = np.array([1j,2j,3j], dtype=dtype)
tol = max(1e-15, np.finfo(dtype).eps.real * 20)
assert_allclose(norm(x), np.sqrt(14), rtol=tol)
assert_allclose(norm(x, 2),
|
np.sqrt(14)
|
numpy.sqrt
|
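# Worked illustration (assumed layout, matching the matrices used in
# TestSolveBanded.test_real above) of the banded storage expected by
# scipy.linalg.solve_banded: a matrix ``a`` with ``l`` sub-diagonals and
# ``u`` super-diagonals is stored diagonal-by-diagonal as
# ``ab[u + i - j, j] = a[i, j]``.
import numpy as np
from scipy.linalg import solve_banded

a = np.array([[1.0, 20, 0, 0],
              [-30, 4, 6, 0],
              [2, 1, 20, 2],
              [0, -1, 7, 14]])
l, u = 2, 1
ab = np.zeros((l + u + 1, 4))
for i in range(4):
    for j in range(4):
        if -l <= j - i <= u:
            ab[u + i - j, j] = a[i, j]
b = np.array([10.0, 0.0, 2.0, 14.0])
x = solve_banded((l, u), ab, b)
assert np.allclose(a @ x, b)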
"""
Implementation of the ANDROMEDA algorithm from [MUG09]_ / [CAN15]_.
Based on ANDROMEDA v3.1 from 28/06/2018.
.. [MUG09]
| Mugnier et al, 2009
| **Optimal method for exoplanet detection by angular differential imaging**
| *J. Opt. Soc. Am. A, 26(6), 1326-1334*
| `doi:10.1364/JOSAA.26.001326 <http://doi.org/10.1364/JOSAA.26.001326>`_
.. [CAN15]
| Cantalloube et al, 2015
| **Direct exoplanet detection and characterization using the ANDROMEDA
method: Performance on VLT/NaCo data**
| *A&A, 582*
   | `doi:10.1051/0004-6361/201425571 <http://doi.org/10.1051/0004-6361/201425571>`_,
     `arXiv:1508.06406 <http://arxiv.org/abs/1508.06406>`_
"""
from __future__ import division, print_function
from __future__ import absolute_import
__author__ = "<NAME>"
__all__ = ["andromeda"]
import numpy as np
from ..var.filters import frame_filter_highpass, cube_filter_highpass
from ..conf.utils_conf import pool_map, fixed
from ..var.shapes import dist_matrix
from .utils import robust_std, idl_round, idl_where
from .shift import calc_psf_shift_subpix
from .fit import fitaffine
global CUBE
def andromeda(cube, oversampling_fact, angles, psf, filtering_fraction=.25,
min_sep=.5, annuli_width=1., roa=2, opt_method='lsq',
nsmooth_snr=18, iwa=None, owa=None, precision=50,
fast=False,
homogeneous_variance=True, ditimg=1.0, ditpsf=None, tnd=1.0,
total=False,
multiply_gamma=True, nproc=1, verbose=False):
"""
Exoplanet detection in ADI sequences by maximum-likelihood approach.
Parameters
----------
cube : 3d array_like
Input cube.
IDL parameter: ``IMAGES_1_INPUT``
oversampling_fact : float
Oversampling factor for the wavelength corresponding to the filter used
for obtaining ``cube`` (defined as the ratio between the wavelength of
the filter and the Shannon wavelength).
IDL parameter: ``OVERSAMPLING_1_INPUT``
angles : array_like
List of parallactic angles associated with each frame in ``cube``. Note
that, compared to the IDL version, the PA convention is different: If
you would pass ``[1,2,3]`` to the IDL version, you should pass ``[-1,
-2, -3]`` to this function to obtain the same results.
IDL parameter: ``- ANGLES_INPUT``
psf : 2d array_like
The experimental PSF used to model the planet signature in the
subtracted images. This PSF is usually a non-coronographic or saturated
observation of the target star.
IDL parameter: ``PSF_PLANET_INPUT``
filtering_fraction : float, optional
Strength of the high-pass filter. If set to ``1``, no high-pass filter
is used.
IDL parameter: ``FILTERING_FRACTION_INPUT``
min_sep : float, optional
Angular separation is assured to be above ``min_sep*lambda/D``.
IDL parameter: ``MINIMUM_SEPARATION_INPUT``
annuli_width : float, optional
Annuli width on which the subtraction are performed. The same for all
annuli.
IDL parameter: ``ANNULI_WIDTH_INPUT``
roa : float, optional
Ratio of the optimization area. The optimization annulus area is defined
by ``roa * annuli_width``.
``roa`` is forced to ``1`` when ``opt_method="no"`` is chosen.
IDL parameter: ``RATIO_OPT_AREA_INPUT``
opt_method : {'no', 'total', 'lsq', 'robust'}, optional
Method used to balance for the flux difference that exists between the
two subtracted annuli in an optimal way during ADI.
IDL parameter: ``OPT_METHOD_ANG_INPUT``
nsmooth_snr : int, optional
Number of pixels over which the radial robust standard deviation profile
of the SNR map is smoothed to provide a global trend for the SNR map
normalization. For ``nsmooth_snr=0`` the SNR map normalization is
disabled.
IDL parameter: ``NSMOOTH_SNR_INPUT``
iwa : float or None, optional
Inner working angle / inner radius of the first annulus taken into
account, expressed in ``lambda/D``. If ``None``, it is chosen
automatically between the values ``0.5``, ``4`` or ``0.25``.
IDL parameter: ``IWA_INPUT``
owa : float, optional
Outer working angle / **inner** radius of the last annulus, expressed in
``lambda/D``. If ``None``, the value is automatically chosen based on
the frame size.
IDL parameter: ``OWA_INPUT``
precision : int, optional
Number of shifts applied to the PSF. Passed to
``calc_psf_shift_subpix`` , which then creates a 4D cube with shape
(precision+1, precision+1, N, N).
IDL parameter: ``PRECISION_INPUT``
fast : float or bool, optional
Size of the annuli from which the speckle noise should not be dominant
anymore, in multiples of ``lambda/D``. If ``True``, a value of
``20 lambda/D`` is used, ``False`` (the default) disables the fast mode
entirely. Above this threshold, the annuli width is set to
``4*annuli_width``.
IDL parameter: ``FAST``
homogeneous_variance : bool, optional
If set, variance is treated as homogeneous and is calculated as a mean
of variance in each position through time.
IDL parameter: ``HOMOGENEOUS_VARIANCE_INPUT``
ditimg : float, optional
DIT for images (in sec)
IDL Parameter: ``DITIMG_INPUT``
ditpsf : float or None, optional
DIT for PSF (in sec)
IDL Parameter: ``DITPSF_INPUT``
If set to ``None``, the value of ``ditimg`` is used.
tnd : float, optional
Neutral Density Transmission.
IDL parameter: ``TND_INPUT``
total : bool, optional
``total=True`` is the old behaviour (normalizing the PSF to its sum).
IDL parameter: ``TOTAL`` (was ``MAX`` in previous releases).
multiply_gamma : bool, optional
Use gamma for signature computation too.
IDL parameter: ``MULTIPLY_GAMMA_INPUT``
nproc : int, optional
Number of processes to use.
verbose : bool, optional
Print some parameter values for control.
IDL parameter: ``VERBOSE``
Returns
-------
contrast : 2d ndarray
Calculated contrast map.
(IDL return value)
snr : 2d ndarray
Signal to noise ratio map (defined as the estimated contrast divided by
the estimated standard deviation of the contrast).
IDL parameter: ``SNR_OUTPUT``
snr_norm : 2d ndarray
Normalized SNR map (the SNR map divided by its radial robust standard
deviation profile; see ``nsmooth_snr``).
IDL parameter: ``SNR_NORM_OUTPUT``
stdcontrast : 2d ndarray
Map of the estimated standard deviation of the contrast.
IDL parameter: ``STDDEVCONTRAST_OUTPUT`` (previously
``STDEVFLUX_OUTPUT``)
stdcontrast_norm : 2d ndarray
Map of the normalized estimated standard deviation of the contrast.
IDL parameter: ``STDDEVCONTRAST_NORM_OUTPUT``
likelihood : 2d ndarray
Likelihood map.
IDL parameter: ``LIKELIHOOD_OUTPUT``
ext_radius : float
Edge of the SNR map. Slightly decreased due to the normalization
procedure. Useful to a posteriori reject potential companions that are
too close to the edge to be analyzed.
IDL parameter: ``EXT_RADIUS_OUTPUT``
Notes
-----
IDL outputs:
- SNR_OUTPUT
- SNR_NORM_OUTPUT
- LIKELIHOOD_OUTPUT
- STDDEVCONTRAST_OUTPUT (was STDEVFLUX_OUTPUT)
- STDDEVCONTRAST_NORM_OUTPUT
The following IDL parameters were not implemented:
- SDI-related parameters
- IMAGES_2_INPUT
- OVERSAMPLING_2_INPUT
- OPT_METHOD_SPEC_INPUT
- ROTOFF_INPUT
- recentering (should be done in VIP before):
- COORD_CENTRE_1_INPUT
- COORD_CENTRE_2_INPUT
- debug/expert testing
- INDEX_NEG_INPUT
- INDEX_POS_INPUT
- ANNULI_LIMITS_INPUT
- other
- DISPLAY
- VERSION
- HELP
- return parameters
- IMAGES_1_CENTRED_OUTPUT
- IMAGES_2_RESCALED_OUTPUT
- VARIANCE_1_CENTRED_OUTPUT
- VARIANCE_2_RESCALED_OUTPUT
- GAMMA_INFO_OUTPUT
- variances (VARIANCE_1_INPUT, VARIANCE_2_INPUT)
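Examples
--------
A minimal usage sketch (illustrative only): it assumes a centered ADI cube
``cube``, its parallactic angles ``angles`` and a non-coronagraphic PSF
frame ``psf`` are already loaded, and uses keyword arguments so no exact
positional signature is implied; the parameter values are arbitrary::

    res = andromeda(cube=cube, angles=angles, psf=psf,
                    oversampling_fact=1., iwa=1, annuli_width=1.,
                    opt_method='lsq', nsmooth_snr=18, verbose=True)
    contrast, snr, snr_norm, stdcontrast, stdcontrast_norm, lik, ext_r = res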
"""
def info(msg, *fmt, **kwfmt):
if verbose:
print(msg.format(*fmt, **kwfmt))
def info2(msg, *fmt, **kwfmt):
if verbose == 2:
print(msg.format(*fmt, **kwfmt))
global CUBE # assigned after high-pass filter
# ===== verify input
# the andromeda algorithm handles PAs differently from the other algos in
# VIP. This normalizes the API:
angles = -angles
frames, npix, _ = cube.shape
npixpsf, _ = psf.shape
if npix % 2 == 1:
raise ValueError("The side of the images must be an even number, with "
"the star centered on the intersection between the "
"four central pixels.")
if npixpsf % 2 == 1:
raise ValueError("The PSF provided must be of an even dimension!")
if filtering_fraction > 1 or filtering_fraction < 0:
raise ValueError("``filtering_fraction`` must be between 0 and 1")
# ===== set default parameters:
if opt_method != "no":
if roa < 1:
raise ValueError("The optimization to subtraction area ``roa`` "
"must be >= 1")
else:
roa = 1
if iwa is None:
for test_iwa in [0.5, 4, 0.25]:
# keep first IWA which produces frame pairs
test_ang = 2*np.arcsin(min_sep / (2*test_iwa)) * 180/np.pi
test_id, _, _ = create_indices(angles, angmin=test_ang)
if test_id is not None: # pairs found
break
iwa = test_iwa
info("iwa automatically set to {}*lambda/D", iwa)
if owa is None:
owa = (npix/2 - npixpsf/2) / (2*oversampling_fact)
info("owa automatically set to {} (based on frame size)", owa)
else:
# radius of the last annulus taken into account for process [lambda/D]:
owa -= (npixpsf/2) / (2*oversampling_fact)
if owa <= iwa - annuli_width:
raise ValueError("You must increase `owa` or decrease `iwa`")
if fast is False:
pass
elif fast is True: # IDL: IF fast EQ 1.0
fast = 20 # [lambda/D]
if owa > fast:
dmean = fast
else:
fast = 0
if iwa > fast:
dmean = owa
else:
if owa > fast:
dmean = fast
else:
fast = 0
if not fast:
dmean = owa
# dmean is not defined when fast == 0, but it is also not used in that case.
if fast:
info("annuli_width is set to {} from {} lambda/D", 4*annuli_width,
dmean)
# contrast maps:
if ditpsf is None:
ditpsf = ditimg
if np.asarray(tnd).ndim == 0: # int or float
info2("Throughput map: Homogeneous transmission: {}%", tnd*100)
else: # TODO: test if really 2d map?
info2("Throughput map: Inhomogeneous 2D throughput map given.")
if nsmooth_snr != 0 and nsmooth_snr < 2:
raise ValueError("`nsmooth_snr` must be >= 2")
# ===== info output
if filtering_fraction == 1:
info("No high-pass pre-filtering of the images!")
# ===== initialize output
flux = np.zeros_like(cube[0])
snr = np.zeros_like(cube[0])
likelihood = np.zeros_like(cube[0])
stdflux = np.zeros_like(cube[0])
# ===== pre-processing
# normalization...
if total:
psf_scale_factor = np.sum(psf)
else:
psf_scale_factor = np.max(psf)
# creates new array in memory (prevent overwriting of input parameters)
psf = psf / psf_scale_factor
# ...and spatial filtering of the PSF:
if filtering_fraction != 1:
psf = frame_filter_highpass(psf, "hann", hann_cutoff=filtering_fraction)
# library of all different PSF positions
psf_cube = calc_psf_shift_subpix(psf, precision=precision)
# spatial filtering of the preprocessed image-cubes:
if filtering_fraction != 1:
if verbose:
print("Pre-processing filtering of the images and the PSF: "
"done! F={}".format(filtering_fraction))
cube = cube_filter_highpass(cube, mode="hann",
hann_cutoff=filtering_fraction,
verbose=verbose)
CUBE = cube
# definition of the width of each annuli (to perform ADI)
dmin = iwa # size of the lowest annuli, in lambda/D
dmax = owa # size of the greatest annuli, in lambda/D
if fast:
first_distarray = dmin + np.arange(
int(np.round(np.abs(dmean-dmin-1)) / annuli_width + 1),
dtype=float) * annuli_width
second_distarray = dmean + dmin - 1 + np.arange(
int(np.round(dmax-dmean) / (4*annuli_width) + 1),
dtype=float) * 4*annuli_width
distarray_lambdaonD = np.hstack([first_distarray, second_distarray])
if iwa > fast:
distarray_lambdaonD = first_distarray
if distarray_lambdaonD[-1] > dmax:
distarray_lambdaonD[-1] = dmax
annuli_limits = oversampling_fact * 2 * distarray_lambdaonD # in pixels
else:
distarray_lambdaonD = dmin + np.arange(
int(np.round(dmax-dmin) / annuli_width + 1),
dtype=float) * annuli_width
distarray_lambdaonD[-1] = dmax
annuli_limits = np.floor(oversampling_fact * 2
* distarray_lambdaonD).astype(int)
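# e.g. (illustrative numbers): with iwa=1, owa=5, annuli_width=1 and
# oversampling_fact=2, distarray_lambdaonD is [1, 2, 3, 4, 5] lambda/D and
# annuli_limits becomes floor(4 * [1, 2, 3, 4, 5]) = [4, 8, 12, 16, 20] pixels.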
while dmax*(2*oversampling_fact) < annuli_limits[-1]:
# remove last element:
annuli_limits = annuli_limits[:-1] # view, not a copy!
annuli_number = len(annuli_limits) - 1
info("Using these user parameters, {} annuli will be processed, from a "
"separation of {} to {} pixels.", annuli_number, annuli_limits[0],
annuli_limits[-1])
# ===== main loop
res_all = pool_map(nproc, _process_annulus,
# start with outer annuli, they take longer:
fixed(range(annuli_number)[::-1]),
annuli_limits, roa, min_sep, oversampling_fact,
angles, opt_method, multiply_gamma, psf_cube,
homogeneous_variance, verbose, msg="annulus",
leave=False, verbose=False)
for res in res_all:
if res is None:
continue
flux += res[0]
snr += res[1]
likelihood += res[2]
stdflux += res[3]
# translating into contrast:
# flux_factor: float or 2d array, depending on tnd
factor = 1/psf_scale_factor
flux_factor = factor * tnd * (ditpsf/ditimg)
if verbose:
print("[34m", "psf_scale_factor:", psf_scale_factor, "[0m")
print("[34m", "tnd:", tnd, "[0m")
print("[34m", "ditpsf:", ditpsf, "[0m")
print("[34m", "ditimg:", ditimg, "[0m")
print("[34m", "flux_factor:", flux_factor, "[0m")
# post-processing of the output:
if nsmooth_snr != 0:
if verbose:
print("Normalizing SNR...")
# normalize snr map by its radial robust std:
snr_norm, snr_std = normalize_snr(snr, nsmooth_snr=nsmooth_snr,
fast=fast)
# normalization of the std of the flux (same way):
stdflux_norm = np.zeros((npix, npix))
zone = snr_std != 0
stdflux_norm[zone] = stdflux[zone] * snr_std[zone]
ext_radius = annuli_limits[annuli_number-1] / (2*oversampling_fact)
# TODO: return value handling should be improved.
return (flux * flux_factor, # IDL RETURN
snr, # snr_output
snr_norm, # snr_norm_output
stdflux * flux_factor, # IDL stddevcontrast_output
stdflux_norm * flux_factor, # IDL stddevcontrast_norm_output
likelihood, # IDL likelihood_output
ext_radius) # IDL ext_radius_output, [lambda/D]
# previous return values:
# return flux, snr_norm, likelihood, stdflux_norm, ext_radius
else:
ext_radius = (np.floor(annuli_limits[annuli_number]) /
(2*oversampling_fact))
return (flux * flux_factor, # IDL RETURN
snr, # snr_output
snr, # snr_norm_output
stdflux * flux_factor, # IDL stddevcontrast_output
stdflux * flux_factor, # IDL stddevcontrast_norm_output
likelihood, # IDL likelihood_output
ext_radius) # IDL ext_radius_output [lambda/D]
def _process_annulus(i, annuli_limits, roa, min_sep, oversampling_fact, angles,
opt_method, multiply_gamma, psf_cube,
homogeneous_variance, verbose=False):
"""
Process one single annulus, with diff_images and andromeda_core.
Parameters
----------
i : int
Number of the annulus
**kwargs
Returns
-------
res : tuple
The result of ``andromeda_core``, on the specific annulus.
"""
global CUBE
rhomin = annuli_limits[i]
rhomax = annuli_limits[i+1]
rhomax_opt = np.sqrt(roa*rhomax**2 - (roa-1)*rhomin**2)
# compute indices from min_sep
if verbose:
print(" Pairing frames...")
min_sep_pix = min_sep * oversampling_fact*2
angmin = 2*np.arcsin(min_sep_pix/(2*rhomin))*180/np.pi
index_neg, index_pos, indices_not_used = create_indices(angles, angmin)
if len(indices_not_used) != 0:
if verbose:
print(" WARNING: {} frame(s) cannot be used because it wasn't "
"possible to find any other frame to couple with them. "
"Their indices are: {}".format(len(indices_not_used),
indices_not_used))
max_sep_pix = 2*rhomin*np.sin(np.deg2rad((max(angles) -
min(angles))/4))
max_sep_ld = max_sep_pix/(2*oversampling_fact)
if verbose:
print(" For all frames to be used in this annulus, the minimum"
" separation must be set at most to {} *lambda/D "
"(corresponding to {} pixels).".format(max_sep_ld,
max_sep_pix))
if index_neg is None:
if verbose:
print(" Warning: No couples found for this distance. "
"Skipping annulus...")
return None
# ===== angular differences
if verbose:
print(" Performing angular difference...")
res = diff_images(cube_pos=CUBE[index_pos], cube_neg=CUBE[index_neg],
rint=rhomin, rext=rhomax_opt,
opt_method=opt_method)
cube_diff, gamma, gamma_prime = res
if not multiply_gamma:
# reset gamma & gamma_prime to 1 (they were returned by diff_images)
gamma = np.ones_like(gamma)
gamma_prime = np.ones_like(gamma_prime)
# TODO: gamma_info_output etc not implemented
# ;Gamma_affine:
# gamma_info_output[0,0,i] = min(gamma_output_ang[*,0])
# gamma_info_output[1,0,i] = max(gamma_output_ang[*,0])
# gamma_info_output[2,0,i] = mean(gamma_output_ang[*,0])
# gamma_info_output[3,0,i] = median(gamma_output_ang[*,0])
# gamma_info_output[4,0,i] = variance(gamma_output_ang[*,0])
# ;Gamma_prime:
# gamma_info_output[0,1,i] = min(gamma_output_ang[*,1])
# gamma_info_output[1,1,i] = max(gamma_output_ang[*,1])
# gamma_info_output[2,1,i] = mean(gamma_output_ang[*,1])
# gamma_info_output[3,1,i] = median(gamma_output_ang[*,1])
# gamma_info_output[4,1,i] = variance(gamma_output_ang[*,1])
#
#
# -> they are returned, no further modification from here on.
# launch andromeda core (:859)
if verbose:
print(" Matching...")
res = andromeda_core(diffcube=cube_diff, index_neg=index_neg,
index_pos=index_pos, angles=angles,
psf_cube=psf_cube,
homogeneous_variance=homogeneous_variance,
rhomin=rhomin, rhomax=rhomax, gamma=gamma,
verbose=verbose)
# TODO: ANDROMEDA v3.1r2 calls `ANDROMEDA_CORE` with `/WITHOUT_GAMMA_INPUT`.
return res # (flux, snr, likelihood, stdflux)
def andromeda_core(diffcube, index_neg, index_pos, angles, psf_cube, rhomin,
rhomax, gamma=None,
homogeneous_variance=True, verbose=False):
"""
Core engine of ANDROMEDA.
Estimates the flux distribution in the observation field from differential
images built from different field rotation angles.
Parameters
----------
diffcube : 3d ndarray
Differential image cube, set of ``npairs`` differential images. Shape
``(npairs, npix, npix)``.
IDL parameter: ``DIFF_IMAGES_INPUT``
index_neg : 1d ndarray
index_pos : 1d ndarray
angles : 1d ndarray
IDL parameter: ``ANGLES_INPUT``
psf_cube : 4d ndarray
IDL parameter: ``PSFCUBE_INPUT``
rhomin : float
IDL parameter: ``RHOMIN_INPUT``
rhomax : float
Ceiled (rounded up) before being used in the per-pixel loop.
IDL parameter: ``RHOMAX_INPUT``
gamma : ndarray or None, optional
IDL parameter: ``GAMMA_INPUT[*, 0]``
homogeneous_variance : bool, optional
IDL parameter: ``HOMOGENEOUS_VARIANCE_INPUT``
verbose : bool, optional
Print more information.
Returns
-------
flux : 2d ndarray
IDL return value
snr : 2d ndarray
IDL output parameter: ``SNR_OUTPUT``
likelihood : 2d ndarray
IDL output parameter: ``LIKELIHOOD_OUTPUT``
stdflux : 2d ndarray
IDL output parameter: ``STDEVFLUX_OUTPUT``
Notes
-----
- IDL 15/05/2018: add a check if there is only one couple and hence
weights_diff_2D = 1.
Differences from IDL implementation
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Upper case parameters/functions refer to the IDL ANDROMEDA implementation.
- IDL ANDROMEDA accepts ``WITHOUT_GAMMA_INPUT`` (boolean, for test) and
``GAMMA_INPUT`` ("tuple" of ``gamma`` and ``gamma_prime``). The
``gamma_prime`` part of ``GAMMA_INPUT`` is never used inside
``ANDROMEDA_CORE``. Instead of these parameters, the python implementation
accepts one single ``gamma`` parameter.
- IDL's ``kmax`` was renamed to ``npairs``.
- **not implemented parameters**:
- The ``POSITIVITY`` parameter is not used any more in ANDROMEDA,
and may be removed in the future. It was removed in the python
implementation.
- ``GOOD_PIXELS_INPUT``
- This is a mask, applied to IDL's ``weight_cut`` and
``weighted_diff_images``. It is functional in ``ANDROMEDA_CORE``,
but not exposed through the ``ANDROMEDA`` function.
- ``MASK_INPUT``
- similar to ``GOOD_PIXELS_INPUT``, but applied to IDL's
``select_pixels`` (which controls which pixels are processed). It
is not exposed to ``ANDROMEDA``.
- ``WEIGHTS_DIFF_INPUT``
- "(optional input) cube of inverse-of-variance maps. If it is not
given the variance is treated as constant in time and computed
empirically for each spatial position."
- in the python implementation, the variance is **always** treated
as constant in time.
- note: ``WEIGHTS_DIFF_INPUT`` is obtained as ``WEIGHTS_OUTPUT``
from ``DIFF_IMAGES``.
- ``PATTERN_OUTPUT``
- this is just an empty ``DBLARR(npix, npix, kmax)``
"""
npairs, npix, _ = diffcube.shape
npixpsf = psf_cube.shape[2] # shape: (p+1, p+1, x, y)
precision = psf_cube.shape[0] - 1
# ===== verify + sanitize input
if npix % 2 == 1:
raise ValueError("size of the cube is odd!")
if npixpsf % 2 == 1:
raise ValueError("PSF has odd pixel size!")
if gamma is None:
if verbose:
print(" ANDROMEDA_CORE: The scaling factor is not taken into "
"account to build the model!")
# calculate variance
if npairs == 1:
variance_diff_2d = 1
else:
variance_diff_2d = ((diffcube**2).sum(0)/npairs
- (diffcube.sum(0)/npairs)**2)
# calculate weights from variance
if homogeneous_variance:
varmean = np.mean(variance_diff_2d) # idlwrap.mean
weights_diff_2d = np.zeros((npix, npix)) + 1/varmean
if verbose:
print(" ANDROMEDA_CORE: Variance is considered homogeneous, mean"
" {:.3f}".format(varmean))
else:
weights_diff_2d = ((variance_diff_2d > 0) /
(variance_diff_2d + (variance_diff_2d == 0)))
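# The expression above yields 1/variance wherever the variance is positive
# and exactly 0 where the variance is 0, thus avoiding a division by zero.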
if verbose:
print(" ANDROMEDA_CORE: Variance is taken equal to the "
"empirical variance in each pixel (inhomogeneous, but "
"constant in time)")
weighted_diff_images = diffcube * weights_diff_2d
# create annuli
d = dist_matrix(npix)
select_pixels = ((d > rhomin) & (d < rhomax))
if verbose:
print(" ANDROMEDA_CORE: working with {} differential images, radius "
"{} to {}".format(npairs, rhomin, rhomax))
# definition of the expected pattern (if a planet is present)
numerator = np.zeros((npix, npix))
denominator = np.ones((npix, npix))
parang = np.array([angles[index_neg], angles[index_pos]])*np.pi/180
# shape (2,npairs) -> array([[1, 2, 3],
# [4, 5, 6]]) (for npairs=3)
# IDL: dimension = SIZE = _, npairs,2, _, _
for j in range(npix//2 - np.ceil(rhomax).astype(int),
npix//2 + np.ceil(rhomax).astype(int)):
for i in range(npix//2 -
|
np.ceil(rhomax)
|
numpy.ceil
|
# Copyright 2018-2021 Xanadu Quantum Technologies Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Unit tests for the StronglyEntanglingLayers template.
"""
import pytest
import numpy as np
import pennylane as qml
from pennylane import numpy as pnp
class TestDecomposition:
"""Tests that the template defines the correct decomposition."""
QUEUES = [
(1, (1, 1, 3), ["Rot"], [[0]]),
(2, (1, 2, 3), ["Rot", "Rot", "CNOT", "CNOT"], [[0], [1], [0, 1], [1, 0]]),
(
2,
(2, 2, 3),
["Rot", "Rot", "CNOT", "CNOT", "Rot", "Rot", "CNOT", "CNOT"],
[[0], [1], [0, 1], [1, 0], [0], [1], [0, 1], [1, 0]],
),
(
3,
(1, 3, 3),
["Rot", "Rot", "Rot", "CNOT", "CNOT", "CNOT"],
[[0], [1], [2], [0, 1], [1, 2], [2, 0]],
),
]
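# Each QUEUES entry is (n_wires, weight_shape, expected gate names, expected
# wire labels), matching the parametrize signature of test_expansion below.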
@pytest.mark.parametrize("n_wires, weight_shape, expected_names, expected_wires", QUEUES)
def test_expansion(self, n_wires, weight_shape, expected_names, expected_wires):
"""Checks the queue for the default settings."""
weights = np.random.random(size=weight_shape)
op = qml.templates.StronglyEntanglingLayers(weights, wires=range(n_wires))
tape = op.expand()
for i, gate in enumerate(tape.operations):
assert gate.name == expected_names[i]
assert gate.wires.labels == tuple(expected_wires[i])
@pytest.mark.parametrize("n_layers, n_wires", [(2, 2), (1, 3), (2, 4)])
def test_uses_correct_imprimitive(self, n_layers, n_wires):
"""Test that correct number of entanglers are used in the circuit."""
weights =
|
np.random.randn(n_layers, n_wires, 3)
|
numpy.random.randn
|
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
#
# Copyright (c) 2019, Eurecat / UPF
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the <organization> nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
#
# @file absorption_module.py
# @author <NAME>
# @date 30/07/2019
#
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
import copy
import numpy as np
from .echogram import Echogram
from masp.validate_data_types import _validate_echogram, _validate_ndarray_2D, _validate_ndarray_1D
from masp.utils import C
def apply_absorption(echogram, alpha, limits=None):
"""
Applies per-band wall absorption to a given echogram.
Parameters
----------
echogram : Echogram
Target Echogram
alpha : ndarray
Wall absorption coefficients per band. Dimension = (nBands, 6)
limits : ndarray, optional
Maximum reflection time per band (RT60). Dimension = (nBands)
Returns
-------
abs_echograms : ndarray, dtype = Echogram
Array with echograms subject to absorption. Dimension = (1, nBands)
Raises
------
TypeError, ValueError: if method arguments mismatch in type, dimension or value.
Notes
-----
`nBands` will be determined by the length of `alpha` first dimension.
`alpha` must have all values in the range [0,1].
If `limits` is not specified, the echograms are not truncated in time
before the absorption is applied.
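Examples
--------
A minimal sketch (illustrative values only; it assumes an `Echogram`
instance `echo` has already been built elsewhere, e.g. by the image-source
routines)::

    nBands = 2
    alpha = np.full((nBands, 6), 0.2)  # 20% absorption on all six walls, both bands
    abs_echos = apply_absorption(echo, alpha)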
"""
# Validate arguments
_validate_echogram(echogram)
_validate_ndarray_2D('abs_wall', alpha, shape1=2*C, norm=True)
nBands = alpha.shape[0]
if limits is not None:
_validate_ndarray_1D('limits', limits, size=nBands, positive=True)
abs_echograms = np.empty(nBands, dtype=Echogram)
if limits is None:
for i in range(nBands):
abs_echograms[i] = copy.copy(echogram)
else:
for nb in range(nBands):
# Find index of last echogram time element smaller than the given limit
idx_limit = np.arange(len(echogram.time))[echogram.time < limits[nb]][-1]
# idx_limit = echogram.time[echogram.time < limits[nb]].size
abs_echograms[nb] = Echogram(value=echogram.value[:idx_limit+1],
time=echogram.time[:idx_limit+1],
order=echogram.order[:idx_limit+1],
coords=echogram.coords[:idx_limit+1])
for nb in range(nBands):
# Absorption coefficients for x, y, z walls per frequency
a_x = alpha[nb, 0:2]
a_y = alpha[nb, 2:4]
a_z = alpha[nb, 4:6]
# Reflection coefficients
r_x = np.sqrt(1 - a_x)
r_y = np.sqrt(1 - a_y)
r_z = np.sqrt(1 - a_z)
# Split
i = abs_echograms[nb].order[:, 0]
j = abs_echograms[nb].order[:, 1]
k = abs_echograms[nb].order[:, 2]
i_even = i[np.remainder(i, 2) == 0]
i_odd = i[np.remainder(i, 2) != 0]
i_odd_pos = i_odd[i_odd > 0]
i_odd_neg = i_odd[i_odd < 0]
j_even = j[np.remainder(j, 2) == 0]
j_odd = j[np.remainder(j, 2) != 0]
j_odd_pos = j_odd[j_odd > 0]
j_odd_neg = j_odd[j_odd < 0]
k_even = k[np.remainder(k, 2) == 0]
k_odd = k[np.remainder(k, 2) != 0]
k_odd_pos = k_odd[k_odd > 0]
k_odd_neg = k_odd[k_odd < 0]
# Find total absorption coefficients by calculating the
# number of hits on every surface, based on the order per dimension
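# e.g. (illustrative): a reflection of order i = 3 along x has bounced off one
# x-wall ceil(3/2) = 2 times and off the opposite x-wall floor(3/2) = 1 time,
# so its amplitude is scaled by r_x[0]**2 * r_x[1]**1.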
abs_x = np.zeros(np.size(abs_echograms[nb].time))
abs_x[np.remainder(i, 2) == 0] = np.power(r_x[0], (np.abs(i_even) / 2.)) * np.power(r_x[1], (np.abs(i_even) / 2.))
abs_x[(np.remainder(i, 2) != 0) & (i > 0)] = np.power(r_x[0], np.ceil(i_odd_pos / 2.)) * np.power(r_x[1], np.floor(i_odd_pos / 2.))
abs_x[(np.remainder(i, 2) != 0) & (i < 0)] = np.power(r_x[0], np.floor(np.abs(i_odd_neg) / 2.)) * np.power(r_x[1], np.ceil(np.abs(i_odd_neg) / 2.))
abs_y = np.zeros(np.size(abs_echograms[nb].time))
abs_y[
|
np.remainder(j, 2)
|
numpy.remainder
|
import numpy as np
import pandas as pd
from scipy.stats.distributions import chi2, norm
from statsmodels.graphics import utils
def _calc_survfunc_right(time, status):
"""
Calculate the survival function and its standard error for a single
group.
"""
time = np.asarray(time)
status = np.asarray(status)
# Convert the unique times to ranks (0, 1, 2, ...)
time, rtime = np.unique(time, return_inverse=True)
# Number of deaths at each unique time.
d = np.bincount(rtime, weights=status)
# Size of risk set just prior to each event time.
n = np.bincount(rtime)
n = np.cumsum(n[::-1])[::-1]
# Only retain times where an event occurred.
ii = np.flatnonzero(d > 0)
d = d[ii]
n = n[ii]
time = time[ii]
# The survival function probabilities.
sp = 1 - d / n.astype(np.float64)
sp = np.log(sp)
sp = np.cumsum(sp)
sp = np.exp(sp)
# Standard errors (Greenwood's formula).
se = d / (n * (n - d)).astype(np.float64)
se = np.cumsum(se)
se = np.sqrt(se)
se *= sp
return sp, se, time, n, d
class SurvfuncRight(object):
"""
Estimation and inference for a survival function.
Only right censoring is supported.
Parameters
----------
time : array-like
An array of times (censoring times or event times)
status : array-like
Status at the event time, status==1 is the 'event'
(e.g. death, failure), meaning that the event
occurs at the given value in `time`; status==0
indicates that censoring has occurred, meaning that
the event occurs after the given value in `time`.
title : string
Optional title used for plots and summary output.
Attributes
----------
surv_prob : array-like
The estimated value of the survivor function at each time
point in `surv_times`.
surv_prob_se : array-like
The standard errors for the values in `surv_prob`.
surv_times : array-like
The points where the survival function changes.
n_risk : array-like
The number of subjects at risk just before each time value in
`surv_times`.
n_events : array-like
The number of events (e.g. deaths) that occur at each point
in `surv_times`.
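Examples
--------
A minimal sketch with made-up data (times in arbitrary units; status 1
marks an observed event, 0 a censored observation):

>>> times = [5, 8, 8, 12, 20]
>>> status = [1, 0, 1, 1, 0]
>>> sf = SurvfuncRight(times, status, title="toy data")
>>> summary = sf.summary()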
"""
def __init__(self, time, status, title=None):
self.time = np.asarray(time)
self.status = np.asarray(status)
m = len(status)
x = _calc_survfunc_right(time, status)
self.surv_prob = x[0]
self.surv_prob_se = x[1]
self.surv_times = x[2]
self.n_risk = x[3]
self.n_events = x[4]
self.title = "" if not title else title
def plot(self, ax=None):
"""
Plot the survival function.
Examples
--------
Change the line color:
>>> fig = sf.plot()
>>> ax = fig.get_axes()[0]
>>> li = ax.get_lines()
>>> li[0].set_color('purple')
>>> li[1].set_color('purple')
Don't show the censoring points:
>>> fig = sf.plot()
>>> ax = fig.get_axes()[0]
>>> li = ax.get_lines()
>>> li[1].set_visible(False)
"""
return plot_survfunc(self, ax)
def quantile(self, p):
"""
Estimated quantile of a survival distribution.
Parameters
----------
p : float
The probability point at which the quantile
is determined.
Returns the estimated quantile.
"""
# SAS uses a strict inequality here.
ii = np.flatnonzero(self.surv_prob < 1 - p)
if len(ii) == 0:
return np.nan
return self.surv_times[ii[0]]
def quantile_ci(self, p, alpha=0.05, method='cloglog'):
"""
Returns a confidence interval for a survival quantile.
Parameters
----------
p : float
The probability point for which a confidence interval is
determined.
alpha : float
The confidence interval has nominal coverage probability
1 - `alpha`.
method : string
Function to use for the g-transformation, must be one of 'cloglog'
(default), 'linear', 'log', 'logit' or 'asinsqrt'.
Returns
-------
lb : float
The lower confidence limit.
ub : float
The upper confidence limit.
Notes
-----
The confidence interval is obtained by inverting Z-tests. The
limits of the confidence interval will always be observed
event times.
References
----------
The method is based on the approach used in SAS, documented here:
http://support.sas.com/documentation/cdl/en/statug/68162/HTML/default/viewer.htm#statug_lifetest_details03.htm
"""
tr = norm.ppf(1 - alpha / 2)
method = method.lower()
if method == "cloglog":
g = lambda x : np.log(-np.log(x))
gprime = lambda x : -1 / (x * np.log(x))
elif method == "linear":
g = lambda x : x
gprime = lambda x : 1
elif method == "log":
g = lambda x : np.log(x)
gprime = lambda x : 1 / x
elif method == "logit":
g = lambda x : np.log(x / (1 - x))
gprime = lambda x : 1 / (x * (1 - x))
elif method == "asinsqrt":
g = lambda x : np.arcsin(np.sqrt(x))
gprime = lambda x : 1 / (2 * np.sqrt(x) * np.sqrt(1 - x))
else:
raise ValueError("unknown method")
r = g(self.surv_prob) - g(1 - p)
r /= (gprime(self.surv_prob) * self.surv_prob_se)
ii = np.flatnonzero(np.abs(r) <= tr)
if len(ii) == 0:
return np.nan, np.nan
lb = self.surv_times[ii[0]]
if ii[-1] == len(self.surv_times) - 1:
ub = np.inf
else:
ub = self.surv_times[ii[-1] + 1]
return lb, ub
def summary(self):
"""
Return a summary of the estimated survival function.
The summary is a dataframe containing the unique event times,
estimated survival function values, and related quantities.
"""
df = pd.DataFrame(index=self.surv_times)
df.index.name = "Time"
df["Surv prob"] = self.surv_prob
df["Surv prob SE"] = self.surv_prob_se
df["num at risk"] = self.n_risk
df["num events"] = self.n_events
return df
def simultaneous_cb(self, alpha=0.05, method="hw", transform="log"):
"""
Returns a simultaneous confidence band for the survival function.
Parameters
----------
alpha : float
`1 - alpha` is the desired simultaneous coverage
probability for the confidence region. Currently alpha
must be set to 0.05, giving 95% simultaneous intervals.
method : string
The method used to produce the simultaneous confidence
band. Only the Hall-Wellner (hw) method is currently
implemented.
transform : string
The transform used to produce the interval (note that the returned
interval is on the survival probability scale regardless
of which transform is used). Only `log` and `arcsin` are
implemented.
Returns
-------
lcb : array-like
The lower confidence limits corresponding to the points
in `surv_times`.
ucb : array-like
The upper confidence limits corresponding to the points
in `surv_times`.
"""
method = method.lower()
if method != "hw":
raise ValueError("only the Hall-Wellner (hw) method is implemented")
if alpha != 0.05:
raise ValueError("alpha must be set to 0.05")
transform = transform.lower()
s2 = self.surv_prob_se**2 / self.surv_prob**2
nn = self.n_risk
if transform == "log":
denom =
|
np.sqrt(nn)
|
numpy.sqrt
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Feb 28 15:20:56 2021
@author: guo.1648
"""
# Use faiss to approximate NN here, instead of using sklearn NN.
from tqdm import tqdm
import pandas as pd
import numpy as np
#from sklearn.neighbors import NearestNeighbors
#from scipy.spatial import cKDTree
import faiss
import matplotlib.pyplot as plt
import cv2
import os
import pickle
"""
#### for CelebA_128_sub200: 200 images dataset
srcRootDir_originDataImg = '/eecf/cbcsl/data100b/Chenqi/BigGAN-PyTorch/imgs/CelebA_128_sub200/' # these images are generated from tfrecords using code tmp.py
dstRootDir_figName = '/eecf/cbcsl/data100b/Chenqi/dataset_complexity/hist_intdim_mle/CelebA_128_sub200/'
dstRootDir_pkl = '/eecf/cbcsl/data100b/Chenqi/dataset_complexity/hist_intdim_mle/CelebA_128_sub200/intdim_k_repeated_dicts_sz128.pkl' #sz32
nameFlag = 'CelebA_128_sub200'
"""
"""
#### for CelebA_128_sub600: 600 images dataset
srcRootDir_originDataImg = '/eecf/cbcsl/data100b/Chenqi/BigGAN-PyTorch/imgs/CelebA_128_sub600/' # these images are generated from tfrecords using code tmp.py
dstRootDir_figName = '/eecf/cbcsl/data100b/Chenqi/dataset_complexity/hist_intdim_mle/CelebA_128_sub600/'
dstRootDir_pkl = '/eecf/cbcsl/data100b/Chenqi/dataset_complexity/hist_intdim_mle/CelebA_128_sub600/intdim_k_repeated_dicts_sz128.pkl' #sz32
nameFlag = 'CelebA_128_sub600'
"""
"""
#### for CelebA_128_sub1000: 1000 images dataset
srcRootDir_originDataImg = '/eecf/cbcsl/data100b/Chenqi/stylegan2/datasets_images/CelebA_128_sub1000/jpg/' # these images are generated from tfrecords using code mycode_loadImgFromTFrecords.py
dstRootDir_figName = '/eecf/cbcsl/data100b/Chenqi/dataset_complexity/hist_intdim_mle/CelebA_128_sub1000/'
dstRootDir_pkl = '/eecf/cbcsl/data100b/Chenqi/dataset_complexity/hist_intdim_mle/CelebA_128_sub1000/intdim_k_repeated_dicts_sz128.pkl' #sz32
nameFlag = 'CelebA_128_sub1000'
"""
"""
#### for CelebA_128_sub4000: 4000 images dataset
srcRootDir_originDataImg = '/eecf/cbcsl/data100b/Chenqi/stylegan2/datasets_images/CelebA_128_sub4000/' # these images are generated from tfrecords using code mycode_loadImgFromTFrecords.py
dstRootDir_figName = '/eecf/cbcsl/data100b/Chenqi/dataset_complexity/hist_intdim_mle/CelebA_128_sub4000/'
dstRootDir_pkl = '/eecf/cbcsl/data100b/Chenqi/dataset_complexity/hist_intdim_mle/CelebA_128_sub4000/intdim_k_repeated_dicts_sz128.pkl' #sz32
nameFlag = 'CelebA_128_sub4000'
"""
"""
#### for CelebA_128_sub8000: 8000 images dataset
srcRootDir_originDataImg = '/eecf/cbcsl/data100b/Chenqi/stylegan2/datasets_images/CelebA_128_sub8000/' # these images are generated from tfrecords using code mycode_loadImgFromTFrecords.py
dstRootDir_figName = '/eecf/cbcsl/data100b/Chenqi/dataset_complexity/hist_intdim_mle/CelebA_128_sub8000/'
dstRootDir_pkl = '/eecf/cbcsl/data100b/Chenqi/dataset_complexity/hist_intdim_mle/CelebA_128_sub8000/intdim_k_repeated_dicts_sz128.pkl' #sz32
nameFlag = 'CelebA_128_sub8000'
"""
"""
#### for MNIST_128_sub10000: 10000 images dataset
srcRootDir_originDataImg = '/eecf/cbcsl/data100b/Chenqi/stylegan2/datasets_images/MNIST_128_sub10000/' # these images are generated from tfrecords using code mycode_loadImgFromTFrecords.py
dstRootDir_figName = '/eecf/cbcsl/data100b/Chenqi/dataset_complexity/hist_intdim_mle/MNIST_128_sub10000/'
dstRootDir_pkl = '/eecf/cbcsl/data100b/Chenqi/dataset_complexity/hist_intdim_mle/MNIST_128_sub10000/intdim_k_repeated_dicts_sz32.pkl'
nameFlag = 'MNIST_128_sub10000'
"""
"""
#### for MNIST_128_sub30000: 30000 images dataset
srcRootDir_originDataImg = '/eecf/cbcsl/data100b/Chenqi/stylegan2/datasets_images/MNIST_128_sub30000/' # these images are generated from tfrecords using code mycode_loadImgFromTFrecords.py
dstRootDir_figName = '/eecf/cbcsl/data100b/Chenqi/dataset_complexity/hist_intdim_mle/MNIST_128_sub30000/'
dstRootDir_pkl = '/eecf/cbcsl/data100b/Chenqi/dataset_complexity/hist_intdim_mle/MNIST_128_sub30000/intdim_k_repeated_dicts_sz32.pkl'
nameFlag = 'MNIST_128_sub30000'
"""
"""
#### for MNIST_128_train: 60000 images dataset
srcRootDir_originDataImg = '/eecf/cbcsl/data100b/Chenqi/data/MNIST/resized/train/train_60000/' # these images are generated from tfrecords using code mycode_loadImgFromTFrecords.py
dstRootDir_figName = '/eecf/cbcsl/data100b/Chenqi/dataset_complexity/hist_intdim_mle/MNIST_128_train/'
dstRootDir_pkl = '/eecf/cbcsl/data100b/Chenqi/dataset_complexity/hist_intdim_mle/MNIST_128_train/intdim_k_repeated_dicts_sz32.pkl'
nameFlag = 'MNIST_128_train'
"""
"""
#### for LSUN_128_sub200: 200 images dataset
srcRootDir_originDataImg = '/eecf/cbcsl/data100b/Chenqi/stylegan2/datasets_images/LSUN_128_sub200/' # these images are generated from tfrecords using code mycode_loadImgFromTFrecords.py
dstRootDir_figName = '/eecf/cbcsl/data100b/Chenqi/dataset_complexity/hist_intdim_mle/LSUN_128_sub200/'
dstRootDir_pkl = '/eecf/cbcsl/data100b/Chenqi/dataset_complexity/hist_intdim_mle/LSUN_128_sub200/intdim_k_repeated_dicts_sz128.pkl' #sz32
nameFlag = 'LSUN_128_sub200'
"""
"""
#### for LSUN_128_sub1000: 1000 images dataset
srcRootDir_originDataImg = '/eecf/cbcsl/data100b/Chenqi/stylegan2/datasets_images/LSUN_128_sub1000/' # these images are generated from tfrecords using code mycode_loadImgFromTFrecords.py
dstRootDir_figName = '/eecf/cbcsl/data100b/Chenqi/dataset_complexity/hist_intdim_mle/LSUN_128_sub1000/'
dstRootDir_pkl = '/eecf/cbcsl/data100b/Chenqi/dataset_complexity/hist_intdim_mle/LSUN_128_sub1000/intdim_k_repeated_dicts_sz128.pkl' #sz32
nameFlag = 'LSUN_128_sub1000'
"""
"""
#### for LSUN_128_sub5000: 5000 images dataset
srcRootDir_originDataImg = '/eecf/cbcsl/data100b/Chenqi/stylegan2/datasets_images/LSUN_128_sub5000/' # these images are generated from tfrecords using code mycode_loadImgFromTFrecords.py
dstRootDir_figName = '/eecf/cbcsl/data100b/Chenqi/dataset_complexity/hist_intdim_mle/LSUN_128_sub5000/'
dstRootDir_pkl = '/eecf/cbcsl/data100b/Chenqi/dataset_complexity/hist_intdim_mle/LSUN_128_sub5000/intdim_k_repeated_dicts_sz128.pkl' #sz32
nameFlag = 'LSUN_128_sub5000'
"""
"""
#### for LSUN_128_sub10000: 10000 images dataset
srcRootDir_originDataImg = '/eecf/cbcsl/data100b/Chenqi/stylegan2/datasets_images/LSUN_128_sub10000/' # these images are generated from tfrecords using code mycode_loadImgFromTFrecords.py
dstRootDir_figName = '/eecf/cbcsl/data100b/Chenqi/dataset_complexity/hist_intdim_mle/LSUN_128_sub10000/'
dstRootDir_pkl = '/eecf/cbcsl/data100b/Chenqi/dataset_complexity/hist_intdim_mle/LSUN_128_sub10000/intdim_k_repeated_dicts_sz128.pkl' #sz32
nameFlag = 'LSUN_128_sub10000'
"""
"""
#### for LSUN_128_sub30000: 30000 images dataset
srcRootDir_originDataImg = '/eecf/cbcsl/data100b/Chenqi/stylegan2/datasets_images/LSUN_128_sub30000/' # these images are generated from tfrecords using code mycode_loadImgFromTFrecords.py
dstRootDir_figName = '/eecf/cbcsl/data100b/Chenqi/dataset_complexity/hist_intdim_mle/LSUN_128_sub30000/'
dstRootDir_pkl = '/eecf/cbcsl/data100b/Chenqi/dataset_complexity/hist_intdim_mle/LSUN_128_sub30000/intdim_k_repeated_dicts_sz128.pkl' #sz32
nameFlag = 'LSUN_128_sub30000'
"""
"""
#### for FLOWER_128_sub1000: 1000 images dataset
srcRootDir_originDataImg = '/eecf/cbcsl/data100b/Chenqi/stylegan2/datasets_images/FLOWER_128_sub1000/jpg/' # these images are generated from tfrecords using code mycode_loadImgFromTFrecords.py
dstRootDir_figName = '/eecf/cbcsl/data100b/Chenqi/dataset_complexity/hist_intdim_mle/FLOWER_128_sub1000/'
dstRootDir_pkl = '/eecf/cbcsl/data100b/Chenqi/dataset_complexity/hist_intdim_mle/FLOWER_128_sub1000/intdim_k_repeated_dicts_sz128.pkl' #sz32
nameFlag = 'FLOWER_128_sub1000'
"""
"""
#### for FLOWER_128_sub2000: 2000 images dataset
srcRootDir_originDataImg = '/scratch/BigGAN-PyTorch/imgs/FLOWER_128_sub2000/'
dstRootDir_figName = '/eecf/cbcsl/data100b/Chenqi/dataset_complexity/hist_intdim_mle/FLOWER_128_sub2000/'
dstRootDir_pkl = '/eecf/cbcsl/data100b/Chenqi/dataset_complexity/hist_intdim_mle/FLOWER_128_sub2000/intdim_k_repeated_dicts_sz128.pkl' #sz32
nameFlag = 'FLOWER_128_sub2000'
"""
"""
#### for FLOWER_128_sub6000: 6000 images dataset
srcRootDir_originDataImg = '/eecf/cbcsl/data100b/Chenqi/BigGAN-PyTorch/imgs/FLOWER_128_sub6000/'
dstRootDir_figName = '/eecf/cbcsl/data100b/Chenqi/dataset_complexity/hist_intdim_mle/FLOWER_128_sub6000/'
dstRootDir_pkl = '/eecf/cbcsl/data100b/Chenqi/dataset_complexity/hist_intdim_mle/FLOWER_128_sub6000/intdim_k_repeated_dicts_sz128.pkl' #sz32
nameFlag = 'FLOWER_128_sub6000'
"""
"""
#### for FLOWER_128: whole (~8000) images dataset
srcRootDir_originDataImg = '/eecf/cbcsl/data100b/Chenqi/stylegan2/datasets_images/FLOWER_128/jpg/' # these images are generated from tfrecords using code mycode_loadImgFromTFrecords.py
dstRootDir_figName = '/eecf/cbcsl/data100b/Chenqi/dataset_complexity/hist_intdim_mle/FLOWER_128/'
dstRootDir_pkl = '/eecf/cbcsl/data100b/Chenqi/dataset_complexity/hist_intdim_mle/FLOWER_128/intdim_k_repeated_dicts_sz128.pkl' #sz32
nameFlag = 'FLOWER_128'
"""
## for rebuttal:
"""# FLOWER_256 use SAME as FLOWER_128 --> both are computed from 32x32 !!!!
#### for FLOWER_256_sub1000: 1000 images dataset
srcRootDir_originDataImg = '/eecf/cbcsl/data100b/Chenqi/stylegan2/datasets_images/FLOWER_256_sub1000/'
dstRootDir_figName = '/eecf/cbcsl/data100b/Chenqi/dataset_complexity/hist_intdim_mle/FLOWER_256_sub1000/'
dstRootDir_pkl = '/eecf/cbcsl/data100b/Chenqi/dataset_complexity/hist_intdim_mle/FLOWER_256_sub1000/intdim_k_repeated_dicts_sz32.pkl' #sz32
nameFlag = 'FLOWER_256_sub1000'
"""
"""
#### for CIFAR10_32_sub1000: 1000 images dataset
srcRootDir_originDataImg = '/eecf/cbcsl/data100b/Chenqi/stylegan2/datasets_images/CIFAR10_32_sub1000/'
dstRootDir_figName = '/eecf/cbcsl/data100b/Chenqi/dataset_complexity/hist_intdim_mle/CIFAR10_32_sub1000/'
dstRootDir_pkl = '/eecf/cbcsl/data100b/Chenqi/dataset_complexity/hist_intdim_mle/CIFAR10_32_sub1000/intdim_k_repeated_dicts_sz32.pkl' #sz32
nameFlag = 'CIFAR10_32_sub1000'
"""
"""
#### for CIFAR10_32_sub4000: 4000 images dataset
srcRootDir_originDataImg = '/eecf/cbcsl/data100b/Chenqi/stylegan2/datasets_images/CIFAR10_32_sub4000/'
dstRootDir_figName = '/eecf/cbcsl/data100b/Chenqi/dataset_complexity/hist_intdim_mle/CIFAR10_32_sub4000/'
dstRootDir_pkl = '/eecf/cbcsl/data100b/Chenqi/dataset_complexity/hist_intdim_mle/CIFAR10_32_sub4000/intdim_k_repeated_dicts_sz32.pkl' #sz32
nameFlag = 'CIFAR10_32_sub4000'
"""
"""
#### for CIFAR10_32_sub8000: 8000 images dataset
srcRootDir_originDataImg = '/eecf/cbcsl/data100b/Chenqi/stylegan2/datasets_images/CIFAR10_32_sub8000/'
dstRootDir_figName = '/eecf/cbcsl/data100b/Chenqi/dataset_complexity/hist_intdim_mle/CIFAR10_32_sub8000/'
dstRootDir_pkl = '/eecf/cbcsl/data100b/Chenqi/dataset_complexity/hist_intdim_mle/CIFAR10_32_sub8000/intdim_k_repeated_dicts_sz32.pkl' #sz32
nameFlag = 'CIFAR10_32_sub8000'
"""
#### for CIFAR10_32_sub10000: 10000 images dataset
srcRootDir_originDataImg = '/eecf/cbcsl/data100b/Chenqi/stylegan2/datasets_images/CIFAR10_32_sub10000/'
dstRootDir_figName = '/eecf/cbcsl/data100b/Chenqi/dataset_complexity/hist_intdim_mle/CIFAR10_32_sub10000/'
dstRootDir_pkl = '/eecf/cbcsl/data100b/Chenqi/dataset_complexity/hist_intdim_mle/CIFAR10_32_sub10000/intdim_k_repeated_dicts_sz32.pkl' #sz32
nameFlag = 'CIFAR10_32_sub10000'
biFlag = False
#biFlag = True # for MNIST dataset
def image_to_feature_vector(image):
# Note: the image is already resized to a fixed size.
# flatten the image into a list of raw pixel intensities:
return image.flatten()
def generateData_v2(len_featVec, dim):
# referenced from func generateTrainSet()
# generate data X from image dataset (each row represents an image)
all_origin_img_vecs = []
for (dirpath, dirnames, filenames) in os.walk(srcRootDir_originDataImg):
for filename in filenames:
if ".jpg" in filename or ".png" in filename:
#print("------------------deal with---------------------")
#print(filename)
origin_img = cv2.imread(srcRootDir_originDataImg+filename)
if biFlag:
origin_img = origin_img[:,:,0]
"""
# NO need to do this here: already 128x128 !
origin_img_centCrop = my_center_crop(origin_img, min(origin_img.shape[0],origin_img.shape[1]))
"""
# resize using linear interpolation:
#if origin_img.shape[0] != dim[0]:
origin_img_resize = cv2.resize(origin_img, dim)
# convert it to feature vector:
origin_img_resize_vec = image_to_feature_vector(origin_img_resize)
assert(len(origin_img_resize_vec)==len_featVec)
all_origin_img_vecs.append(origin_img_resize_vec)
return np.array(all_origin_img_vecs)
def intrinsic_dim_sample_wise(X, k=5):
"""
neighb = NearestNeighbors(n_neighbors=k + 1).fit(X) # NOT using sklearn NN here!
dist, ind = neighb.kneighbors(X)
dist = dist[:, 1:]
dist = dist[:, 0:k]
assert dist.shape == (X.shape[0], k)
assert np.all(dist > 0)
d = np.log(dist[:, k - 1: k] / dist[:, 0:k-1])
d = d.sum(axis=1) / (k - 2)
d = 1. / d
intdim_sample = d
"""
X_dim = X.shape[1]
# build the index
index = faiss.IndexFlatL2(X_dim)
#print(index.is_trained)
# add vectors (i.e. the training data) to the index
X_float = X.astype('float32')
index.add(X_float)
#print(index.ntotal)
# Nearest Neighbor search
dist_square, ind = index.search(X_float, k+1) # or k?
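# k+1 neighbours are requested because faiss returns each query point as its
# own nearest neighbour (distance 0); that self-match is dropped below via
# dist[:, 1:].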
dist = np.sqrt(dist_square)
dist = dist[:, 1:]
dist = dist[:, 0:k]
assert dist.shape == (X.shape[0], k)
# newly modified:
#assert np.all(dist > 0)
if not np.all(dist > 0):
idx_row, _ =
|
np.where(dist <= 0)
|
numpy.where
|
import numpy as np
x = 1.0
y = 2.0
print(np.exp(x)) #e^x
print(np.log(x)) #ln x
print(
|
np.log10(x)
|
numpy.log10
|
# %% [markdown]
# ##
import os
import warnings
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.transforms as transforms
import numpy as np
import pandas as pd
import seaborn as sns
from joblib import Parallel, delayed
from sklearn.exceptions import ConvergenceWarning
from sklearn.manifold import MDS, TSNE, Isomap
from sklearn.metrics import pairwise_distances
from sklearn.neighbors import NearestNeighbors
from sklearn.utils.testing import ignore_warnings
from tqdm.autonotebook import tqdm
from umap import UMAP
from graspy.embed import (
AdjacencySpectralEmbed,
ClassicalMDS,
LaplacianSpectralEmbed,
OmnibusEmbed,
select_dimension,
selectSVD,
)
from graspy.plot import pairplot
from graspy.simulations import sbm
from graspy.utils import (
augment_diagonal,
binarize,
pass_to_ranks,
symmetrize,
to_laplace,
)
from src.align import Procrustes
from src.cluster import MaggotCluster, get_paired_inds
from src.data import load_metagraph
from src.graph import preprocess
from src.hierarchy import signal_flow
from src.io import savecsv, savefig
from src.visualization import (
CLASS_COLOR_DICT,
add_connections,
adjplot,
barplot_text,
draw_networkx_nice,
gridmap,
matrixplot,
palplot,
screeplot,
set_axes_equal,
stacked_barplot,
)
warnings.filterwarnings(action="ignore", category=ConvergenceWarning)
FNAME = os.path.basename(__file__)[:-3]
print(FNAME)
rc_dict = {
"axes.spines.right": False,
"axes.spines.top": False,
"axes.formatter.limits": (-3, 3),
"figure.figsize": (6, 3),
"figure.dpi": 100,
}
for key, val in rc_dict.items():
mpl.rcParams[key] = val
context = sns.plotting_context(context="talk", font_scale=1, rc=rc_dict)
sns.set_context(context)
np.random.seed(8888)
def stashfig(name, **kws):
savefig(name, foldername=FNAME, save_on=True, **kws)
def stashcsv(df, name, **kws):
savecsv(df, name, foldername=FNAME, **kws)
graph_type = "G"
def plot_pairs(
X, labels, model=None, left_pair_inds=None, right_pair_inds=None, equal=False
):
n_dims = X.shape[1]
fig, axs = plt.subplots(
n_dims, n_dims, sharex=False, sharey=False, figsize=(20, 20)
)
data = pd.DataFrame(data=X)
data["label"] = labels
for i in range(n_dims):
for j in range(n_dims):
ax = axs[i, j]
ax.axis("off")
if i < j:
sns.scatterplot(
data=data,
x=j,
y=i,
ax=ax,
alpha=0.7,
linewidth=0,
s=8,
legend=False,
hue="label",
palette=CLASS_COLOR_DICT,
)
if left_pair_inds is not None and right_pair_inds is not None:
add_connections(
data.iloc[left_pair_inds, j],
data.iloc[right_pair_inds, j],
data.iloc[left_pair_inds, i],
data.iloc[right_pair_inds, i],
ax=ax,
)
plt.tight_layout()
return fig, axs
def preprocess_adjs(adjs, method="ase"):
adjs = [pass_to_ranks(a) for a in adjs]
adjs = [a + 1 / a.size for a in adjs]
if method == "ase":
adjs = [augment_diagonal(a) for a in adjs]
elif method == "lse":
adjs = [to_laplace(a) for a in adjs]
return adjs
def omni(
adjs,
n_components=4,
remove_first=None,
concat_graphs=True,
concat_directed=True,
method="ase",
):
adjs = preprocess_adjs(adjs, method=method)
omni = OmnibusEmbed(n_components=n_components, check_lcc=False, n_iter=10)
embed = omni.fit_transform(adjs)
if concat_directed:
embed = np.concatenate(
embed, axis=-1
) # this is for left/right latent positions
if remove_first is not None:
embed = embed[remove_first:]
if concat_graphs:
embed = np.concatenate(embed, axis=0)
return embed
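# Illustrative use (hypothetical inputs): omni([ll_adj, rr_adj], n_components=8)
# stacks each graph's out- and in-latent positions column-wise (16 dims per node)
# and then stacks the two graphs' vertices row-wise into a single array.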
def ipsi_omni(adj, lp_inds, rp_inds, co_adj=None, n_components=4, method="ase"):
ll_adj = adj[np.ix_(lp_inds, lp_inds)]
rr_adj = adj[np.ix_(rp_inds, rp_inds)]
ipsi_adjs = [ll_adj, rr_adj]
if co_adj is not None:
co_ll_adj = co_adj[np.ix_(lp_inds, lp_inds)]
co_rr_adj = co_adj[np.ix_(rp_inds, rp_inds)]
ipsi_adjs += [co_ll_adj, co_rr_adj]
out_ipsi, in_ipsi = omni(
ipsi_adjs,
n_components=n_components,
concat_directed=False,
concat_graphs=False,
method=method,
)
left_embed = np.concatenate((out_ipsi[0], in_ipsi[0]), axis=1)
right_embed = np.concatenate((out_ipsi[1], in_ipsi[1]), axis=1)
ipsi_embed = np.concatenate((left_embed, right_embed), axis=0)
return ipsi_embed
def contra_omni(adj, lp_inds, rp_inds, co_adj=None, n_components=4, method="ase"):
lr_adj = adj[np.ix_(lp_inds, rp_inds)]
rl_adj = adj[np.ix_(rp_inds, lp_inds)]
contra_adjs = [lr_adj, rl_adj]
if co_adj is not None:
co_lr_adj = co_adj[np.ix_(lp_inds, rp_inds)]
co_rl_adj = co_adj[np.ix_(rp_inds, lp_inds)]
contra_adjs += [co_lr_adj, co_rl_adj]
out_contra, in_contra = omni(
contra_adjs,
n_components=n_components,
concat_directed=False,
concat_graphs=False,
method=method,
)
left_embed = np.concatenate((out_contra[0], in_contra[1]), axis=1)
right_embed = np.concatenate((out_contra[1], in_contra[0]), axis=1)
contra_embed = np.concatenate((left_embed, right_embed), axis=0)
return contra_embed
def lateral_omni(adj, lp_inds, rp_inds, n_components=4, method="ase"):
ipsi_embed = ipsi_omni(
adj, lp_inds, rp_inds, n_components=n_components, method=method
)
contra_embed = contra_omni(
adj, lp_inds, rp_inds, n_components=n_components, method=method
)
embed = np.concatenate((ipsi_embed, contra_embed), axis=1)
return embed
def multi_lateral_omni(adjs, lp_inds, rp_inds, n_components=4):
ipsi_adjs = []
for a in adjs:
ll_adj = a[np.ix_(lp_inds, lp_inds)]
rr_adj = a[np.ix_(rp_inds, rp_inds)]
ipsi_adjs.append(ll_adj)
ipsi_adjs.append(rr_adj)
ipsi_embed = omni(ipsi_adjs, concat_graphs=False, n_components=n_components)
left = []
right = []
for i, e in enumerate(ipsi_embed):
if i % 2 == 0:
left.append(e)
else:
right.append(e)
left = np.concatenate(left, axis=1)
right = np.concatenate(right, axis=1)
ipsi_embed = np.concatenate((left, right), axis=0)
contra_adjs = []
for a in adjs:
lr_adj = a[np.ix_(lp_inds, rp_inds)]
rl_adj = a[np.ix_(rp_inds, lp_inds)]
contra_adjs.append(lr_adj)
contra_adjs.append(rl_adj)
contra_embed = omni(contra_adjs, concat_graphs=False, n_components=n_components)
left = []
right = []
for i, e in enumerate(contra_embed):
if i % 2 == 0:
left.append(e)
else:
right.append(e)
left = np.concatenate(left, axis=1)
right = np.concatenate(right, axis=1)
contra_embed = np.concatenate((left, right), axis=0)
embed = np.concatenate((ipsi_embed, contra_embed), axis=1)
return embed
def reg_lateral_omni(adj, base_adj, lp_inds, rp_inds, n_components=4):
base_ll_adj = base_adj[np.ix_(lp_inds, lp_inds)]
base_rr_adj = base_adj[np.ix_(rp_inds, rp_inds)]
ll_adj = adj[np.ix_(lp_inds, lp_inds)]
rr_adj = adj[np.ix_(rp_inds, rp_inds)]
ipsi_adjs = [base_ll_adj, base_rr_adj, ll_adj, rr_adj]
ipsi_embed = omni(ipsi_adjs, remove_first=2, n_components=n_components)
base_lr_adj = base_adj[np.ix_(lp_inds, rp_inds)]
base_rl_adj = base_adj[np.ix_(rp_inds, lp_inds)]
lr_adj = adj[np.ix_(lp_inds, rp_inds)]
rl_adj = adj[np.ix_(rp_inds, lp_inds)]
contra_adjs = [base_lr_adj, base_rl_adj, lr_adj, rl_adj]
contra_embed = omni(contra_adjs, remove_first=2, n_components=n_components)
embed = np.concatenate((ipsi_embed, contra_embed), axis=1)
return embed
def quick_embed_viewer(
embed, labels=None, lp_inds=None, rp_inds=None, left_right_indexing=False
):
if left_right_indexing:
lp_inds = np.arange(len(embed) // 2)
rp_inds = np.arange(len(embed) // 2) + len(embed) // 2
fig, axs = plt.subplots(3, 2, figsize=(20, 30))
cmds = ClassicalMDS(n_components=2)
cmds_euc = cmds.fit_transform(embed)
plot_df = pd.DataFrame(data=cmds_euc)
plot_df["labels"] = labels
plot_kws = dict(
x=0,
y=1,
hue="labels",
palette=CLASS_COLOR_DICT,
legend=False,
s=20,
linewidth=0.5,
alpha=0.7,
)
ax = axs[0, 0]
sns.scatterplot(data=plot_df, ax=ax, **plot_kws)
ax.axis("off")
add_connections(
plot_df.iloc[lp_inds, 0],
plot_df.iloc[rp_inds, 0],
plot_df.iloc[lp_inds, 1],
plot_df.iloc[rp_inds, 1],
ax=ax,
)
ax.set_title("CMDS o euclidean")
cmds = ClassicalMDS(n_components=2, dissimilarity="precomputed")
pdist = symmetrize(pairwise_distances(embed, metric="cosine"))
cmds_cos = cmds.fit_transform(pdist)
plot_df[0] = cmds_cos[:, 0]
plot_df[1] = cmds_cos[:, 1]
ax = axs[0, 1]
sns.scatterplot(data=plot_df, ax=ax, **plot_kws)
ax.axis("off")
add_connections(
plot_df.iloc[lp_inds, 0],
plot_df.iloc[rp_inds, 0],
plot_df.iloc[lp_inds, 1],
plot_df.iloc[rp_inds, 1],
ax=ax,
)
ax.set_title("CMDS o cosine")
tsne = TSNE(metric="euclidean")
tsne_euc = tsne.fit_transform(embed)
plot_df[0] = tsne_euc[:, 0]
plot_df[1] = tsne_euc[:, 1]
ax = axs[1, 0]
sns.scatterplot(data=plot_df, ax=ax, **plot_kws)
ax.axis("off")
add_connections(
plot_df.iloc[lp_inds, 0],
plot_df.iloc[rp_inds, 0],
plot_df.iloc[lp_inds, 1],
plot_df.iloc[rp_inds, 1],
ax=ax,
)
ax.set_title("TSNE o euclidean")
tsne = TSNE(metric="precomputed")
tsne_cos = tsne.fit_transform(pdist)
plot_df[0] = tsne_cos[:, 0]
plot_df[1] = tsne_cos[:, 1]
ax = axs[1, 1]
sns.scatterplot(data=plot_df, ax=ax, **plot_kws)
ax.axis("off")
add_connections(
plot_df.iloc[lp_inds, 0],
plot_df.iloc[rp_inds, 0],
plot_df.iloc[lp_inds, 1],
plot_df.iloc[rp_inds, 1],
ax=ax,
)
ax.set_title("TSNE o cosine")
umap = UMAP(metric="euclidean", n_neighbors=30, min_dist=1)
umap_euc = umap.fit_transform(embed)
plot_df[0] = umap_euc[:, 0]
plot_df[1] = umap_euc[:, 1]
ax = axs[2, 0]
sns.scatterplot(data=plot_df, ax=ax, **plot_kws)
ax.axis("off")
add_connections(
plot_df.iloc[lp_inds, 0],
plot_df.iloc[rp_inds, 0],
plot_df.iloc[lp_inds, 1],
plot_df.iloc[rp_inds, 1],
ax=ax,
)
ax.set_title("UMAP o euclidean")
umap = UMAP(metric="cosine", n_neighbors=30, min_dist=1)
umap_cos = umap.fit_transform(embed)
plot_df[0] = umap_cos[:, 0]
plot_df[1] = umap_cos[:, 1]
ax = axs[2, 1]
sns.scatterplot(data=plot_df, ax=ax, **plot_kws)
ax.axis("off")
add_connections(
plot_df.iloc[lp_inds, 0],
plot_df.iloc[rp_inds, 0],
plot_df.iloc[lp_inds, 1],
plot_df.iloc[rp_inds, 1],
ax=ax,
)
ax.set_title("UMAP o cosine")
def umapper(embed, metric="euclidean", n_neighbors=30, min_dist=1, **kws):
umap = UMAP(metric=metric, n_neighbors=n_neighbors, min_dist=min_dist)
umap_euc = umap.fit_transform(embed)
plot_df = pd.DataFrame(data=umap_euc)
plot_df["labels"] = labels
fig, ax = plt.subplots(1, 1, figsize=(10, 10))
plot_kws = dict(
x=0,
y=1,
hue="labels",
palette=CLASS_COLOR_DICT,
legend=False,
s=20,
linewidth=0.5,
alpha=0.7,
)
sns.scatterplot(data=plot_df, ax=ax, **plot_kws)
ax.axis("off")
left_right_indexing = True
if left_right_indexing:
tlp_inds = np.arange(len(embed) // 2)
trp_inds = np.arange(len(embed) // 2) + len(embed) // 2
add_connections(
plot_df.iloc[tlp_inds, 0],
plot_df.iloc[trp_inds, 0],
plot_df.iloc[tlp_inds, 1],
plot_df.iloc[trp_inds, 1],
ax=ax,
)
return fig, ax
# %% [markdown]
# ## Load and preprocess data
VERSION = "2020-04-23"
graph_type = "G"
master_mg = load_metagraph(graph_type, version="2020-04-23")
mg = preprocess(
master_mg,
threshold=0,
sym_threshold=False,
remove_pdiff=True,
binarize=False,
weight="weight",
)
meta = mg.meta
degrees = mg.calculate_degrees()
quant_val = np.quantile(degrees["Total edgesum"], 0.05)
# remove low degree neurons
idx = meta[degrees["Total edgesum"] > quant_val].index
print(quant_val)
mg = mg.reindex(idx, use_ids=True)
# remove center neurons # FIXME
idx = mg.meta[mg.meta["hemisphere"].isin(["L", "R"])].index
mg = mg.reindex(idx, use_ids=True)
idx = mg.meta[mg.meta["Pair"].isin(mg.meta.index)].index
mg = mg.reindex(idx, use_ids=True)
mg = mg.make_lcc()
mg.calculate_degrees(inplace=True)
meta = mg.meta
meta["pair_td"] = meta["Pair ID"].map(meta.groupby("Pair ID")["Total degree"].mean())
mg = mg.sort_values(["pair_td", "Pair ID"], ascending=False)
meta["inds"] = range(len(meta))
adj = mg.adj.copy()
lp_inds, rp_inds = get_paired_inds(meta)
left_inds = meta[meta["left"]]["inds"]
print(len(mg))
# %% [markdown]
# ## Plot the ipsilateral connectomes
if meta["pair_td"].max() > 0:
meta["pair_td"] = -meta["pair_td"]
ll_adj = adj[np.ix_(lp_inds, lp_inds)]
rr_adj = adj[np.ix_(rp_inds, rp_inds)]
left_meta = meta.iloc[lp_inds]
right_meta = meta.iloc[rp_inds]
plot_kws = dict(
plot_type="scattermap",
sort_class="merge_class",
item_order=["pair_td", "Pair ID"],
colors="merge_class",
palette=CLASS_COLOR_DICT,
ticks=False,
class_order="pair_td",
sizes=(1, 1),
gridline_kws=dict(linewidth=0.2, color="grey", linestyle="--"),
)
plot_adjs = False
if plot_adjs:
fig, axs = plt.subplots(1, 2, figsize=(20, 10))
_, _, top, _ = adjplot(ll_adj, ax=axs[0], meta=left_meta, **plot_kws)
top.set_title(r"L $\to$ L")
_, _, top, _ = adjplot(rr_adj, ax=axs[1], meta=right_meta, **plot_kws)
top.set_title(r"R $\to$ R")
plt.tight_layout()
stashfig("ipsilateral-adj")
lr_adj = adj[np.ix_(lp_inds, rp_inds)]
rl_adj = adj[np.ix_(rp_inds, lp_inds)]
fig, axs = plt.subplots(1, 2, figsize=(20, 10))
_, _, top, _ = adjplot(lr_adj, ax=axs[0], meta=left_meta, **plot_kws)
top.set_title(r"L $\to$ R")
_, _, top, _ = adjplot(rl_adj, ax=axs[1], meta=right_meta, **plot_kws)
top.set_title(r"R $\to$ L")
plt.tight_layout()
stashfig("contralateral-adj")
# %% [markdown]
# ## Load the 4-color graphs
graph_types = ["Gad", "Gaa", "Gdd", "Gda"]
adjs = []
for g in graph_types:
temp_mg = load_metagraph(g, version=VERSION)
temp_mg.reindex(mg.meta.index, use_ids=True)
temp_adj = temp_mg.adj
adjs.append(temp_adj)
# %% [markdown]
# ## simple demo of "in" vs "out" latent positions
# blocks 0, 1 differ only in their inputs, not their outputs
B = np.array(
[
[0.1, 0.1, 0.2, 0.05],
[0.1, 0.1, 0.2, 0.05],
[0.35, 0.15, 0.1, 0.1],
[0.1, 0.05, 0.3, 0.4],
]
)
sns.heatmap(B, square=True, annot=True)
sbm_sample, sbm_labels = sbm([100, 100, 100, 100], B, directed=True, return_labels=True)
ase = AdjacencySpectralEmbed()
out_embed, in_embed = ase.fit_transform(sbm_sample)
pairplot(out_embed, sbm_labels) # don't see separation between [0, 1]
pairplot(in_embed, sbm_labels) # do see separation between [0, 1]
# from this we can conclude that the "right" embedding or right singular vectors are the
# ones corresponding to input
# (out, in)
# %% [markdown]
# ## Options for the embedding
# - ASE and procrustes (not shown here)
# - Bilateral OMNI on G, SVD
# - Bilateral OMNI on each of the 4-colors, concatenated, SVD
# - Bilateral OMNI on each of the 4-colors, with regularization, concatenated, SVD
# - Bilateral OMNI jointly with all 4-colors
n_omni_components = 8  # this is used for all of the embeddings initially
n_svd_components = 16 # this is for the last step
def svd(X, n_components=n_svd_components):
return selectSVD(X, n_components=n_components, algorithm="full")[0]
# %% [markdown]
# ## only contra
# just_contra_embed = omni(
# [full_adjs[0], full_adjs[2]],
# n_components=n_omni_components,
# remove_first=None,
# concat_graphs=True,
# concat_directed=True,
# method="ase",
# )
# svd_contra_embed = svd(just_contra_embed)
# %% [markdown]
# # Omni of contra/ipsi together
full_adjs = [
adj[np.ix_(lp_inds, lp_inds)],
adj[np.ix_(lp_inds, rp_inds)],
adj[np.ix_(rp_inds, rp_inds)],
adj[np.ix_(rp_inds, lp_inds)],
]
out_embed, in_embed = omni(
full_adjs,
n_components=n_omni_components,
remove_first=None,
concat_graphs=False,
concat_directed=False,
method="ase",
)
# ipsi out, contra out, ipsi in, contra in
left_embed = np.concatenate(
(out_embed[0], out_embed[1], in_embed[0], in_embed[3]), axis=1
)
right_embed = np.concatenate(
(out_embed[2], out_embed[3], in_embed[2], in_embed[1]), axis=1
)
omni_naive_embed =
|
np.concatenate((left_embed, right_embed), axis=0)
|
numpy.concatenate
|
import numpy as np
from .ordering import *
from scipy import special
# import hafnian as hf
def GaussianWigner(xi, V, mu):
xi = xi - mu
xi_tmp = np.ravel(xi)
N = int(len(xi_tmp) / 2)
det_V = np.linalg.det(V)
V_inv = np.linalg.inv(V)
W = (2 * np.pi)**(-N) / np.sqrt(det_V) * np.exp(-1/2 * np.dot(xi_tmp, np.dot(V_inv, xi_tmp.T)))
return W
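# A minimal sanity check of GaussianWigner (a sketch, assuming the convention in which
# the single-mode vacuum has covariance V = I/2 and mu = 0): at the phase-space origin
# the Wigner function then peaks at 1/pi ~ 0.3183.
def _wigner_vacuum_example():
    V_vac = 0.5 * np.eye(2)
    mu_vac = np.zeros(2)
    return GaussianWigner(np.zeros(2), V_vac, mu_vac)  # ~0.3183 = 1/pi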
def StateAfterMeasurement(mu, V, idx, res, Pi):
N = int(V.shape[0] / 2)
subSysA = np.delete(np.delete(V, [2 * idx, 2 * idx + 1], 0), [2 * idx, 2 * idx + 1], 1)
subSysB = V[(2 * idx):(2 * idx + 2), (2 * idx):(2 * idx + 2)]
arrayList = []
for j in range(N):
if j != idx:
arrayList.append(V[(2 * j):(2 * j + 2), (2 * idx):(2 * idx + 2)])
C = np.concatenate(arrayList)
post_V = subSysA - np.dot(C, np.dot(1 / np.sum(subSysB * Pi) * Pi, C.T))
post_V = np.insert(post_V, 2 * idx, [[0], [0]], axis = 0)
post_V = np.insert(post_V, 2 * idx, [[0], [0]], axis = 1)
post_V[2 * idx, 2 * idx] = 1
post_V[2 * idx + 1, 2 * idx + 1] = 1
post_mu = np.delete(mu, [2 * idx, 2 * idx + 1]) + \
np.dot(np.dot(C, 1 / np.sum(subSysB * Pi) * Pi), res * np.diag(Pi) - mu[(2 * idx):(2 * idx + 2)])
post_mu = np.insert(post_mu, 2 * idx, [0, 0])
return post_mu, post_V
def GaussianQfunc(alpha, V, mu):
mu_Q = RtoTvec(mu)
V_Q = RtoTmat(V)
alpha_Q = RtoTvec(alpha)
alpha_Q = alpha_Q - mu_Q
V_Q = V_Q + (np.eye(V_Q.shape[0]) * 0.5)
det_V_Q = np.linalg.det(V_Q)
V_Qinv = np.linalg.inv(V_Q)
Q = 1 / np.sqrt(det_V_Q * np.pi) * np.exp(-1/2 * np.dot(
|
np.conj(alpha_Q)
|
numpy.conj
|
import errno, os, sys, time
from timeit import default_timer as timer
from pathlib import Path
import vide as vu
import numpy as np
import matplotlib.pyplot as plt
import hickle as hkl
import multiprocessing as mp
from vide import periodic_kdtree as pkd
import scipy
from scipy.optimize import curve_fit
import emcee
from ._colors import *
from ._Load import Load
from ._Profiles import Profiles
from .Modules import _functionFitting as func
def HSW(r, rs, alpha, Rv, beta, deltac):
"""
HSW (Hamaus-Sutter-Wandelt) function for the universal void density profile
See: Hamaus et al. (2014)
"""
numerator = 1-(r/rs)**alpha
denominator = 1+(r/Rv)**beta
return deltac*numerator/denominator
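# Quick illustrative evaluation of the HSW profile (a sketch with made-up parameter values,
# not fitted ones): the profile tends to deltac as r -> 0 and crosses zero at r = rs.
def _hsw_profile_example():
    r = np.array([1e-3, 0.8, 2.0])
    return HSW(r, rs=0.8, alpha=2., Rv=1., beta=4., deltac=-0.8)
    # ~ [-0.8, 0.0, +0.25]: deltac at the centre, zero at r = rs, small overshoot outside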
def HSW_MCMC(param , param_limits, r, profile ,profile_err):
rs, alpha, Rv, beta, deltac = param
rs_lim, alpha_lim, Rv_lim, beta_lim, deltac_lim = param_limits
numerator = 1-(r/rs)**alpha
denominator = 1+(r/Rv)**beta
model = deltac*numerator/denominator
res = func.log_probability(param, param_limits, model, profile, profile_err)
return res
def HSW_offset(r, rs, alpha, Rv, beta, deltac, offset):
"""
HSW (Hamaus-Sutter-Wandelt) function for the universal void density profile
See: Hamaus et al. (2014)
"""
numerator = 1-(r/rs)**alpha
denominator = 1+(r/Rv)**beta
return deltac*numerator/denominator +offset
def HSW_MCMC_offset(param , param_limits, r, profile ,profile_err):
rs, alpha, Rv, beta, deltac, offset = param
rs_lim, alpha_lim, Rv_lim, beta_lim, deltac_lim, offset_lim = param_limits
numerator = 1-(r/rs)**alpha
denominator = 1+(r/Rv)**beta
model = deltac*numerator/denominator +offset
res = func.log_probability(param, param_limits, model, profile, profile_err)
return res
class HSW_Fitting(Profiles):
def fitting_MCMC(self, **kwargs):
# Kwargs
fitting_limits = kwargs.get('fitting_limits',None)
n_walkers = kwargs.get('n_walkers', 64)
n_iteration = kwargs.get('n_iteration', 5000)
new = kwargs.get('new', False)
offset = kwargs.get('offset', False)
self.add_offset = ''
if offset:
self.add_offset = '_offset'
if np.any(fitting_limits != None):
print(f'\t{col.NOTICE}Fitting data between {fitting_limits[0]} and {fitting_limits[1]} R/R_v{col.END}')
print(f'{col.NOTICE}n_walkers : {n_walkers}{col.END}')
print(f'{col.NOTICE}n_iteration : {n_iteration}{col.END}')
# File name
if self.compare_same:
nameFile_FittingHSW_MCMC = self.folder_profiles+'/FittingHSW_MCMC_sameR'+self.add_offset+self.studied_ranges+self.add_M+'.h5'
else:
nameFile_FittingHSW_MCMC = self.folder_profiles+'/FittingHSW_MCMC'+self.add_offset+self.studied_ranges+self.add_M+'.h5'
# Check if fit has been already calculated
if (Path(nameFile_FittingHSW_MCMC).is_file() and not(new)):
data = self.Upload(nameFile_FittingHSW_MCMC)
if np.all(data['MCMC']['Omega_M_array']== self.Omega_M_array):
print(f'{col.NOTICE}Retrieving parameters:{col.END}')
self.parameters = data['MCMC']['parameters']
self.parameters_errors = data['MCMC']['parameters_errors']
return
print(f'{col.NOTICE}Calculating parameters:{col.END}')
n_r = np.size(self.ranges[:-1])
n_Om = np.size(self.Omega_M_array)
# Initialize parameters vectors
# Parameters:
# rs, alpha, Rv, beta, deltac, offset, log_f
n_par = 5
if offset:
n_par = 6
parameters = np.zeros((np.shape(self.profiles_tot)[0], n_par))
parameters_errors = np.zeros((np.shape(self.profiles_tot)[0], n_par))
# Cycling over Omegas and Ranges
for i, Om in enumerate(self.Omega_M_array):
for j in np.arange(n_r):
k = i*n_r+j
profile = self.profiles_tot[k]
profile_errors = self.profiles_errors_tot[k]
x = (self.profiles_bins_tot[k][1:]+self.profiles_bins_tot[k][:-1])/2.
np.random.seed(42)
# Study values inside the fitting limits
if np.any(fitting_limits != None):
n_profile = np.size(profile)
l_vec = np.ones(n_profile)
for l in range(np.size(profile)):
if fitting_limits[0]<=x[l]<=fitting_limits[1]:
l_vec[l] = 0
l_vec = np.argwhere(l_vec)
profile = np.delete(profile, l_vec)
profile_errors = np.delete(profile_errors, l_vec)
x = np.delete(x, l_vec)
# Run the MCMC algorithm
if offset:
# Define parameters bounds
lower_bound = np.array([0.5, 0, 0.8, 0, -2,-0.1])
upper_bound = np.array([1.1, 5, 1.2, 10, 0, 0.1 ])
initial_guess = np.array([0.8, 2, 1., 1, -0.8,0])
random_start = initial_guess + np.random.rand(n_walkers, n_par)*(upper_bound-lower_bound)/10.
bounds =
|
np.zeros((n_par,2))
|
numpy.zeros
|
from scipydirect import minimize
from operator import mul
from functools import reduce
import warnings
import collections
import scipy.io as sio
from qutip import*
import numpy as np
import scipy as sp
from copy import copy
from copy import deepcopy
from scipy.sparse.linalg import expm_multiply
from math import *
import matplotlib.pyplot as plt
import math
import cmath
import numbers
import sys
import functools
from scipy.linalg import expm
from numpy.linalg import norm
from scipy.integrate import ode
from qutip.solver import Options
from qutip.cy.spmath import zcsr_trace, zcsr_adjoint, zcsr_mult, zcsr_kron
from qutip.cy.spconvert import zcsr_reshape
# from libs.import_notebook import *
from contract_fast.contract_fast import cy_contract
DO_PARALLEL = True
from funmv_pylib import *
try:
from correlators_compute_copy.corr_ssh_comp import get_SSH_correlators_old
from correlators_compute.corr_ssh_comp import get_correlators as get_correlators_c
from correlators_compute.corr_ssh_comp import get_e1_and_e0 as get_e1_and_e0_c
except:
pass
import matplotlib
# import importlib
# import funmv
# importlib.reload(funmv)
# from funmv import *
import ctypes
mkl_rt = ctypes.CDLL('libmkl_rt.so')
mkl_get_max_threads = mkl_rt.mkl_get_max_threads
def mkl_set_num_threads(cores):
mkl_rt.mkl_set_num_threads(ctypes.byref(ctypes.c_int(cores)))
inch = 3.38583 + 0.2
fontsize = 10
fontsize_Numbers = 8
font = {'family' : 'serif',
'serif' : ['STIXGeneral'],
#'sans-serif':['Helvetica'],
'weight' : 'regular',
'size' : fontsize}
dpi = 72
plt.rc('font', **font)
plt.rc('text', usetex=True)
colors_list = plt.rcParams['axes.prop_cycle'].by_key()['color']
plt.rc('text.latex', preamble=r'\usepackage{amsmath} \usepackage{amssymb}')
def printNum(current_val, min_val, glob_min_val, file_logs = None):
glob_min_val_str = glob_min_val if isinstance(glob_min_val, str) else "{:.5f}".format(glob_min_val)
message = '(' + "{:.5f}".format(current_val) + ') '+"{:.5f}".format(min_val) + " / " + glob_min_val_str
message = '\r%s' % message
my_print(message, file_name = file_logs )
sys.stdout.write( message )
sys.stdout.flush()
sys.__stdout__.write(message)
sys.__stdout__.flush()
def my_print(text, update = False, file_name = None):
if file_name is None:
if update:
text = '\r%s' % text
else:
text+='\n'
sys.stdout.write( text)
sys.stdout.flush()
else:
with open(file_name, "a") as text_file:
# text_file.write('\n'+text)
print(text, file=text_file)
# Variational constant
eps = 1e-8
# Basis spin 1/2
a0 = fock(2,0)
a1 = fock(2,1)
# Pauli
S_x = sigmax()
S_y = sigmay()
S_z = -sigmaz()
S_m = sigmap()
S_p = sigmam()
I = identity(2)
# spin 1
rm = fock(3,0)
r0 = fock(3,1)
rp = fock(3,2)
Sp0 = rp* r0.dag()
S0m = r0* rm.dag()
Sp0_0m = Sp0 + S0m
Sr_x = (Sp0_0m + Sp0_0m.dag())/2**0.5
Sr_y = 1j*(-Sp0_0m + Sp0_0m.dag())/2**0.5
Sr_z = rp*rp.dag() - rm*rm.dag()
# map from 2 Rydebergs, spins 1/2, to spin 1
map_01 = tensor(a0,a1)
map_00 = tensor(a0,a0)
map_10 = tensor(a1,a0)
trans_map_ryd = tensor(rm,map_01.dag()) + tensor(r0,map_00.dag()) + tensor(rp,map_10.dag())
trans_map_ryd.dims = [[3],[2,2]]
# map from 2 ions, spins 1/2, to spin 1
# rm = fock(4,0)
# r0 = fock(4,1)
# rp = fock(4,2)
# re = fock(4,3)
# Sp0 = rp* r0.dag()
# S0m = r0* rm.dag()
# Sp0_0m = Sp0 + S0m
# Se = re*re.dag()
# Sr_x = (Sp0_0m + Sp0_0m.dag())/2**0.5
# Sr_y = 1j*(-Sp0_0m + Sp0_0m.dag())/2**0.5
# Sr_z = rp*rp.dag() - rm*rm.dag()
# map_00 = tensor(a0,a0)
# map_10 = tensor(a1,a0)
# map_01 = tensor(a0,a1)
# map_11 = tensor(a1,a1)
# map_10p01 = (tensor(a0,a1) + tensor(a1,a0)).unit()
# map_10m01 = (tensor(a0,a1) - tensor(a1,a0)).unit()
# trans_dims = [[4],[2,2]]
# trans_map_ryd = tensor(rm,map_01.dag()) + tensor(r0,map_00.dag()) + tensor(rp,map_10.dag())
# trans_map_ryd.dims = trans_dims
# trans_map_ion = tensor(rm,map_00.dag()) + tensor(r0,map_01.dag()) + tensor(rp,map_11.dag())
# # trans_map_ion = tensor(rm,map_00.dag()) + tensor(r0,map_10p01.dag()) + tensor(rp,map_11.dag()) + tensor(re,map_10m01.dag())
# # trans_map_ion = tensor(rm,map_00.dag()) + tensor(r0,map_10p01.dag()) + tensor(rp,map_11.dag())
# # trans_map_ion.dims = [[3],[2,2]]
# trans_map_ion.dims = trans_dims
# # Mixed blockaded state of twp Rydberg atoms
# I_block = Qobj(np.diag([1,1,1,0]),dims = [[2,2],[2,2]])
FORMAT_IDENT = 'ident'
FORMAT_DIAG = 'diag'
FORMAT_SPARSE = 'sparse'
FORMAT_DENSE = 'dense'
FORMAT_SPARSE_REAL_IMAG = 'sparse_real_imag'
TYPE_STATE = 'state'
TYPE_HAMILTONIAN = 'Hamiltonian'
TYPE_OPERATOR = 'operator'
class MyQobj(object):
'''
Object representing quantum states and operators in different formats.
The object takes care of products between objects in different formats.
Parameters
----------
data : one of following forms
FORMAT_IDENT: 1
FORMAT_DIAG: array
Diagonal of the matrix
FORMAT_SPARSE: matrix sparse csr
FORMAT_DENSE: matrix array
FORMAT_SPARSE_REAL_IMAG: [sparse csr, sparse csr], both with format float64
Real and imaginary parts of the state.
q_type : str
'''
def __init__( self, data, q_type = TYPE_OPERATOR, is_super = False, dims = None):
self.is_super = is_super
self.q_type = q_type
self.set_data(data, dims)
def set_data(self, data, dims):
if isinstance(data, Qobj):
# dims = data.dims[0]
dims = data.dims
data = data.data
self.data = data
if data == 1:
self.format = FORMAT_IDENT
self.complexity = 1
elif isinstance(data, qutip.fastsparse.fast_csr_matrix) or isinstance(data, sp.sparse.csr.csr_matrix):
self.format = FORMAT_SPARSE
# self.complexity = np.prod(data.shape)
elif isinstance(data, list):
self.format = FORMAT_SPARSE_REAL_IMAG
self.complexity = np.prod(data[0].shape)
elif isinstance(data, np.ndarray):
# self.complexity = np.prod(data.shape)
if len(data.shape) == 1:
self.format = FORMAT_DIAG
else:
self.format = FORMAT_DENSE
else:
raise ValueError
if dims is None:
shape = self.get_shape()
dims = [[shape[0]],[shape[1]]]
# for i, D in enumerate(dims):
# if len(D)>1:
# D = [d for d in D if d!=1]
# if len(D)==0: D = [1]
# dims[i] = D
# print(dims)
# if len(dims[0]) != len(dims[1]):
# raise Exception('len(dims[0]) != len(dims[1]):')
self.dims = check_dims(dims)
def to_dense(self, is_copy = False):
if self.format == FORMAT_SPARSE:
data = self.data.toarray()
elif self.format in [FORMAT_DENSE, FORMAT_IDENT]:
data = self.data
else:
raise NotImplementedError
if is_copy:
return MyQobj(data, self.q_type, is_super = self.is_super, dims = self.dims)
else:
self.set_data(data, self.dims)
def to_qobj(self):
# return Qobj(self.data, dims = [self.dims]*2)
return Qobj(self.data, dims = self.dims)
def to_sparse(self, is_copy = False):
if self.format == FORMAT_SPARSE:
data = self.data
elif self.format == FORMAT_DENSE:
data = sp.sparse.csr_matrix(self.data)
# data = fast_csr_matrix((_tmp.data, _tmp.indices, _tmp.indptr),
# shape=_tmp.shape)
else:
raise NotImplementedError
if is_copy:
return MyQobj(data, self.q_type, is_super = self.is_super, dims = self.dims)
else:
self.set_data(data, self.dims)
def vector_to_operator(self, is_copy = False):
# Super vector state to density matrix
if self.q_type != TYPE_STATE:
raise Exception("Only for TYPE_STATE")
if self.is_super and len(self.data.shape)==2 and self.data.shape[-1]==1:
data = my_vec_to_op(self.data)
else:
data = self.data
if is_copy:
return MyQobj(data, TYPE_STATE, is_super = False, dims = self.dims)
else:
self.data = data
self.is_super = False
def operator_to_vector(self, is_copy = False):
# Density matrix to super vector state
if self.q_type != TYPE_STATE:
raise Exception("Only for TYPE_STATE")
if self.format not in [FORMAT_DENSE, FORMAT_SPARSE]:
raise Exception('self.format = '+self.format)
data = self.data
if not self.is_super:
if self.data.shape[1] == 1:
data = data * data.conj().T
data = my_op_to_vec(data)
if is_copy:
return MyQobj(data, TYPE_STATE, is_super = True, dims = self.dims)
else:
self.data = data
self.is_super = True
def to_super(self, is_copy = False):
q_type = self.q_type
# Apply sprepost to data
if self.format == FORMAT_IDENT or self.is_super:
data = self.data
elif self.format == FORMAT_DIAG:
if q_type == TYPE_HAMILTONIAN:
data = to_super_H_diag(self.data)
elif q_type == TYPE_OPERATOR:
data = to_super_oper_diag(self.data)
else:
raise NotImplementedError
elif self.format in [FORMAT_DENSE, FORMAT_SPARSE]:
if q_type == TYPE_HAMILTONIAN:
data = to_super_H(self.data)
elif q_type == TYPE_OPERATOR:
data = to_super_oper(self.data)
elif q_type == TYPE_STATE:
return self.operator_to_vector(is_copy)
if self.format == FORMAT_DENSE and not isinstance(data,np.ndarray) :
data = data.toarray()
else:
raise NotImplementedError
dims = self.dims
if not self.is_super:
dims = [[dims[0]]*2,[dims[1]]*2]
if is_copy:
return MyQobj(data, q_type, is_super = True, dims = dims)
else:
self.dims = dims
self.data = data
self.is_super = True
def tr(self):
if self.format == FORMAT_IDENT or self.is_super:
tr = 1
elif self.format == FORMAT_DIAG:
tr = sum(self.data)
elif self.format in (FORMAT_DENSE, FORMAT_SPARSE):
tr = sum(self.data.diagonal())
else:
raise NotImplementedError
return tr
def dag(self):
data = self.data
if self.format == FORMAT_IDENT:
pass
elif self.format == FORMAT_DIAG:
data = data.conj().T
elif self.format == FORMAT_DENSE:
data = data.conj().T.copy()
elif self.format == FORMAT_SPARSE:
data = zcsr_adjoint(data)
else:
raise NotImplementedError
return MyQobj(data, self.q_type, is_super = self.is_super, dims = self.dims[::-1])
def __div__(self, other):
if isinstance(other, (int, np.int64, float, complex)):
return MyQobj(self.data / other, self.q_type, is_super = self.is_super, dims = self.dims)
else:
raise ValueError
def __mul__(self, other):
if isinstance(other, (int, np.int64,float, complex)):
# if other == 0:
# return 0
return MyQobj(self.data * other, self.q_type, is_super = self.is_super, dims = self.dims)
elif type(other) != MyQobj:
raise NotImplementedError
if self.is_super or other.is_super:
self.to_super()
other.to_super()
A = self.data
B = other.data
if self.format == FORMAT_IDENT:
return other
elif other.format == FORMAT_IDENT:
return self
elif self.format == FORMAT_DIAG:
if other.format == FORMAT_DIAG:
st_out = A * B
elif other.format == FORMAT_DENSE:
st_out = (B.T * A).T
else:
raise NotImplementedError
elif self.format == FORMAT_DENSE:
if other.format == FORMAT_DIAG:
st_out = A * B
elif other.format == FORMAT_DENSE:
st_out = np.dot(A, B)
elif other.format == FORMAT_SPARSE:
# print(1)
st_out = A * B
else:
raise NotImplementedError
elif self.format == FORMAT_SPARSE:
if other.format == FORMAT_DENSE:
# print(2)
if B.shape[1] == 1:
st_out = my_mv(A, B)
else:
st_out = my_mm(A, B)
# st_out = A * B
elif other.format == FORMAT_SPARSE:
st_out = A * B
else:
raise NotImplementedError
else:
raise NotImplementedError
if TYPE_STATE in [self.q_type, other.q_type]:
q_type_out = TYPE_STATE
# elif TYPE_HAMILTONIAN in [self.q_type, other.q_type]:
# q_type_out = TYPE_HAMILTONIAN
else:
q_type_out = TYPE_OPERATOR
return MyQobj(st_out, q_type_out, is_super = other.is_super, dims = other.dims)
__rmul__ = __mul__
def __add__(self, other):
if self.format == FORMAT_SPARSE_REAL_IMAG:
raise NotImplementedError
elif isinstance(other, (int, np.int64, float, complex)):
data = self.data + other
elif self.format != other.format:
raise NotImplementedError
else:
if self.q_type != other.q_type:
raise ValueError("Only the same q_types could be summed up")
if self.is_super or other.is_super:
self.to_super()
other.to_super()
data = self.data + other.data
return MyQobj(data, self.q_type, is_super = self.is_super, dims = self.dims)
__radd__ = __add__
def __repr__(self):
return self.__str__()
# data_str = self.data.__repr__()
# return 'MyQobj\nis_super = ' + str(self.is_super) + '\nData:\n' + data_str + '\n'
def __str__(self):
data_str = str(self.data)
return 'MyQobj' + '\n' + 'q_type = ' + self.q_type + '\n'+ 'is_super = ' + str(self.is_super) + '\n'+ 'format = ' + self.format + '\n'+ 'shape = ' + str(self.get_shape()) + '\n'+ 'dims = ' + str(self.dims) + '\n'+ 'Data:'+'\n' + data_str + '\n\n'
def get_shape(self):
data = self.data
if self.format == FORMAT_SPARSE_REAL_IMAG:
shape = data[0].shape
elif self.format == FORMAT_IDENT:
shape = [1,1]
else:
shape = data.shape
return shape
def expm(self):
if self.q_type == TYPE_STATE:
raise Exception("TYPE_STATE could not be exponented")
if self.format == FORMAT_DIAG:
data = np.exp(self.data)
elif self.format == FORMAT_DENSE:
data = sp.linalg.expm(self.data)
elif self.format == FORMAT_SPARSE:
# Has to be corrected
data = sp.linalg.expm(self.data.todense())
else:
raise NotImplementedError
return MyQobj(data, TYPE_OPERATOR, is_super = self.is_super, dims = self.dims)
def expm_multiply(self, B):
A = self
if type(A) != MyQobj or type(B) != MyQobj:
raise ValueError("A and B have to be MyQobj")
if A.q_type == TYPE_STATE:
raise Exception("TYPE_STATE could not be exponented")
if A.is_super or B.is_super:
A.to_super()
B.to_super()
if A.format == FORMAT_IDENT:
return B
elif B.format == FORMAT_IDENT:
return A.expm()
elif A.format == FORMAT_DIAG:
return A.expm() * B
elif A.format == FORMAT_DENSE:
return A.expm() * B
elif A.format == FORMAT_SPARSE:
if B.format == FORMAT_DENSE:
if DO_PARALLEL:
data = expm_multiply_parallel(A.data, B.data)
else:
data = expm_multiply(A.data, B.data)
else:
raise NotImplementedError
else:
raise NotImplementedError
if B.q_type == TYPE_STATE:
q_type_put = TYPE_STATE
else:
q_type_put = TYPE_OPERATOR
return MyQobj(data, q_type_put, is_super = B.is_super, dims = [A.dims[0], B.dims[1]])
# def tr(self):
# if self.format is FORMAT_DENSE:
# return np.trace(self.data)
# else:
# return zcsr_trace(self.data, True)
def check_dims(dims):
dims = deepcopy(dims)
N_d = len(dims[0])
if N_d == 1:
return dims
for i in range(N_d)[::-1]:
if dims[0][i] == 1 and dims[1][i] == 1:
del dims[0][i]
del dims[1][i]
return dims
def tensor_mq(states_list):
st_out = states_list[0].data
if states_list[0].format == FORMAT_SPARSE:
for st in states_list[1:]:
st_out = zcsr_kron(st_out, st.data)
else:
for st in states_list[1:]:
st_out = np.kron(st_out, st.data)
dims_0 = np.concatenate([st.dims[0] for st in states_list]).ravel().tolist()
dims_1 = np.concatenate([st.dims[1] for st in states_list]).ravel().tolist()
dims = [dims_0, dims_1]
return MyQobj(st_out, q_type = states_list[0].q_type, dims = dims)
def my_mv(A, B):
if DO_PARALLEL:
st_out = mkl_zspmv(A, B)
else:
st_out = A * B
return st_out
def my_mm(A, B):
if DO_PARALLEL:
st_out = mkl_zspmm(A, B)
else:
st_out = A * B
return st_out
class Optimiz_props(object):
'''
Properties of the optimization algorithm.
Parameters
----------
method: str, for example 'BFGS', "basinhop"
Inserted to sp.optimize.minimize
tol_rel: float
Optimization tolerance in E_min/E_max
maxiter: int
Max number of iterations of sp.optimize.minimize
is_kraus: bool
If True, the Kraus map is calculated and applied to the initial state the required number of times.
Use if the map is the same at every step.
If False, the whole evolution is applied to the initial state directly.
Use if the initial state is pure and the map is applied once or twice.
jac: bool
If True, the Jacobian for sp.optimize.minimize is calculated analytically during the function evaluation.
If False, sp.optimize.minimize varies the parameters and evaluates the function several times.
do_sparse: bool
Make calculations with sparse or dense matrices.
print_data: bool
Print progress of minimization.
'''
do_sparse = False
do_approx = False
MIN_glob = np.inf
def __init__( self, method = 'basinhop', tol_rel = 1e-3, maxiter = 0, jac = True, do_sparse = True, print_data = True,
# next are deprecated
N_approx = 5, use_probs = False, P_N_samp = 0, time_dep = False, file_logs = None):
self.do_sparse = do_sparse
self.method = method
self.tol_rel = tol_rel
self.maxiter = maxiter
self.print_data = print_data
self.jac = jac
self.N_approx = N_approx
self.use_probs = use_probs
self.P_N_samp = P_N_samp
self.time_dep = time_dep
# up to which iteration use probabilities of outcomes
if use_probs and P_N_samp == 0: self.P_N_samp = N_iter
class System(object):
'''
Parameters
----------
state_in : Qobj / [[Qobj, inds_list],...]
System initial state, where Qobj-s have type 'ket' or 'oper', and inds_list-s are lists of the
mode indices of the state generated by a tensor product of the list of Qobj-s.
logic_mode_inds: [[mode_ind, lvls_list],...] or [mode_ind,...]
Logical subspace. List of mode_ind of the state in the corresponding order and the indices of levels,
lvls_list, of the mode with mode_ind corresponding to {|i>} in the corresponding order.
If logic_mode_inds = None, the whole space is logical.
If no lvls_list is given, the whole mode space is logical.
The logical subspace must fit the simulated model.
inverse_logic: bool
If True logic_mode_inds -> aux_mode_inds
'''
def __init__( self, state_in, logic_mode_inds = None, inverse_logic = False):
if isinstance(state_in, Qobj):
state_in = [[state_in, range(get_q_N_mode(state_in))]]
elif isinstance(state_in, list):
if isinstance(state_in[0], Qobj):
state_in_new = []
N_modes = 0
for state in state_in:
n_modes = get_q_N_mode(state)
state_in_new += [[state, list(range(N_modes, N_modes+n_modes))]]
N_modes += n_modes
state_in = state_in_new
else:
raise ValueError()
self.initialize_state(state_in)
self.initialize_logic_space(logic_mode_inds, inverse_logic)
def initialize_logic_space(self, logic_mode_inds, inverse_logic):
dims_state_list = self.dims_state_list
if logic_mode_inds is None:
logic_mode_inds = list(range(len(dims_state_list)))
logic_mode_inds_new = []
for logic_mode in logic_mode_inds:
if isinstance(logic_mode, int):
logic_mode = [logic_mode]
if len(logic_mode) == 1:
dim_logic_mode = dims_state_list[logic_mode[0]]
logic_lvls = list(range(dim_logic_mode))
logic_mode = [logic_mode[0], logic_lvls]
logic_mode_inds_new += [logic_mode]
logic_mode_inds = logic_mode_inds_new
aux_mode_inds = []
logic_modes = [a[0] for a in logic_mode_inds_new]
logic_lvls = [a[1] for a in logic_mode_inds_new]
for i, dim in enumerate(dims_state_list):
if i in logic_modes:
aux_lvls = [ind for ind in range(dim) if ind not in logic_lvls[logic_modes.index(i)]]
if len(aux_lvls)>0:
aux_mode_inds += [[i, aux_lvls]]
else:
aux_lvls = list(range(dim))
aux_mode_inds += [[i, aux_lvls]]
if inverse_logic:
logic_mode_inds, aux_mode_inds = aux_mode_inds, logic_mode_inds
self.N_sys = len(logic_mode_inds)
self.N_aux = len(aux_mode_inds)
self.logic_mode_inds = logic_mode_inds
self.aux_mode_inds = aux_mode_inds
def initialize_state(self, state_in_list):
self.state_in_list = state_in_list
# Separate pure and dens states
inds_pure_part = []
inds_dens_part = []
pure_state_list = []
dens_state_list = []
for qobj, inds_list in state_in_list:
if not isinstance(qobj, Qobj):
raise ValueError("qobj has to be a Qobj")
# Check inds_list
if len(inds_list) != get_q_N_mode(qobj):
raise ValueError("len(inds_list) has to be equal to the modes number of the qobj")
inds_list = list(inds_list)
if qobj.type == 'oper':
dens_state_list += [qobj]
inds_dens_part += inds_list
else:
pure_state_list += [qobj]
inds_pure_part += inds_list
# Check inds
inds_state = sorted(inds_dens_part + inds_pure_part)
N_modes = max(inds_state)+1
if inds_state != list(range(N_modes)):
raise ValueError('State mode indices must not repeat and must be consecutive starting from 0')
self.N_modes = N_modes
self.inds_pure_part = inds_pure_part
self.inds_dens_part = inds_dens_part
# Init tensor states
if len(pure_state_list)>0:
self.dims_pure_part_list = list(np.ravel(
[get_q_dim_list(s) for s in pure_state_list ]
))
else:
self.pure_state_part = 1
self.dims_pure_part_list = []
if len(dens_state_list)>0:
self.dims_dens_part_list = np.concatenate([get_q_dim_list(s)
for s in dens_state_list ]).ravel().tolist()
else:
self.dens_state_part = 1
self.dims_dens_part_list = []
# State dims according to the modes order
self.dims_state_list = [x for _,x in sorted(zip(
inds_pure_part + inds_dens_part,
self.dims_pure_part_list + self.dims_dens_part_list,
))]
self.pure_state_list = pure_state_list
self.dens_state_list = dens_state_list
self.shape =
|
np.prod(self.dims_state_list)
|
numpy.prod
|
'''
Deep Q learning, i.e. learning the Q function Q(x,u) so that Pi(x) = u = argmax Q(x,u)
is the optimal policy. The control u is discretized as 0..NU-1
This program instantiates an environment env and a Q network qvalue.
The main signals are qvalue.x (state input), qvalue.qvalues (value for any u in 0..NU-1),
qvalue.policy (i.e. argmax(qvalue.qvalues)) and qvalue.qvalue (i.e. max(qvalue.qvalue)).
Reference:
Mnih, Volodymyr, et al. "Human-level control through deep reinforcement learning."
Nature 518.7540 (2015): 529.
'''
import tensorflow as tf
import numpy as np
from tensorflow.keras import layers
import tensorflow.keras as keras
import tensorflow.keras.backend as K
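# A small sketch (not from the original training loop) of the one-step Q-learning target
# described in the module docstring: q_ref = r + gamma * max_u' Q(x',u'), with hypothetical numbers.
def _bellman_target_example():
    gamma = 0.99
    reward = 1.0
    q_next = np.array([0.2, 0.7, 0.1])  # hypothetical Q(x', u) for each discrete control u
    done = False
    return reward + gamma * (0.0 if done else np.max(q_next))  # 1 + 0.99*0.7 = 1.693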
def batch_gather(reference, indices):
"""
From https://github.com/keras-team/keras/pull/6377 (not merged).
Batchwise gathering of row indices.
The numpy equivalent is `reference[np.arange(batch_size), indices]`, where
`batch_size` is the first dimension of the reference tensor.
# Arguments
reference: A tensor with ndim >= 2 and shape
(batch_size, dim1, dim2, ..., dimN).
indices: A 1d integer tensor of shape (batch_size) satisfying
0 <= i < dim2 for each element i.
# Returns
The selected tensor with shape (batch_size, dim2, ..., dimN).
# Examples
1. If reference is `[[3, 5, 7], [11, 13, 17]]` and indices is `[2, 1]`
then the result is `[7, 13]`.
"""
batch_size = keras.backend.shape(reference)[0]
indices = tf.concat([tf.reshape(tf.range(batch_size),[batch_size,1]),
indices],1)
return tf.gather_nd(reference,indices=indices)
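# Sanity check of the docstring example above (a sketch using the stated numpy
# equivalent rather than the TensorFlow graph):
# reference = [[3, 5, 7], [11, 13, 17]], indices = [2, 1]  ->  [7, 13]
def _batch_gather_numpy_example():
    reference = np.array([[3, 5, 7], [11, 13, 17]])
    indices = np.array([2, 1])
    return reference[np.arange(reference.shape[0]), indices]  # array([ 7, 13])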
class QNetwork:
'''
Build a keras model computing:
- qvalues(x) = [ Q(x,u_1) ... Q(x,u_NU) ]
- value(x) = max_u qvalues(x)
- qvalue(x,u) = Q(x,u)
'''
def __init__(self,nx,nu,name='',nhiden=32,learning_rate=None):
'''
The network has the following structure:
x => [ DENSE1 ] => [ DENSE2 ] => [ QVALUES ] ==========MAX=> VALUE(x)
\=>[ ]
u ============================================>[ NGATHER ] => QVALUE(x,u)
where:
- qvalues(x) = [ qvalue(x,u=0) ... qvalue(x,u=NU-1) ]
- value(x) = max_u qvalues(x)
- value(x,u) = qvalues(x)[u]
The <trainer> model self.trainer corresponds to a mean-square loss of qvalue(x,u)
wrt to a reference q_ref.
The main model self.model has no optimizer and simply computes qvalues,value,qvalue
as a function of x and u (useful for debug only).
Additional helper functions are set to compute the value function and the policy.
'''
self.nx=nx;self.nu=nu
input_x = keras.Input(shape=(nx,), name=name+'state')
input_u = keras.Input(shape=(1,), name=name+'control',dtype="int32")
dens1 = keras.layers.Dense(nhiden, activation='relu', name=name+'dense_1',
bias_initializer='random_uniform')(input_x)
dens2 = keras.layers.Dense(nhiden, activation='relu', name=name+'dense_2',
bias_initializer='random_uniform')(dens1)
qvalues = keras.layers.Dense(nu, activation='linear', name=name+'qvalues',
bias_initializer='random_uniform')(dens2)
value = keras.backend.max(qvalues,keepdims=True,axis=1)
value = keras.layers.Lambda(lambda x:x,name=name+'value')(value)
qvalue = batch_gather(qvalues,input_u)
qvalue = keras.layers.Lambda(lambda x:x,name=name+'qvalue')(qvalue)
policy = keras.backend.argmax(qvalues,axis=1)
policy = keras.layers.Lambda(lambda x:x,name=name+'policy')(policy)
self.trainer = keras.Model(inputs=[input_x,input_u],outputs=qvalue)
self.saver = keras.Model(inputs=input_x,outputs=qvalues)
self.trainer.compile(optimizer='adam',loss='mse')
if learning_rate is not None:
self.trainer.optimizer.lr = learning_rate
self.model = keras.Model(inputs=[input_x,input_u],
outputs=[qvalues,value,qvalue,policy])
self.saver = keras.Model(inputs=input_x,outputs=qvalues) # For saving the weights
self._policy = keras.backend.function(input_x,policy)
self._qvalues = keras.backend.function(input_x,qvalues)
self._value = keras.backend.function(input_x,value)
# FOR DEBUG ONLY
self._qvalues = keras.backend.function(input_x,qvalues)
self._h1 = keras.backend.function(input_x,dens1)
self._h2 = keras.backend.function(input_x,dens2)
def targetAssign(self,ref,rate):
'''
Change model to approach modelRef, with homotopy parameter <rate>
(rate=0: do not change, rate=1: exacttly set it to the ref).
'''
assert(rate<=1 and rate>=0)
for v,vref in zip(self.trainer.trainable_variables,ref.trainer.trainable_variables):
v.assign((1-rate)*v+rate*vref)
def policy(self,x,noise=None):
'''
Evaluate the policy u = pi(x) = argmax_u Q(x,u).
If noise is not None, then evaluate a noisy-greedy policy
u = pi(x|noise) = argmax_u(Q(x,u)+uniform(noise)).
'''
if len(x.shape)==1: x=np.reshape(x,[1,len(x)])
if noise is None: return self._policy(x)
q = self._qvalues(x)
if noise is not None: q += (
|
np.random.rand(self.nu)
|
numpy.random.rand
|
import matplotlib.pyplot as plt
import numpy as np
from skimage.util import img_as_float
from skimage.io import imread
from skimage.io import imsave
from skimage import filters
from skimage.color import rgb2gray
import scipy.ndimage as ndi
from skimage import dtype_limits
from scipy.ndimage import gaussian_filter
from collections import namedtuple
def show_sub_image(img):
plt.figure()
plt.imshow(img,cmap=plt.cm.gray)
plt.axis('off')
plt.show()
def smooth_with_function_and_mask(image, function, mask):
# taken from _canny.py
bleed_over = function(mask.astype(float))
masked_image = np.zeros(image.shape, image.dtype)
masked_image[mask] = image[mask]
smoothed_image = function(masked_image)
output_image = smoothed_image / (bleed_over + np.finfo(float).eps)
return output_image
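# Minimal usage sketch (hypothetical data): smoothing the masked image and dividing by
# the smoothed mask compensates for signal that bleeds across the mask boundary.
def _masked_smoothing_example():
    demo_img = np.random.rand(32, 32)
    demo_mask = np.zeros((32, 32), dtype=bool)
    demo_mask[8:24, 8:24] = True
    smooth_fn = lambda a: gaussian_filter(a, sigma=2)
    return smooth_with_function_and_mask(demo_img, smooth_fn, demo_mask)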
def connected_component_labeling(binary_image):
# 8-connectivity based
# two-pass algorithm
label_image = np.zeros(binary_image.shape, int)
label_count = 0;
equivalent_label_array = [0]
# 1st pass
for row in range(0,binary_image.shape[0]):
for col in range(0,binary_image.shape[1]):
if binary_image[row,col] == False:
continue # current pixel is background
label = 0
if row > 0:
# check north-west, north and north-east neighbours
for col_shift in range(-1,2):
if ((col + col_shift) < 0) or ((col + col_shift) >= binary_image.shape[1]):
continue
if binary_image[row-1,col + col_shift] == True:
label_neighbour = label_image[row-1,col + col_shift]
if label > 0:
# check for equivalence label
if label_neighbour < label:
equivalent_label_array[label] = label_neighbour
label = label_neighbour
elif label_neighbour > label:
equivalent_label_array[label_neighbour] = label
else:
label = label_neighbour
# check the west neighbour
if col > 0:
if binary_image[row,col-1] == True:
label_neighbour = label_image[row,col-1]
if label > 0:
# check for equivalence label
if label_neighbour < label:
equivalent_label_array[label] = label_neighbour
label = label_neighbour
elif label_neighbour > label:
equivalent_label_array[label_neighbour] = label
else:
label = label_neighbour
if label == 0:
# none of the above neighbours are foreground
label_count += 1
label = label_count
equivalent_label_array.append(label_count)
# assign label for current pixel
label_image[row,col] = label
print('label_count: %d\n' % (label_count))
# for equivalence set, assigning smallest label as parent
for label_i in range(2,len(equivalent_label_array)):
if equivalent_label_array[label_i] != label_i:
label = label_i
# find the smallest label as parent
while equivalent_label_array[label] != label:
label = equivalent_label_array[label]
# now assign that label as parent to each of these
label_intermediate = label_i
while equivalent_label_array[label_intermediate] != label:
label_parent = equivalent_label_array[label_intermediate]
equivalent_label_array[label_intermediate] = label
label_intermediate = label_parent
print('equivalent_label_array: %s\n' % (equivalent_label_array))
equivalent_label_set = list(set(equivalent_label_array))
print('label_count(unique): %d\n' % (len(equivalent_label_set)))
# 2nd pass
for row in range(0,binary_image.shape[0]):
for col in range(0,binary_image.shape[1]):
if binary_image[row,col] == False:
continue # current pixel is background
label = label_image[row,col]
if equivalent_label_array[label] != label:
label_image[row,col] = equivalent_label_array[label]
point_t = namedtuple('point','row col')
bbox_t = namedtuple('bbox','row_min row_max col_min col_max')
connected_component_t = namedtuple('connected_component','points stroke_width_mean stroke_width_std bounding_box text_possible_flag')
label_cc_map = {}
# connected components
#sum_count = 0
#cur_point = point_t(row=0, col=0)
for row_i in range(0,binary_image.shape[0]):
for col_i in range(0,binary_image.shape[1]):
if binary_image[row_i,col_i] == False:
continue # current pixel is background
label = label_image[row_i,col_i]
cur_point = point_t(row=row_i,col=col_i)
if label not in label_cc_map:
bbox = bbox_t(row_min=row_i, row_max=row_i, col_min=col_i, col_max=col_i)
label_cc_map[label] = connected_component_t(points=[cur_point],stroke_width_mean=np.nan, \
stroke_width_std=np.nan, bounding_box=bbox, text_possible_flag=False)
#label_cc_map[label] = label_cc_map[label].points.append(cur_point)
else:
#sum_count += 1
list_points = label_cc_map[label].points
list_points.append(cur_point)
bbox = label_cc_map[label].bounding_box
if row_i < bbox.row_min:
bbox = bbox._replace(row_min = row_i)
if row_i > bbox.row_max:
bbox = bbox._replace(row_max = row_i)
if col_i < bbox.col_min:
bbox = bbox._replace(col_min = col_i)
if col_i > bbox.col_max:
bbox = bbox._replace(col_max = col_i)
label_cc_map[label] = label_cc_map[label]._replace(points = list_points, bounding_box = bbox)
#print('sum_count: %d\n' % (sum_count))
# stroke width transform(SWT)
swt_image = np.zeros(binary_image.shape, int)
swt_image += (binary_image.shape[0]+binary_image.shape[1])
# 1st pass of SWT
for row in range(0,binary_image.shape[0]):
for col in range(0,binary_image.shape[1]):
if binary_image[row,col] == False:
continue # current pixel is background
# horizontal direction
if (col == 0) or (binary_image[row,col-1] == False):
# edge with gradient in horizontal direction
opp_col = col+1
while (opp_col < binary_image.shape[1]) and (binary_image[row,opp_col] == True):
opp_col += 1
stroke_width = opp_col - col
for col_index in range(col,opp_col):
if stroke_width < swt_image[row,col_index]:
swt_image[row,col_index] = stroke_width
# vertical direction
if (row == 0) or (binary_image[row-1,col] == False):
# edge with gradient in vertical direction
opp_row = row+1;
while (opp_row < binary_image.shape[0]) and (binary_image[opp_row,col] == True):
opp_row += 1
stroke_width = opp_row - row
for row_index in range(row,opp_row):
if stroke_width < swt_image[row_index,col]:
swt_image[row_index,col] = stroke_width
# diagonal directions
if (row > 0) and (col > 0) and (binary_image[row-1,col-1] == False) and binary_image[row,col-1] and \
binary_image[row-1,col]:
# edge with gradient towards 4th quadrant
opp_index = 1
while (row+opp_index < binary_image.shape[0]) and (col+opp_index < binary_image.shape[1]) and \
binary_image[row+opp_index,col+opp_index]:
opp_index += 1
stroke_width = opp_index
for incr_index in range(1,opp_index):
if stroke_width < swt_image[row+incr_index,col+incr_index]:
swt_image[row+incr_index,col+incr_index] = stroke_width
if (col < binary_image.shape[1]-1) and (row > 0) and (binary_image[row-1,col+1] == False) and \
binary_image[row-1,col] and binary_image[row,col+1]:
# edge with gradient towards 3rd quadrant
opp_index = 1
while (row+opp_index < binary_image.shape[0]) and (col >= opp_index) and binary_image[row+opp_index,col-opp_index]:
opp_index += 1
stroke_width = opp_index
for incr_index in range(1,opp_index):
if stroke_width < swt_image[row+incr_index,col-incr_index]:
swt_image[row+incr_index,col-incr_index] = stroke_width
# 2nd pass (median)
swt_final_image = swt_image.copy()  # copy so the median pass reads the original first-pass values
for row in range(0,binary_image.shape[0]):
for col in range(0,binary_image.shape[1]):
if binary_image[row,col] == False:
continue # current pixel is background
# horizontal direction
if (col == 0) or (binary_image[row,col-1] == False):
# edge with gradient in horizontal direction
label_array = [swt_image[row,col]]
opp_col = col+1
while (opp_col < binary_image.shape[1]) and (binary_image[row,opp_col] == True):
label_array.append(swt_image[row,opp_col])
opp_col += 1
median_label = np.median(label_array)
for col_index in range(col,opp_col):
if median_label < swt_final_image[row,col_index]:
swt_final_image[row,col_index] = median_label
# vertical direction
if (row == 0) or (binary_image[row-1,col] == False):
# edge with gradient in vertical direction
label_array = [swt_image[row,col]]
opp_row = row+1;
while (opp_row < binary_image.shape[0]) and (binary_image[opp_row,col] == True):
label_array.append(swt_image[opp_row,col])
opp_row += 1
median_label = np.median(label_array)
for row_index in range(row,opp_row):
if median_label < swt_final_image[row_index,col]:
swt_final_image[row_index,col] = median_label
# diagonal directions
if (row > 0) and (col > 0) and (binary_image[row-1,col-1] == False) and binary_image[row,col-1] \
and binary_image[row-1,col]:
# edge with gradient towards 4th quadrant
label_array = [swt_image[row,col]]
opp_index = 1
while (row+opp_index < binary_image.shape[0]) and (col+opp_index < binary_image.shape[1]) \
and binary_image[row+opp_index,col+opp_index]:
label_array.append(swt_image[row+opp_index,col+opp_index])
opp_index += 1
median_label =
|
np.median(label_array)
|
numpy.median
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import pandas as pd
from datetime import datetime, timedelta
import numpy as np
from scipy.stats import pearsonr
# from mpl_toolkits.axes_grid1 import host_subplot
# import mpl_toolkits.axisartist as AA
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.ticker as tck
import matplotlib.cm as cm
import matplotlib.font_manager as fm
import math as m
import matplotlib.dates as mdates
import matplotlib.ticker as ticker
import matplotlib.transforms as transforms
import matplotlib.colors as colors
##############################################################################
#-----------------------------------------------------------------------------
# Rutas para las fuentes -----------------------------------------------------
##############################################################################
prop = fm.FontProperties(fname='/home/nacorreasa/SIATA/Cod_Califi/AvenirLTStd-Heavy.otf' )
prop_1 = fm.FontProperties(fname='/home/nacorreasa/SIATA/Cod_Califi/AvenirLTStd-Book.otf')
prop_2 = fm.FontProperties(fname='/home/nacorreasa/SIATA/Cod_Califi/AvenirLTStd-Black.otf')
#####################################################################
## ----------------GRÁFICA DE LOS VECTORES PROPIAS---------------- ##
#####################################################################
vector_propio_348_1 = np.load('/home/nacorreasa/Maestria/Datos_Tesis/Arrays/VectorProp_1_348.npy')
vector_propio_348_2 = np.load('/home/nacorreasa/Maestria/Datos_Tesis/Arrays/VectorProp_2_348.npy')
vector_propio_348_3 = np.load('/home/nacorreasa/Maestria/Datos_Tesis/Arrays/VectorProp_3_348.npy')
vector_propio_350_1 = np.load('/home/nacorreasa/Maestria/Datos_Tesis/Arrays/VectorProp_1_350.npy')
vector_propio_350_2 = np.load('/home/nacorreasa/Maestria/Datos_Tesis/Arrays/VectorProp_2_350.npy')
vector_propio_350_3 = np.load('/home/nacorreasa/Maestria/Datos_Tesis/Arrays/VectorProp_3_350.npy')
vector_propio_975_1 = np.load('/home/nacorreasa/Maestria/Datos_Tesis/Arrays/VectorProp_1_975.npy')
vector_propio_975_2 = np.load('/home/nacorreasa/Maestria/Datos_Tesis/Arrays/VectorProp_2_975.npy')
vector_propio_975_3 =
|
np.load('/home/nacorreasa/Maestria/Datos_Tesis/Arrays/VectorProp_3_975.npy')
|
numpy.load
|
from echonn.ml import EchoStateNetwork
import itertools
from echonn.ml import ESNExperiment
from echonn.sys import LorenzSystem
import numpy as np
import unittest
class TestEchoStateNetwork(unittest.TestCase):
# def __init__(self, K, N, L, T0=100, alpha=.999, use_noise=False, sparse=False, f=None, g=None)
def testInit(self):
K = 10
N = 11
L = 12
esn = EchoStateNetwork(K, N, L)
if esn.bias:
bias_K = K + 1
self.assertEqual(esn.Win.shape, (N, K))
self.assertEqual(esn.W.shape, (N, N))
self.assertEqual(esn.Wback.shape, (N, L))
self.assertEqual(esn.Wout.shape, (L, bias_K+N))
# def init_weights(self):
# test echo state property
def testWEigenAlpha(self):
for alpha in [.7, .8, .85, .9, .95]:
esn = EchoStateNetwork(10, 11, 12, alpha=alpha)
eigs, _ = np.linalg.eig(esn.W / alpha)
eig_val = max(np.absolute(eigs))
self.assertAlmostEqual(1, eig_val)
def testInitStateFadesWithTime(self):
for alpha in [.7, .8, .85, .9, .95]:
esn = EchoStateNetwork(1, 5, 1, alpha=alpha)
ds = np.arange(500)
ds[30:] = 0
us = np.copy(ds)
esn.predict(ds, us)
self.assertNotAlmostEqual(0, np.sum(esn.x[30]))
self.assertAlmostEqual(0, np.sum(esn.x[-1]))
# def scale_matrix(self, W):
# test scales properly
# test with zero length matrix
def testScaleMatrix(self):
esn = EchoStateNetwork(10, 10, 10)
W1x100 = np.ones((1, 100))
W10x100 = np.ones((10, 100))
W1x25 = np.ones((1, 25))
W10x25 = np.ones((10, 25))
for W in [W1x100, W10x100, W1x25, W10x25]:
scale = 1 / np.sqrt(W.shape[1])
scaled_W = esn.scale_matrix(W)
self.assertAlmostEqual(
|
np.mean(scaled_W)
|
numpy.mean
|
from __future__ import division, absolute_import, print_function
import warnings
import numpy as np
from numpy.testing import (run_module_suite, TestCase, assert_equal,
assert_allclose, assert_raises, assert_)
from numpy.testing.decorators import knownfailureif
from scipy.interpolate import (BSpline, BPoly, PPoly, make_interp_spline,
make_lsq_spline, _bspl, splev, splrep, splprep, splder, splantider,
sproot, splint, insert)
import scipy.linalg as sl
from scipy.interpolate._bsplines import _not_a_knot, _augknt
import scipy.interpolate._fitpack_impl as _impl
class TestBSpline(TestCase):
def test_ctor(self):
# knots should be an ordered 1D array of finite real numbers
assert_raises((TypeError, ValueError), BSpline,
**dict(t=[1, 1.j], c=[1.], k=0))
assert_raises(ValueError, BSpline, **dict(t=[1, np.nan], c=[1.], k=0))
assert_raises(ValueError, BSpline, **dict(t=[1, np.inf], c=[1.], k=0))
assert_raises(ValueError, BSpline, **dict(t=[1, -1], c=[1.], k=0))
assert_raises(ValueError, BSpline, **dict(t=[[1], [1]], c=[1.], k=0))
# for n+k+1 knots and degree k need at least n coefficients
assert_raises(ValueError, BSpline, **dict(t=[0, 1, 2], c=[1], k=0))
assert_raises(ValueError, BSpline,
**dict(t=[0, 1, 2, 3, 4], c=[1., 1.], k=2))
# non-integer orders
assert_raises(ValueError, BSpline,
**dict(t=[0., 0., 1., 2., 3., 4.], c=[1., 1., 1.], k="cubic"))
assert_raises(ValueError, BSpline,
**dict(t=[0., 0., 1., 2., 3., 4.], c=[1., 1., 1.], k=2.5))
# basic interval cannot have measure zero (here: [1..1])
assert_raises(ValueError, BSpline,
**dict(t=[0., 0, 1, 1, 2, 3], c=[1., 1, 1], k=2))
# tck vs self.tck
n, k = 11, 3
t = np.arange(n+k+1)
c = np.random.random(n)
b = BSpline(t, c, k)
assert_allclose(t, b.t)
assert_allclose(c, b.c)
assert_equal(k, b.k)
def test_tck(self):
b = _make_random_spline()
tck = b.tck
assert_allclose(b.t, tck[0], atol=1e-15, rtol=1e-15)
assert_allclose(b.c, tck[1], atol=1e-15, rtol=1e-15)
assert_equal(b.k, tck[2])
# b.tck is read-only
try:
b.tck = 'foo'
except AttributeError:
pass
except:
raise AssertionError("AttributeError not raised.")
def test_degree_0(self):
xx = np.linspace(0, 1, 10)
b = BSpline(t=[0, 1], c=[3.], k=0)
assert_allclose(b(xx), 3)
b = BSpline(t=[0, 0.35, 1], c=[3, 4], k=0)
assert_allclose(b(xx), np.where(xx < 0.35, 3, 4))
def test_degree_1(self):
t = [0, 1, 2, 3, 4]
c = [1, 2, 3]
k = 1
b = BSpline(t, c, k)
x = np.linspace(1, 3, 50)
assert_allclose(c[0]*B_012(x) + c[1]*B_012(x-1) + c[2]*B_012(x-2),
b(x), atol=1e-14)
assert_allclose(splev(x, (t, c, k)), b(x), atol=1e-14)
def test_bernstein(self):
# a special knot vector: Bernstein polynomials
k = 3
t = np.asarray([0]*(k+1) + [1]*(k+1))
c = np.asarray([1., 2., 3., 4.])
bp = BPoly(c.reshape(-1, 1), [0, 1])
bspl = BSpline(t, c, k)
xx = np.linspace(-1., 2., 10)
assert_allclose(bp(xx, extrapolate=True),
bspl(xx, extrapolate=True), atol=1e-14)
assert_allclose(splev(xx, (t, c, k)),
bspl(xx), atol=1e-14)
def test_rndm_naive_eval(self):
# test random coefficient spline *on the base interval*,
# t[k] <= x < t[-k-1]
b = _make_random_spline()
t, c, k = b.tck
xx = np.linspace(t[k], t[-k-1], 50)
y_b = b(xx)
y_n = [_naive_eval(x, t, c, k) for x in xx]
assert_allclose(y_b, y_n, atol=1e-14)
y_n2 = [_naive_eval_2(x, t, c, k) for x in xx]
assert_allclose(y_b, y_n2, atol=1e-14)
def test_rndm_splev(self):
b = _make_random_spline()
t, c, k = b.tck
xx = np.linspace(t[k], t[-k-1], 50)
assert_allclose(b(xx), splev(xx, (t, c, k)), atol=1e-14)
def test_rndm_splrep(self):
np.random.seed(1234)
x = np.sort(np.random.random(20))
y = np.random.random(20)
tck = splrep(x, y)
b = BSpline(*tck)
t, k = b.t, b.k
xx = np.linspace(t[k], t[-k-1], 80)
assert_allclose(b(xx), splev(xx, tck), atol=1e-14)
def test_rndm_unity(self):
b = _make_random_spline()
b.c =
|
np.ones_like(b.c)
|
numpy.ones_like
|
import numpy as np
import networkx as nx
from tqdm import tqdm
from shapely.geometry import (LineString,
Point,
GeometryCollection,
MultiLineString,
)
from .rio_tools import get_geopandas_features_from_array
import geopandas as gpd
from shapely.ops import unary_union
import affine
from rasterio.transform import rowcol
from scipy.ndimage import find_objects
import scipy.ndimage as nd
from .nd_tools import apply_func_to_superpixels
from typing import Union
##################
# Width Segments
##################
def get_width_features_from_segments(label_array: np.ndarray,
profile: dict) -> np.ndarray:
"""
Takes a label array and rasterio profile (specifically its geotransform) to
obtain width using the (scipy) distance transform in a neighborhood of each
segment.
The width within a segment is computed as `(2*d - 1) * res`, where `d` is
the maximum of the distance transform in that segment where we compute the
distance transform in a small 1 pixel buffered bounding box of the segment
and the `res` is the resolution (in meters; we assume all rasters are in
meters) determined using the rasterio profile.
We assume label 0 is land and the channel_mask is (label_array != 0).
The `get_width_features_from_segments_naive` uses the distance transform on
the full channel mask rather than 1 pixel buffered bounding box of the
segment.
Parameters
----------
label_array : np.ndarray
array of labels (m x n) with p unique labels
profile : dict
Rasterio profile dictionary
Returns
-------
np.ndarray:
Obtain features of shape (p x 1) where `p:= unique labels in label
array` and index i of feature vector corresponds to width of label i.
Notes
-----
+ width at label 0 will be set to np.nan
+ If the distance transform at a particular segment is 0, then we assign the width
determined using the full distance transform of the channel mask, i.e.
(label_array != 0), rather than just the distance transform in a buffered
area around the segment.
"""
transform = profile['transform']
if transform.a != - transform.e:
msg = 'Unequal x/y resolutions in channel mask and cannot use scipy'
raise ValueError(msg)
resolution = transform.a
labels_unique = np.unique(label_array)
indices = find_objects(label_array)
m = len(labels_unique)
width_features = np.zeros((m, 1))
channel_mask = (label_array != 0)
channel_dist_full = nd.distance_transform_edt(channel_mask) * resolution
for k, label in enumerate(labels_unique):
if label == 0:
continue
indices_temp = indices[label-1]
# Buffer
sy, sx = indices_temp
sy = np.s_[max(sy.start - 1, 0): sy.stop + 1]
sx = np.s_[max(sx.start - 1, 0): sx.stop + 1]
indices_temp = sy, sx
label_mask_in_slice = (label_array[indices_temp] == label)
label_slice = label_array[indices_temp]
channel_mask_slice = (label_slice != 0)
# Our formula is: 2 * (nd.distance_transform_edt(channel_mask_slice)) -
# 1. Note the "-1". Because scipy determines distance using pixel's
# centers, we overcount the 1/2 boundary pixels not in the channel.
# However, this could be an *underestimate*, as along the diagonal this
# is \sqrt(2) / 2
width_arr = 2 * (nd.distance_transform_edt(channel_mask_slice)) - 1
width_arr *= resolution
max_dist_in_label = np.nanmax(width_arr[label_mask_in_slice])
# If no land in block, max_dist_in_label should be 0
# To ensure some positive distance is recorded, use the full channel distance
if max_dist_in_label == 0:
channel_dist_full_slice = channel_dist_full[indices_temp]
temp_slice = label_slice == label
channel_dist_full_at_label = channel_dist_full_slice[temp_slice]
max_dist_in_label = np.nanmax(channel_dist_full_at_label)
width_features[k] = max_dist_in_label
width_features[0] = np.nan
return width_features
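# Worked example of the width formula above (a sketch with a synthetic mask, not project
# data): a channel 5 pixels wide at an assumed 10 m resolution gives (2*3 - 1) * 10 = 50 m,
# since the distance transform peaks at 3 along the channel centreline.
def _width_formula_example():
    strip = np.zeros((7, 7), dtype=np.uint8)
    strip[1:6, :] = 1                            # 5-pixel-wide channel
    d = nd.distance_transform_edt(strip).max()   # 3.0 on the centreline
    resolution = 10.0                            # hypothetical 10 m pixels
    return (2 * d - 1) * resolution              # 50.0 m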
def get_width_features_from_segments_naive(label_array: np.ndarray,
profile: dict) -> np.ndarray:
"""
Takes a label array and obtains width using the distance transform using
the entire channel mask (label_array != 0). Specifically, the width within
a segment is determined as `(2*d - 1) * res`, where `d` is the distance
transform determined using the the entire channel mask. Assume label 0 is
land and channel_mask is (label_array != 0).
Contrast this with get_width_features_from_segments which computes distance
transform only within a buffered bounding box of the segment.
Parameters
----------
label_array : np.ndarray
array of labels (m x n)
profile : dict
Rasterio profile dictionary
Returns
-------
np.ndarray:
Obtain features of shape (p x 1) where p:= unique labels in label array
and index i of feature vector corresponds to width of label i.
Notes
-----
+ width at label 0 will be set to np.nan
"""
transform = profile['transform']
if transform.a != - transform.e:
msg = 'Unequal x/y resolutions in channel mask and cannot use scipy'
raise ValueError(msg)
resolution = transform.a
channel_mask = (label_array != 0).astype(np.uint8)
channel_dist = nd.distance_transform_edt(channel_mask)
# This could be an *underestimate* as this could be diagonal which would be
# approximately \sqrt(2) / 2
d = apply_func_to_superpixels(np.nanmax, label_array, channel_dist)
width_features = (2 * d - 1) * resolution
width_features[0] = np.nan
return width_features
def add_width_features_to_graph(G: nx.classes.graph.Graph,
width_features: np.ndarray,
width_label: str = 'width_from_segment')\
-> nx.Graph:
"""
Take width features of length p in which index i has width at that label
and add that as node attribute for node with label i. Width features
should be flattened (use width_features.ravel()). We take the node_data =
{node: data_dict for node in G.nodes()} and update each data_dict with
`width_label`: width.
Parameters
----------
G : nx.classes.graph.Graph
Graph to update
width_features : np.ndarray
Features of widths with index i corresponding to width at label i
width_label : str
Label to update node attribute. Defaults to `width_from_segment`. Keep
default to ensure other analyses work as expected without modification.
Returns
-------
nx.Graph:
Graph is modified in place and returned
"""
node_data = dict(G.nodes(data=True))
nodes = node_data.keys()
def update_node_data(node):
# label should correspond to the same feature
label = node_data[node]['label']
node_data[node][width_label] = width_features[label]
list(map(update_node_data, nodes))
nx.set_node_attributes(G, node_data)
return G
##################
# Width Directions
# and Flows
##################
def unit_vector(vector: np.ndarray) -> np.array:
"""
Normalize a vector to have unit l2 norm
Parameters
----------
vector : np.ndarray
Returns
-------
np.array:
Normalized vector, i.e. v / ||v||_2
"""
return vector / np.linalg.norm(vector)
def angle_between(v1: np.ndarray, v2: np.ndarray) -> float:
"""
Find the angle between two vectors using the arccosine. Specifically,
arcos( v1 * v2 ) / (||v1||_2 ||v2||_2), where * indicates the vector dot
product.
Parameters
----------
v1 : np.ndarray
Vector
v2 : np.ndarray
Vector
Returns
-------
float:
The angle in radians
"""
v1_u = unit_vector(v1)
v2_u = unit_vector(v2)
return np.arccos(np.clip(np.dot(v1_u, v2_u), -1.0, 1.0))
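# Quick check (a sketch, not part of the library tests): perpendicular unit vectors
# give pi/2, parallel ones give 0.
def _angle_between_example():
    assert np.isclose(angle_between(np.array([1., 0.]), np.array([0., 1.])), np.pi / 2)
    assert np.isclose(angle_between(np.array([1., 0.]), np.array([2., 0.])), 0.0)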
def edge_to_vector(edge_tuple: tuple) -> np.ndarray:
"""
Transforms edges of the form (p0, p1) to vector p1 - p0,
where p0, p1 are 2d vectors.
Parameters
----------
edge_tuple : tuple
Tuple of tuples, i.e. (p0, p1), where p0, p1 in R2.
Returns
-------
np.ndarray:
Edge vector indicating direction of edge from p0.
"""
return np.array(edge_tuple[1]) - np.array(edge_tuple[0])
def realign_vector(edge: np.ndarray, reference_edge: np.ndarray) -> np.ndarray:
"""
Using the reference_edge, ensure that the other edge is within pi/2. If
not, take its negative aka reverse the vector aka reflect through origin.
Parameters
----------
edge : np.ndarray
Edge to possibly reverse
reference_edge : np.ndarray
Edge that remains fixed
Returns
-------
np.ndarray:
edge or -edge depending on angle between.
"""
if angle_between(edge, reference_edge) <= np.pi/2:
return edge
else:
return edge * -1
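# Illustration (a sketch): a vector pointing opposite to the reference gets flipped,
# one within pi/2 of the reference is returned unchanged.
def _realign_vector_example():
    ref = np.array([1., 0.])
    flipped = realign_vector(np.array([-1., 0.1]), ref)   # -> [ 1. , -0.1]
    kept = realign_vector(np.array([0.5, 0.5]), ref)      # unchanged
    return flipped, kept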
def _perp(t: tuple) -> tuple:
"""
Obtain the point whose corresponding vector is perpendicular to t.
Parameters
----------
t : tuple
Point (x, y)
Returns
-------
tuple:
(-y, x)
"""
return -t[1], t[0]
def get_vector_tail(center: tuple,
direction: tuple,
magnitude: float) -> LineString:
"""
Obtain the Point at center + direction * magnitude (the tail of the vector
anchored at center). Assume direction is a unit vector.
Parameters
----------
center : tuple
Head of LineString
direction : tuple
Direction from center; assume is unit norm.
magnitude : float
Length of desired output vector
Returns
-------
Point:
Shapely geometry of the vector tail
"""
return Point(center[0] + direction[0] * magnitude,
center[1] + direction[1] * magnitude)
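# Example (a sketch): with unit direction (1, 0) and magnitude 5, the tail of a vector
# anchored at the origin is the shapely Point(5, 0).
def _get_vector_tail_example():
    return get_vector_tail((0.0, 0.0), (1.0, 0.0), 5.0)   # POINT (5 0)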
def _lookup_flow_vector_from_widths_arr(node: tuple,
width_x: np.array,
width_y: np.array,
transform: affine.Affine)\
-> Union[np.ndarray, None]:
"""
Obtain the unit vector direction from an array of widths in x and y
directions. Here, this will be the perpendicular line to the gradient of
distance function determined with fmm.
We expect the following for the width arrays:
neg_nabla_y, nabla_x = np.gradient(distance_arr)
width_x, width_y = neg_nabla_y, nabla_x
Parameters
----------
node : tuple
(x, y) in R2 in map coordinates associated with transform below
width_x : np.array
Will be the negative of nabla_y
width_y : np.array
Will be nabla_x
transform : affine.Affine
The rasterio transform associated with the width_x and width_y arrays.
Returns
-------
np.ndarray or None:
2d unit vector indicating width direction. Returns none if gradient is
np.nan
"""
row, col = rowcol(transform, node[0], node[1])
direction = np.array([width_x[row, col], width_y[row, col]])
if np.isnan(direction[0]) |
|
np.isnan(direction[1])
|
numpy.isnan
|
#!/usr/bin/env python3
# File: tetris.py
# Description: Main file with tetris game.
# Author: <NAME> <<EMAIL>>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import time
from collections import defaultdict
import pygame
import random
import math
import block
import constants
import numpy as np
import cv2
import torch
import torchvision.transforms as T
from PIL import Image
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
from constants import BlockType
class Tetris(object):
"""
The class with implementation of tetris game logic.
"""
def __init__(self, bx, by):
"""
Initialize the tetris object.
Parameters:
- bx - number of blocks in x
- by - number of blocks in y
"""
self.bx = bx
self.by = by
# Compute the resolution of the play board based on the required number of blocks.
self.resx = bx*constants.BWIDTH+2*constants.BOARD_HEIGHT+constants.BOARD_MARGIN
self.resy = by*constants.BHEIGHT+2*constants.BOARD_HEIGHT+constants.BOARD_MARGIN
# Prepare the pygame board objects (white lines)
self.board_up = pygame.Rect(0,constants.BOARD_UP_MARGIN,self.resx,constants.BOARD_HEIGHT)
self.board_down = pygame.Rect(0,self.resy-constants.BOARD_HEIGHT,self.resx,constants.BOARD_HEIGHT)
self.board_left = pygame.Rect(0,constants.BOARD_UP_MARGIN,constants.BOARD_HEIGHT,self.resy)
self.board_right = pygame.Rect(self.resx-constants.BOARD_HEIGHT,constants.BOARD_UP_MARGIN,constants.BOARD_HEIGHT,self.resy)
# List of used blocks
self.blk_list = []
# Compute start indexes for tetris blocks
self.start_x = math.ceil(self.resx/2.0)
self.start_y = constants.BOARD_UP_MARGIN + constants.BOARD_HEIGHT + constants.BOARD_MARGIN
# Block data (shapes and colors). The shape is encoded in the list of [X,Y] points. Each point
# represents the relative position. The true/false value is used for the configuration of rotation where
# False means no rotate and True allows the rotation.
self.block_data = (
([[0,0],[1,0],[2,0],[3,0]],constants.RED,True, BlockType.I_BLOCK), # I block
([[0,0],[1,0],[0,1],[-1,1]],constants.GREEN,True, BlockType.S_BLOCK), # S block
([[0,0],[1,0],[2,0],[2,1]],constants.BLUE,True, BlockType.L_BLOCK), # L block
([[0,0],[0,1],[1,0],[1,1]],constants.ORANGE,False, BlockType.O_BLOCK), # O block
([[-1,0],[0,0],[0,1],[1,1]],constants.GOLD,True, BlockType.Z_BLOCK), # Z block
([[0,0],[1,0],[2,0],[1,1]],constants.PURPLE,True, BlockType.T_BLOCK), # T block
([[0,0],[1,0],[2,0],[0,1]],constants.CYAN,True, BlockType.J_BLOCK), # J block
)
# Compute the number of blocks in a line. When the number of blocks is even, we can use it directly, but
# we have to decrease it by one when the number is odd (because of the used margin).
self.blocks_in_line = bx if bx%2 == 0 else bx-1
self.blocks_in_pile = by
# Score settings
self.score = 0
# Remember the current speed
self.speed = 1
# The score level threshold
self.score_level = constants.SCORE_LEVEL
self.lines_cleared = 0
self.x_positions = list(range(self.start_x % constants.BWIDTH, self.resx - self.start_x % constants.BWIDTH, constants.BWIDTH))
self.y_positions = list(range(self.start_y % constants.BHEIGHT, self.resy - self.start_y % constants.BHEIGHT, constants.BHEIGHT))
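# x_positions / y_positions hold the pixel coordinates of every playable
# column / row inside the board frame; they are later used to map between
# pixel space and the occupancy grid that feeds the RL state.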
self.possible_block_states = defaultdict(list)
def apply_action(self):
"""
Get the event from the event queue and run the appropriate
action.
"""
# Take the event from the event queue.
for ev in pygame.event.get():
# Check if the close button was fired.
if ev.type == pygame.QUIT or (ev.type == pygame.KEYDOWN and ev.unicode == 'q'):
self.done = True
# Detect the key events for game control.
if ev.type == pygame.KEYDOWN:
if ev.key == pygame.K_DOWN:
self.active_block.move(0,constants.BHEIGHT)
if ev.key == pygame.K_LEFT:
self.active_block.move(-constants.BWIDTH,0)
if ev.key == pygame.K_RIGHT:
self.active_block.move(constants.BWIDTH,0)
if ev.key == pygame.K_SPACE:
self.active_block.rotate()
if ev.key == pygame.K_p:
self.pause()
if ev.key == pygame.K_d:
self.drop_active_block()
# Detect if the movement event was fired by the timer.
if ev.type == constants.TIMER_MOVE_EVENT:
self.active_block.move(0,constants.BHEIGHT)
def pause(self):
"""
Pause the game and draw the string. This function
also calls the flip function which draws the string on the screen.
"""
# Draw the string to the center of the screen.
self.print_center(["PAUSE","Press \"p\" to continue"])
pygame.display.flip()
while True:
for ev in pygame.event.get():
if ev.type == pygame.KEYDOWN and ev.key == pygame.K_p:
return
def set_move_timer(self):
"""
Set up the move timer according to the current game speed.
"""
# Setup the time to fire the move event. Minimal allowed value is 1
speed = math.floor(constants.MOVE_TICK / self.speed)
speed = max(1,speed)
pygame.time.set_timer(constants.TIMER_MOVE_EVENT,speed)
def run(self):
# Initialize the game (pygame, fonts)
pygame.init()
pygame.font.init()
self.myfont = pygame.font.SysFont(pygame.font.get_default_font(),constants.FONT_SIZE)
self.screen = pygame.display.set_mode((self.resx,self.resy))
pygame.display.set_caption("Tetris")
# Setup the time to fire the move event every given time
self.set_move_timer()
# Control variables for the game. The done signal is used
# to control the main loop (it is set by the quit action), the game_over signal
# is set by the game logic and it is also used for the detection of "game over" drawing.
# Finally the new_block variable is used for the requesting of new tetris block.
self.done = False
self.game_over = False
self.new_block = True
# Print the initial score
self.print_status_line()
while not(self.done) and not(self.game_over):
# Get the block and run the game logic
self.get_block()
self.game_logic()
self.draw_game()
# Display the game_over and wait for a keypress
if self.game_over:
self.print_game_over()
# Disable the pygame stuff
pygame.font.quit()
pygame.display.quit()
def print_status_line(self):
"""
Print the current state line
"""
string = ["SCORE: {0} SPEED: {1}x".format(self.score,self.speed)]
self.print_text(string,constants.POINT_MARGIN,constants.POINT_MARGIN)
def print_game_over(self):
"""
Print the game over string.
"""
# Print the game over text
self.print_center(["Game Over","Press \"q\" to exit"])
# Draw the string
pygame.display.flip()
# Wait until the quit event or the "q" key is pressed
while True:
for ev in pygame.event.get():
if ev.type == pygame.QUIT or (ev.type == pygame.KEYDOWN and ev.unicode == 'q'):
return
def print_text(self,str_lst,x,y):
"""
Print the text on the X,Y coordinates.
Parameters:
- str_lst - list of strings to print. Each string is printed on new line.
- x - X coordinate of the first string
- y - Y coordinate of the first string
"""
prev_y = 0
for string in str_lst:
size_x,size_y = self.myfont.size(string)
txt_surf = self.myfont.render(string,False,(255,255,255))
self.screen.blit(txt_surf,(x,y+prev_y))
prev_y += size_y
def print_center(self,str_list):
"""
Print the string in the center of the screen.
Parameters:
- str_lst - list of strings to print. Each string is printed on new line.
"""
max_xsize = max([tmp[0] for tmp in map(self.myfont.size,str_list)])
self.print_text(str_list,self.resx/2-max_xsize/2,self.resy/2)
def block_colides(self):
"""
Check if the block collides with any other block.
The function returns True if the collision is detected.
"""
for blk in self.blk_list:
# Check if the block is not the same
if blk == self.active_block:
continue
# Detect situations
if(blk.check_collision(self.active_block.shape)):
return True
return False
def game_logic(self):
"""
Implementation of the main game logic. This function detects collisions
and the insertion of new tetris blocks.
"""
# Remember the current configuration and try to
# apply the action
self.active_block.backup()
self.apply_action()
# Border logic: check if we collide with the bottom border or any
# other border. This check also includes collision detection with other tetris blocks.
down_board = self.active_block.check_collision([self.board_down])
any_border = self.active_block.check_collision([self.board_left,self.board_up,self.board_right])
block_any = self.block_colides()
# Restore the configuration if any collision was detected
if down_board or any_border or block_any:
self.active_block.restore()
# So far so good; back up the current state and try to move down (to detect a collision with another block).
# After that, detect the insertion of a new block. A new block is inserted if we reached the bottom border
# or we cannot move down.
self.active_block.backup()
self.active_block.move(0,constants.BHEIGHT)
can_move_down = not self.block_colides()
self.active_block.restore()
# We end the game if we are on the respawn and we cannot move --> bang!
if not can_move_down and (self.start_x == self.active_block.x and self.start_y == self.active_block.y):
self.game_over = True
# A new block is inserted if we reached the bottom board or we cannot move down.
if down_board or not can_move_down:
# Request new block
self.new_block = True
# Detect the filled line and possibly remove the line from the
# screen.
self.detect_line()
def detect_line(self):
"""
Detect if a line is filled. If so, remove the line and
move the remaining building blocks to new positions.
"""
# Get each shape block of the non-moving tetris block and try
# to detect a filled line. The number of building blocks is passed to the class
# in the init function.
for shape_block in self.active_block.shape:
tmp_y = shape_block.y
tmp_cnt = self.get_blocks_in_line(tmp_y)
# Detect if the line contains the given number of blocks
if tmp_cnt != self.blocks_in_line:
continue
# Ok, the full line is detected!
self.remove_line(tmp_y)
# Update the score.
self.score += self.blocks_in_line * constants.POINT_VALUE
# Check if we need to speed up the game. If yes, change control variables
if self.score > self.score_level:
self.score_level *= constants.SCORE_LEVEL_RATIO
self.speed *= constants.GAME_SPEEDUP_RATIO
# Change the game speed
self.set_move_timer()
def remove_line(self,y):
"""
Remove the line with the given Y coordinate. Blocks below the filled
line are untouched; the blocks above it are moved one level down.
Parameters:
- y - Y coordinate to remove.
"""
# Iterate over all blocks in the list and remove shape blocks with the given Y coordinate.
for block in self.blk_list:
block.remove_blocks(y)
# Setup new block list (not needed blocks are removed)
self.blk_list = [blk for blk in self.blk_list if blk.has_blocks()]
def get_blocks_in_line(self,y):
"""
Get the number of shape blocks on the Y coordinate.
Parameters:
- y - Y coordinate to scan.
"""
# Iterate over every block's shape list and increment the counter
# if the shape block's Y equals the given Y coordinate.
tmp_cnt = 0
for block in self.blk_list:
for shape_block in block.shape:
tmp_cnt += (1 if y == shape_block.y else 0)
return tmp_cnt
def draw_board(self, draw_status=True):
"""
Draw the white board.
"""
pygame.draw.rect(self.screen,constants.WHITE,self.board_up)
pygame.draw.rect(self.screen,constants.WHITE,self.board_down)
pygame.draw.rect(self.screen,constants.WHITE,self.board_left)
pygame.draw.rect(self.screen,constants.WHITE,self.board_right)
# Update the score
if draw_status:
self.print_status_line()
def get_block(self):
"""
Generate a new block into the game if required.
"""
if self.new_block:
# Get the block and add it into the block list(static for now)
tmp = random.randint(0,len(self.block_data)-1)
data = self.block_data[tmp]
self.active_block = block.Block(data[0],self.start_x,self.start_y,self.screen, data[1], data[2], data[3])
self.blk_list.append(self.active_block)
self.new_block = False
def draw_game(self, draw_status=True):
"""
Draw the game screen.
"""
# Clean the screen, draw the board and draw
# all tetris blocks
self.screen.fill(constants.BLACK)
self.draw_board(draw_status=draw_status)
for blk in self.blk_list:
blk.draw()
# Draw the screen buffer
pygame.display.flip()
# ======== !!! Below starts logic added for emulating the game, used for RL !!! ========
def draw_game_rl(self, episode, loss, episode_reward):
"""
Draw the game screen.
"""
# Clean the screen, draw the board and draw
# all tetris blocks
self.screen.fill(constants.BLACK)
self.draw_board(draw_status=False)
self.print_learning_status(episode=episode, loss=loss, episode_reward=episode_reward)
for blk in self.blk_list:
blk.draw()
# Draw the screen buffer
pygame.display.flip()
def remove_lines_emulator(self):
"""
Detect if a line is filled. If so, remove the line and
move the remaining building blocks to new positions.
"""
# Get each shape block of the non-moving tetris block and try
# to detect a filled line. The number of building blocks is passed to the class
# in the init function.
curr_lines_cleared = 0
for shape_block in self.active_block.shape:
tmp_y = shape_block.y
tmp_cnt = self.get_blocks_in_line(tmp_y)
# Detect if the line contains the given number of blocks
if tmp_cnt != self.blocks_in_line:
continue
# Ok, the full line is detected!
self.remove_line(tmp_y)
# Update the score.
self.score += self.blocks_in_line * constants.POINT_VALUE
curr_lines_cleared += 1
self.lines_cleared += curr_lines_cleared
return curr_lines_cleared
def get_potential_lines_cleared(self):
lines_cleared = 0
for shape_block in self.active_block.shape:
tmp_y = shape_block.y
tmp_cnt = self.get_blocks_in_line(tmp_y)
# Detect if the line contains the given number of blocks
if tmp_cnt != self.blocks_in_line:
continue
lines_cleared += 1
return lines_cleared
def create_possible_block_states(self):
x_positions_set = set(self.x_positions)
for blk in self.block_data:
self.active_block = block.Block(blk[0],self.start_x,self.start_y,self.screen, blk[1], blk[2], blk[3])
self.blk_list.append(self.active_block)
x_offsets = (np.array(self.x_positions) - self.active_block.x).tolist()
if self.active_block.type in [BlockType.O_BLOCK]:
rotations = [0]
elif self.active_block.type in [BlockType.I_BLOCK, BlockType.S_BLOCK, BlockType.Z_BLOCK]:
rotations = [0, 90]
else:
rotations = [0, 90, 180, 270]
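# The O piece is fully rotation-symmetric and the I, S and Z pieces repeat
# after a 180-degree turn, so fewer rotations need to be enumerated for them.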
for rotation in rotations:
for x_idx, x in enumerate(self.x_positions):
backup_cfg = self.active_block.backup_config()
self.active_block.rotate_by(rotation)
self.active_block.move(x_offsets[x_idx], 0)
active_block_rects = set([el.x for el in self.active_block.shape])
# If the block collided after the movement, skip this placement
if self.active_block.check_collision([self.board_left, self.board_right, self.board_down]) \
or len(active_block_rects - x_positions_set) > 0:
self.active_block.restore_config(*backup_cfg)
continue
self.drop_active_block()
# Store the reachable (x, x_idx, rotation) placement
self.possible_block_states[blk[3]].append((x, x_idx, rotation))
# Restore the original config
self.active_block.restore_config(*backup_cfg)
self.blk_list.clear()
def get_next_states(self):
x_offsets = (np.array(self.x_positions) - self.active_block.x).tolist()
state_action_pairs = {}
for x, x_idx, rotation in self.possible_block_states[self.active_block.type]:
# Back up the current block configuration
backup_cfg = self.active_block.backup_config()
self.active_block.rotate_by(rotation)
self.active_block.move(x_offsets[x_idx], 0)
self.drop_active_block()
# Get the state and store
lines_cleared = self.get_potential_lines_cleared()
state = self.get_game_state(lines_cleared, skip_active_block=False)
state_action_pairs[(x, rotation)] = state
# Restore the original config
self.active_block.restore_config(*backup_cfg)
return state_action_pairs
def get_next_display_states(self, display_state, draw_states=False):
x_offsets = (np.array(self.x_positions) - self.active_block.x).tolist()
state_action_pairs = {}
for x, x_idx, rotation in self.possible_block_states[self.active_block.type]:
# Back up the current block configuration
backup_cfg = self.active_block.backup_config()
self.active_block.rotate_by(rotation)
self.active_block.move(x_offsets[x_idx], 0)
self.drop_active_block()
self.draw_game()
# Get the state and store
state = self.get_display_state() - display_state
state_action_pairs[(x, rotation)] = state
if draw_states:
plt.figure()
plt.imshow(state.cpu().squeeze(0).permute(1, 2, 0).numpy(), interpolation='none', cmap='gray')
plt.show()
# Restore the original config
self.active_block.restore_config(*backup_cfg)
self.draw_game()
return state_action_pairs
def get_next_grid_states(self, grid_state):
x_offsets = (np.array(self.x_positions) - self.active_block.x).tolist()
state_action_pairs = {}
for x, x_idx, rotation in self.possible_block_states[self.active_block.type]:
# Back up the current block configuration
backup_cfg = self.active_block.backup_config()
self.active_block.rotate_by(rotation)
self.active_block.move(x_offsets[x_idx], 0)
self.drop_active_block()
# Get the state and store
new_state = self.get_game_grid_state(skip_active_block=False)
state = new_state - grid_state
state_action_pairs[(x, rotation)] = state
# Restore the original config
self.active_block.restore_config(*backup_cfg)
return state_action_pairs
def perform_action(self, x, rotation):
x_offsets = (np.array(self.x_positions) - self.active_block.x).tolist()
offset_index = self.x_positions.index(x)
offset = x_offsets[offset_index]
self.active_block.rotate_by(rotation)
self.active_block.move(offset, 0)
def drop_active_block(self, episode=None, loss=None, episode_reward=None, draw=False):
while True:
self.active_block.backup()
self.active_block.move(0, constants.BHEIGHT)
down_board = self.active_block.check_collision([self.board_down])
block_any = self.block_colides()
if down_board or block_any:
self.active_block.restore()
break
if draw:
time.sleep(0.000001)
self.draw_game_rl(episode, loss, episode_reward)
def check_collisions(self):
down_board = self.active_block.check_collision([self.board_down])
any_border = self.active_block.check_collision([self.board_left, self.board_up, self.board_right])
block_any = self.block_colides()
return down_board, any_border, block_any
def get_display_state(self):
resize = T.Compose([T.ToPILImage(),
T.Resize(40, interpolation=Image.CUBIC),
T.ToTensor()])
display = pygame.surfarray.array3d(pygame.display.get_surface())
display = display.transpose([1, 0, 2])
# Convert to grayscale.
display = cv2.cvtColor(display, cv2.COLOR_BGR2GRAY)
display[display > 0] = 255
# Remove score board and edges.
img_h, img_w = display.shape
display = display[
constants.BOARD_UP_MARGIN + constants.BOARD_HEIGHT:img_h - constants.BOARD_HEIGHT,
constants.BOARD_HEIGHT:img_w - constants.BOARD_HEIGHT
]
display = np.ascontiguousarray(display, dtype=np.float32) / 255
display = torch.from_numpy(display)
display = resize(display).unsqueeze(0)
display = display.permute(1, 0, 2, 3)
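# Resulting tensor has shape (1, 1, H, W): one grayscale frame of the cropped
# board, with pixel values normalized to [0, 1].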
return display
def step(self, x, rotation, episode, loss, episode_reward, draw_game=True):
self.reward = 0
# Generate a new block into the game if required
self.get_block()
# Handle the events to allow pygame to handle internal actions
pygame.event.pump()
# Remember the current configuration and try to
# Apply the action supplied by the agent
self.active_block.backup()
self.perform_action(x, rotation)
can_move_down = not self.block_colides()
if not can_move_down:
self.game_over = True
state = self.get_game_state(0)
return state, -10, self.game_over
self.drop_active_block(episode, loss, episode_reward, draw=True)
_, any_border, block_any = self.check_collisions()
if any_border or block_any:
self.active_block.restore()
# Move down by one each step - no matter what
# self.active_block.backup()
# self.active_block.move(0, constants.BHEIGHT)
# down_board, any_border, block_any = self.check_collisions()
# if down_board or any_border or block_any:
# self.active_block.restore()
self.active_block.backup()
self.active_block.move(0, constants.BHEIGHT)
can_move_down = not self.block_colides()
down_board, any_border, block_any = self.check_collisions()
self.active_block.restore()
# down_board, any_border, block_any = self.check_collisions()
# We end the game if we are on the respawn and we cannot move --> bang!
if not can_move_down and (self.start_x == self.active_block.x and self.start_y == self.active_block.y):
self.game_over = True
current_lines_cleared = 0
# A new block is inserted if we reached the bottom board or we cannot move down.
if down_board or not can_move_down:
# Request new block
self.new_block = True
# A block was placed --> add 1 reward point
self.reward += 1
# Detect the filled line and possibly remove the line from the
# screen.
current_lines_cleared = self.remove_lines_emulator()
# Add the reward for lines cleared
self.reward += (2*current_lines_cleared)**2 * self.bx
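# Worked example: on a board that is 10 blocks wide (bx=10), clearing two
# lines in one placement adds (2*2)**2 * 10 = 160 reward on top of the +1
# awarded for placing the block.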
if draw_game: self.draw_game_rl(episode, loss, episode_reward)
done = self.done or self.game_over
if done:
self.reward -= 1
state = self.get_game_state(current_lines_cleared)
self.get_block()
return state, self.reward, done
def get_game_state(self, lines_cleared, skip_active_block=True):
grid = self.get_game_grid(skip_active_block=skip_active_block)
agg_height = self.aggregate_height(grid)
n_holes = self.number_of_holes(grid)
bumpiness, _, _ = self.bumpiness(grid)
block_type = [idx for idx, el in enumerate(self.block_data) if el[3] == self.active_block.type][0]
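# The state is a compact 5-feature summary of the board: aggregate column
# height, number of holes, bumpiness, lines cleared by this placement, and an
# index identifying the current piece type.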
return np.array([agg_height, n_holes, bumpiness, lines_cleared, block_type])
def get_game_grid(self, skip_active_block=True):
grid = np.zeros((len(self.y_positions), len(self.x_positions)), dtype=int)  # plain int: np.int was removed in NumPy 1.24
try:
for block in self.blk_list:
# Skip the active block when building the grid
if skip_active_block and block.x == self.active_block.x and block.y == self.active_block.y:
continue
for block_shape in block.shape:
x_grid_idx = self.x_positions.index(block_shape.x)
y_grid_idx = self.y_positions.index(block_shape.y)
grid[y_grid_idx, x_grid_idx] = 1
except Exception as e:
print(e)
print(self.x_positions)
print(self.y_positions)
return grid
def get_game_grid_state(self, skip_active_block=True):
game_grid_state = self.get_game_grid(skip_active_block)
game_grid_state = np.ascontiguousarray(game_grid_state, dtype=np.float32)
# Assumed completion: wrap the grid as a torch tensor so it can be combined
# with other grid states (e.g. subtracted) downstream.
return torch.from_numpy(game_grid_state)
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import sep
import numpy as np
from scipy.stats import sigmaclip
from astropy.table import Table
from ..math.array import xy2r, trim_array
from ._utils import _sep_fix_byte_order
from .detection import calc_fwhm
from ..logger import logger
from ..fits_utils import imhdus
def sky_annulus(data, x, y, r_ann, algorithm='mmm', mask=None, logger=logger):
"""Determine the sky value of a single pixel based on a sky annulus.
Parameters
----------
data : `~numpy.ndarray`
2D image data for photometry
x, y : array_like
Positions of the sources
r_ann : array_like([float, float])
Annulus radius (intern and extern) to calculate the background
value
algorithm : 'mmm' or 'sigmaclip' (optional)
Algorithm to calculate the background value. 'mmm' (mean, median,
mode) should be better for populated fields, while 'sigmaclip'
(clipped mean) should be better for sparse fields.
Default: 'mmm'
mask : `~numpy.ndarray` (optional)
Boolean mask of pixels to exclude from the sky estimate.
Returns
-------
sky : array_like
The computed value of sky for each (x, y) source.
sky_error : array_like
The error of the sky value, computed as the sigma-clipped standard deviation.
"""
# TODO: this code needs optimization for faster work
if len(x) != len(y):
raise ValueError('x and y variables don\'t have the same length.')
if len(r_ann) != 2:
raise ValueError('r_ann must have two components (r_in, r_out)')
sky = np.zeros_like(x, dtype='f8')
sky.fill(np.nan)
sky_error = np.zeros_like(x, dtype='f8')
sky_error.fill(np.nan)
box_size = 2*int(np.max(r_ann)+2) # Ensure the aperture is entirely in box
r_ann = sorted(r_ann)
indices = np.indices(data.shape)
for i in range(len(x)):
xi, yi = x[i]-0.5, y[i]-0.5 # needed to check pixel centers
d, ix, iy = trim_array(data, box_size, (xi, yi), indices)
if mask is not None:
m = np.ravel(mask[iy, ix])
r, f = xy2r(ix, iy, d, xi, yi)
# Filter only values inside the annulus
# Note: this does not perform sub-pixel sampling; it just checks the pixel center
filt = (r >= r_ann[0]) & (r <= r_ann[1])
# mask nans here to go faster
filt = filt & ~np.isnan(f)
if mask is not None:
filt = filt & ~m
f = f[np.where(filt)]
if len(f) < 1:
logger.warn('No pixels for sky subtraction found at position'
f' {x[i]}x{y[i]}.')
sky[i] = 0
sky_error[i] = 0
else:
for _ in range(3):
f, _, _ = sigmaclip(f)
mean = np.nanmean(f)
median = np.nanmedian(f)
sky_error[i] = np.nanstd(f)
# Assumed completion beyond this point: 'mmm' uses the classical
# mode estimate (3*median - 2*mean); 'sigmaclip' uses the clipped mean.
if algorithm == 'mmm':
    sky[i] = 3.0*median - 2.0*mean
else:
    sky[i] = mean
return sky, sky_error
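A minimal usage sketch (illustrative, with made-up data; not from the original package docs): estimate the sky around one source in a synthetic flat image.

import numpy as np

data = np.full((50, 50), 100.0)                 # constant 100-count "sky"
x, y = np.array([25.0]), np.array([25.0])       # one hypothetical source position
sky, sky_err = sky_annulus(data, x, y, r_ann=(8, 12), algorithm='sigmaclip')
# With a flat image we expect sky close to 100 and sky_err close to 0.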
# -*- coding: utf-8 -*-
#GSASIIdataGUI - Main GUI routines
########### SVN repository information ###################
# $Date: 2021-01-12 04:57:49 +0900 (Tue, 12 Jan 2021) $
# $Author: toby $
# $Revision: 4761 $
# $URL: https://subversion.xray.aps.anl.gov/pyGSAS/trunk/GSASIIdataGUI.py $
# $Id: GSASIIdataGUI.py 4761 2021-01-11 19:57:49Z toby $
########### SVN repository information ###################
'''
*GSASIIdataGUI: Main GSAS-II GUI*
------------------------------------
Module that defines GUI routines and classes for the main GUI Frame (window)
and the main routines that define the GSAS-II tree panel and much of the
data editing panel.
'''
from __future__ import division, print_function
import platform
import time
import math
import random as ran
import copy
import sys
import os
import inspect
if '2' in platform.python_version_tuple()[0]:
import cPickle
else:
try:
import _pickle as cPickle
except:
print('Warning: failed to import the optimized Py3 pickle (_pickle)')
import pickle as cPickle
import re
import numpy as np
import numpy.ma as ma
import matplotlib as mpl
try:
import OpenGL as ogl
try:
import OpenGL.GL # this fails in <=2020 versions of Python on OS X 11.x
except ImportError:
print('Drat, patching for Big Sur')
from ctypes import util
orig_util_find_library = util.find_library
def new_util_find_library( name ):
res = orig_util_find_library( name )
if res: return res
return '/System/Library/Frameworks/'+name+'.framework/'+name
util.find_library = new_util_find_library
except ImportError:
pass
import scipy as sp
import scipy.optimize as so
try:
import wx
import wx.grid as wg
#import wx.wizard as wz
#import wx.aui
import wx.lib.scrolledpanel as wxscroll
except ImportError:
pass
import GSASIIpath
GSASIIpath.SetVersionNumber("$Revision: 4761 $")
import GSASIImath as G2mth
import GSASIIIO as G2IO
import GSASIIfiles as G2fil
import GSASIIstrIO as G2stIO
import GSASIIlattice as G2lat
import GSASIIplot as G2plt
import GSASIIpwdGUI as G2pdG
import GSASIIimgGUI as G2imG
import GSASIIphsGUI as G2phG
import GSASIIspc as G2spc
import GSASIImapvars as G2mv
import GSASIIconstrGUI as G2cnstG
import GSASIIrestrGUI as G2restG
import GSASIIobj as G2obj
import GSASIIexprGUI as G2exG
import GSASIIlog as log
import GSASIIctrlGUI as G2G
import GSASIIElem as G2elem
import GSASIIpwd as G2pwd
import GSASIIstrMain as G2stMn
import defaultIparms as dI
import GSASIIfpaGUI as G2fpa
try:
wx.NewIdRef
wx.NewId = wx.NewIdRef
except AttributeError:
pass
# trig functions in degrees
sind = lambda x: np.sin(x*np.pi/180.)
tand = lambda x: np.tan(x*np.pi/180.)
cosd = lambda x: np.cos(x*np.pi/180.)
# Define short names for convenience
WACV = wx.ALIGN_CENTER_VERTICAL
VERY_LIGHT_GREY = wx.Colour(240,240,240)
DULL_YELLOW = (230,230,190)
# define Ids for wx menu items
commonTrans = {'abc':np.eye(3),'a-cb':np.array([[1.,0.,0.],[0.,0.,-1.],[0.,1.,0.]]),
'ba-c':np.array([[0.,1.,0.],[1.,0.,0.],[0.,0.,-1.]]),'-cba':np.array([[0.,0.,-1.],[0.,1.,0.],[1.,0.,0.]]),
'bca':np.array([[0.,1.,0.],[0.,0.,1.],[1.,0.,0.]]),'cab':np.array([[0.,0.,1.],[1.,0.,0.],[0.,1.,0.]]),
'R->H':np.array([[1.,-1.,0.],[0.,1.,-1.],[1.,1.,1.]]),'H->R':np.array([[2./3,1./3,1./3],[-1./3,1./3,1./3],[-1./3,-2./3,1./3]]),
'P->A':np.array([[-1.,0.,0.],[0.,-1.,1.],[0.,1.,1.]]),'R->O':np.array([[-1.,0.,0.],[0.,-1.,0.],[0.,0.,1.]]),
'P->B':np.array([[-1.,0.,1.],[0.,-1.,0.],[1.,0.,1.]]),'B->P':np.array([[-.5,0.,.5],[0.,-1.,0.],[.5,0.,.5]]),
'P->C':np.array([[1.,1.,0.],[1.,-1.,0.],[0.,0.,-1.]]),'C->P':np.array([[.5,.5,0.],[.5,-.5,0.],[0.,0.,-1.]]),
'P->F':np.array([[-1.,1.,1.],[1.,-1.,1.],[1.,1.,-1.]]),'F->P':np.array([[0.,.5,.5],[.5,0.,.5],[.5,.5,0.]]),
'P->I':np.array([[0.,1.,1.],[1.,0.,1.],[1.,1.,0.]]),'I->P':np.array([[-.5,.5,.5],[.5,-.5,.5],[.5,.5,-.5]]),
'A->P':np.array([[-1.,0.,0.],[0.,-.5,.5],[0.,.5,.5]]),'O->R':np.array([[-1.,0.,0.],[0.,-1.,0.],[0.,0.,1.]]),
'abc*':np.eye(3), }
commonNames = ['abc','bca','cab','a-cb','ba-c','-cba','P->A','A->P','P->B','B->P','P->C','C->P',
'P->I','I->P','P->F','F->P','H->R','R->H','R->O','O->R','abc*','setting 1->2'] #don't put any new ones after the setting one!
def SetDefaultDData(dType,histoName,NShkl=0,NDij=0):
if dType in ['SXC','SNC']:
return {'Histogram':histoName,'Show':False,'Scale':[1.0,True],
'Babinet':{'BabA':[0.0,False],'BabU':[0.0,False]},
'Extinction':['Lorentzian','None', {'Tbar':0.1,'Cos2TM':0.955,
'Eg':[1.e-10,False],'Es':[1.e-10,False],'Ep':[1.e-10,False]}],
'Flack':[0.0,False]}
elif dType == 'SNT':
return {'Histogram':histoName,'Show':False,'Scale':[1.0,True],
'Babinet':{'BabA':[0.0,False],'BabU':[0.0,False]},
'Extinction':['Lorentzian','None', {
'Eg':[1.e-10,False],'Es':[1.e-10,False],'Ep':[1.e-10,False]}]}
elif 'P' in dType:
return {'Histogram':histoName,'Show':False,'Scale':[1.0,False],
'Pref.Ori.':['MD',1.0,False,[0,0,1],0,{},[],0.1],
'Size':['isotropic',[1.,1.,1.],[False,False,False],[0,0,1],
[1.,1.,1.,0.,0.,0.],6*[False,]],
'Mustrain':['isotropic',[1000.0,1000.0,1.0],[False,False,False],[0,0,1],
NShkl*[0.01,],NShkl*[False,]],
'HStrain':[NDij*[0.0,],NDij*[False,]],
'Extinction':[0.0,False],'Babinet':{'BabA':[0.0,False],'BabU':[0.0,False]}}
def GetDisplay(pos):
'''Gets the display number (0 = main display) for window position pos. If pos is
outside all displays, returns None.
'''
displays = np.array([list(wx.Display(i).GetGeometry()) for i in range(wx.Display.GetCount())])
for ip,display in enumerate(displays):
display[2:3] += display[0:1]
if (display[0] < pos[0] < display[2]) and (display[1] < pos[1] < display[3]):
return ip
return None
################################################################################
#### class definitions used for main GUI
################################################################################
class MergeDialog(wx.Dialog):
''' HKL transformation & merge dialog
:param wx.Frame parent: reference to parent frame (or None)
:param data: HKLF data
'''
def __init__(self,parent,data):
wx.Dialog.__init__(self,parent,wx.ID_ANY,'Setup HKLF merge',
pos=wx.DefaultPosition,style=wx.DEFAULT_DIALOG_STYLE)
self.panel = wx.Panel(self) #just a dummy - gets destroyed in Draw!
self.data = data
self.Super = data[1]['Super']
if self.Super:
self.Trans = np.eye(4)
else:
self.Trans = np.eye(3)
self.Cent = 'noncentrosymmetric'
self.Laue = '1'
self.Class = 'triclinic'
self.Common = 'abc'
self.Draw()
def Draw(self):
def OnCent(event):
Obj = event.GetEventObject()
self.Cent = Obj.GetValue()
self.Laue = ''
wx.CallAfter(self.Draw)
def OnLaue(event):
Obj = event.GetEventObject()
self.Laue = Obj.GetValue()
wx.CallAfter(self.Draw)
def OnClass(event):
Obj = event.GetEventObject()
self.Class = Obj.GetValue()
self.Laue = ''
wx.CallAfter(self.Draw)
def OnCommon(event):
Obj = event.GetEventObject()
self.Common = Obj.GetValue()
self.Trans = commonTrans[self.Common]
wx.CallAfter(self.Draw)
self.panel.Destroy()
self.panel = wx.Panel(self)
mainSizer = wx.BoxSizer(wx.VERTICAL)
MatSizer = wx.BoxSizer(wx.HORIZONTAL)
transSizer = wx.BoxSizer(wx.VERTICAL)
transSizer.Add(wx.StaticText(self.panel,label=" HKL Transformation matrix: M*H = H'"))
if self.Super:
Trmat = wx.FlexGridSizer(4,4,0,0)
else:
commonSizer = wx.BoxSizer(wx.HORIZONTAL)
commonSizer.Add(wx.StaticText(self.panel,label=' Common transformations: '),0,WACV)
common = wx.ComboBox(self.panel,value=self.Common,choices=commonNames[:-2], #not the last two!
style=wx.CB_READONLY|wx.CB_DROPDOWN)
common.Bind(wx.EVT_COMBOBOX,OnCommon)
commonSizer.Add(common,0,WACV)
transSizer.Add(commonSizer)
Trmat = wx.FlexGridSizer(3,3,0,0)
for iy,line in enumerate(self.Trans):
for ix,val in enumerate(line):
item = G2G.ValidatedTxtCtrl(self.panel,self.Trans[iy],ix,nDig=(10,3),size=(65,25))
Trmat.Add(item)
transSizer.Add(Trmat)
MatSizer.Add((10,0),0)
MatSizer.Add(transSizer)
mainSizer.Add(MatSizer)
laueClass = ['triclinic','monoclinic','orthorhombic','trigonal(H)','tetragonal','hexagonal','cubic']
centroLaue = {'triclinic':['-1',],'monoclinic':['2/m','1 1 2/m','2/m 1 1',],
'orthorhombic':['m m m',],'trigonal(H)':['-3','-3 m 1','-3 1 m',], \
'tetragonal':['4/m','4/m m m',],'hexagonal':['6/m','6/m m m',],'cubic':['m 3','m 3 m']}
noncentroLaue = {'triclinic':['1',],'monoclinic':['2','2 1 1','1 1 2','m','m 1 1','1 1 m',],
'orthorhombic':['2 2 2','m m 2','m 2 m','2 m m',],
'trigonal(H)':['3','3 1 2','3 2 1','3 m 1','3 1 m',],
'tetragonal':['4','-4','4 2 2','4 m m','-4 2 m','-4 m 2',], \
'hexagonal':['6','-6','6 2 2','6 m m','-6 m 2','-6 2 m',],'cubic':['2 3','4 3 2','-4 3 m']}
centChoice = ['noncentrosymmetric','centrosymmetric']
mainSizer.Add(wx.StaticText(self.panel,label=' Select Laue class for new lattice:'),0)
Class = wx.ComboBox(self.panel,value=self.Class,choices=laueClass,
style=wx.CB_READONLY|wx.CB_DROPDOWN)
Class.Bind(wx.EVT_COMBOBOX,OnClass)
mainSizer.Add(Class,0)
mainSizer.Add(wx.StaticText(self.panel,label=' Target Laue symmetry:'),0)
Cent = wx.ComboBox(self.panel,value=self.Cent,choices=centChoice,
style=wx.CB_READONLY|wx.CB_DROPDOWN)
Cent.Bind(wx.EVT_COMBOBOX,OnCent)
mergeSizer = wx.BoxSizer(wx.HORIZONTAL)
mergeSizer.Add(Cent,0,WACV)
mergeSizer.Add((10,0),0)
Choice = centroLaue[self.Class]
if 'non' in self.Cent:
Choice = noncentroLaue[self.Class]
Laue = wx.ComboBox(self.panel,value=self.Laue,choices=Choice,
style=wx.CB_READONLY|wx.CB_DROPDOWN)
Laue.Bind(wx.EVT_COMBOBOX,OnLaue)
mergeSizer.Add(Laue,0,WACV)
mainSizer.Add(mergeSizer)
OkBtn = wx.Button(self.panel,-1,"Ok")
OkBtn.Bind(wx.EVT_BUTTON, self.OnOk)
cancelBtn = wx.Button(self.panel,-1,"Cancel")
cancelBtn.Bind(wx.EVT_BUTTON, self.OnCancel)
btnSizer = wx.BoxSizer(wx.HORIZONTAL)
btnSizer.Add((20,20),1)
if self.Laue:
btnSizer.Add(OkBtn)
btnSizer.Add((20,20),1)
btnSizer.Add(cancelBtn)
btnSizer.Add((20,20),1)
mainSizer.Add(btnSizer,0,wx.EXPAND|wx.BOTTOM|wx.TOP, 10)
self.panel.SetSizer(mainSizer)
self.panel.Fit()
self.Fit()
def GetSelection(self):
return self.Trans,self.Cent,self.Laue
def OnOk(self,event):
parent = self.GetParent()
parent.Raise()
self.EndModal(wx.ID_OK)
def OnCancel(self,event):
parent = self.GetParent()
parent.Raise()
self.EndModal(wx.ID_CANCEL)
def GUIpatches():
'Misc fixes that only need to be done when running a GUI'
try: # patch for LANG environment var problem on occasional OSX machines
import locale
locale.getdefaultlocale()
except ValueError:
print('Fixing locale (see https://github.com/matplotlib/matplotlib/issues/5420.)')
os.environ['LC_ALL'] = 'en_US.UTF-8'
locale.getdefaultlocale()
try:
import OpenGL
OpenGL # avoids unused package error
except ImportError:
print('*******************************************************')
print('PyOpenGL is missing from your python installation')
print(' - we will try to install it')
print('*******************************************************')
def install_with_easyinstall(package):
try:
print ("trying a system-wide PyOpenGl install")
easy_install.main(['-f',os.path.split(__file__)[0],package])
return
except:
pass
try:
print ("trying a user level PyOpenGl install")
easy_install.main(['-f',os.path.split(__file__)[0],'--user',package])
return
except:
print (u"Install of '+package+u' failed. Please report this information:")
import traceback
print (traceback.format_exc())
sys.exit()
from setuptools.command import easy_install
install_with_easyinstall('PyOpenGl')
print('*******************************************************')
print('OpenGL has been installed. Restarting GSAS-II')
print('*******************************************************')
loc = os.path.dirname(__file__)
import subprocess
subprocess.Popen([sys.executable,os.path.join(loc,'GSASII.py')])
sys.exit()
# PATCH: for Mavericks (OS X 10.9.x), wx produces an annoying warning about LucidaGrandeUI.
# In case stderr has been suppressed there, redirect python error output to stdout. Nobody
# else should care much about this.
sys.stderr = sys.stdout
def convVersion(version):
'''Convert a version string ("x", "x.y", "x.y.z") into a series of
ints.
:returns: [i0, i1, i2] where None is used if a value is not specified
and 0 is used if a field cannot be parsed.
'''
vIntList = [None,None,None]
for i,v in enumerate(version.split('.')):
if i >= 3: break
if len(v) == 0: break
v = list(filter(None,re.split('(\\d+)',v)))[0] # conv '1b2' to '1'
try:
vIntList[i] = int(v)
except:
vIntList[i] = 0
return vIntList
def compareVersions(version1,version2):
'''Compare two version strings ("x", "x.y", "x.y.z")
Note that '3.' matches '3.1', and '3.0' matches '3.0.1'
but '3.0.0' does not match '3.0.1'
:returns: 0 if the versions match, -1 if version1 < version2,
or 1 if version1 > version2
'''
for v1,v2 in zip(convVersion(version1),convVersion(version2)):
if v1 is None or v2 is None:
return 0
if v1 < v2: return -1
if v1 > v2: return 1
return 0
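# For example (per the docstring above):
#   compareVersions('3.0', '3.0.1')   returns  0  (unspecified fields match anything)
#   compareVersions('3.0.0', '3.0.1') returns -1
#   compareVersions('3.1', '3.0')     returns  1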
# tabulate package versions that users should be warned about
versionDict = {}
'''Variable versionDict is used to designate versions of packages that
should generate warnings or error messages.
* ``versionDict['tooOld']`` is a dict with module versions that are too old and are
known to cause serious errors
* ``versionDict['tooOldWarn']`` is a dict with module versions that are
significantly out of date and should be updated, but will probably function OK.
* ``versionDict['badVersionWarn']`` is a dict of with lists of package
versions that are known to have bugs. One should select an older or
newer version of the package.
* ``versionDict['tooNewWarn']`` is a dict with module versions that have not
been tested but have changes that lead us to believe that errors are
likely to happen.
**Packages/versions to be avoided**
* wxPython:
* <=2.x.x: while most of GSAS-II has been written to be
compatible with older versions of wxpython, we are now testing with
version 4.0 only. Version 3.0 is pretty similar to 4.0 and should not
have problems. wxpython 4.1 seems to create a lot of errors for
conflicting options that will need to be looked into.
* Matplotlib:
* 1.x: there have been significant API changes since these versions and
significant graphics errors will occur.
* 3.1.x and 3.2.x: these versions have a known bug for plotting
3-D surfaces, such as microstrain vs crystal axes. The plots may appear
distorted as the lengths of x, y & z will not be constrained as equal.
Preferably use 3.0.x as 3.3.x is not fully tested.
* numpy:
* 1.16.0: produces .gpx files that are not compatible with older
version numpy versions. This is a pretty outmoded version; upgrade.
'''
# add comments above when changing anything below
versionDict['tooOld'] = {'matplotlib': '1.'}
'modules that will certainly fail'
versionDict['tooOldWarn'] = {'wx': '2.'}
'modules that may fail but should be updated'
versionDict['badVersionWarn'] = {'numpy':['1.16.0'],
'matplotlib': ['3.1','3.2']}
'versions of modules that are known to have bugs'
versionDict['tooNewWarn'] = {'wx':'4.1'}
#'matplotlib': '3.4',
'module versions newer than what we have tested where problems are suspected'
def ShowVersions():
'''Show the versions of all required Python packages, etc.
'''
import numpy as np
import scipy as sp
import wx
import matplotlib as mpl
import OpenGL as ogl
import GSASIIpath
# print (versions)
print ("Python module versions loaded:")
print (" Python: %s from %s"%(sys.version.split()[0],sys.executable))
Image = None
version = '?'
versionDict['errors'] = ''
warn = False
for s,m in [('wx',wx), ('matplotlib', mpl), ('numpy',np),
('scipy',sp), ('OpenGL',ogl)]:
msg = ''
if s in versionDict['tooOld']:
match = compareVersions(m.__version__,versionDict['tooOld'][s])
if match <= 0:
msg = "version will cause problems"
warn = True
if versionDict['errors']: versionDict['errors'] += '\n'
versionDict['errors'] += 'Package {} version {} is too old for GSAS-II. An update is required'.format(s,m.__version__)
if s in versionDict['tooOldWarn']:
match = compareVersions(m.__version__,versionDict['tooOldWarn'][s])
if match <= 0:
msg = "version can cause problems"
warn = True
if s in versionDict['tooNewWarn']:
match = compareVersions(m.__version__,versionDict['tooNewWarn'][s])
if match >= 0:
msg = "version is too new and could cause problems"
warn = True
if s in versionDict['badVersionWarn']:
for v in versionDict['badVersionWarn'][s]:
if compareVersions(m.__version__,v) == 0:
msg = "version is known to be buggy"
warn = True
break
print(" {:12s}{} {}".format(s+':',m.__version__,msg))
try:
from PIL import Image
except ImportError:
try:
import Image
except ImportError:
pass
if Image is None:
print ("Image module not present; Note that PIL (Python Imaging Library) or pillow is needed for some image operations")
else:
# version # can be in various places, try standard dunderscore first
for ver in '__version__','VERSION','PILLOW_VERSION':
if hasattr(Image,ver):
try:
version = eval('Image.'+ver)
break
except:
pass
print (" Image: %s (PIL or Pillow)"%version)
print (" Platform: %s %s %s"%(sys.platform,platform.architecture()[0],platform.machine()))
try:
import mkl
print (" Max threads:%s"%mkl.get_max_threads())
except:
pass
rev = GSASIIpath.svnGetRev()
if rev is None:
"no SVN"
else:
rev = "SVN version {}".format(rev)
print ("Latest GSAS-II revision (from .py files): {} ({})\n".format(
GSASIIpath.GetVersionNumber(),rev))
# patch 11/2020: warn if GSASII path has not been updated past v4576.
# For unknown reasons on Mac with gsas2full, there have been checksum
# errors in the .so files that prevented svn from completing updates.
# If GSASIIpath.svnChecksumPatch is not present, then the fix for that
# has not been retrieved, so warn. Keep for a year or so.
try:
GSASIIpath.svnChecksumPatch
except:
print('Warning GSAS-II incompletely updated. Please contact <EMAIL>')
# end patch
if warn:
print('You are suggested to install a new version of GSAS-II.\nSee https://bit.ly/G2install',
'\n\nFor information on packages see\nhttps://gsas-ii.readthedocs.io/en/latest/packages.html and',
'\nhttps://gsas-ii.readthedocs.io/en/latest/GSASIIGUI.html#GSASIIdataGUI.versionDict')
###############################################################################
#### GUI creation
###############################################################################
def GSASIImain(application):
'''Start up the GSAS-II GUI'''
ShowVersions()
GUIpatches()
if platform.python_version()[:3] == '2.7':
msg = '''The end-of-life for python 2.7 was January 1, 2020.
We strongly recommend reinstalling GSAS-II from a new installation kit as we may not be able to offer support for operation of GSAS-II in python 2.7. See instructions for details.
'''
download = ''
cmds = []
instructions = 'https://subversion.xray.aps.anl.gov/trac/pyGSAS'
if sys.platform == "win32":
download = 'https://subversion.xray.aps.anl.gov/admin_pyGSAS/downloads/gsas2full-Latest-Windows-x86_64.exe'
instructions = 'https://subversion.xray.aps.anl.gov/trac/pyGSAS/wiki/SingleStepWindowsIllustrated'
elif sys.platform == "darwin":
cmds = ['echo starting download, please wait...',
'''echo 'curl "https://subversion.xray.aps.anl.gov/admin_pyGSAS/downloads/gsas2full-Latest-MacOSX-x86_64.sh" > /tmp/g2.sh; bash /tmp/g2.sh' ''',
'curl "https://subversion.xray.aps.anl.gov/admin_pyGSAS/downloads/gsas2full-Latest-MacOSX-x86_64.sh" > /tmp/g2.sh; bash /tmp/g2.sh'
]
instructions = 'https://subversion.xray.aps.anl.gov/trac/pyGSAS/wiki/MacSingleStepInstallerFigs'
elif sys.platform.startswith("linux"):
download = 'https://subversion.xray.aps.anl.gov/admin_pyGSAS/downloads/gsas2full-Latest-Linux-x86_64.sh'
instructions = 'https://subversion.xray.aps.anl.gov/trac/pyGSAS/wiki/LinuxSingleStepInstaller'
else:
print(u'Unknown platform: '+sys.platform)
if platform.architecture()[0] != '64bit' and sys.platform == "win32":
msg += '''\nYou are currently using 32-bit Python. Please check if you are running 32-bit windows or 64-bit windows (use Start/Settings/System/About & look for "System type".
We recommend using the 64-bit installer if you have 64-bit windows.'''
download = ''
elif platform.architecture()[0] != '64bit' and sys.platform.startswith("linux"):
msg += '''\nYou are using 32-bit Python. We now only package for 64-bit linux.
If you are running 32-bit linux you will need to install Python yourself.
See instructions at https://subversion.xray.aps.anl.gov/trac/pyGSAS/wiki/InstallLinux'''
instructions = 'https://subversion.xray.aps.anl.gov/trac/pyGSAS/wiki/InstallLinux'
dlg = wx.Dialog(None,wx.ID_ANY,'End-Of-Life warning for Python 2.7',
style=wx.DEFAULT_DIALOG_STYLE|wx.RESIZE_BORDER)
mainSizer = wx.BoxSizer(wx.VERTICAL)
txt = wx.StaticText(dlg,wx.ID_ANY,G2G.StripIndents(msg))
mainSizer.Add(txt)
txt.Wrap(400)
dlg.SetSizer(mainSizer)
btnsizer = wx.BoxSizer(wx.HORIZONTAL)
btnsizer.Add((1,1),1,wx.EXPAND,1)
OKbtn = wx.Button(dlg, wx.ID_OK,'Continue')
OKbtn.SetDefault()
OKbtn.Bind(wx.EVT_BUTTON,lambda event: dlg.EndModal(wx.ID_OK))
btnsizer.Add(OKbtn)
btn = wx.Button(dlg, wx.ID_ANY,'Show Instructions')
def openInstructions(event):
G2G.ShowWebPage(instructions,None)
btn.Bind(wx.EVT_BUTTON, openInstructions)
btnsizer.Add(btn)
if download:
btn = wx.Button(dlg, wx.ID_ANY,'Start Download')
btn.Bind(wx.EVT_BUTTON,lambda event: dlg.EndModal(wx.ID_YES))
btnsizer.Add(btn)
elif cmds:
btn = wx.Button(dlg, wx.ID_ANY,'Start Install')
btn.Bind(wx.EVT_BUTTON,lambda event: dlg.EndModal(wx.ID_CANCEL))
btnsizer.Add(btn)
#btn = wx.Button(dlg, wx.ID_CANCEL)
#btnsizer.AddButton(btn)
btnsizer.Add((1,1),1,wx.EXPAND,1)
#btnsizer.Realize()
mainSizer.Add((-1,5),1,wx.EXPAND,1)
mainSizer.Add(btnsizer,0,wx.ALIGN_CENTER,0)
mainSizer.Add((-1,10))
res = 0
try:
res = dlg.ShowModal()
finally:
dlg.Destroy()
if res == wx.ID_YES:
G2G.ShowWebPage(download,None)
G2G.ShowWebPage(instructions,None)
wx.Sleep(1)
dlg = wx.MessageDialog(None,G2G.StripIndents(
'''Download has been started in your browser; installation instructions will also be shown in a web page\n\nPress OK to exit GSAS-II, Cancel to continue.'''),
'start install',wx.OK|wx.CANCEL)
if dlg.ShowModal() == wx.ID_OK:
sys.exit()
elif res == wx.ID_CANCEL:
dlg = wx.MessageDialog(None,G2G.StripIndents(
'''Press OK to continue. Instructions will be shown in a web page.
Download and installation will start in the terminal window after you press OK. Respond to questions there.'''),
'start install',wx.OK|wx.CANCEL)
if dlg.ShowModal() == wx.ID_OK:
G2G.ShowWebPage(instructions,None)
GSASIIpath.runScript(cmds, wait=True)
sys.exit()
if versionDict['errors']:
dlg = wx.MessageDialog(None, versionDict['errors']+
'\n\nThe simplest solution is to install a new version of GSAS-II. '+
'See https://bit.ly/G2install',
'Python package problem', wx.OK)
try:
dlg.ShowModal()
finally:
dlg.Destroy()
sys.exit()
elif platform.python_version()[:3] not in ['2.7','3.6','3.7','3.8','3.9']:
dlg = wx.MessageDialog(None,
'GSAS-II requires Python 2.7.x or 3.6+\n Yours is '+sys.version.split()[0],
'Python version error', wx.OK)
try:
dlg.ShowModal()
finally:
dlg.Destroy()
sys.exit()
application.main = GSASII(None) # application.main is the main wx.Frame (G2frame in most places)
application.SetTopWindow(application.main)
# save the current package versions
application.main.PackageVersions = G2fil.get_python_versions([wx, mpl, np, sp, ogl])
if GSASIIpath.GetConfigValue('wxInspector'):
import wx.lib.inspection as wxeye
wxeye.InspectionTool().Show()
try:
application.SetAppDisplayName('GSAS-II')
except:
pass
#application.GetTopWindow().SendSizeEvent()
application.GetTopWindow().Show(True)
################################################################################
#### Create main frame (window) for GUI
################################################################################
class GSASII(wx.Frame):
'''Define the main GSAS-II frame and its associated menu items.
:param parent: reference to parent application
'''
def MenuBinding(self,event):
'''Called when a menu is clicked upon; looks up the binding in table
'''
log.InvokeMenuCommand(event.GetId(),self,event)
# def Bind(self,eventtype,handler,*args,**kwargs):
# '''Override the Bind function so that we can wrap calls that will be logged.
#
# N.B. This is a bit kludgy. Menu bindings with an id are wrapped and
# menu bindings with an object and no id are not.
# '''
# if eventtype == wx.EVT_MENU and 'id' in kwargs:
# menulabels = log.SaveMenuCommand(kwargs['id'],self,handler)
# if menulabels:
# wx.Frame.Bind(self,eventtype,self.MenuBinding,*args,**kwargs)
# return
# wx.Frame.Bind(self,eventtype,handler,*args,**kwargs)
def _Add_FileMenuItems(self, parent):
'''Add items to File menu
'''
item = parent.Append(wx.ID_ANY,'&Open project...\tCtrl+O','Open a GSAS-II project file (*.gpx)')
self.Bind(wx.EVT_MENU, self.OnFileOpen, id=item.GetId())
if sys.platform == "darwin":
item = parent.Append(wx.ID_ANY,'&Open in new window...','Open a GSAS-II project file (*.gpx) in a separate process')
self.Bind(wx.EVT_MENU, self.OnNewGSASII, id=item.GetId())
item = parent.Append(wx.ID_ANY,'Reopen recent...\tCtrl+E','Reopen a previously used GSAS-II project file (*.gpx)')
self.Bind(wx.EVT_MENU, self.OnFileReopen, id=item.GetId())
item = parent.Append(wx.ID_ANY,'&Save project\tCtrl+S','Save project under current name')
self.Bind(wx.EVT_MENU, self.OnFileSave, id=item.GetId())
item = parent.Append(wx.ID_ANY,'Save project as...','Save current project to new file')
self.Bind(wx.EVT_MENU, self.OnFileSaveas, id=item.GetId())
item = parent.Append(wx.ID_ANY,'&New project','Create empty new project, saving current is optional')
self.Bind(wx.EVT_MENU, self.OnFileClose, id=item.GetId())
item = parent.Append(wx.ID_PREFERENCES,"&Preferences",'')
self.Bind(wx.EVT_MENU, self.OnPreferences, item)
if GSASIIpath.whichsvn():
item = parent.Append(wx.ID_ANY,'Edit proxy...','Edit proxy internet information (used for updates)')
self.Bind(wx.EVT_MENU, self.EditProxyInfo, id=item.GetId())
if GSASIIpath.GetConfigValue('debug'):
def OnIPython(event):
GSASIIpath.IPyBreak()
item = parent.Append(wx.ID_ANY,"IPython Console",'')
self.Bind(wx.EVT_MENU, OnIPython, item)
def OnwxInspect(event):
import wx.lib.inspection as wxeye
wxeye.InspectionTool().Show()
item = parent.Append(wx.ID_ANY,"wx inspection tool",'')
self.Bind(wx.EVT_MENU, OnwxInspect, item)
item = parent.Append(wx.ID_EXIT,'Exit\tALT+F4','Exit from GSAS-II')
self.Bind(wx.EVT_MENU, self.ExitMain, id=item.GetId())
def _Add_DataMenuItems(self,parent):
'''Add items to Data menu
'''
# item = parent.Append(
# help='',id=wx.ID_ANY,
# kind=wx.ITEM_NORMAL,
# text='Read image data...')
# self.Bind(wx.EVT_MENU, self.OnImageRead, id=item.GetId())
item = parent.Append(wx.ID_ANY,'Read Powder Pattern Peaks...','')
self.Bind(wx.EVT_MENU, self.OnReadPowderPeaks, id=item.GetId())
item = parent.Append(wx.ID_ANY,'Sum or Average powder data','')
self.Bind(wx.EVT_MENU, self.OnPwdrSum, id=item.GetId())
item = parent.Append(wx.ID_ANY,'Sum image data','')
self.Bind(wx.EVT_MENU, self.OnImageSum, id=item.GetId())
item = parent.Append(wx.ID_ANY,'Add new phase','')
self.Bind(wx.EVT_MENU, self.OnAddPhase, id=item.GetId())
item = parent.Append(wx.ID_ANY,'Delete phase entries','')
self.Bind(wx.EVT_MENU, self.OnDeletePhase, id=item.GetId())
item = parent.Append(wx.ID_ANY,'Rename data entry',
'Rename the selected data tree item (PWDR, HKLF or IMG)')
self.Bind(wx.EVT_MENU, self.OnRenameData, id=item.GetId())
item = parent.Append(wx.ID_ANY,'Delete data entries',
'Delete selected data items from data tree')
self.Bind(wx.EVT_MENU, self.OnDataDelete, id=item.GetId())
item = parent.Append(wx.ID_ANY,'Delete plots','Delete selected plots')
self.Bind(wx.EVT_MENU, self.OnPlotDelete, id=item.GetId())
expandmenu = wx.Menu()
item = parent.AppendSubMenu(expandmenu,'Expand tree items',
'Expand items of type in GSAS-II data tree')
for s in 'all','IMG','PWDR','PDF','HKLF','SASD','REFD':
if s == 'all':
help = 'Expand all items in GSAS-II data tree'
else:
help = 'Expand '+s+' type items in GSAS-II data tree'
item = expandmenu.Append(wx.ID_ANY,s,help)
self.Bind(wx.EVT_MENU,self.ExpandAll,id=item.GetId())
movemenu = wx.Menu()
item = parent.AppendSubMenu(movemenu,'Move tree items',
'Move items of type items to end of GSAS-II data tree')
for s in 'IMG','PWDR','PDF','HKLF','SASD','REFD','Phase':
help = 'Move '+s+' type items to end of GSAS-II data tree'
item = movemenu.Append(wx.ID_ANY,s,help)
self.Bind(wx.EVT_MENU,self.MoveTreeItems,id=item.GetId())
def _Add_CalculateMenuItems(self,parent):
item = parent.Append(wx.ID_ANY,'Setup PDFs','Create PDF tree entries for selected powder patterns')
self.MakePDF.append(item)
self.Bind(wx.EVT_MENU, self.OnMakePDFs, id=item.GetId())
item = parent.Append(wx.ID_ANY,'&View LS parms\tCTRL+L','View least squares parameters')
self.Bind(wx.EVT_MENU, self.OnShowLSParms, id=item.GetId())
item = parent.Append(wx.ID_ANY,'&Refine\tCTRL+R','Perform a refinement')
if len(self.Refine): # extend state for new menus to match main (on mac)
state = self.Refine[0].IsEnabled()
else:
state = False
item.Enable(state)
self.Refine.append(item)
self.Bind(wx.EVT_MENU, self.OnRefine, id=item.GetId())
item = parent.Append(wx.ID_ANY,'&Run Fprime','X-ray resonant scattering')
self.Bind(wx.EVT_MENU, self.OnRunFprime, id=item.GetId())
item = parent.Append(wx.ID_ANY,'&Run Absorb','x-ray absorption')
self.Bind(wx.EVT_MENU, self.OnRunAbsorb, id=item.GetId())
# if GSASIIpath.GetConfigValue('debug'): # allow exceptions for debugging
# item = parent.Append(help='', id=wx.ID_ANY, kind=wx.ITEM_NORMAL,
# text='tree test')
# self.Bind(wx.EVT_MENU, self.TreeTest, id=item.GetId())
def _init_Imports(self):
'''import all the G2phase*.py & G2sfact*.py & G2pwd*.py files that
are found in the path
'''
self.ImportPhaseReaderlist = G2fil.LoadImportRoutines('phase','Phase')
self.ImportSfactReaderlist = G2fil.LoadImportRoutines('sfact','Struct_Factor')
self.ImportPowderReaderlist = G2fil.LoadImportRoutines('pwd','Powder_Data')
self.ImportSmallAngleReaderlist = G2fil.LoadImportRoutines('sad','SmallAngle_Data')
self.ImportReflectometryReaderlist = G2fil.LoadImportRoutines('rfd','Reflectometry_Data')
self.ImportPDFReaderlist = G2fil.LoadImportRoutines('pdf','PDF_Data')
self.ImportImageReaderlist = G2fil.LoadImportRoutines('img','Images')
self.ImportMenuId = {}
def testSeqRefineMode(self):
'''Returns the list of histograms included in a sequential refinement or
an empty list if a standard (non-sequential) refinement.
Also sets Menu item status depending on mode
'''
cId = GetGPXtreeItemId(self,self.root, 'Controls')
if cId:
controls = self.GPXtree.GetItemPyData(cId)
seqSetting = controls.get('Seq Data',[])
else:
seqSetting = None
if seqSetting:
for item in self.Refine:
item.SetItemLabel('Se&quential refine\tCtrl+R') #might fail on old wx
seqMode = True
else:
for item in self.Refine:
item.SetItemLabel('&Refine\tCtrl+R') #might fail on old wx
seqMode = False
for menu,Id in self.ExportSeq:
menu.Enable(Id,seqMode)
for menu,Id in self.ExportNonSeq:
menu.Enable(Id,not seqMode)
return seqSetting
def PreviewFile(self,filename):
'utility to confirm we have the right file'
fp = open(filename,'r')
rdmsg = u'File '+ filename +u' begins:\n\n'
try:
rdmsg += fp.read(80)
rdmsg += '\n\nDo you want to read this file?'
except UnicodeDecodeError:
rdmsg = None
fp.close()
if rdmsg is None or not all([ord(c) < 128 and ord(c) != 0 for c in rdmsg]): # show only if ASCII
rdmsg = u'File '+ filename +u' is a binary file. Do you want to read this file?'
# it would be better to use something that
# would resize better, but this will do for now
dlg = wx.MessageDialog(
self, rdmsg,
'Is this the file you want?',
wx.YES_NO | wx.ICON_QUESTION,
)
dlg.SetSize((700,300)) # does not resize on Mac
result = wx.ID_NO
try:
result = dlg.ShowModal()
finally:
dlg.Destroy()
if result == wx.ID_NO: return True
return False
def OnImportGeneric(self,reader,readerlist,label,multiple=False,
usedRanIdList=[],Preview=True,load2Tree=False):
'''Used for all imports, including Phases, datasets, images...
Called from :meth:`GSASII.OnImportPhase`, :meth:`GSASII.OnImportImage`,
:meth:`GSASII.OnImportSfact`, :meth:`GSASII.OnImportPowder`,
:meth:`GSASII.OnImportSmallAngle` and :meth:`GSASII.OnImportReflectometry`
Uses reader_objects subclassed from :class:`GSASIIobj.ImportPhase`,
:class:`GSASIIobj.ImportStructFactor`,
:class:`GSASIIobj.ImportPowderData`,
:class:`GSASIIobj.ImportSmallAngleData`
:class:`GSASIIobj.ImportReflectometryData` or
:class:`GSASIIobj.ImportImage`.
If a specific reader is specified, only that method will be called,
but if no reader is specified, every one that is potentially
compatible (by file extension) will be tried on the file(s)
selected in the Open File dialog.
:param reader_object reader: This will be a reference to
a particular object to be used to read a file or None,
if every appropriate reader should be used.
:param list readerlist: a list of reader objects appropriate for
the current read attempt. At present, this will be either
self.ImportPhaseReaderlist, self.ImportSfactReaderlist
self.ImportPowderReaderlist or self.ImportImageReaderlist
(defined in _init_Imports from the files found in the path),
but in theory this list could be tailored.
Used only when reader is None.
:param str label: string to place on the open file dialog:
Open `label` input file
:param bool multiple: True if multiple files can be selected
in the file dialog. False is default. At present True is used
only for reading of powder data.
:param list usedRanIdList: an optional list of random Ids that
have been used and should not be reused
:param bool Preview: indicates if a preview of the file should
be shown. Default is True, but set to False for image files
which are all binary.
:param bool load2Tree: indicates if the file should be loaded
into the data tree immediately (used for images only). True
only when called from :meth:`OnImportImage`; causes return
value to change to a list of True values rather than
reader objects.
:returns: a list of reader objects (rd_list) that were able
to read the specified file(s). This list may be empty.
'''
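# A minimal sketch of how this routine is typically invoked (mirroring the
# menu handlers defined later in this class; the variable names below are
# illustrative only, not part of the API):
#
#   rdlist = self.OnImportGeneric(None, self.ImportPowderReaderlist,
#                                 'Powder Data', multiple=True)
#   for rd in rdlist:       # each rd is a reader object holding the parsed data
#       process(rd)         # hypothetical follow-up processing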
self.lastimport = ''
self.zipfile = None
singlereader = True
if reader is None:
singlereader = False
multiple = False
#print "use all formats"
choices = "any file (*.*)|*.*"
choices += "|zip archive (.zip)|*.zip"
extdict = {}
# compile a list of allowed extensions
for rd in readerlist:
fmt = rd.formatName
for extn in rd.extensionlist:
if not extdict.get(extn): extdict[extn] = []
extdict[extn] += [fmt,]
for extn in sorted(extdict.keys(),key=lambda k: k.lower()):
fmt = ''
for f in extdict[extn]:
if fmt != "": fmt += ', '
fmt += f
choices += "|" + fmt + " file (*" + extn + ")|*" + extn
else:
readerlist = [reader,]
# compile a list of allowed extensions
choices = reader.formatName + " file ("
w = ""
for extn in reader.extensionlist:
if w != "": w += ";"
w += "*" + extn
choices += w + ")|" + w
choices += "|zip archive (.zip)|*.zip"
if not reader.strictExtension:
choices += "|any file (*.*)|*.*"
# get the file(s)
if multiple:
mode = wx.FD_OPEN|wx.FD_MULTIPLE
else:
mode = wx.FD_OPEN
if len(readerlist) > 1:
typ = ' (type to be guessed)'
else:
typ = ' (type '+readerlist[0].formatName+')'
filelist = G2G.GetImportFile(self,
message="Choose "+label+" input file"+typ,
defaultFile="",wildcard=choices,style=mode)
rd_list = []
filelist1 = []
for filename in filelist:
# is this a zip file?
if os.path.splitext(filename)[1].lower() == '.zip':
extractedfiles = G2IO.ExtractFileFromZip(
filename,parent=self,
multipleselect=True)
if extractedfiles is None: continue # error or Cancel
if extractedfiles != filename:
self.zipfile = filename # save zip name
filelist1 += extractedfiles
continue
filelist1.append(filename)
filelist = filelist1
Start = True #1st time read - clear selections below
for filename in filelist:
# is this a zip file?
if os.path.splitext(filename)[1].lower() == '.zip':
extractedfile = G2IO.ExtractFileFromZip(filename,parent=self)
if extractedfile is None: continue # error or Cancel
if extractedfile != filename:
filename,self.zipfile = extractedfile,filename # now use the file that was created
# determine which formats are compatible with this file
primaryReaders = []
secondaryReaders = []
for rd in readerlist:
flag = rd.ExtensionValidator(filename)
if flag is None:
secondaryReaders.append(rd)
elif flag:
primaryReaders.append(rd)
if len(secondaryReaders) + len(primaryReaders) == 0 and reader:
self.ErrorDialog('Not supported','The selected reader cannot read file '+filename)
return []
elif len(secondaryReaders) + len(primaryReaders) == 0:
self.ErrorDialog('No Format','No matching format for file '+filename)
return []
fp = None
msg = ''
if len(filelist) == 1 and Preview:
if self.PreviewFile(filename): return []
self.lastimport = filename # this is probably not what I want to do -- it saves only the
# last name in a series. See rd.readfilename for a better name.
# try the file first with Readers that specify the
# file's extension and later with ones that merely allow it
errorReport = ''
for rd in primaryReaders+secondaryReaders:
if Start: #clear old bank selections to allow new ones to be selected by user
rd.selections = []
rd.dnames = []
rd.ReInitialize() # purge anything from a previous read
rd.errors = "" # clear out any old errors
if not rd.ContentsValidator(filename): # rejected on cursory check
errorReport += "\n "+rd.formatName + ' validator error'
if rd.errors:
errorReport += ': '+rd.errors
continue
if len(rd.selections)>1 and Start:
dlg = G2G.G2MultiChoiceDialog(self,'Dataset Selector','Select data to read from the list below',rd.dnames)
if dlg.ShowModal() == wx.ID_OK:
rd.selections = dlg.GetSelections()
Start = False
dlg.Destroy()
repeat = True
rdbuffer = {} # create temporary storage for file reader
block = 0
while repeat: # loop if the reader asks for another pass on the file
block += 1
repeat = False
rd.objname = os.path.basename(filename)
flag = False
if GSASIIpath.GetConfigValue('debug'): # allow exceptions for debugging
flag = rd.Reader(filename,self,buffer=rdbuffer,blocknum=block,
usedRanIdList=usedRanIdList,)
else:
try:
flag = rd.Reader(filename,self,buffer=rdbuffer,
blocknum=block,usedRanIdList=usedRanIdList,)
except rd.ImportException as detail:
rd.errors += "\n Read exception: "+str(detail)
except Exception as detail:
import traceback
rd.errors += "\n Unhandled read exception: "+str(detail)
rd.errors += "\n Traceback info:\n"+str(traceback.format_exc())
if flag: # this read succeeded
if rd.SciPy: #was default read by scipy; needs 1 time fixes
G2IO.EditImageParms(self,rd.Data,rd.Comments,rd.Image,filename)
rd.SciPy = False
rd.readfilename = filename
if load2Tree: #images only
if rd.repeatcount == 1 and not rd.repeat: # skip image number if only one in set
rd.Data['ImageTag'] = None
else:
rd.Data['ImageTag'] = rd.repeatcount
rd.Data['formatName'] = rd.formatName
if rd.sumfile:
rd.readfilename = rd.sumfile
# Load generic metadata, as configured
G2fil.GetColumnMetadata(rd)
G2IO.LoadImage2Tree(rd.readfilename,self,rd.Comments,rd.Data,rd.Npix,rd.Image)
rd_list.append(True) # save a stub in place of the reader; the image was already loaded into the tree
del rd.Image
else:
rd_list.append(copy.deepcopy(rd)) # save the result before it is written over
if rd.repeat:
repeat = True
continue
errorReport += '\n'+rd.formatName + ' read error'
if rd.errors:
errorReport += ': '+rd.errors
if rd_list: # read succeeded, was there a warning or any errors?
if rd.warnings:
self.ErrorDialog('Read Warning','The '+ rd.formatName+
' reader reported a warning message:\n\n'+rd.warnings)
break # success in reading, try no further
else:
if singlereader:
msg += '\n'+rd.warnings
print(u'The '+ rd.formatName+u' reader was not able to read file '+filename+msg)
try:
print(u'\n\nError message(s):\n\t'+errorReport)
except:
pass
self.ErrorDialog('Read Error','The '+ rd.formatName+
' reader was not able to read file '+filename+msg)
else:
print('No reader was able to read file '+filename+msg)
try:
print('\n\nError message(s):\n\t'+errorReport)
except:
pass
self.ErrorDialog('Read Error','No reader was able to read file '+filename+msg)
if fp: fp.close()
return rd_list
def _Add_ImportMenu_Phase(self,parent):
'''configure the Import Phase menus according to the readers found in _init_Imports
'''
submenu = wx.Menu()
item = parent.AppendSubMenu(submenu,'Phase','Import phase data')
for reader in self.ImportPhaseReaderlist:
item = submenu.Append(wx.ID_ANY,u'from '+reader.formatName+u' file',reader.longFormatName)
self.ImportMenuId[item.GetId()] = reader
self.Bind(wx.EVT_MENU, self.OnImportPhase, id=item.GetId())
item = submenu.Append(wx.ID_ANY,'guess format from file','Import phase data, use file to try to determine format')
self.Bind(wx.EVT_MENU, self.OnImportPhase, id=item.GetId())
def OnImportPhase(self,event):
'''Called in response to an Import/Phase/... menu item
to read phase information.
dict self.ImportMenuId is used to look up the specific
reader item associated with the menu item, which will be
None for the last menu item, which is the "guess" option
where all appropriate formats will be tried.
'''
# look up which format was requested
reqrdr = self.ImportMenuId.get(event.GetId())
# make a list of phase names, ranId's and the histograms used in those phases
phaseRIdList,usedHistograms = self.GetPhaseInfofromTree()
phaseNameList = list(usedHistograms.keys()) # phase names in use
usedHKLFhists = [] # used single-crystal histograms
for p in usedHistograms:
for h in usedHistograms[p]:
if h.startswith('HKLF ') and h not in usedHKLFhists:
usedHKLFhists.append(h)
rdlist = self.OnImportGeneric(reqrdr,self.ImportPhaseReaderlist,
'phase',usedRanIdList=phaseRIdList)
if len(rdlist) == 0: return
# for now rdlist is only expected to have one element
# but below will allow multiple phases to be imported
# if the import routines ever implement multiple phase reads.
self.CheckNotebook()
newPhaseList = []
for rd in rdlist:
PhaseName = ''
dlg = wx.TextEntryDialog(self, 'Enter the name for the new phase',
'Edit phase name', rd.Phase['General']['Name'],style=wx.OK)
while PhaseName == '':
dlg.CenterOnParent()
if dlg.ShowModal() == wx.ID_OK:
PhaseName = dlg.GetValue().strip()
else:
dlg.Destroy()
return
dlg.Destroy()
# make new phase names unique
rd.Phase['General']['Name'] = G2obj.MakeUniqueLabel(PhaseName,phaseNameList)
if rd.Phase['General']['SGData']['SpGrp'] in G2spc.spg2origins:
choice = G2G.ChooseOrigin(self,rd)
if choice is None: return # dialog cancelled
rd.Phase = choice
PhaseName = rd.Phase['General']['Name'][:]
newPhaseList.append(PhaseName)
print(u'Read phase {} from file {}'.format(PhaseName,self.lastimport))
if not GetGPXtreeItemId(self,self.root,'Phases'):
sub = self.GPXtree.AppendItem(parent=self.root,text='Phases')
else:
sub = GetGPXtreeItemId(self,self.root,'Phases')
psub = self.GPXtree.AppendItem(parent=sub,text=PhaseName)
self.GPXtree.SetItemPyData(psub,rd.Phase)
wx.CallAfter(self.GPXtree.SelectItem,psub) # should call SelectDataTreeItem
try:
rd.MPhase['General']['Name'] = G2obj.MakeUniqueLabel(PhaseName+' mag',phaseNameList)
PhaseName = rd.MPhase['General']['Name'][:]
newPhaseList.append(PhaseName)
psub = self.GPXtree.AppendItem(parent=sub,text=PhaseName)
self.GPXtree.SetItemPyData(psub,rd.MPhase)
wx.CallAfter(self.GPXtree.SelectItem,psub) # should call SelectDataTreeItem
except (AttributeError,TypeError):
pass
self.GPXtree.Expand(self.root) # make sure phases are seen
self.GPXtree.Expand(sub)
self.GPXtree.Expand(psub)
self.PickIdText = None
# add constraints imported with phase to tree
# at present, constraints are generated only in ISODISTORT_proc in the
# CIF import
if rd.Constraints:
sub = GetGPXtreeItemId(self,self.root,'Constraints') # was created in CheckNotebook if needed
Constraints = self.GPXtree.GetItemPyData(sub)
for i in rd.Constraints:
if type(i) is dict:
if '_Explain' not in Constraints: Constraints['_Explain'] = {}
Constraints['_Explain'].update(i)
else:
Constraints['Phase'].append(i)
if not newPhaseList: return # somehow, no new phases
# get a list of existing histograms
PWDRlist = []
HKLFlist = []
if self.GPXtree.GetCount():
item, cookie = self.GPXtree.GetFirstChild(self.root)
while item:
name = self.GPXtree.GetItemText(item)
if name.startswith('PWDR ') and name not in PWDRlist:
PWDRlist.append(name)
if name.startswith('HKLF ') and name not in HKLFlist:
HKLFlist.append(name)
item, cookie = self.GPXtree.GetNextChild(self.root, cookie)
TextList = PWDRlist + HKLFlist
if not TextList:
return #no histograms
header = 'Select histogram(s) to add to new phase(s):'
for phaseName in newPhaseList:
header += '\n '+phaseName
notOK = True
while notOK:
result = G2G.ItemSelector(TextList,self,header,header='Add histogram(s)',multiple=True)
if not result: return
# check that selected single crystal histograms are not already in use!
used = [TextList[i] for i in result if TextList[i] in usedHKLFhists]
#for i in result:
# if TextList[i] in usedHKLFhists: used.append(TextList[i])
if used:
msg = 'The following single crystal histogram(s) are already in use'
for i in used:
msg += '\n '+str(i)
msg += '\nAre you sure you want to add them to this phase? '
msg += 'Associating a single crystal dataset with more than one phase is usually an error, '
msg += 'so No is suggested here.'
if self.ErrorDialog('Likely error',msg,self,wtype=wx.YES_NO) == wx.ID_YES: notOK = False
else:
notOK = False
# connect new phases to histograms
sub = GetGPXtreeItemId(self,self.root,'Phases')
if not sub:
raise Exception('ERROR -- why are there no phases here?')
wx.BeginBusyCursor()
item, cookie = self.GPXtree.GetFirstChild(sub)
while item: # loop over (new) phases
phaseName = self.GPXtree.GetItemText(item)
data = self.GPXtree.GetItemPyData(item)
item, cookie = self.GPXtree.GetNextChild(sub, cookie)
if phaseName not in newPhaseList: continue
generalData = data['General']
SGData = generalData['SGData']
Super = generalData.get('Super',0)
SuperVec = []
if Super:
SuperVec = np.array(generalData['SuperVec'][0])
UseList = data['Histograms']
NShkl = len(G2spc.MustrainNames(SGData))
NDij = len(G2spc.HStrainNames(SGData))
for i in result:
histoName = TextList[i]
if histoName in HKLFlist:
#redo UpdateHKLFdata(histoName) here:
Id = GetGPXtreeItemId(self,self.root,histoName)
refDict,reflData = self.GPXtree.GetItemPyData(Id)
G,g = G2lat.cell2Gmat(generalData['Cell'][1:7])
Super = reflData.get('Super',0)
for iref,ref in enumerate(reflData['RefList']):
hkl = ref[:3]
if Super:
H = list(hkl+SuperVec*ref[3])
else:
H = hkl
ref[4+Super] = np.sqrt(1./G2lat.calc_rDsq2(H,G))
iabsnt = G2spc.GenHKLf(H,SGData)[0]
if iabsnt: #flag space gp. absences
if Super:
if not ref[2+Super]:
ref[3+Super] = 0
else:
ref[3+Super] = 1 #twin id
else:
ref[3] = 0
UseList[histoName] = SetDefaultDData(reflData['Type'],histoName)
elif histoName in PWDRlist:
Id = GetGPXtreeItemId(self,self.root,histoName)
refList = self.GPXtree.GetItemPyData(
GetGPXtreeItemId(self,Id,'Reflection Lists'))
refList[generalData['Name']] = {}
UseList[histoName] = SetDefaultDData('PWDR',histoName,NShkl=NShkl,NDij=NDij)
else:
raise Exception('Unexpected histogram '+histoName)
wx.EndBusyCursor()
self.EnableRefineCommand()
return # success
def _Add_ImportMenu_Image(self,parent):
'''configure the Import Image menus according to the readers found in _init_Imports
'''
submenu = wx.Menu()
item = parent.AppendSubMenu(submenu, 'Image','Import image file')
for reader in self.ImportImageReaderlist:
item = submenu.Append(wx.ID_ANY,u'from '+reader.formatName+u' file',reader.longFormatName)
self.ImportMenuId[item.GetId()] = reader
self.Bind(wx.EVT_MENU, self.OnImportImage, id=item.GetId())
item = submenu.Append(wx.ID_ANY,'guess format from file','Import image data, use file to try to determine format')
self.Bind(wx.EVT_MENU, self.OnImportImage, id=item.GetId())
def OnImportImage(self,event):
'''Called in response to an Import/Image/... menu item
to read an image from a file. Like all the other imports,
dict self.ImportMenuId is used to look up the specific
reader item associated with the menu item, which will be
None for the last menu item, which is the "guess" option
where all appropriate formats will be tried.
A reader object is filled each time an image is read.
'''
self.CheckNotebook()
# look up which format was requested
reqrdr = self.ImportMenuId.get(event.GetId())
rdlist = self.OnImportGeneric(reqrdr,self.ImportImageReaderlist,
'image',multiple=True,Preview=False,load2Tree=True)
if rdlist:
self.GPXtree.SelectItem(GetGPXtreeItemId(self,self.Image,'Image Controls')) #show last image to have been read
def _Add_ImportMenu_Sfact(self,parent):
'''configure the Import Structure Factor menus according to the readers found in _init_Imports
'''
submenu = wx.Menu()
item = parent.AppendSubMenu(submenu,'Structure Factor','Import Structure Factor data')
for reader in self.ImportSfactReaderlist:
item = submenu.Append(wx.ID_ANY,u'from '+reader.formatName+u' file',reader.longFormatName)
self.ImportMenuId[item.GetId()] = reader
self.Bind(wx.EVT_MENU, self.OnImportSfact, id=item.GetId())
item = submenu.Append(wx.ID_ANY,'guess format from file','Import Structure Factor, use file to try to determine format')
self.Bind(wx.EVT_MENU, self.OnImportSfact, id=item.GetId())
def OnImportSfact(self,event):
'''Called in response to an Import/Structure Factor/... menu item
to read single crystal datasets.
dict self.ImportMenuId is used to look up the specific
reader item associated with the menu item, which will be
None for the last menu item, which is the "guess" option
where all appropriate formats will be tried.
'''
# get a list of existing histograms
HKLFlist = []
if self.GPXtree.GetCount():
item, cookie = self.GPXtree.GetFirstChild(self.root)
while item:
name = self.GPXtree.GetItemText(item)
if name.startswith('HKLF ') and name not in HKLFlist:
HKLFlist.append(name)
item, cookie = self.GPXtree.GetNextChild(self.root, cookie)
# look up which format was requested
reqrdr = self.ImportMenuId.get(event.GetId())
rdlist = self.OnImportGeneric(reqrdr,self.ImportSfactReaderlist,
'Structure Factor',multiple=True)
if len(rdlist) == 0: return
self.CheckNotebook()
newHistList = []
for rd in rdlist:
HistName = rd.objname
if len(rdlist) <= 2:
dlg = wx.TextEntryDialog( # allow editing of Structure Factor name
self, 'Enter the name for the new Structure Factor',
'Edit Structure Factor name', HistName,
style=wx.OK)
dlg.CenterOnParent()
if dlg.ShowModal() == wx.ID_OK:
HistName = dlg.GetValue()
dlg.Destroy()
HistName = 'HKLF '+G2obj.StripUnicode(HistName,'_')
# make new histogram names unique
if len(rd.Banks):
for Bank in rd.Banks:
valuesdict = {'wtFactor':1.0,'Dummy':False,'ranId':ran.randint(0,sys.maxsize),}
HistName = G2obj.MakeUniqueLabel(HistName,HKLFlist)
print (u'Read structure factor table '+HistName+u' from file '+self.lastimport)
Id = self.GPXtree.AppendItem(parent=self.root,text=HistName)
if not Bank['RefDict'].get('FF'):
Bank['RefDict']['FF'] = {}
self.GPXtree.SetItemPyData(Id,[valuesdict,Bank['RefDict']])
Sub = self.GPXtree.AppendItem(Id,text='Instrument Parameters')
self.GPXtree.SetItemPyData(Sub,copy.copy(rd.Parameters))
self.GPXtree.SetItemPyData(
self.GPXtree.AppendItem(Id,text='Reflection List'),{}) #dummy entry for GUI use
newHistList.append(HistName)
else:
valuesdict = {'wtFactor':1.0,'Dummy':False,'ranId':ran.randint(0,sys.maxsize),}
HistName = G2obj.MakeUniqueLabel(HistName,HKLFlist)
print (u'Read structure factor table '+HistName+u' from file '+self.lastimport)
if not rd.RefDict.get('FF'):
rd.RefDict['FF'] = {}
Id = self.GPXtree.AppendItem(parent=self.root,text=HistName)
self.GPXtree.SetItemPyData(Id,[valuesdict,rd.RefDict])
Sub = self.GPXtree.AppendItem(Id,text='Instrument Parameters')
self.GPXtree.SetItemPyData(Sub,rd.Parameters)
self.GPXtree.SetItemPyData(
self.GPXtree.AppendItem(Id,text='Reflection List'),{}) #dummy entry for GUI use
newHistList.append(HistName)
self.GPXtree.SelectItem(Id)
self.GPXtree.Expand(Id)
self.Sngl = True
if not newHistList: return # somehow, no new histograms
# make a list of phase names
phaseRIdList,usedHistograms = self.GetPhaseInfofromTree()
phaseNameList = list(usedHistograms.keys()) # phase names in use
if not phaseNameList: return # no phases yet, nothing to do
header = 'Select phase(s) to add the new\nsingle crystal dataset(s) to:'
for Name in newHistList:
header += '\n '+str(Name)
result = G2G.ItemSelector(phaseNameList,self,header,header='Add to phase(s)',multiple=True)
if not result: return
# connect new phases to histograms
sub = GetGPXtreeItemId(self,self.root,'Phases')
if not sub:
raise Exception('ERROR -- why are there no phases here?')
wx.BeginBusyCursor()
item, cookie = self.GPXtree.GetFirstChild(sub)
iph = -1
while item: # loop over (new) phases
iph += 1
data = self.GPXtree.GetItemPyData(item)
item, cookie = self.GPXtree.GetNextChild(sub, cookie)
if iph not in result: continue
generalData = data['General']
SGData = generalData['SGData']
Super = generalData.get('Super',0)
SuperVec = []
if Super:
SuperVec = np.array(generalData['SuperVec'][0])
UseList = data['Histograms']
for histoName in newHistList:
#redo UpdateHKLFdata(histoName) here:
Id = GetGPXtreeItemId(self,self.root,histoName)
refDict,reflData = self.GPXtree.GetItemPyData(Id)
UseList[histoName] = SetDefaultDData(reflData['Type'],histoName)
G,g = G2lat.cell2Gmat(generalData['Cell'][1:7])
if 'TwMax' in reflData: #nonmerohedral twins present
UseList[histoName]['Twins'] = []
for iT in range(reflData['TwMax'][0]+1):
if iT in reflData['TwMax'][1]:
UseList[histoName]['Twins'].append([False,0.0])
else:
UseList[histoName]['Twins'].append([np.array([[1,0,0],[0,1,0],[0,0,1]]),[1.0,False,reflData['TwMax'][0]]])
else: #no nonmerohedral twins
UseList[histoName]['Twins'] = [[np.array([[1,0,0],[0,1,0],[0,0,1]]),[1.0,False,0]],]
for iref,ref in enumerate(reflData['RefList']):
hkl = ref[:3]
if Super:
H = list(hkl+SuperVec*ref[3])
else:
H = hkl
ref[4+Super] = np.sqrt(1./G2lat.calc_rDsq2(H,G))
iabsnt,mul,Uniq,phi = G2spc.GenHKLf(H,SGData)
if iabsnt: #flag space gp. absences
if Super:
if not ref[2+Super]:
ref[3+Super] = 0
else:
ref[3+Super] = 1 #twin id?
else:
ref[3] = 0
wx.EndBusyCursor()
self.EnableRefineCommand()
return # success
def _Add_ImportMenu_powder(self,parent):
'''configure the Powder Data menus according to the readers found in _init_Imports
'''
submenu = wx.Menu()
item = parent.AppendSubMenu(submenu,'Powder Data','Import Powder data')
for reader in self.ImportPowderReaderlist:
item = submenu.Append(wx.ID_ANY,u'from '+reader.formatName+u' file',reader.longFormatName)
self.ImportMenuId[item.GetId()] = reader
self.Bind(wx.EVT_MENU, self.OnImportPowder, id=item.GetId())
item = submenu.Append(wx.ID_ANY,'guess format from file','Import powder data, use file to try to determine format')
self.Bind(wx.EVT_MENU, self.OnImportPowder, id=item.GetId())
submenu.AppendSeparator()
item = submenu.Append(wx.ID_ANY,'Simulate a dataset','Create a powder data set entry that will be simulated')
self.Bind(wx.EVT_MENU, self.OnDummyPowder, id=item.GetId())
item = submenu.Append(wx.ID_ANY,'Auto Import','Import data files as found')
def OnAutoImport(event):
G2G.AutoLoadFiles(self,FileTyp='pwd')
self.Bind(wx.EVT_MENU, OnAutoImport, id=item.GetId())
item = submenu.Append(wx.ID_ANY,'Fit instr. profile from fundamental parms...','')
self.Bind(wx.EVT_MENU, self.OnPowderFPA, id=item.GetId())
def OpenPowderInstprm(self,instfile):
'''Read a GSAS-II (new) instrument parameter file
:param str instfile: name of instrument parameter file
'''
File = open(instfile,'r')
lines = File.readlines()
File.close()
return lines
def ReadPowderInstprm(self,instLines,bank,databanks,rd):
'''Read lines from a GSAS-II (new) instrument parameter file
similar to G2pwdGUI.OnLoad
If the instprm file has multiple banks, each with a header #Bank n: ..., this
finds the matching bank number to load (non-matching bank numbers are a potential problem).
Note that this routine performs a similar role to :func:`GSASIIfiles.ReadPowderInstprm`,
but this will call a GUI routine for selection when needed. TODO: refactor to combine
:param list instLines: strings from GSAS-II parameter file; can be concatenated with ';'
:param int bank: bank number to check when the instprm file has '#Bank n:...' lines;
when bank = n those parameters are used, otherwise that set is skipped. Ignored if '#Bank n:'
is not present. NB: this kind of instprm file is made by the Save all profile command in Instrument Parameters.
:returns: the Inst instrument parameter dict if OK, or
an error message (str) if the read failed
'''
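# A hedged sketch of the expected file contents (hypothetical values); the
# parser below requires 'GSAS-II' in the first line, treats '#Bank n:' lines
# as bank headers, and splits the remaining lines on ';' and then on the
# first ':' of each entry:
#
#   #GSAS-II instrument parameter file
#   #Bank 1: ...
#   Type:PXC;Lam:1.54056;Zero:0.0;Polariz.:0.7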
if 'GSAS-II' not in instLines[0]: # not a valid file
return 'Not a valid GSAS-II instprm file'
newItems = []
newVals = []
Found = False
il = 0
if bank is None: # no bank was specified in the input file, is more than one present in file?
banklist = set([])
for S in instLines:
if S[0] == '#' and 'Bank' in S:
banklist.add(int(S.split(':')[0].split()[1]))
if len(banklist) > 1: # yes, the user must make a selection
choices = [str(i) for i in banklist]
bank = int(G2G.ItemSelector(choices,self,multiple=False))
else:
bank = 1
rd.powderentry[2] = bank
while il < len(instLines):
S = instLines[il]
if S[0] == '#':
if Found:
break
if 'Bank' in S:
if bank == int(S.split(':')[0].split()[1]):
il += 1
S = instLines[il]
else:
il += 1
S = instLines[il]
while il < len(instLines) and '#Bank' not in S:
il += 1
if il == len(instLines):
return 'Bank %d not found in instprm file'%(bank)
S = instLines[il]
continue
else: #a non #Bank file
il += 1
S = instLines[il]
Found = True
if '"""' in S:
delim = '"""'
elif "'''" in S:
delim = "'''"
else:
S = S.replace(' ','')
SS = S.strip().split(';')
for s in SS:
[item,val] = s.split(':',1)
newItems.append(item)
try:
newVals.append(float(val))
except ValueError:
newVals.append(val)
il += 1
continue
# read multiline values, delimited by ''' or """
item,val = S.strip().split(':',1)
val = val.replace(delim,'').rstrip()
val += '\n'
while True:
il += 1
if il >= len(instLines): break
S = instLines[il]
if delim in S:
val += S.replace(delim,'').rstrip()
val += '\n'
break
else:
val += S.rstrip()
val += '\n'
newItems.append(item)
newVals.append(val)
il += 1
if 'Lam1' in newItems:
rd.Sample.update({'Type':'Bragg-Brentano','Shift':[0.,False],'Transparency':[0.,False],
'SurfRoughA':[0.,False],'SurfRoughB':[0.,False]})
else:
rd.Sample.update({'Type':'Debye-Scherrer','Absorption':[0.,False],'DisplaceX':[0.,False],'DisplaceY':[0.,False]})
return [G2fil.makeInstDict(newItems,newVals,len(newVals)*[False,]),{}]
def ReadPowderIparm(self,instfile,bank,databanks,rd):
'''Read a GSAS (old) instrument parameter file
:param str instfile: name of instrument parameter file
:param int bank: the bank number read in the raw data file
:param int databanks: the number of banks in the raw data file.
If the number of banks in the data and instrument parameter files
agree, then the sets of banks are assumed to match up and bank
is used to select the instrument parameter file. If not and not TOF,
the user is asked to make a selection.
:param obj rd: the raw data (histogram) data object. This
sets rd.instbank.
'''
if not os.path.exists(instfile): # no such file
return {}
fp = 0
try:
fp = open(instfile,'r')
Iparm = {}
for S in fp:
if '#' in S[0]:
continue
Iparm[S[:12]] = S[12:-1]
except IOError:
print(u'Error reading file: {}'.format(instfile))
if fp:
fp.close()
ibanks = int(Iparm.get('INS BANK ','1').strip())
if ibanks == 1: # there is only one bank here, return it
rd.instbank = 1
rd.powderentry[2] = 1
return Iparm
if 'PNT' in Iparm['INS HTYPE ']: #allow mismatch between banks in the data & iparm files for TOF
rd.instbank = bank
elif ibanks != databanks or bank is None:
choices = []
for i in range(1,1+ibanks):
choices.append('Bank '+str(i))
bank = 1 + G2G.BlockSelector(
choices, self,
title=u'Select an instrument parameter bank for '+
os.path.split(rd.powderentry[0])[1]+u' BANK '+str(bank)+
u'\nOr use Cancel to select from the default parameter sets',
header='Block Selector')
if bank is None: return {}
# pull out the requested bank (bank #) from the data, and change the bank number to 1
IparmS = {}
for key in Iparm:
if 'INS' in key[:3]: #skip around rubbish lines in some old iparm files
if key[4:6] == " ":
IparmS[key] = Iparm[key]
elif int(key[4:6].strip()) == bank:
IparmS[key[:4]+' 1'+key[6:]] = Iparm[key]
rd.instbank = bank
return IparmS
def GetPowderIparm(self,rd, prevIparm, lastIparmfile, lastdatafile):
'''Open and read an instrument parameter file for a data file
Returns the list of parameters used in the data tree
:param obj rd: the raw data (histogram) data object.
:param str prevIparm: not used
:param str lastIparmfile: Name of last instrument parameter
file that was read, or an empty string.
:param str lastdatafile: Name of last data file that was read.
:returns: a list of two dicts, the first containing instrument parameters
and the second used for TOF lookup tables for profile coeff.
'''
def GetDefaultParms(self,rd):
'''Solicits from the user a default set of parameters & returns an Inst parm dict
:param self: refers to the GSASII main class
:param rd: importer data structure
:returns: dict: Instrument parameter dictionary
'''
sind = lambda x: math.sin(x*math.pi/180.)
tand = lambda x: math.tan(x*math.pi/180.)
while True: # loop until we get a choice
choices = []
head = 'Select from default instrument parameters for '+rd.idstring
for l in dI.defaultIparm_lbl:
choices.append('Defaults for '+l)
res = G2G.BlockSelector(choices,ParentFrame=self,title=head,
header='Select default inst parms',useCancel=True)
if res is None: return None
rd.instfile = ''
if 'lab data' in choices[res]:
rd.Sample.update({'Type':'Bragg-Brentano','Shift':[0.,False],'Transparency':[0.,False],
'SurfRoughA':[0.,False],'SurfRoughB':[0.,False]})
else:
rd.Sample.update({'Type':'Debye-Scherrer','Absorption':[0.,False],'DisplaceX':[0.,False],
'DisplaceY':[0.,False]})
if 'Generic' in choices[res]:
dlg = G2G.MultiDataDialog(self,title='Generic TOF detector bank',
prompts=['Total FP','2-theta',],values=[25.0,150.,],
limits=[[6.,200.],[5.,175.],],formats=['%6.2f','%6.1f',])
if dlg.ShowModal() == wx.ID_OK: #strictly empirical approx.
FP,tth = dlg.GetValues()
difC = 505.632*FP*sind(tth/2.)
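# difC follows the usual TOF relation TOF = difC*d; the constant 505.632
# (= 2*252.816) assumes the total flight path FP is in meters, TOF in
# microseconds and d in Angstroms. The sig-1 and beta-1 values below are
# rough empirical starting estimates, as noted above.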
sig1 = 50.+2.5e-6*(difC/tand(tth/2.))**2
bet1 = .00226+7.76e+11/difC**4
rd.instmsg = 'default: '+dI.defaultIparm_lbl[res]
Inst = self.ReadPowderInstprm(dI.defaultIparms[res],bank,numbanks,rd)
Inst[0]['difC'] = [difC,difC,0]
Inst[0]['sig-1'] = [sig1,sig1,0]
Inst[0]['beta-1'] = [bet1,bet1,0]
return Inst #this is [Inst1,Inst2] a pair of dicts
dlg.Destroy()
else:
rd.instmsg = 'default: '+dI.defaultIparm_lbl[res]
inst1,inst2 = self.ReadPowderInstprm(dI.defaultIparms[res],bank,numbanks,rd)
if rd.instdict.get('wave'):
inst1['Lam'][0] = rd.instdict.get('wave')
inst1['Lam'][1] = rd.instdict.get('wave')
return [inst1,inst2]
# stuff we might need from the reader
filename = rd.powderentry[0]
bank = rd.powderentry[2]
numbanks = rd.numbanks
#1st priority: is there an instrument parameter file matching the current file
# with extension .instprm, .prm, .inst, or .ins? If so read it
basename = os.path.splitext(filename)[0]
for ext in '.prm','.inst','.ins','.instprm':
if self.zipfile:
instfile = G2IO.ExtractFileFromZip(self.zipfile,
selection=os.path.split(basename + ext)[1],parent=self)
if instfile == None:
continue
else:
instfile = basename + ext
if not os.path.exists(instfile):
continue
if 'instprm' in instfile:
Lines = self.OpenPowderInstprm(instfile)
instParmList = self.ReadPowderInstprm(Lines,bank,numbanks,rd) #this is [Inst1,Inst2] a pair of dicts
if 'list' in str(type(instParmList)):
rd.instfile = instfile
rd.instmsg = 'GSAS-II file '+instfile
return instParmList
else:
#print 'debug: open/read failed',instfile
pass # fail silently
else:
Iparm = self.ReadPowderIparm(instfile,bank,numbanks,rd)
if Iparm:
#print 'debug: success'
rd.instfile = instfile
rd.instmsg = instfile + ' bank ' + str(rd.instbank)
return G2fil.SetPowderInstParms(Iparm,rd)
else:
#print 'debug: open/read failed',instfile
pass # fail silently
#2nd priority: is there an instrument parameter file defined for the current data set?
# or if this is a read on a set of files, use the last one again
#if rd.instparm as found in data file header or (lastdatafile == filename and lastIparmfile):
if rd.instparm or lastIparmfile:
if rd.instparm:
instfile = os.path.join(os.path.split(filename)[0],rd.instparm)
else:
# for multiple reads of one data file, reuse the inst parm file
instfile = lastIparmfile
# if self.zipfile:
# instfile = G2IO.ExtractFileFromZip(self.zipfile,
# selection=os.path.split(instfile)[1],parent=self)
if instfile != None and os.path.exists(instfile):
#print 'debug: try read',instfile
if 'instprm' in instfile: #GSAS-II file must have .instprm as extension
Lines = self.OpenPowderInstprm(instfile)
if Lines is not None:
instParmList = self.ReadPowderInstprm(Lines,bank,numbanks,rd) #this is [Inst1,Inst2] a pair of dicts
else: #old GSAS style iparm file - could be named anything!
Iparm = self.ReadPowderIparm(instfile,bank,numbanks,rd)
if Iparm:
#print 'debug: success'
rd.instfile = instfile
rd.instmsg = instfile + ' bank ' + str(rd.instbank)
instParmList = G2fil.SetPowderInstParms(Iparm,rd) #this is [Inst1,Inst2] a pair of dicts
if 'list' in str(type(instParmList)): #record stuff & return stuff
rd.instfile = instfile
rd.instmsg = 'GSAS-II file '+instfile
return instParmList
else: #bad iparms - try default
rd.instmsg = instParmList #an error message
return GetDefaultParms(self,rd)
else:
self.ErrorDialog('Open Error',u'Error opening instrument parameter file '
+u'{} requested by file {}'.format(instfile,filename))
#Finally - ask user for Instrument parameters file - seems it can't be in a zip file
while True: # loop until we get a file that works or we get a cancel
instfile = ''
pth = G2G.GetImportPath(self)
if not pth: pth = '.'
extOrd = [0,1]
if GSASIIpath.GetConfigValue('Instprm_default',False):
extOrd = [1,0]
extList = ['GSAS iparm file (*.prm,*.inst,*.ins)|*.prm;*.inst;*.ins|','GSAS-II iparm file (*.instprm)|*.instprm|']
dlg = wx.FileDialog(self,
u'Choose inst. param file for "'+rd.idstring+u'" (or Cancel for default)',
pth, '',extList[extOrd[0]]+extList[extOrd[1]]+'All files (*.*)|*.*', wx.FD_OPEN)
if os.path.exists(lastIparmfile):
dlg.SetFilename(lastIparmfile)
if dlg.ShowModal() == wx.ID_OK:
instfile = dlg.GetPath()
dlg.Destroy()
if not instfile:
return GetDefaultParms(self,rd) #on Cancel/break
if 'instprm' in instfile:
Lines = self.OpenPowderInstprm(instfile)
if Lines is not None:
instParmList = self.ReadPowderInstprm(Lines,bank,numbanks,rd) #this is [Inst1,Inst2] a pair of dicts
if 'list' in str(type(instParmList)):
rd.instfile = instfile
rd.instmsg = 'GSAS-II file '+instfile
return instParmList
else:
rd.instmsg = instParmList #an error message
return GetDefaultParms(self,rd)
else:
Iparm = self.ReadPowderIparm(instfile,bank,numbanks,rd)
if Iparm:
#print 'debug: success with',instfile
rd.instfile = instfile
rd.instmsg = instfile + ' bank ' + str(rd.instbank)
return G2fil.SetPowderInstParms(Iparm,rd)
else:
self.ErrorDialog('Read Error',
u'Error opening/reading file {}'.format(instfile))
def EnableRefineCommand(self):
haveData = False
# check for phases connected to histograms
sub = GetGPXtreeItemId(self,self.root,'Phases')
if sub:
item, cookie = self.GPXtree.GetFirstChild(sub)
while item: # loop over phases
data = self.GPXtree.GetItemPyData(item)
item, cookie = self.GPXtree.GetNextChild(sub, cookie)
UseList = data['Histograms']
if UseList: haveData = True
if haveData:
self.dataWindow.DataMenu.Enable(G2G.wxID_DATADELETE,True)
for item in self.Refine: item.Enable(True)
else:
self.dataWindow.DataMenu.Enable(G2G.wxID_DATADELETE,False)
for item in self.Refine: item.Enable(False)
def OnImportPowder(self,event):
'''Called in response to an Import/Powder Data/... menu item
to read a powder diffraction data set.
dict self.ImportMenuId is used to look up the specific
reader item associated with the menu item, which will be
None for the last menu item, which is the "guess" option
where all appropriate formats will be tried.
Also reads an instrument parameter file for each dataset.
'''
# get a list of existing histograms
PWDRlist = []
if self.GPXtree.GetCount():
item, cookie = self.GPXtree.GetFirstChild(self.root)
while item:
name = self.GPXtree.GetItemText(item)
if name.startswith('PWDR ') and name not in PWDRlist:
PWDRlist.append(name)
item, cookie = self.GPXtree.GetNextChild(self.root, cookie)
# look up which format was requested
reqrdr = self.ImportMenuId.get(event.GetId())
rdlist = self.OnImportGeneric(
reqrdr,self.ImportPowderReaderlist,'Powder Data',multiple=True)
if len(rdlist) == 0: return
self.CheckNotebook()
Iparm = None
lastIparmfile = ''
lastdatafile = ''
newHistList = []
# lastVals = []
self.EnablePlot = False
Iparms = {}
for rd in rdlist:
if 'Instrument Parameters' in rd.pwdparms:
Iparm1,Iparm2 = rd.pwdparms['Instrument Parameters']
elif Iparms and not lastIparmfile:
Iparm1,Iparm2 = Iparms
else:
# get instrument parameters for each dataset, unless already set
# if lastIparmfile: # is this histogram like previous?
# if lastVals != (rd.powderdata[0].min(),rd.powderdata[0].max(),len(rd.powderdata[0])):
# lastIparmfile = ''
Iparms = self.GetPowderIparm(rd, Iparm, lastIparmfile, lastdatafile)
if not Iparms: #may have bailed out
Id = 0
continue
Iparm1,Iparm2 = Iparms
if rd.repeat_instparm:
lastIparmfile = rd.instfile
else:
Iparms = {}
# lastVals = (rd.powderdata[0].min(),rd.powderdata[0].max(),len(rd.powderdata[0]))
# override any keys in read instrument parameters with ones set in import
for key in Iparm1:
if key in rd.instdict:
Iparm1[key] = rd.instdict[key]
lastdatafile = rd.powderentry[0]
if 'phoenix' in wx.version():
HistName = 'PWDR '+rd.idstring
else:
HistName = 'PWDR '+G2obj.StripUnicode(rd.idstring,'_')
# make new histogram names unique
if HistName in PWDRlist:
dlg = wx.MessageDialog(self,'Skip %s?'%(HistName),'Duplicate data name',wx.YES_NO)
try:
if dlg.ShowModal() == wx.ID_YES:
Id = 0
continue
finally:
dlg.Destroy()
HistName = G2obj.MakeUniqueLabel(HistName,PWDRlist)
try:
print('Read powder data '+HistName+
' from file '+G2obj.StripUnicode(rd.readfilename) +
' (format: '+ rd.formatName +
'). Inst parameters from '+G2obj.StripUnicode(rd.instmsg))
except:
print('Read powder data')
# data are read, now store them in the tree
Id = self.GPXtree.AppendItem(parent=self.root,text=HistName)
if 'T' in Iparm1['Type'][0]:
if not rd.clockWd and rd.GSAS:
rd.powderdata[0] *= 100. #put back the CW centideg correction
cw = np.diff(rd.powderdata[0])
rd.powderdata[0] = rd.powderdata[0][:-1]+cw/2.
if rd.GSAS: #NB: old GSAS wanted intensities*CW even if normalized!
npts = min(len(rd.powderdata[0]),len(rd.powderdata[1]),len(cw))
rd.powderdata[1] = rd.powderdata[1][:npts]/cw[:npts]
rd.powderdata[2] = rd.powderdata[2][:npts]*cw[:npts]**2 #1/var=w at this point
else: #NB: from topas/fullprof type files
rd.powderdata[1] = rd.powderdata[1][:-1]
rd.powderdata[2] = rd.powderdata[2][:-1]
if 'Itype' in Iparm2:
Ibeg = np.searchsorted(rd.powderdata[0],Iparm2['Tminmax'][0])
Ifin = np.searchsorted(rd.powderdata[0],Iparm2['Tminmax'][1])
rd.powderdata[0] = rd.powderdata[0][Ibeg:Ifin]
YI,WYI = G2pwd.calcIncident(Iparm2,rd.powderdata[0])
rd.powderdata[1] = rd.powderdata[1][Ibeg:Ifin]/YI
var = 1./rd.powderdata[2][Ibeg:Ifin]
var += WYI*rd.powderdata[1]**2
var /= YI**2
rd.powderdata[2] = 1./var
rd.powderdata[1] = np.where(np.isinf(rd.powderdata[1]),0.,rd.powderdata[1])
rd.powderdata[3] = np.zeros_like(rd.powderdata[0])
rd.powderdata[4] = np.zeros_like(rd.powderdata[0])
rd.powderdata[5] = np.zeros_like(rd.powderdata[0])
Ymin = np.min(rd.powderdata[1])
Ymax = np.max(rd.powderdata[1])
valuesdict = {
'wtFactor':1.0,
'Dummy':False,
'ranId':ran.randint(0,sys.maxsize),
'Offset':[0.0,0.0],'delOffset':0.02*Ymax,'refOffset':-.1*Ymax,'refDelt':0.1*Ymax,
'Yminmax':[Ymin,Ymax]
}
# apply user-supplied corrections to powder data
if 'CorrectionCode' in Iparm1:
print('Applying corrections from instprm file')
corr = Iparm1['CorrectionCode'][0]
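# 'corr' holds arbitrary Python code supplied via the instprm file and is
# executed with the reader (rd) in scope; a hypothetical snippet might look
# like:
#   rd.powderdata[1] *= 0.5   # illustrative intensity rescaling only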
try:
exec(corr)
print('done')
except Exception as err:
print(u'error: {}'.format(err))
print('with commands -------------------')
print(corr)
print('---------------------------------')
finally:
del Iparm1['CorrectionCode']
rd.Sample['ranId'] = valuesdict['ranId'] # this should be removed someday
self.GPXtree.SetItemPyData(Id,[valuesdict,rd.powderdata])
self.GPXtree.SetItemPyData(
self.GPXtree.AppendItem(Id,text='Comments'),
rd.comments)
Tmin = min(rd.powderdata[0])
Tmax = max(rd.powderdata[0])
Tmin1 = Tmin
if 'NT' in Iparm1['Type'][0] and G2lat.Pos2dsp(Iparm1,Tmin) < 0.4:
Tmin1 = G2lat.Dsp2pos(Iparm1,0.4)
self.GPXtree.SetItemPyData(
self.GPXtree.AppendItem(Id,text='Limits'),
rd.pwdparms.get('Limits',[(Tmin,Tmax),[Tmin1,Tmax]])
)
self.PatternId = GetGPXtreeItemId(self,Id,'Limits')
self.GPXtree.SetItemPyData(
self.GPXtree.AppendItem(Id,text='Background'),
rd.pwdparms.get('Background',
[['chebyschev-1',True,3,1.0,0.0,0.0],{'nDebye':0,'debyeTerms':[],'nPeaks':0,'peaksList':[],
'background PWDR':['',1.0,False]}]))
self.GPXtree.SetItemPyData(
self.GPXtree.AppendItem(Id,text='Instrument Parameters'),
[Iparm1,Iparm2])
self.GPXtree.SetItemPyData(
self.GPXtree.AppendItem(Id,text='Sample Parameters'),
rd.Sample)
self.GPXtree.SetItemPyData(
self.GPXtree.AppendItem(Id,text='Peak List')
,{'peaks':[],'sigDict':{}})
self.GPXtree.SetItemPyData(
self.GPXtree.AppendItem(Id,text='Index Peak List'),
[[],[]])
self.GPXtree.SetItemPyData(
self.GPXtree.AppendItem(Id,text='Unit Cells List'),
[])
self.GPXtree.SetItemPyData(
self.GPXtree.AppendItem(Id,text='Reflection Lists'),
{})
# if any Control values have been set, move them into tree
Controls = self.GPXtree.GetItemPyData(GetGPXtreeItemId(self,self.root, 'Controls'))
Controls.update(rd.Controls)
newHistList.append(HistName)
rd.repeat_instparm = False #clear the iparm reuse flag
else:
self.EnablePlot = True
if Id:
self.GPXtree.Expand(Id)
self.GPXtree.SelectItem(Id)
if not newHistList: return # somehow, no new histograms
# make a list of phase names
phaseRIdList,usedHistograms = self.GetPhaseInfofromTree()
phaseNameList = list(usedHistograms.keys()) # phase names in use
if not phaseNameList: return # no phases yet, nothing to do
header = 'Select phase(s) to link\nto the newly-read data:'
for Name in newHistList:
header += '\n '+str(Name)
result = G2G.ItemSelector(phaseNameList,self,header,header='Add to phase(s)',multiple=True)
if not result: return
# connect new phases to histograms
sub = GetGPXtreeItemId(self,self.root,'Phases')
if not sub:
raise Exception('ERROR -- why are there no phases here?')
item, cookie = self.GPXtree.GetFirstChild(sub)
iph = -1
while item: # loop over (new) phases
iph += 1
data = self.GPXtree.GetItemPyData(item)
item, cookie = self.GPXtree.GetNextChild(sub, cookie)
if iph not in result: continue
generalData = data['General']
SGData = generalData['SGData']
UseList = data['Histograms']
NShkl = len(G2spc.MustrainNames(SGData))
NDij = len(G2spc.HStrainNames(SGData))
for histoName in newHistList:
UseList[histoName] = SetDefaultDData('PWDR',histoName,NShkl=NShkl,NDij=NDij)
Id = GetGPXtreeItemId(self,self.root,histoName)
refList = self.GPXtree.GetItemPyData(
GetGPXtreeItemId(self,Id,'Reflection Lists'))
refList[generalData['Name']] = []
self.EnableRefineCommand()
return # success
def OnDummyPowder(self,event):
'''Called in response to Import/Powder Data/Simulate menu item
to create a Dummy powder diffraction data set.
Reads an instrument parameter file and then gets input from the user
'''
# get a list of existing histograms
PWDRlist = []
if self.GPXtree.GetCount():
item, cookie = self.GPXtree.GetFirstChild(self.root)
while item:
name = self.GPXtree.GetItemText(item)
if name.startswith('PWDR ') and name not in PWDRlist:
PWDRlist.append(name)
item, cookie = self.GPXtree.GetNextChild(self.root, cookie)
# Initialize a base class reader
rd = G2obj.ImportPowderData(
extensionlist=tuple(),
strictExtension=False,
formatName = 'Simulate dataset',
longFormatName = 'Compute a simulated pattern')
rd.powderentry[0] = '' # no filename
# #self.powderentry[1] = pos # bank offset (N/A here)
rd.powderentry[2] = 1 # only one bank
rd.comments.append('This is a dummy dataset for powder pattern simulation')
self.CheckNotebook()
Iparm = None
lastdatafile = ''
self.zipfile = None
# get instrument parameters for it
Iparm = self.GetPowderIparm(rd, Iparm, '', lastdatafile)
if Iparm is None:
return
Iparm1, Iparm2 = Iparm
if 'T' in Iparm1['Type'][0]:
rd.idstring = ' TOF neutron simulation'
simType = 'TOF'
else:
# need to get name, 2theta start, end, step
rd.idstring = ' CW'
simType = 'CW'
if 'X' in Iparm1['Type'][0]:
rd.idstring = 'CW x-ray simulation'
else:
rd.idstring = 'CW neutron simulation'
# base initial range on wavelength
wave = Iparm1.get('Lam')
if wave:
wave = wave[0]
else:
wave = Iparm1.get('Lam1')
if wave:
wave = wave[0]
N = 0
while (N < 3): # insist on a dataset with a few points
if 'TOF' in rd.idstring:
names = ('dataset name', 'start TOF(ms)', 'end TOF(ms)', 'DT/T')
inp = [rd.idstring, 10.,80.,0.0005] # see names for what's what
dlg = G2G.ScrolledMultiEditor(
self,[inp] * len(inp),range(len(inp)),names,
header='Enter simulation name and range',
minvals=(None,.5,1.0,0.0001),
maxvals=(None,200.,200.,.001),
sizevals=((225,-1),)
)
else:
names = ('dataset name', 'start angle', 'end angle', 'step size')
if not wave or wave < 1.0:
inp = [rd.idstring, 10.,40.,0.005] # see names for what's what
else:
inp = [rd.idstring, 10.,80.,0.01] # see names for what's what
dlg = G2G.ScrolledMultiEditor(
self,[inp] * len(inp),range(len(inp)),names,
header='Enter simulation name and range',
minvals=(None,0.001,0.001,0.0001),
maxvals=(None,180.,180.,.1),
sizevals=((225,-1),)
)
dlg.CenterOnParent()
if dlg.ShowModal() == wx.ID_OK:
if inp[1] > inp[2]:
end,start,step = inp[1:]
else:
start,end,step = inp[1:]
step = abs(step)
else:
return False
if 'TOF' in rd.idstring:
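# assumption: the TOF axis is built on a logarithmic grid with a constant
# fractional step (dT/T = step); start/end were entered in ms and the
# factor of 1000. converts the starting value to microseconds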
N = (np.log(end)-np.log(start))/step
x = np.exp((np.arange(0,N))*step+np.log(start*1000.))
N = len(x)
else:
N = int((end-start)/step)+1
x = np.linspace(start,end,N,True)
N = len(x)
rd.powderdata = [
np.array(x), # x-axis values
np.zeros_like(x), # powder pattern intensities
np.ones_like(x), # 1/sig(intensity)^2 values (weights)
np.zeros_like(x), # calc. intensities (zero)
np.zeros_like(x), # calc. background (zero)
np.zeros_like(x), # obs-calc profiles
]
Tmin = rd.powderdata[0][0]
Tmax = rd.powderdata[0][-1]
# data are read, now store them in the tree
HistName = inp[0]
HistName = 'PWDR '+HistName
HistName = G2obj.MakeUniqueLabel(HistName,PWDRlist) # make new histogram names unique
Id = self.GPXtree.AppendItem(parent=self.root,text=HistName)
Ymin = np.min(rd.powderdata[1])
Ymax = np.max(rd.powderdata[1])
valuesdict = {
'wtFactor':1.0,
'Dummy':True,'simType':simType,
'ranId':ran.randint(0,sys.maxsize),
'Offset':[0.0,0.0],'delOffset':0.02*Ymax,'refOffset':-.1*Ymax,'refDelt':0.1*Ymax,
'Yminmax':[Ymin,Ymax]
}
self.GPXtree.SetItemPyData(Id,[valuesdict,rd.powderdata])
self.GPXtree.SetItemPyData(
self.GPXtree.AppendItem(Id,text='Comments'),
rd.comments)
self.GPXtree.SetItemPyData(
self.GPXtree.AppendItem(Id,text='Limits'),
[(Tmin,Tmax),[Tmin,Tmax]])
self.PatternId = GetGPXtreeItemId(self,Id,'Limits')
self.GPXtree.SetItemPyData(
self.GPXtree.AppendItem(Id,text='Background'),
[['chebyschev-1',True,3,1.0,0.0,0.0],
{'nDebye':0,'debyeTerms':[],'nPeaks':0,'peaksList':[],'background PWDR':['',1.0,False]}])
self.GPXtree.SetItemPyData(
self.GPXtree.AppendItem(Id,text='Instrument Parameters'),
[Iparm1,Iparm2])
self.GPXtree.SetItemPyData(
self.GPXtree.AppendItem(Id,text='Sample Parameters'),
rd.Sample)
self.GPXtree.SetItemPyData(
self.GPXtree.AppendItem(Id,text='Peak List')
,{'peaks':[],'sigDict':{}})
self.GPXtree.SetItemPyData(
self.GPXtree.AppendItem(Id,text='Index Peak List'),
[[],[]])
self.GPXtree.SetItemPyData(
self.GPXtree.AppendItem(Id,text='Unit Cells List'),
[])
self.GPXtree.SetItemPyData(
self.GPXtree.AppendItem(Id,text='Reflection Lists'),
{})
self.GPXtree.Expand(Id)
self.GPXtree.SelectItem(Id)
print(u'Added simulation powder data {}'.format(HistName)+
' with parameters from {}'.format(rd.instmsg))
# make a list of phase names
phaseRIdList,usedHistograms = self.GetPhaseInfofromTree()
phaseNameList = list(usedHistograms.keys()) # phase names in use
if not phaseNameList: return # no phases yet, nothing to do
header = 'Select phase(s) to add the new\npowder simulation (dummy) dataset to:'
result = G2G.ItemSelector(phaseNameList,self,header,header='Add to phase(s)',multiple=True)
if not result: return
# connect new phases to histograms
sub = GetGPXtreeItemId(self,self.root,'Phases')
if not sub:
raise Exception('ERROR -- why are there no phases here?')
item, cookie = self.GPXtree.GetFirstChild(sub)
iph = -1
while item: # loop over (new) phases
iph += 1
data = self.GPXtree.GetItemPyData(item)
item, cookie = self.GPXtree.GetNextChild(sub, cookie)
if iph not in result: continue
generalData = data['General']
SGData = generalData['SGData']
UseList = data['Histograms']
NShkl = len(G2spc.MustrainNames(SGData))
NDij = len(G2spc.HStrainNames(SGData))
UseList[HistName] = SetDefaultDData('PWDR',HistName,NShkl=NShkl,NDij=NDij)
Id = GetGPXtreeItemId(self,self.root,HistName)
refList = self.GPXtree.GetItemPyData(
GetGPXtreeItemId(self,Id,'Reflection Lists'))
refList[generalData['Name']] = []
cId = GetGPXtreeItemId(self,self.root, 'Controls')
Controls = self.GPXtree.GetItemPyData(cId)
Controls['max cyc'] = 0
self.EnableRefineCommand()
return # success
def AddSimulatedPowder(self,ttArr,intArr,HistName,Lam1,Lam2):
'''Create a PWDR entry for a computed powder pattern
'''
# get a list of existing histograms
PWDRlist = []
if self.GPXtree.GetCount():
item, cookie = self.GPXtree.GetFirstChild(self.root)
while item:
name = self.GPXtree.GetItemText(item)
if name.startswith('PWDR ') and name not in PWDRlist:
PWDRlist.append(name)
item, cookie = self.GPXtree.GetNextChild(self.root, cookie)
# Initialize a base class reader
rd = G2obj.ImportPowderData(
extensionlist=tuple(),
strictExtension=False,
formatName = 'FPA Simulated dataset',
longFormatName = 'Fundamental Parameters simulated pattern')
rd.powderentry[0] = '' # no filename
# #self.powderentry[1] = pos # bank offset (N/A here)
rd.powderentry[2] = 1 # only one bank
rd.comments.append('This is a powder pattern simulated with Fundamental Parameters')
self.CheckNotebook()
#self.zipfile = None
# get instrument parameters for it
rd.Sample.update({'Type':'Bragg-Brentano','Shift':[0.,False],'Transparency':[0.,False],
'SurfRoughA':[0.,False],'SurfRoughB':[0.,False]})
Iparm1, Iparm2 = G2fil.ReadPowderInstprm(dI.defaultIparms[0],1,1,rd)
rd.idstring = ' CW'
simType = 'CW'
# set wavelength
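# if two wavelengths are supplied, keep the Lam1/Lam2 pair (presumably a
# Ka1/Ka2 doublet); otherwise collapse to a single-wavelength 'Lam' entry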
if Lam2:
Iparm1['Lam1'][0] = Lam1
Iparm1['Lam2'][0] = Lam2
Iparm1['Lam1'][1] = Lam1
Iparm1['Lam2'][1] = Lam2
else:
Iparm1['Lam'] = Iparm1['Lam1']
del Iparm1['Lam1'],Iparm1['Lam2']
Iparm1['Lam'][0] = Lam1
Iparm1['Lam'][1] = Lam1
rd.powderdata = [
np.array(ttArr), # x-axis values
np.array(intArr), # powder pattern intensities
np.ones_like(ttArr), # 1/sig(intensity)^2 values (weights)
np.zeros_like(intArr), # calc. intensities (zero)
np.zeros_like(ttArr), # calc. background (zero)
np.zeros_like(ttArr), # obs-calc profiles
]
Tmin = rd.powderdata[0][0]
Tmax = rd.powderdata[0][-1]
# data are read, now store them in the tree
HistName = 'PWDR '+HistName
HistName = G2obj.MakeUniqueLabel(HistName,PWDRlist) # make new histogram names unique
Id = self.GPXtree.AppendItem(parent=self.root,text=HistName)
Ymin = np.min(rd.powderdata[1])
Ymax = np.max(rd.powderdata[1])
valuesdict = {
'wtFactor':1.0,
'Dummy':True,'simType':simType,
'ranId':ran.randint(0,sys.maxsize),
'Offset':[0.0,0.0],'delOffset':0.02*Ymax,'refOffset':-.1*Ymax,'refDelt':0.1*Ymax,
'Yminmax':[Ymin,Ymax]
}
self.GPXtree.SetItemPyData(Id,[valuesdict,rd.powderdata])
self.GPXtree.SetItemPyData(
self.GPXtree.AppendItem(Id,text='Comments'),
rd.comments)
self.GPXtree.SetItemPyData(
self.GPXtree.AppendItem(Id,text='Limits'),
[(Tmin,Tmax),[Tmin,Tmax]])
self.PatternId = GetGPXtreeItemId(self,Id,'Limits')
self.GPXtree.SetItemPyData(
self.GPXtree.AppendItem(Id,text='Background'),
[['chebyschev-1',True,3,1.0,0.0,0.0],
{'nDebye':0,'debyeTerms':[],'nPeaks':0,'peaksList':[],'background PWDR':['',1.0,False]}])
self.GPXtree.SetItemPyData(
self.GPXtree.AppendItem(Id,text='Instrument Parameters'),
[Iparm1,Iparm2])
self.GPXtree.SetItemPyData(
self.GPXtree.AppendItem(Id,text='Sample Parameters'),
rd.Sample)
self.GPXtree.SetItemPyData(
self.GPXtree.AppendItem(Id,text='Peak List')
,{'peaks':[],'sigDict':{}})
self.GPXtree.SetItemPyData(
self.GPXtree.AppendItem(Id,text='Index Peak List'),
[[],[]])
self.GPXtree.SetItemPyData(
self.GPXtree.AppendItem(Id,text='Unit Cells List'),
[])
self.GPXtree.SetItemPyData(
self.GPXtree.AppendItem(Id,text='Reflection Lists'),
{})
self.GPXtree.Expand(Id)
self.GPXtree.SelectItem(Id)
print(u'Added simulation powder data {}'.format(HistName))
return Id
def OnPreferences(self,event):
'Edit the GSAS-II configuration variables'
dlg = G2G.SelectConfigSetting(self)
dlg.ShowModal() # return value is not used here
dlg.Destroy()
def EditProxyInfo(self,event):
'''Edit the proxy information used by subversion
'''
h,p,e = host,port,etc = GSASIIpath.getsvnProxy()
labels = ['Proxy address','proxy port']
values = [host,port]
i = 1
for item in etc:
i += 1
labels.append('extra svn arg #'+str(i))
values.append(item)
msg = '''This dialog allows customization of the subversion (svn)
command. If a proxy server is needed, the address/host and port
can be supplied here. This will generate command-line options
--config-option servers:global:http-proxy-host=*host*
--config-option servers:global:http-proxy-port=*port*
Additional subversion command line options can be supplied here
by pressing the '+' button. As examples of options that might be of
value, use two extra lines to add:
--config-dir
DIR
to specify an alternate configuration location.
Or, use four extra lines to add
--config-option
servers:global:http-proxy-username=*account*
--config-option
servers:global:http-proxy-password=*password*
to specify a proxy user name and password.
Note that strings marked like *host* or *port* are items that will be configured
by the user. See http://svnbook.red-bean.com for more information on
subversion.
'''
dlg = G2G.MultiStringDialog(self,'Enter proxy values',
labels,values,size=300,addRows=True,hlp=msg)
if dlg.Show():
values = dlg.GetValues()
h,p = values[:2]
e = values[2:]
dlg.Destroy()
if h != host or p != port or etc != e:
localproxy = proxyinfo = os.path.join(
os.path.expanduser('~/.G2local/'),
"proxyinfo.txt")
if not os.path.exists(proxyinfo):
proxyinfo = os.path.join(GSASIIpath.path2GSAS2,"proxyinfo.txt")
GSASIIpath.setsvnProxy(h,p,e)
if not h.strip() and not e:
if os.path.exists(localproxy): os.remove(localproxy)
if os.path.exists(proxyinfo): os.remove(proxyinfo)
return
try:
fp = open(proxyinfo,'w')
except:
fp = open(localproxy,'w')
proxyinfo = localproxy
try:
fp.write(h.strip()+'\n')
fp.write(p.strip()+'\n')
for i in e:
if i.strip():
fp.write(i.strip()+'\n')
fp.close()
except Exception as err:
print('Error writing file {}:\n{}'.format(proxyinfo,err))
print('File {} written'.format(proxyinfo))
def _Add_ImportMenu_smallangle(self,parent):
'''configure the Small Angle Data menus according to the readers found in _init_Imports
'''
submenu = wx.Menu()
item = parent.AppendSubMenu(submenu,'Small Angle Data','Import small angle data')
for reader in self.ImportSmallAngleReaderlist:
item = submenu.Append(wx.ID_ANY,u'from '+reader.formatName+u' file',reader.longFormatName)
self.ImportMenuId[item.GetId()] = reader
self.Bind(wx.EVT_MENU, self.OnImportSmallAngle, id=item.GetId())
# item = submenu.Append(wx.ID_ANY,
# help='Import small angle data, use file to try to determine format',
# kind=wx.ITEM_NORMAL,text='guess format from file')
# self.Bind(wx.EVT_MENU, self.OnImportSmallAngle, id=item.GetId())
def OnImportSmallAngle(self,event):
'''Called in response to an Import/Small Angle Data/... menu item
to read a small angle diffraction data set.
dict self.ImportMenuId is used to look up the specific
reader item associated with the menu item, which will be
None for the last menu item, which is the "guess" option
where all appropriate formats will be tried.
'''
def GetSASDIparm(reader):
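# build a minimal instrument parameter dict (Type, Lam, Azimuth) from the
# values the reader stored in instdict; the second, empty dict is the usual
# placeholder for TOF lookup tables, presumably not needed for small angle data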
parm = reader.instdict
Iparm = {'Type':[parm['type'],parm['type'],0],'Lam':[parm['wave'],
parm['wave'],0],'Azimuth':[0.,0.,0]}
return Iparm,{}
# get a list of existing histograms
SASDlist = []
if self.GPXtree.GetCount():
item, cookie = self.GPXtree.GetFirstChild(self.root)
while item:
name = self.GPXtree.GetItemText(item)
if name.startswith('SASD ') and name not in SASDlist:
SASDlist.append(name)
item, cookie = self.GPXtree.GetNextChild(self.root, cookie)
# look up which format was requested
reqrdr = self.ImportMenuId.get(event.GetId())
rdlist = self.OnImportGeneric(
reqrdr,self.ImportSmallAngleReaderlist,'Small Angle Data',multiple=True)
if len(rdlist) == 0: return
self.CheckNotebook()
newHistList = []
self.EnablePlot = False
for rd in rdlist:
HistName = rd.idstring
HistName = 'SASD '+HistName
# make new histogram names unique
HistName = G2obj.MakeUniqueLabel(HistName,SASDlist)
print ('Read small angle data '+HistName+ \
' from file '+self.lastimport)
# data are read, now store them in the tree
Id = self.GPXtree.AppendItem(parent=self.root,text=HistName)
Iparm1,Iparm2 = GetSASDIparm(rd)
# if 'T' in Iparm1['Type'][0]:
# if not rd.clockWd and rd.GSAS:
# rd.powderdata[0] *= 100. #put back the CW centideg correction
# cw = np.diff(rd.powderdata[0])
# rd.powderdata[0] = rd.powderdata[0][:-1]+cw/2.
# rd.powderdata[1] = rd.powderdata[1][:-1]/cw
# rd.powderdata[2] = rd.powderdata[2][:-1]*cw**2 #1/var=w at this point
# if 'Itype' in Iparm2:
# Ibeg = np.searchsorted(rd.powderdata[0],Iparm2['Tminmax'][0])
# Ifin = np.searchsorted(rd.powderdata[0],Iparm2['Tminmax'][1])
# rd.powderdata[0] = rd.powderdata[0][Ibeg:Ifin]
# YI,WYI = G2pwd.calcIncident(Iparm2,rd.powderdata[0])
# rd.powderdata[1] = rd.powderdata[1][Ibeg:Ifin]/YI
# var = 1./rd.powderdata[2][Ibeg:Ifin]
# var += WYI*rd.powderdata[1]**2
# var /= YI**2
# rd.powderdata[2] = 1./var
# rd.powderdata[3] = np.zeros_like(rd.powderdata[0])
# rd.powderdata[4] = np.zeros_like(rd.powderdata[0])
# rd.powderdata[5] = np.zeros_like(rd.powderdata[0])
Tmin = min(rd.smallangledata[0])
Tmax = max(rd.smallangledata[0])
valuesdict = {
'wtFactor':1.0,
'Dummy':False,
'ranId':ran.randint(0,sys.maxsize),
'Offset':[0.0,0.0],
}
rd.Sample['ranId'] = valuesdict['ranId'] # this should be removed someday
self.GPXtree.SetItemPyData(Id,[valuesdict,rd.smallangledata])
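            # the main tree entry holds [plot/weight controls dict, data arrays];
            # the child items added below (Comments, Limits, Instrument Parameters,
            # Substances, Sample Parameters, Models) each carry their own data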
self.GPXtree.SetItemPyData(
self.GPXtree.AppendItem(Id,text='Comments'),
rd.comments)
self.GPXtree.SetItemPyData(
self.GPXtree.AppendItem(Id,text='Limits'),
[(Tmin,Tmax),[Tmin,Tmax]])
self.PatternId = GetGPXtreeItemId(self,Id,'Limits')
self.GPXtree.SetItemPyData(
self.GPXtree.AppendItem(Id,text='Instrument Parameters'),
[Iparm1,Iparm2])
self.GPXtree.SetItemPyData(
self.GPXtree.AppendItem(Id,text='Substances'),G2pdG.SetDefaultSubstances())
self.GPXtree.SetItemPyData(
self.GPXtree.AppendItem(Id,text='Sample Parameters'),
rd.Sample)
self.GPXtree.SetItemPyData(
self.GPXtree.AppendItem(Id,text='Models'),G2pdG.SetDefaultSASDModel())
newHistList.append(HistName)
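        # for...else: the else block runs once the loop completes (there is no break),
        # re-enabling plotting and selecting the last histogram that was added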
else:
self.EnablePlot = True
self.GPXtree.Expand(Id)
self.GPXtree.SelectItem(Id)
if not newHistList: return # somehow, no new histograms
return # success
def _Add_ImportMenu_reflectometry(self,parent):
        '''configure the reflectometry Data menus according to the readers found in _init_Imports
'''
submenu = wx.Menu()
item = parent.AppendSubMenu(submenu,'Reflectometry Data','Import reflectometry data')
for reader in self.ImportReflectometryReaderlist:
item = submenu.Append(wx.ID_ANY,u'from '+reader.formatName+u' file',reader.longFormatName)
self.ImportMenuId[item.GetId()] = reader
self.Bind(wx.EVT_MENU, self.OnImportReflectometry, id=item.GetId())
# item = submenu.Append(wx.ID_ANY,
# help='Import reflectometry data, use file to try to determine format',
# kind=wx.ITEM_NORMAL,text='guess format from file')
# self.Bind(wx.EVT_MENU, self.OnImportReflectometry, id=item.GetId())
def OnImportReflectometry(self,event):
'''Called in response to an Import/Reflectometry Data/... menu item
to read a reflectometry data set.
        The dict self.ImportMenuId is used to look up the specific
        reader associated with the menu item; the reader is None for
        the last menu item (the "guess" option), in which case all
        appropriate formats are tried.
'''
def GetREFDIparm(reader):
parm = reader.instdict
Iparm = {'Type':[parm['type'],parm['type'],0],'Lam':[parm['wave'],
parm['wave'],0],'Azimuth':[0.,0.,0]}
return Iparm,{}
# get a list of existing histograms
REFDlist = []
if self.GPXtree.GetCount():
item, cookie = self.GPXtree.GetFirstChild(self.root)
while item:
name = self.GPXtree.GetItemText(item)
if name.startswith('REFD ') and name not in REFDlist:
REFDlist.append(name)
item, cookie = self.GPXtree.GetNextChild(self.root, cookie)
# look up which format was requested
reqrdr = self.ImportMenuId.get(event.GetId())
rdlist = self.OnImportGeneric(
reqrdr,self.ImportReflectometryReaderlist,'Reflectometry Data',multiple=True)
if len(rdlist) == 0: return
self.CheckNotebook()
newHistList = []
self.EnablePlot = False
for rd in rdlist:
HistName = rd.idstring
HistName = 'REFD '+HistName
# make new histogram names unique
HistName = G2obj.MakeUniqueLabel(HistName,REFDlist)
print ('Read reflectometry data '+HistName+ \
' from file '+self.lastimport)
# data are read, now store them in the tree
Id = self.GPXtree.AppendItem(parent=self.root,text=HistName)
Iparm1,Iparm2 = GetREFDIparm(rd)
# if 'T' in Iparm1['Type'][0]:
# if not rd.clockWd and rd.GSAS:
# rd.powderdata[0] *= 100. #put back the CW centideg correction
# cw = np.diff(rd.powderdata[0])
# rd.powderdata[0] = rd.powderdata[0][:-1]+cw/2.
# rd.powderdata[1] = rd.powderdata[1][:-1]/cw
# rd.powderdata[2] = rd.powderdata[2][:-1]*cw**2 #1/var=w at this point
# if 'Itype' in Iparm2:
# Ibeg = np.searchsorted(rd.powderdata[0],Iparm2['Tminmax'][0])
# Ifin = np.searchsorted(rd.powderdata[0],Iparm2['Tminmax'][1])
# rd.powderdata[0] = rd.powderdata[0][Ibeg:Ifin]
# YI,WYI = G2pwd.calcIncident(Iparm2,rd.powderdata[0])
# rd.powderdata[1] = rd.powderdata[1][Ibeg:Ifin]/YI
# var = 1./rd.powderdata[2][Ibeg:Ifin]
# var += WYI*rd.powderdata[1]**2
# var /= YI**2
# rd.powderdata[2] = 1./var
# rd.powderdata[3] = np.zeros_like(rd.powderdata[0])
# rd.powderdata[4] = np.zeros_like(rd.powderdata[0])
# rd.powderdata[5] = np.zeros_like(rd.powderdata[0])
Tmin = min(rd.reflectometrydata[0])
Tmax = max(rd.reflectometrydata[0])
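            # column 5 of the reflectometry data presumably holds dQ values;
            # ifDQ records whether any resolution information was supplied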
ifDQ = np.any(rd.reflectometrydata[5])
valuesdict = {
'wtFactor':1.0,
'Dummy':False,
'ranId':ran.randint(0,sys.maxsize),
'Offset':[0.0,0.0],
'ifDQ':ifDQ
}
rd.Sample['ranId'] = valuesdict['ranId'] # this should be removed someday
self.GPXtree.SetItemPyData(Id,[valuesdict,rd.reflectometrydata])
self.GPXtree.SetItemPyData(
self.GPXtree.AppendItem(Id,text='Comments'),
rd.comments)
self.GPXtree.SetItemPyData(
self.GPXtree.AppendItem(Id,text='Limits'),
[(Tmin,Tmax),[Tmin,Tmax]])
self.PatternId = GetGPXtreeItemId(self,Id,'Limits')
self.GPXtree.SetItemPyData(
self.GPXtree.AppendItem(Id,text='Instrument Parameters'),
[Iparm1,Iparm2])
self.GPXtree.SetItemPyData(
self.GPXtree.AppendItem(Id,text='Substances'),G2pdG.SetDefaultSubstances())
self.GPXtree.SetItemPyData(
self.GPXtree.AppendItem(Id,text='Sample Parameters'),
rd.Sample)
self.GPXtree.SetItemPyData(
self.GPXtree.AppendItem(Id,text='Models'),G2pdG.SetDefaultREFDModel())
newHistList.append(HistName)
else:
self.EnablePlot = True
self.GPXtree.Expand(Id)
self.GPXtree.SelectItem(Id)
if not newHistList: return # somehow, no new histograms
return # success
def _Add_ImportMenu_PDF(self,parent):
        '''configure the PDF Data menus according to the readers found in _init_Imports
'''
submenu = wx.Menu()
item = parent.AppendSubMenu(submenu,'PDF G(R) Data','Import PDF G(R) data')
for reader in self.ImportPDFReaderlist:
item = submenu.Append(wx.ID_ANY,u'from '+reader.formatName+u' file',reader.longFormatName)
self.ImportMenuId[item.GetId()] = reader
self.Bind(wx.EVT_MENU, self.OnImportPDF, id=item.GetId())
submenu.AppendSeparator()
item = submenu.Append(wx.ID_ANY,'Auto Import','Import PDF files as found')
def OnAutoImport(event):
G2G.AutoLoadFiles(self,FileTyp='gr')
self.Bind(wx.EVT_MENU, OnAutoImport, id=item.GetId())
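        # G2G.AutoLoadFiles presumably watches a directory and imports PDF (.gr)
        # files as they appear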
# item = submenu.Append(wx.ID_ANY,
# help='Import reflectometry data, use file to try to determine format',
# kind=wx.ITEM_NORMAL,text='guess format from file')
# self.Bind(wx.EVT_MENU, self.OnImportReflectometry, id=item.GetId())
def OnImportPDF(self,event):
'''Called in response to an Import/PDF G(R) Data/... menu item
to read a PDF G(R) data set.
        The dict self.ImportMenuId is used to look up the specific
        reader associated with the menu item; the reader is None for
        the last menu item (the "guess" option), in which case all
        appropriate formats are tried.
'''
# get a list of existing histograms
PDFlist = []
if self.GPXtree.GetCount():
item, cookie = self.GPXtree.GetFirstChild(self.root)
while item:
name = self.GPXtree.GetItemText(item)
if name.startswith('PDF ') and name not in PDFlist:
PDFlist.append(name)
item, cookie = self.GPXtree.GetNextChild(self.root, cookie)
# look up which format was requested
reqrdr = self.ImportMenuId.get(event.GetId())
rdlist = self.OnImportGeneric(
reqrdr,self.ImportPDFReaderlist,'PDF G(R) Data',multiple=True)
if len(rdlist) == 0: return
self.CheckNotebook()
newHistList = []
self.EnablePlot = False
for rd in rdlist:
HistName = rd.idstring
HistName = 'PDF '+HistName
# make new histogram names unique
HistName = G2obj.MakeUniqueLabel(HistName,PDFlist)
print ('Read PDF G(R) data '+HistName+ \
' from file '+self.lastimport)
# data are read, now store them in the tree
Id = self.GPXtree.AppendItem(self.root,text=HistName)
Ymin = np.min(rd.pdfdata[1])
Ymax = np.max(rd.pdfdata[1])
valuesdict = {
'wtFactor':1.0,'Dummy':False,'ranId':ran.randint(0,sys.maxsize),
'Offset':[0.0,0.0],'delOffset':0.02*Ymax,
'Yminmax':[Ymin,Ymax],
}
self.GPXtree.SetItemPyData(
self.GPXtree.AppendItem(Id,text='PDF Controls'),
{'G(R)':[valuesdict,rd.pdfdata,HistName],
'diffGRname':'','diffMult':1.0,'Rmax':Ymax,})
            self.GPXtree.SetItemPyData(self.GPXtree.AppendItem(Id,text='PDF Peaks'),
                {'Limits':[1.,5.],'Background':[2,[0.,-0.2*np.pi],False],'Peaks':[]})
            newHistList.append(HistName)    # record the new histogram; otherwise the check below always reports none
else:
self.EnablePlot = True
self.GPXtree.Expand(Id)
self.GPXtree.SelectItem(Id)
if not newHistList: return # somehow, no new histograms
return # success
def AddToNotebook(self,text):
Id = GetGPXtreeItemId(self,self.root,'Notebook')
data = self.GPXtree.GetItemPyData(Id)
data.append('Notebook entry @ %s: %s'%(time.ctime(),text))
###############################################################################
#Command logging
###############################################################################
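    # NB: the command-logging (macro) code below is legacy; the Macro menu is only
    # created when the Enable_logging config variable is set (see FillMainMenu) and
    # the old keyword-style Menu.Append calls used here may not work with newer wxPython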
def OnMacroRecordStatus(self,event,setvalue=None):
'''Called when the record macro menu item is used which toggles the
value. Alternately a value to be set can be provided. Note that this
routine is made more complex because on the Mac there are lots of menu
items (listed in self.MacroStatusList) and this loops over all of them.
'''
nextvalue = log.ShowLogStatus() != True
if setvalue is not None:
nextvalue = setvalue
if nextvalue:
log.LogOn()
set2 = True
else:
log.LogOff()
set2 = False
for menuitem in self.MacroStatusList:
menuitem.Check(set2)
def _init_Macro(self):
'''Define the items in the macro menu.
'''
menu = self.MacroMenu
item = menu.Append(
help='Start or stop recording of menu actions, etc.', id=wx.ID_ANY,
kind=wx.ITEM_CHECK,text='Record actions')
self.MacroStatusList.append(item)
item.Check(log.ShowLogStatus())
self.Bind(wx.EVT_MENU, self.OnMacroRecordStatus, item)
# this may only be of value for development work
item = menu.Append(
help='Show logged commands', id=wx.ID_ANY,
kind=wx.ITEM_NORMAL,text='Show log')
def OnShowLog(event):
print (70*'=')
print ('List of logged actions')
for i,line in enumerate(log.G2logList):
if line: print ('%d %s'%(i,line))
print (70*'=')
self.Bind(wx.EVT_MENU, OnShowLog, item)
item = menu.Append(
help='Clear logged commands', id=wx.ID_ANY,
kind=wx.ITEM_NORMAL,text='Clear log')
def OnClearLog(event): log.G2logList=[None]
self.Bind(wx.EVT_MENU, OnClearLog, item)
item = menu.Append(
help='Save logged commands to file', id=wx.ID_ANY,
kind=wx.ITEM_NORMAL,text='Save log')
def OnSaveLog(event):
defnam = os.path.splitext(os.path.split(self.GSASprojectfile)[1])[0]+'.gcmd'
dlg = wx.FileDialog(self,
                'Choose a file to save past actions', '.', defnam,
'GSAS-II cmd output (*.gcmd)|*.gcmd',
wx.FD_SAVE|wx.FD_OVERWRITE_PROMPT)
dlg.CenterOnParent()
try:
if dlg.ShowModal() == wx.ID_OK:
filename = dlg.GetPath()
# make sure extension is correct
filename = os.path.splitext(filename)[0]+'.gcmd'
else:
filename = None
finally:
dlg.Destroy()
if filename:
fp = open(filename,'wb')
                fp.write((str(len(log.G2logList)-1)+'\n').encode())   # encode: file is opened in binary mode for pickle
for item in log.G2logList:
if item: cPickle.dump(item,fp)
fp.close()
self.Bind(wx.EVT_MENU, OnSaveLog, item)
item = menu.Append(
help='Load logged commands from file', id=wx.ID_ANY,
kind=wx.ITEM_NORMAL,text='Load log')
def OnLoadLog(event):
# this appends. Perhaps we should ask to clear?
defnam = os.path.splitext(
os.path.split(self.GSASprojectfile)[1])[0]+'.gcmd'
dlg = wx.FileDialog(self,
                'Choose a file to read saved actions', '.', defnam,
'GSAS-II cmd output (*.gcmd)|*.gcmd',
wx.FD_OPEN)
dlg.CenterOnParent()
try:
if dlg.ShowModal() == wx.ID_OK:
filename = dlg.GetPath()
# make sure extension is correct
filename = os.path.splitext(filename)[0]+'.gcmd'
else:
filename = None
finally:
dlg.Destroy()
if filename and os.path.exists(filename):
fp = open(filename,'rb')
lines = fp.readline()
for i in range(int(lines)):
log.G2logList.append(cPickle.load(fp))
fp.close()
self.Bind(wx.EVT_MENU, OnLoadLog, item)
item = menu.Append(
help='Replay saved commands', id=wx.ID_ANY,
kind=wx.ITEM_NORMAL,text='Replay log')
self.Bind(wx.EVT_MENU, log.ReplayLog, item)
# End of logging ##############################################################
def _init_Exports(self,menu):
'''Find exporter routines and add them into menus
'''
# set up the top-level menus
projectmenu = wx.Menu()
item = menu.AppendSubMenu(projectmenu,'Entire project as','Export entire project')
self.ExportNonSeq.append([menu,item.Id])
phasemenu = wx.Menu()
item = menu.AppendSubMenu(phasemenu,'Phase as','Export phase or sometimes phases')
powdermenu = wx.Menu()
item = menu.AppendSubMenu(powdermenu,'Powder data as','Export powder diffraction histogram(s)')
sasdmenu = wx.Menu()
item = menu.AppendSubMenu(sasdmenu,'Small angle data as','Export small angle histogram(s)')
refdmenu = wx.Menu()
item = menu.AppendSubMenu(refdmenu,'Reflectometry data as','Export reflectometry histogram(s)')
singlemenu = wx.Menu()
item = menu.AppendSubMenu(singlemenu,'Single crystal data as','Export single crystal histogram(s)')
imagemenu = wx.Menu()
item = menu.AppendSubMenu(imagemenu,'Image data as','Export powder image(s) data')
mapmenu = wx.Menu()
item = menu.AppendSubMenu(mapmenu,'Maps as','Export density map(s)')
# sequential exports are handled differently; N.B. enabled in testSeqRefineMode
seqPhasemenu = wx.Menu()
item = menu.AppendSubMenu(seqPhasemenu,'Sequential phases','Export phases from sequential fit')
self.ExportSeq.append([menu,item.Id])
seqHistmenu = wx.Menu()
item = menu.AppendSubMenu(seqHistmenu,'Sequential histograms','Export histograms from sequential fit')
self.ExportSeq.append([menu,item.Id])
# find all the exporter files
if not self.exporterlist: # this only needs to be done once
self.exporterlist = G2fil.LoadExportRoutines(self)
# Add submenu item(s) for each Exporter by its self-declared type (can be more than one)
for obj in self.exporterlist:
#print 'exporter',obj
for typ in obj.exporttype:
if typ == "project":
submenu = projectmenu
elif typ == "phase":
submenu = phasemenu
elif typ == "powder":
submenu = powdermenu
elif typ == "single":
submenu = singlemenu
elif typ == "image":
submenu = imagemenu
elif typ == "map":
submenu = mapmenu
elif typ == "sasd":
submenu = sasdmenu
elif typ == "refd":
submenu = refdmenu
# elif typ == "pdf":
# submenu = pdfmenu
else:
print("Error, unknown type in "+str(obj))
break
item = submenu.Append(wx.ID_ANY,obj.formatName,obj.longFormatName)
self.Bind(wx.EVT_MENU, obj.Exporter, id=item.GetId())
self.ExportLookup[item.GetId()] = typ # lookup table for submenu item
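            # wire up sequential-refinement exports: only exporters that provide a
            # Writer method (checked below) get entries in the Sequential phases/histograms menus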
for lbl,submenu in (('Phase',seqPhasemenu),
('Powder',seqHistmenu),
):
if lbl.lower() in obj.exporttype:
try:
obj.Writer
except AttributeError:
continue
# define a unique event handler for this menu item
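                    # (the obj=obj,typ=lbl default arguments capture the current loop values;
                    # without them every handler would use the exporter from the last iteration)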
def seqMenuItemEventHandler(event,obj=obj,typ=lbl):
'This handler has the needed exporter/type embedded'
# lookup sequential table
Id = GetGPXtreeItemId(self,self.root,'Sequential results')
if not Id:
print('Error in Seq seqMenuItemEventHandler for ',typ,'without Seq Res table')
return
data = self.GPXtree.GetItemPyData(Id)
G2IO.ExportSequential(self,data,obj,typ)
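                    # add the menu item only if the exporter's Writer accepts a 'mode' argument;
                    # getargspec vs getfullargspec is a Python 2 vs 3 distinction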
if '2' in platform.python_version_tuple()[0]:
if 'mode' in inspect.getargspec(obj.Writer)[0]:
item = submenu.Append(wx.ID_ANY,obj.formatName,obj.longFormatName)
self.Bind(wx.EVT_MENU, seqMenuItemEventHandler, item)
else:
if 'mode' in inspect.getfullargspec(obj.Writer)[0]:
item = submenu.Append(wx.ID_ANY,obj.formatName,obj.longFormatName)
self.Bind(wx.EVT_MENU, seqMenuItemEventHandler, item)
# self.SeqExportLookup[item.GetId()] = (obj,lbl) # lookup table for submenu item
# Bind is in UpdateSeqResults
item = imagemenu.Append(wx.ID_ANY,'Multiple image controls and masks',
'Export image controls and masks for multiple images')
self.Bind(wx.EVT_MENU, self.OnSaveMultipleImg, id=item.GetId())
#code to debug an Exporter. hard-code the routine below, to allow a reload before use
# def DebugExport(event):
# print 'start reload'
# reload(G2IO)
# import G2export_pwdr as dev
# reload(dev)
# dev.ExportPowderFXYE(self).Exporter(event)
# item = menu.Append(
# wx.ID_ANY,kind=wx.ITEM_NORMAL,
# help="debug exporter",text="test Export FXYE")
# self.Bind(wx.EVT_MENU, DebugExport, id=item.GetId())
# # #self.ExportLookup[item.GetId()] = 'image'
# self.ExportLookup[item.GetId()] = 'powder'
###############################################################################
# Exporters
###############################################################################
def _Add_ExportMenuItems(self,parent):
# item = parent.Append(
# help='Select PWDR item to enable',id=wx.ID_ANY,
# kind=wx.ITEM_NORMAL,
# text='Export Powder Patterns...')
# self.ExportPattern.append(item)
# item.Enable(False)
# self.Bind(wx.EVT_MENU, self.OnExportPatterns, id=item.GetId())
item = parent.Append(wx.ID_ANY,'Export All Peak Lists...','')
self.ExportPeakList.append(item)
item.Enable(True)
self.Bind(wx.EVT_MENU, self.OnExportPeakList, id=item.GetId())
item = parent.Append(wx.ID_ANY,'Export HKLs...','')
self.ExportHKL.append(item)
self.Bind(wx.EVT_MENU, self.OnExportHKL, id=item.GetId())
item = parent.Append(wx.ID_ANY,'Export PDF...','Select PDF item to enable')
self.ExportPDF.append(item)
item.Enable(False)
self.Bind(wx.EVT_MENU, self.OnExportPDF, id=item.GetId())
def FillMainMenu(self,menubar,addhelp=True):
        '''Define contents of the main GSAS-II menu for the (main) data tree window.
        On the Mac this is also called for the data item windows so that the
        main menu items are available there as well.
        '''
File = wx.Menu(title='')
menubar.Append(menu=File, title='&File')
self._Add_FileMenuItems(File)
Data = wx.Menu(title='')
menubar.Append(menu=Data, title='Data')
self._Add_DataMenuItems(Data)
Calculate = wx.Menu(title='')
menubar.Append(menu=Calculate, title='&Calculate')
self._Add_CalculateMenuItems(Calculate)
Import = wx.Menu(title='')
menubar.Append(menu=Import, title='Import')
self._Add_ImportMenu_Image(Import)
self._Add_ImportMenu_Phase(Import)
self._Add_ImportMenu_powder(Import)
self._Add_ImportMenu_Sfact(Import)
self._Add_ImportMenu_smallangle(Import)
self._Add_ImportMenu_reflectometry(Import)
self._Add_ImportMenu_PDF(Import)
item = Import.Append(wx.ID_ANY,'Column metadata test','Test Column (.par) metadata import')
self.Bind(wx.EVT_MENU, self.OnColMetaTest, id=item.GetId())
#======================================================================
# Code to help develop/debug an importer, much is hard-coded below
# but module is reloaded before each use, allowing faster testing
# def DebugImport(event):
# print 'start reload'
# import G2phase_ISO as dev
# reload(dev)
# rd = dev.ISODISTORTPhaseReader()
# self.ImportMenuId[event.GetId()] = rd
# self.OnImportPhase(event)
# or ----------------------------------------------------------------------
#self.OnImportGeneric(rd,[],'test of ISODISTORTPhaseReader')
# special debug code
# or ----------------------------------------------------------------------
# filename = '/Users/toby/projects/branton/subgroup_cif.txt'
# if not rd.ContentsValidator(filename):
# print 'not validated'
# # make a list of used phase ranId's
# phaseRIdList = []
# sub = GetGPXtreeItemId(self,self.root,'Phases')
# if sub:
# item, cookie = self.GPXtree.GetFirstChild(sub)
# while item:
# phaseName = self.GPXtree.GetItemText(item)
# ranId = self.GPXtree.GetItemPyData(item).get('ranId')
# if ranId: phaseRIdList.append(ranId)
# item, cookie = self.GPXtree.GetNextChild(sub, cookie)
# if rd.Reader(filename,usedRanIdList=phaseRIdList):
# print 'read OK'
# item = Import.Append(
# wx.ID_ANY,kind=wx.ITEM_NORMAL,
# help="debug importer",text="test importer")
# self.Bind(wx.EVT_MENU, DebugImport, id=item.GetId())
#======================================================================
self.ExportMenu = wx.Menu(title='')
menubar.Append(menu=self.ExportMenu, title='Export')
self._init_Exports(self.ExportMenu)
self._Add_ExportMenuItems(self.ExportMenu)
if GSASIIpath.GetConfigValue('Enable_logging'):
self.MacroMenu = wx.Menu(title='')
menubar.Append(menu=self.MacroMenu, title='Macro')
self._init_Macro()
if addhelp:
HelpMenu=G2G.MyHelp(self,includeTree=True,
morehelpitems=[('&Tutorials\tCtrl+T','Tutorials'),])
menubar.Append(menu=HelpMenu,title='&Help')
def _init_ctrls(self, parent):
try:
size = GSASIIpath.GetConfigValue('Main_Size')
if type(size) is tuple:
pass
elif type(size) is str:
size = eval(size)
else:
raise Exception
except:
size = wx.Size(700,450)
wx.Frame.__init__(self, name='GSASII', parent=parent,
size=size,style=wx.DEFAULT_FRAME_STYLE, title='GSAS-II main window')
self._init_Imports()
#initialize Menu item objects (these contain lists of menu items that are enabled or disabled)
self.MakePDF = []
self.Refine = []
self.ExportSeq = []
self.ExportNonSeq = []
#self.ExportPattern = []
self.ExportPeakList = []
self.ExportHKL = []
self.ExportPDF = []
self.ExportPhase = []
self.ExportCIF = []
#
self.MacroStatusList = [] # logging
self.Status = self.CreateStatusBar()
self.Status.SetFieldsCount(2)
# Bob: note different ways to display the SplitterWindow. I like the 3d effect on the Mac
# as it makes the splitter bar a bit easier to "grab" -- this might need to be platform selected.
#self.mainPanel = wx.SplitterWindow(self, wx.ID_ANY, style=wx.SP_BORDER|wx.SP_LIVE_UPDATE)
#self.mainPanel = wx.SplitterWindow(self, wx.ID_ANY, style=wx.SP_BORDER|wx.SP_LIVE_UPDATE|wx.SP_3DSASH)
self.mainPanel = wx.SplitterWindow(self, wx.ID_ANY, style=wx.SP_LIVE_UPDATE|wx.SP_3D)
self.mainPanel.SetMinimumPaneSize(100)
self.treePanel = wx.Panel(self.mainPanel, wx.ID_ANY,
style = wx.TAB_TRAVERSAL|wx.SUNKEN_BORDER)
self.dataWindow = G2DataWindow(self.mainPanel)
dataSizer = wx.BoxSizer(wx.VERTICAL)
self.dataWindow.SetSizer(dataSizer)
self.mainPanel.SplitVertically(self.treePanel, self.dataWindow, 200)
self.Status.SetStatusWidths([200,-1]) # make these match?
G2G.wxID_GPXTREE = wx.NewId()
treeSizer = wx.BoxSizer(wx.VERTICAL)
self.treePanel.SetSizer(treeSizer)
self.GPXtree = G2G.G2TreeCtrl(id=G2G.wxID_GPXTREE,
parent=self.treePanel, size=self.treePanel.GetClientSize(),style=wx.TR_DEFAULT_STYLE )
treeSizer.Add(self.GPXtree,1,wx.EXPAND|wx.ALL,0)
self.GPXtree.Bind(wx.EVT_TREE_SEL_CHANGED,self.OnDataTreeSelChanged)
self.GPXtree.Bind(wx.EVT_TREE_ITEM_RIGHT_CLICK,self.OnDataTreeSelChanged)
self.GPXtree.Bind(wx.EVT_TREE_ITEM_COLLAPSED,
self.OnGPXtreeItemCollapsed, id=G2G.wxID_GPXTREE)
self.GPXtree.Bind(wx.EVT_TREE_ITEM_EXPANDED,
self.OnGPXtreeItemExpanded, id=G2G.wxID_GPXTREE)
self.GPXtree.Bind(wx.EVT_TREE_DELETE_ITEM,
self.OnGPXtreeItemDelete, id=G2G.wxID_GPXTREE)
self.GPXtree.Bind(wx.EVT_TREE_KEY_DOWN,
self.OnGPXtreeKeyDown, id=G2G.wxID_GPXTREE)
self.GPXtree.Bind(wx.EVT_TREE_BEGIN_RDRAG,
self.OnGPXtreeBeginRDrag, id=G2G.wxID_GPXTREE)
self.GPXtree.Bind(wx.EVT_TREE_END_DRAG,
self.OnGPXtreeEndDrag, id=G2G.wxID_GPXTREE)
self.root = self.GPXtree.root
try:
size = GSASIIpath.GetConfigValue('Plot_Size')
if type(size) is tuple:
pass
elif type(size) is str:
size = eval(size)
else:
raise Exception
except:
size = wx.Size(700,600)
self.plotFrame = wx.Frame(None,-1,'GSASII Plots',size=size,
style=wx.DEFAULT_FRAME_STYLE ^ wx.CLOSE_BOX)
self.G2plotNB = G2plt.G2PlotNoteBook(self.plotFrame,G2frame=self)
self.plotFrame.Show()
for win,var in ((self,'Main_Pos'),(self.plotFrame,'Plot_Pos')):
try:
pos = GSASIIpath.GetConfigValue(var)
if type(pos) is str: pos = eval(pos)
win.SetPosition(pos)
if GetDisplay(pos) is None: win.Center()
except:
if GSASIIpath.GetConfigValue(var):
print('Value for config {} {} is invalid'.format(var,GSASIIpath.GetConfigValue(var)))
win.Center()
################################################################################
#### init_vars
################################################################################
def init_vars(self):
# initialize default values for GSAS-II "global" variables (saved in main Frame)
self.oldFocus = None
self.undofile = ''
self.TreeItemDelete = False
self.Weight = False
self.IfPlot = False
self.DDShowAll = False
self.atmSel = ''
self.PatternId = 0
self.PickId = 0
self.PickIdText = None
self.PeakTable = []
self.LimitsTable = []
self.ifX20 = True #use M20 /= (1+X20) in powder indexing, etc.
self.HKL = []
self.Lines = [] # lines used for data limits & excluded regions
self.MagLines = [] # lines used for plot magnification
self.itemPicked = None
self.Interpolate = 'nearest'
self.ContourColor = GSASIIpath.GetConfigValue('Contour_color','Paired')
self.VcovColor = 'RdYlGn'
self.RamaColor = 'Blues'
self.Projection = 'equal area'
self.logPlot = False
self.plusPlot = True
self.ErrorBars = False
self.Contour = False
self.TforYaxis = False
self.Legend = False
self.SinglePlot = True
self.Waterfall = False
self.selections= None
self.PDFselections = None
self.SubBack = False
self.seqReverse = False
self.seqLines = True #draw lines between points
self.plotView = 0
self.Image = 0
self.oldImagefile = '' # the name of the last image file read
self.oldImageTag = None # the name of the tag for multi-image files
self.PauseIntegration = False
self.ImageZ = []
self.Integrate = 0
self.imageDefault = {}
self.IntgOutList = [] # list of integration tree item Ids created in G2IO.SaveIntegration
self.AutointPWDRnames = [] # list of autoint created PWDR tree item names (to be deleted on a reset)
self.autoIntFrame = None
self.IntegratedList = [] # list of already integrated IMG tree items
self.Sngl = False
self.ifGetRing = False
self.MaskKey = '' #trigger for making image masks
self.MskDelete = False #trigger for mask delete
self.StrainKey = '' #ditto for new strain d-zeros
self.EnablePlot = True
self.hist = '' # selected histogram in Phase/Data tab
self.dataDisplayPhaseText = ''
self.lastTreeSetting = [] # used to track the selected Tree item before a refinement
self.ExpandingAll = False
self.SeqTblHideList = None
self.newGPXfile = ''
self.lastSelectedPhaseTab = None # track the last tab pressed on a phase window
self.testRBObjSizers = {} #rigid body sizer datafile contents
self.RMCchoice = 'RMCProfile'
def __init__(self, parent):
self.ExportLookup = {}
self.exporterlist = []
self._init_ctrls(parent)
self.Image = wx.Image(
os.path.join(GSASIIpath.path2GSAS2,'gsas2.ico'),
wx.BITMAP_TYPE_ICO)
if "wxMSW" in wx.PlatformInfo:
img = self.Image.Scale(16, 16).ConvertToBitmap()
elif "wxGTK" in wx.PlatformInfo:
img = self.Image.Scale(22, 22).ConvertToBitmap()
else:
img = self.Image.ConvertToBitmap()
if 'phoenix' in wx.version():
self.SetIcon(wx.Icon(img))
else:
self.SetIcon(wx.IconFromBitmap(img))
self.Bind(wx.EVT_CLOSE, self.ExitMain)
self.GSASprojectfile = ''
self.dirname = os.path.abspath(os.path.expanduser('~')) #start in the users home directory by default; may be meaningless
self.TutorialImportDir = None # location to read tutorial files, set when a tutorial is viewed
self.LastImportDir = None # last-used directory where an import was done
self.LastGPXdir = None # directory where a GPX file was last read
self.LastExportDir = None # the last directory used for exports, if any.
self.dataDisplay = None
self.init_vars()
arg = sys.argv
if len(arg) > 1 and arg[1]:
try:
self.GSASprojectfile = os.path.splitext(arg[1])[0]+u'.gpx'
except:
self.GSASprojectfile = os.path.splitext(arg[1])[0]+'.gpx'
self.dirname = os.path.abspath(os.path.dirname(arg[1]))
if self.dirname:
self.GSASprojectfile = os.path.split(self.GSASprojectfile)[1]
os.chdir(self.dirname)
self.LastGPXdir = self.dirname
try:
#open the file if possible
if sys.platform == "darwin": # on Mac delay a bit so GUI can open
wx.CallAfter(self.StartProject)
else:
self.StartProject()
return
except Exception:
                print ('Error opening or reading file '+arg[1])
import traceback
print (traceback.format_exc())
if GSASIIpath.GetConfigValue('Starting_directory'):
try:
pth = GSASIIpath.GetConfigValue('Starting_directory')
pth = os.path.expanduser(pth)
os.chdir(pth)
self.LastGPXdir = pth
except:
print('Ignoring Config Starting_directory value: '+
GSASIIpath.GetConfigValue('Starting_directory'))
def GetTreeItemsList(self,item):
return self.GPXtree._getTreeItemsList(item)
# def OnSize(self,event):
# 'Called to make GPXtree fill mainPanel'
# print 'OnSize'
# event.Skip()
# w,h = self.GetClientSizeTuple()
# self.dataWindow.SetupScrolling()
# self.mainPanel.SetSize(wx.Size(w,h))
# self.GPXtree.SetSize(wx.Size(w,h))
# self.dataWindow.SetSize(self.dataPanel.GetClientSize())
def SetDataSize(self):
'''this routine is a placeholder until all G2frame.SetDataSize calls are replaced
by G2frame.dataWindow.SetDataSize
'''
        # TODO: diagnostic patch
print ('G2frame.SetDataSize called rather than dataWindow.SetDataSize')
G2obj.HowDidIgetHere(True)
self.dataWindow.SetDataSize()
def OnDataTreeSelChanged(self, event):
'''Called when a data tree item is selected. May be called on item deletion as well.
'''
if self.TreeItemDelete:
self.TreeItemDelete = False
else:
if self.ExpandingAll:
if GSASIIpath.GetConfigValue('debug'): print('Skipping Tree selection due to ExpandAll')
return
pltNum = self.G2plotNB.nb.GetSelection()
if pltNum >= 0: #to avoid the startup with no plot!
self.G2plotNB.nb.GetPage(pltNum)
item = event.GetItem()
wx.CallAfter(SelectDataTreeItem,self,item,self.oldFocus)
#if self.oldFocus: # now done via last parameter on SelectDataTreeItem
# wx.CallAfter(self.oldFocus.SetFocus)
def OnGPXtreeItemCollapsed(self, event):
'Called when a tree item is collapsed - all children will be collapsed'
self.GPXtree.CollapseAllChildren(event.GetItem())
def OnGPXtreeItemExpanded(self, event):
'Called when a tree item is expanded'
event.Skip()
def OnGPXtreeItemDelete(self, event):
'Called when a tree item is deleted, inhibit the next tree item selection action'
self.TreeItemDelete = True
def OnGPXtreeItemActivated(self, event):
'Called when a tree item is activated'
event.Skip()
def OnGPXtreeBeginRDrag(self,event):
event.Allow()
self.BeginDragId = event.GetItem()
self.ParentId = self.GPXtree.GetItemParent(self.BeginDragId)
DragText = self.GPXtree.GetItemText(self.BeginDragId)
self.DragData = [[DragText,self.GPXtree.GetItemPyData(self.BeginDragId)],]
item, cookie = self.GPXtree.GetFirstChild(self.BeginDragId)
while item: #G2 data tree has no sub children under a child of a tree item
name = self.GPXtree.GetItemText(item)
self.DragData.append([name,self.GPXtree.GetItemPyData(item)])
item, cookie = self.GPXtree.GetNextChild(self.BeginDragId, cookie)
def OnGPXtreeEndDrag(self,event):
event.Allow()
self.EndDragId = event.GetItem()
try:
NewParent = self.GPXtree.GetItemParent(self.EndDragId)
except:
self.EndDragId = self.GPXtree.GetLastChild(self.root)
NewParent = self.root
if self.ParentId != NewParent:
self.ErrorDialog('Drag not allowed','Wrong parent for item dragged')
else:
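            # re-create the dragged item (and its children) at the drop position,
            # then delete the original entry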
Name,Item = self.DragData[0]
NewId = self.GPXtree.InsertItem(self.ParentId,self.EndDragId,Name,data=None)
self.GPXtree.SetItemPyData(NewId,Item)
for name,item in self.DragData[1:]: #loop over children
Id = self.GPXtree.AppendItem(parent=NewId,text=name)
self.GPXtree.SetItemPyData(Id,item)
self.GPXtree.Delete(self.BeginDragId)
SelectDataTreeItem(self,NewId)
def OnGPXtreeKeyDown(self,event): #doesn't exactly work right with Shift key down
'Allows stepping through the tree with the up/down arrow keys'
self.oldFocus = wx.Window.FindFocus()
keyevt = event.GetKeyEvent()
key = event.GetKeyCode()
item = self.GPXtree.GetSelection()
if type(item) is int: return # is this the toplevel in tree?
name = self.GPXtree.GetItemText(item)
parent = self.GPXtree.GetItemParent(item)
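        # plain Up/Down steps between siblings; Shift+Up/Down jumps to the item of the
        # same name under the previous/next parent (e.g. the same sub-item of another histogram)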
if key == wx.WXK_UP:
if keyevt.GetModifiers() == wx.MOD_SHIFT and parent != self.root:
if type(parent) is int: return # is this the toplevel in tree?
prev = self.GPXtree.GetPrevSibling(parent)
NewId = GetGPXtreeItemId(self,prev,name)
if NewId:
self.GPXtree.Collapse(parent)
self.GPXtree.Expand(prev)
self.oldFocus = wx.Window.FindFocus()
wx.CallAfter(self.GPXtree.SelectItem,NewId)
else:
wx.CallAfter(self.GPXtree.SelectItem,item)
elif sys.platform == "win32":
self.GPXtree.GetPrevSibling(item)
self.GPXtree.SelectItem(item)
else:
item = self.GPXtree.GetPrevSibling(item)
if item.IsOk(): self.GPXtree.SelectItem(item)
elif key == wx.WXK_DOWN:
if keyevt.GetModifiers() == wx.MOD_SHIFT and parent != self.root:
prev = self.GPXtree.GetNextSibling(parent)
NewId = GetGPXtreeItemId(self,prev,name)
if NewId:
self.GPXtree.Collapse(parent)
self.GPXtree.Expand(prev)
self.oldFocus = wx.Window.FindFocus()
wx.CallAfter(self.GPXtree.SelectItem,NewId)
else:
wx.CallAfter(self.GPXtree.SelectItem,item)
elif sys.platform == "win32":
self.GPXtree.GetNextSibling(item)
self.GPXtree.SelectItem(item)
else:
item = self.GPXtree.GetNextSibling(item)
if item.IsOk(): self.GPXtree.SelectItem(item)
def OnColMetaTest(self,event):
'Test the .par/.*lbls pair for contents'
G2imG.testColumnMetadata(self)
def OnPowderFPA(self,event):
'Perform FPA simulation/peak fitting'
G2fpa.GetFPAInput(self)
def OnReadPowderPeaks(self,event):
'Bound to menu Data/Read Powder Peaks'
self.CheckNotebook()
pth = G2G.GetImportPath(self)
if not pth: pth = '.'
dlg = wx.FileDialog(self, 'Choose file with peak list', pth, '',
'peak files (*.txt)|*.txt|All files (*.*)|*.*',wx.FD_MULTIPLE)
try:
if dlg.ShowModal() == wx.ID_OK:
for file_ajk in dlg.GetPaths():
self.HKL = []
self.powderfile = file_ajk
comments,peaks,limits,wave = G2IO.GetPowderPeaks(self.powderfile)
Id = self.GPXtree.AppendItem(parent=self.root,text='PKS '+os.path.basename(self.powderfile))
data = ['PKS',wave,0.0]
names = ['Type','Lam','Zero']
codes = [0,0,0]
inst = [G2fil.makeInstDict(names,data,codes),{}]
self.GPXtree.SetItemPyData(self.GPXtree.AppendItem(Id,text='Instrument Parameters'),inst)
self.GPXtree.SetItemPyData(self.GPXtree.AppendItem(Id,text='Comments'),comments)
self.GPXtree.SetItemPyData(self.GPXtree.AppendItem(Id,text='Limits'),[tuple(limits),limits])
self.GPXtree.SetItemPyData(self.GPXtree.AppendItem(Id,text='Index Peak List'),[peaks,[]])
self.GPXtree.SetItemPyData(self.GPXtree.AppendItem(Id,text='Unit Cells List'),[])
self.GPXtree.Expand(Id)
self.GPXtree.SelectItem(Id)
os.chdir(dlg.GetDirectory()) # to get Mac/Linux to change directory!
finally:
dlg.Destroy()
def CheckNotebook(self):
'''Make sure the data tree has the minimally expected controls.
'''
new = False
if not GetGPXtreeItemId(self,self.root,'Notebook'):
new = True
sub = self.GPXtree.AppendItem(parent=self.root,text='Notebook')
self.GPXtree.SetItemPyData(sub,[''])
if not GetGPXtreeItemId(self,self.root,'Controls'):
new = True
sub = self.GPXtree.AppendItem(parent=self.root,text='Controls')
self.GPXtree.SetItemPyData(sub,copy.copy(G2obj.DefaultControls))
if not GetGPXtreeItemId(self,self.root,'Covariance'):
new = True
sub = self.GPXtree.AppendItem(parent=self.root,text='Covariance')
self.GPXtree.SetItemPyData(sub,{})
if not GetGPXtreeItemId(self,self.root,'Constraints'):
new = True
sub = self.GPXtree.AppendItem(parent=self.root,text='Constraints')
self.GPXtree.SetItemPyData(sub,{'Hist':[],'HAP':[],'Phase':[]})
if not GetGPXtreeItemId(self,self.root,'Restraints'):
new = True
sub = self.GPXtree.AppendItem(parent=self.root,text='Restraints')
self.GPXtree.SetItemPyData(sub,{})
if not GetGPXtreeItemId(self,self.root,'Rigid bodies'):
new = True
sub = self.GPXtree.AppendItem(parent=self.root,text='Rigid bodies')
self.GPXtree.SetItemPyData(sub,{'Vector':{'AtInfo':{}},
'Residue':{'AtInfo':{}},'RBIds':{'Vector':[],'Residue':[]}})
if new:
self.GPXtree.Expand(self.GPXtree.root)
class CopyDialog(wx.Dialog):
'''Creates a dialog for copying control settings between
data tree items'''
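        # a hypothetical usage sketch (names are illustrative only):
        #   copyList = [[False,name] for name in histNames]   # [use-flag, label] pairs
        #   dlg = G2frame.CopyDialog(G2frame,'Copy controls','Copy to which histograms?',copyList)
        #   if dlg.ShowModal() == wx.ID_OK:
        #       chosen = [itm[1] for itm in dlg.GetData() if itm[0]]
        #   dlg.Destroy()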
def __init__(self,parent,title,text,data):
wx.Dialog.__init__(self,parent,-1,title,
pos=wx.DefaultPosition,style=wx.DEFAULT_DIALOG_STYLE)
self.data = data
panel = wx.Panel(self)
mainSizer = wx.BoxSizer(wx.VERTICAL)
topLabl = wx.StaticText(panel,-1,text)
mainSizer.Add((10,10),1)
mainSizer.Add(topLabl,0,wx.ALIGN_CENTER_VERTICAL|wx.LEFT,10)
mainSizer.Add((10,10),1)
            ncols = len(data)//40+1     # integer division: FlexGridSizer needs an int column count
dataGridSizer = wx.FlexGridSizer(cols=ncols,hgap=2,vgap=2)
for Id,item in enumerate(self.data):
ckbox = wx.CheckBox(panel,Id,item[1])
ckbox.Bind(wx.EVT_CHECKBOX,self.OnCopyChange)
dataGridSizer.Add(ckbox,0,wx.LEFT,10)
mainSizer.Add(dataGridSizer,0,wx.EXPAND)
OkBtn = wx.Button(panel,-1,"Ok")
OkBtn.Bind(wx.EVT_BUTTON, self.OnOk)
cancelBtn = wx.Button(panel,-1,"Cancel")
cancelBtn.Bind(wx.EVT_BUTTON, self.OnCancel)
btnSizer = wx.BoxSizer(wx.HORIZONTAL)
btnSizer.Add((20,20),1)
btnSizer.Add(OkBtn)
btnSizer.Add((20,20),1)
btnSizer.Add(cancelBtn)
btnSizer.Add((20,20),1)
mainSizer.Add(btnSizer,0,wx.EXPAND|wx.BOTTOM|wx.TOP, 10)
panel.SetSizer(mainSizer)
panel.Fit()
self.Fit()
def OnCopyChange(self,event):
Id = event.GetId()
self.data[Id][0] = self.FindWindowById(Id).GetValue()
def OnOk(self,event):
parent = self.GetParent()
parent.Raise()
self.EndModal(wx.ID_OK)
def OnCancel(self,event):
parent = self.GetParent()
parent.Raise()
self.EndModal(wx.ID_CANCEL)
def GetData(self):
return self.data
class SumDialog(wx.Dialog):
'''Allows user to supply scale factor(s) when summing data
'''
def __init__(self,parent,title,text,dataType,data,dataList,Limits=None):
wx.Dialog.__init__(self,parent,-1,title,size=(400,250),
pos=wx.DefaultPosition,style=wx.DEFAULT_DIALOG_STYLE|wx.RESIZE_BORDER)
self.plotFrame = wx.Frame(self,-1,'Sum Plots',size=wx.Size(700,600), \
style=wx.DEFAULT_FRAME_STYLE ^ wx.CLOSE_BOX)
self.G2plotNB = G2plt.G2PlotNoteBook(self.plotFrame,G2frame=self)
self.text = text
self.data = data
self.average = False
self.selectData = copy.copy(data[:-1])
self.selectVals = len(data)*[0.0,]
self.dataList = dataList
self.Limits = Limits
self.filterlist = range(len(self.dataList)) # list of the choice numbers that have been filtered (list of int indices)
self.dataType = dataType
self.filterVal = ''
self.panel = None
self.Draw()
def Draw(self):
if self.panel:
self.panel.DestroyChildren() #safe: wx.Panel
self.panel.Destroy()
size = (480,350)
self.panel = wxscroll.ScrolledPanel(self, wx.ID_ANY,size=size,
style = wx.TAB_TRAVERSAL|wx.SUNKEN_BORDER)
mainSizer = wx.BoxSizer(wx.VERTICAL)
mainSizer.Add(wx.StaticText(self.panel,label=self.text),0)
mainSizer.Add((10,10))
self.dataGridSizer = wx.FlexGridSizer(cols=2,hgap=2,vgap=2)
self.dataGridSizer.Add((-1,-1))
topSizer = wx.BoxSizer(wx.HORIZONTAL)
topSizer.Add((-1,-1),1,wx.EXPAND,1)
topSizer.Add(wx.StaticText(self.panel,label='Filter: '),0,WACV)
self.timer = wx.Timer()
self.timer.Bind(wx.EVT_TIMER,self.OnFilter)
self.filterBox = wx.TextCtrl(self.panel, wx.ID_ANY, self.filterVal,
size=(80,-1),style=wx.TE_PROCESS_ENTER)
self.filterBox.Bind(wx.EVT_TEXT,self.onChar)
self.filterBox.Bind(wx.EVT_TEXT_ENTER,self.OnFilter)
topSizer.Add(self.filterBox,0,WACV)
self.dataGridSizer.Add(topSizer,1,wx.RIGHT|wx.BOTTOM|wx.EXPAND,1)
self.dataGridSizer.Add((-1,10))
self.dataGridSizer.Add((-1,10))
for Id,item in enumerate(self.selectData):
name = wx.TextCtrl(self.panel,-1,item,size=wx.Size(300,20))
name.SetEditable(False)
scale = G2G.ValidatedTxtCtrl(self.panel,self.selectVals,Id,nDig=(10,3),typeHint=float)
self.dataGridSizer.Add(scale,0,wx.LEFT,10)
self.dataGridSizer.Add(name,0,wx.RIGHT,10)
if self.dataType:
ScaleAll = wx.Button(self.panel,wx.ID_ANY,'Set all above')
ScaleAll.Bind(wx.EVT_BUTTON, self.OnAllScale)
if self.dataType == 'PWDR':
self.Avg = wx.CheckBox(self.panel,label=' Make average?')
self.Avg.Bind(wx.EVT_CHECKBOX,self.OnAve)
self.dataGridSizer.Add(ScaleAll,0,wx.LEFT,10)
if self.dataType == 'PWDR':
self.dataGridSizer.Add(self.Avg,0,wx.RIGHT,10)
self.dataGridSizer.Add(wx.StaticText(self.panel,-1,' Result type: '+self.dataType),1,
wx.LEFT|wx.ALIGN_CENTER_VERTICAL,1)
mainSizer.Add(self.dataGridSizer,0,wx.EXPAND)
self.name = G2G.ValidatedTxtCtrl(self.panel,self.data,-1,size=wx.Size(300,20))
mainSizer.Add(self.name,0,wx.RIGHT|wx.TOP,10)
self.OkBtn = wx.Button(self.panel,-1,"Ok")
self.OkBtn.Bind(wx.EVT_BUTTON, self.OnOk)
cancelBtn = wx.Button(self.panel,-1,"Cancel")
cancelBtn.Bind(wx.EVT_BUTTON, self.OnCancel)
btnSizer = wx.FlexGridSizer(0,3,10,20)
if self.dataType =='PWDR':
TestBtn = wx.Button(self.panel,-1,"Test")
TestBtn.Bind(wx.EVT_BUTTON, self.OnTest)
btnSizer.Add(TestBtn)
btnSizer.Add(self.OkBtn)
btnSizer.Add(cancelBtn)
btnSizer.Add((5,5))
self.panel.SetSizer(mainSizer)
self.panel.SetAutoLayout(1)
self.panel.SetupScrolling()
mainSizer.Add((10,10),1)
mainSizer.Add(btnSizer,0,wx.CENTER)
self.panel.SetSizer(mainSizer)
self.panel.Fit()
self.Fit()
def OnAve(self,event):
self.average = self.Avg.GetValue()
def OnFilter(self,event):
'''Read text from filter control and select entries that match.
'''
if self.timer.IsRunning():
self.timer.Stop()
self.filterVal = txt = self.filterBox.GetValue()
if txt:
txt = txt.lower()
ChoiceList = []
ChoiceVals = []
for i,item in enumerate(self.selectData):
if item.lower().find(txt) != -1:
ChoiceList.append(item)
ChoiceVals.append(self.selectVals[i])
self.selectData = ChoiceList
self.selectVals = ChoiceVals
else:
# self.selectData = copy.copy(self.data[:-1])
self.selectVals = len(self.data)*[0.0,]
wx.CallAfter(self.Draw)
def GetData(self):
if self.dataType == 'PWDR':
return self.selectData+[self.data[-1],],self.result
else:
return self.selectData+[self.data[-1],],self.selectVals
def onChar(self,event):
'Respond to keyboard events in the Filter box'
self.filterVal = self.filterBox.GetValue()
if self.timer.IsRunning():
self.timer.Stop()
self.timer.Start(1000,oneShot=True)
if event: event.Skip()
def OnAllScale(self,event):
dlg = G2G.SingleFloatDialog(self,'New scale',
'Enter new value for all scale factors',1.)
dlg.CenterOnParent()
if dlg.ShowModal() == wx.ID_OK:
val = dlg.GetValue()
dlg.Destroy()
else:
dlg.Destroy()
return
for Id,item in enumerate(self.selectData):
self.selectVals[Id] = val
wx.CallAfter(self.Draw)
def OnTest(self,event):
lenX = 0
Xminmax = [0,0]
XY = []
Xsum = []
Ysum = []
Vsum = []
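            # for each pattern with a nonzero scale: trim to the (optional) Limits, then
            #   sum mode:     Ysum += scale*y,  Vsum += |scale|*(1/w)  -> Wsum = 1/Vsum
            #   average mode: collect scale*y rows, mask zeros, Ysum = mean over rows and
            #                 Wsum = 1/Ysum (presumably assuming counting statistics)
            # all patterns must cover the same x range with the same number of points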
for i,item in enumerate(self.selectData):
name = item
scale = self.selectVals[i]
Id = self.data.index(name)
data = self.dataList[Id]
if scale:
x,y,w,yc,yb,yd = data #numpy arrays!
if self.Limits is not None:
xMin = np.searchsorted(x,self.Limits[1][0])
xMax = np.searchsorted(x,self.Limits[1][1])
x = x[xMin:xMax+1]
y = y[xMin:xMax+1]
lenX = xMax-xMin+1
XY.append([x,scale*y])
v = 1./w[xMin:xMax+1]
if lenX:
if lenX != len(x):
self.GetParent().ErrorDialog('Data length error','Data to be summed must have same number of points'+
'\nExpected:'+str(lenX)+
'\nFound: '+str(len(x))+'\nfor '+name)
return
# self.OnCancel(event)
else:
lenX = len(x)
if Xminmax[1]:
if Xminmax != [x[0],x[-1]]:
self.GetParent().ErrorDialog('Data range error','Data to be summed must span same range'+
'\nExpected:'+str(Xminmax[0])+' '+str(Xminmax[1])+
'\nFound: '+str(x[0])+' '+str(x[-1])+'\nfor '+name)
return
# self.OnCancel(event)
else:
Xminmax = [x[0],x[-1]]
Xsum = x
if self.dataType == 'PWDR' and self.average:
Ysum.append(scale*y)
Vsum.append(abs(scale)*v)
else:
try:
Ysum += scale*y
Vsum += abs(scale)*v
except ValueError:
Ysum = scale*y
Vsum = abs(scale)*v
if self.dataType =='PWDR' and self.average:
maYsum = ma.masked_equal(Ysum,0)
Ysum = ma.mean(maYsum,axis=0)
Wsum = 1./np.array(Ysum)
else:
Wsum = 1./Vsum
YCsum = np.zeros(lenX)
YBsum = np.zeros(lenX)
YDsum = np.zeros(lenX)
XY.append([Xsum,Ysum])
self.result = [Xsum,Ysum,Wsum,YCsum,YBsum,YDsum]
# N.B. PlotXY expects the first arg to point to G2frame. In this case, we
# create a duplicate (temporary) Plot notebook window that is a child of the
# modal SumDialog dialog (self). This nicely gets deleted when the dialog is destroyed,
# but the plot window is not fully functional, at least on the Mac.
if len(XY[0][0]):
G2plt.PlotXY(self,XY,lines=True,Title='Sum:'+self.data[-1],labelY='Intensity',)
self.plotFrame.Show()
return True
def OnOk(self,event):
if self.dataType == 'PWDR':
if not self.OnTest(event): return
parent = self.GetParent()
parent.Raise()
self.EndModal(wx.ID_OK)
def OnCancel(self,event):
parent = self.GetParent()
parent.Raise()
self.EndModal(wx.ID_CANCEL)
def OnPwdrSum(self,event):
        'Sum or average together powder data'
TextList = []
DataList = []
Limits = []
Names = []
Inst = None
Comments = ['Sum/Average equals: \n']
if self.GPXtree.GetCount():
item, cookie = self.GPXtree.GetFirstChild(self.root)
while item:
name = self.GPXtree.GetItemText(item)
Names.append(name)
if 'PWDR' in name:
TextList.append(name)
DataList.append(self.GPXtree.GetItemPyData(item)[1]) # (x,y,w,yc,yb,yd)
if not Inst:
Inst = self.GPXtree.GetItemPyData(GetGPXtreeItemId(self,item, 'Instrument Parameters'))
if not Limits:
Limits = self.GPXtree.GetItemPyData(GetGPXtreeItemId(self,item, 'Limits'))
item, cookie = self.GPXtree.GetNextChild(self.root, cookie)
if len(TextList) < 2:
self.ErrorDialog('Not enough data to sum/average','There must be more than one "PWDR" pattern')
return
TextList.append('default_ave_name')
dlg = self.SumDialog(self,'Sum/Average data','''
Enter scale for each pattern to be summed/averaged
Limits for first pattern used sets range for the sum
All patterns used must extend over this range
''','PWDR',
TextList,DataList,Limits)
try:
if dlg.ShowModal() == wx.ID_OK:
result,sumData = dlg.GetData()
Xsum,Ysum,Wsum,YCsum,YBsum,YDsum = sumData
Xminmax = [Xsum[0],Xsum[-1]]
outname = 'PWDR '+result[-1]
Id = 0
if outname in Names:
dlg2 = wx.MessageDialog(self,'Overwrite data?','Duplicate data name',wx.OK|wx.CANCEL)
try:
if dlg2.ShowModal() == wx.ID_OK:
Id = GetGPXtreeItemId(self,self.root,name)
self.GPXtree.Delete(Id)
finally:
dlg2.Destroy()
Id = self.GPXtree.AppendItem(parent=self.root,text=outname)
if Id:
Sample = G2obj.SetDefaultSample()
Ymin = np.min(Ysum)
Ymax = np.max(Ysum)
valuesdict = {
'wtFactor':1.0,
'Dummy':False,
'ranId':ran.randint(0,sys.maxsize),
'Offset':[0.0,0.0],'delOffset':0.02*Ymax,'refOffset':-.1*Ymax,'refDelt':0.1*Ymax,
'Yminmax':[Ymin,Ymax]
}
self.GPXtree.SetItemPyData(Id,[valuesdict,[np.array(Xsum),np.array(Ysum),np.array(Wsum),
np.array(YCsum),np.array(YBsum),np.array(YDsum)]])
self.GPXtree.SetItemPyData(self.GPXtree.AppendItem(Id,text='Comments'),Comments)
self.GPXtree.SetItemPyData(self.GPXtree.AppendItem(Id,text='Limits'),[tuple(Xminmax),Xminmax])
self.GPXtree.SetItemPyData(self.GPXtree.AppendItem(Id,text='Background'),[['chebyschev-1',True,3,1.0,0.0,0.0],
{'nDebye':0,'debyeTerms':[],'nPeaks':0,'peaksList':[],'background PWDR':['',1.0,False]}])
self.GPXtree.SetItemPyData(self.GPXtree.AppendItem(Id,text='Instrument Parameters'),Inst)
self.GPXtree.SetItemPyData(self.GPXtree.AppendItem(Id,text='Sample Parameters'),Sample)
self.GPXtree.SetItemPyData(self.GPXtree.AppendItem(Id,text='Peak List'),{'peaks':[],'sigDict':{}})
self.GPXtree.SetItemPyData(self.GPXtree.AppendItem(Id,text='Index Peak List'),[[],[]])
self.GPXtree.SetItemPyData(self.GPXtree.AppendItem(Id,text='Unit Cells List'),[])
self.GPXtree.SetItemPyData(self.GPXtree.AppendItem(Id,text='Reflection Lists'),{})
self.GPXtree.SelectItem(Id)
self.GPXtree.Expand(Id)
finally:
dlg.Destroy()
def OnImageSum(self,event):
'Sum together image data'
TextList = []
DataList = []
IdList = []
Names = []
Comments = ['Sum equals: \n']
if self.GPXtree.GetCount():
item, cookie = self.GPXtree.GetFirstChild(self.root)
while item:
name = self.GPXtree.GetItemText(item)
Names.append(name)
if 'IMG' in name:
TextList.append(name)
DataList.append(self.GPXtree.GetImageLoc(item)) #Size,Image,Tag
IdList.append(item)
Data = self.GPXtree.GetItemPyData(GetGPXtreeItemId(self,item,'Image Controls'))
item, cookie = self.GPXtree.GetNextChild(self.root, cookie)
if len(TextList) < 2:
self.ErrorDialog('Not enough data to sum','There must be more than one "IMG" pattern')
return
TextList.append('default_sum_name')
dlg = self.SumDialog(self,'Sum data',' Enter scale for each image to be summed','IMG',
TextList,DataList)
try:
if dlg.ShowModal() == wx.ID_OK:
imSize = 0
result,scales = dlg.GetData()
First = True
Found = False
for name,scale in zip(result,scales):
if scale:
Found = True
Comments.append("%10.3f %s" % (scale,' * '+name))
i = TextList.index(name)
Npix,imagefile,imagetag = DataList[i]
imagefile = G2IO.GetCheckImageFile(self,IdList[i])[1]
image = G2IO.GetImageData(self,imagefile,imageOnly=True,ImageTag=imagetag)
if First:
newImage = np.zeros_like(image)
First = False
if imSize:
if imSize != Npix:
self.ErrorDialog('Image size error','Images to be summed must be same size'+ \
'\nExpected:'+str(imSize)+ \
'\nFound: '+str(Npix)+'\nfor '+name)
return
newImage = newImage+scale*image
else:
imSize = Npix
newImage = newImage+scale*image
del(image)
if not Found:
self.ErrorDialog('Image sum error','No nonzero image multipliers found')
return
newImage = np.array(newImage,dtype=np.int32)
outname = 'IMG '+result[-1]
Id = 0
if outname in Names:
dlg2 = wx.MessageDialog(self,'Overwrite data?','Duplicate data name',wx.OK|wx.CANCEL)
try:
if dlg2.ShowModal() == wx.ID_OK:
Id = GetGPXtreeItemId(self,self.root,name)
finally:
dlg2.Destroy()
else:
Id = self.GPXtree.AppendItem(parent=self.root,text=outname)
if Id:
pth = os.path.split(os.path.abspath(imagefile))[0]
# pth = G2G.GetExportPath(self)
dlg = wx.FileDialog(self, 'Choose sum image filename', pth,outname.split('IMG ')[1],
'G2img files (*.G2img)|*.G2img',
wx.FD_SAVE|wx.FD_OVERWRITE_PROMPT)
if dlg.ShowModal() == wx.ID_OK:
newimagefile = dlg.GetPath()
newimagefile = G2IO.FileDlgFixExt(dlg,newimagefile)
G2IO.PutG2Image(newimagefile,Comments,Data,Npix,newImage)
Imax = np.amax(newImage)
Imin = np.amin(newImage)
newImage = []
self.GPXtree.SetItemPyData(Id,[imSize,newimagefile])
self.GPXtree.SetItemPyData(self.GPXtree.AppendItem(Id,text='Comments'),Comments)
del(newImage)
if self.imageDefault:
Data = copy.copy(self.imageDefault)
Data['formatName'] = 'GSAS-II image'
Data['showLines'] = True
Data['ring'] = []
Data['rings'] = []
Data['cutoff'] = 10
Data['pixLimit'] = 20
Data['ellipses'] = []
Data['calibrant'] = ''
Data['range'] = [(Imin,Imax),[Imin,Imax]]
self.GPXtree.SetItemPyData(self.GPXtree.AppendItem(Id,text='Image Controls'),Data)
Masks = {'Points':[],'Rings':[],'Arcs':[],'Polygons':[],
'Frames':[],'Thresholds':[(Imin,Imax),[Imin,Imax]],
'SpotMask':{'esdMul':2.,'spotMask':None}}
self.GPXtree.SetItemPyData(self.GPXtree.AppendItem(Id,text='Masks'),Masks)
self.GPXtree.SetItemPyData(self.GPXtree.AppendItem(Id,text='Stress/Strain'),
{'Type':'True','d-zero':[],'Sample phi':0.0,'Sample z':0.0,'Sample load':0.0})
self.GPXtree.SelectItem(Id)
self.GPXtree.Expand(Id)
self.PickId = GetGPXtreeItemId(self,self.root,outname)
self.Image = self.PickId
finally:
dlg.Destroy()
def OnAddPhase(self,event):
'Add a new, empty phase to the tree. Called by Data/Add Phase menu'
self.CheckNotebook()
if not GetGPXtreeItemId(self,self.root,'Phases'):
sub = self.GPXtree.AppendItem(parent=self.root,text='Phases')
else:
sub = GetGPXtreeItemId(self,self.root,'Phases')
PhaseName = ''
dlg = wx.TextEntryDialog(None,'Enter a name for this phase','Phase Name Entry','New phase',
style=wx.OK)
if dlg.ShowModal() == wx.ID_OK:
PhaseName = dlg.GetValue()
dlg.Destroy()
if not GetGPXtreeItemId(self,self.root,'Restraints'):
subr = self.GPXtree.AppendItem(parent=self.root,text='Restraints')
self.GPXtree.SetItemPyData(subr,{PhaseName:{}})
else:
subr = GetGPXtreeItemId(self,self.root,'Restraints')
self.GPXtree.GetItemPyData(subr).update({PhaseName:{}})
self.GPXtree.AppendItem(parent=subr,text=PhaseName)
newphase = self.GPXtree.AppendItem(parent=sub,text=PhaseName)
E,SGData = G2spc.SpcGroup('P 1')
self.GPXtree.SetItemPyData(newphase,G2obj.SetNewPhase(Name=PhaseName,SGData=SGData))
self.GPXtree.Expand(sub)
SelectDataTreeItem(self,newphase) #bring up new phase General tab
def OnDeletePhase(self,event):
'''Delete one or more phases from the tree. Called by Data/Delete Phase menu.
Also delete this phase from Reflection Lists for each PWDR histogram;
removes the phase from restraints and deletes any constraints
with variables from the phase.
If any deleted phase is marked as Used in a histogram, a more rigorous
"deep clean" is done and histogram refinement results are cleared, as well as
the covariance information and all plots are deleted
'''
selItem = self.GPXtree.GetSelection()
if self.dataWindow:
self.dataWindow.ClearData()
TextList = []
DelList = []
DelItemList = []
consDeleted = 0
usedPhase = False
if GetGPXtreeItemId(self,self.root,'Phases'):
sub = GetGPXtreeItemId(self,self.root,'Phases')
else:
return
if GetGPXtreeItemId(self,self.root,'Restraints'):
subr = GetGPXtreeItemId(self,self.root,'Restraints')
else:
subr = 0
if GetGPXtreeItemId(self,self.root,'Constraints'):
id = GetGPXtreeItemId(self,self.root,'Constraints')
constr = self.GPXtree.GetItemPyData(id)
else:
constr = {}
item, cookie = self.GPXtree.GetFirstChild(sub)
while item:
TextList.append(self.GPXtree.GetItemText(item))
item, cookie = self.GPXtree.GetNextChild(sub, cookie)
dlg = wx.MultiChoiceDialog(self, 'Which phase to delete?', 'Delete phase', TextList, wx.CHOICEDLG_STYLE)
try:
if dlg.ShowModal() == wx.ID_OK:
result = dlg.GetSelections()
for i in result: DelList.append([i,TextList[i]])
item, cookie = self.GPXtree.GetFirstChild(sub)
i = 0
while item:
if [i,self.GPXtree.GetItemText(item)] in DelList: DelItemList.append(item)
item, cookie = self.GPXtree.GetNextChild(sub, cookie)
i += 1
for item in DelItemList:
phase = self.GPXtree.GetItemPyData(item)
for h in phase['Histograms']:
if 'Use' not in phase['Histograms'][h]: continue
if phase['Histograms'][h]['Use']:
usedPhase = True
break
if 'pId' in phase:
p = phase['pId']
else:
p = '?'
self.GPXtree.Delete(item)
if item == selItem: selItem = self.root
# look for constraints to remove
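                    # (constraint variable names begin with the phase Id prefix '<pId>:',
                    # so p identifies constraints that reference the deleted phase)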
for key in constr:
delThis = []
if key.startswith('_'): continue
for i,cons in enumerate(constr[key]):
for var in cons[0:-3]:
if str(var[1]).startswith(str(p)):
delThis.append(i)
break
for i in reversed(delThis):
consDeleted += 1
del constr[key][i]
# delete refinement results from histograms
item, cookie = self.GPXtree.GetFirstChild(self.root)
while item:
name = self.GPXtree.GetItemText(item)
if 'PWDR' in name:
data = self.GPXtree.GetItemPyData(item)
if usedPhase: # remove r-factors
dellist = [value for value in data[0] if ':' in value]
for v in dellist+['Durbin-Watson', 'R', 'wR', 'Rb',
'wRb', 'wRmin','Nobs']:
if v in data[0]: del data[0][v]
# could wipe out computed & difference patterns, but does not work
#data[1][3] = np.zeros_like(data[1][3])
#data[1][5] = np.zeros_like(data[1][5])
# always get rid of reflection lists
Id = GetGPXtreeItemId(self,item, 'Reflection Lists')
refList = self.GPXtree.GetItemPyData(Id)
if len(refList):
for i,item in DelList:
if item in refList:
del(refList[item])
elif 'HKLF' in name and usedPhase: # probably not needed if phase is not used
data = self.GPXtree.GetItemPyData(item)
data[0] = {}
item, cookie = self.GPXtree.GetNextChild(self.root, cookie)
finally:
dlg.Destroy()
if usedPhase: # clear info from last refinement for "deep clean" if a used phase is deleted
id = GetGPXtreeItemId(self,self.root,'Covariance')
if DelItemList and id:
self.GPXtree.SetItemPyData(id,{})
id = GetGPXtreeItemId(self,self.root,'Sequential results')
if DelItemList and id:
self.GPXtree.Delete(id)
if id == selItem: selItem = self.root
# delete all plots
for lbl in self.G2plotNB.plotList:
self.G2plotNB.Delete(lbl)
if subr and DelList: #remove restraints for deleted phase
DelList = [itm[1] for itm in DelList]
item, cookie = self.GPXtree.GetFirstChild(subr)
while item:
name = self.GPXtree.GetItemText(item)
if name in DelList:
self.GPXtree.Delete(item)
if item == selItem: selItem = self.root
item, cookie = self.GPXtree.GetNextChild(subr, cookie)
# force redisplay of current tree item if it was not deleted
self.PickId = 0
self.PatternId = 0
self.PickIdText = None
SelectDataTreeItem(self,selItem)
wx.CallAfter(self.GPXtree.SelectItem,selItem)
if consDeleted:
print('\n',consDeleted,'constraints were deleted')
def OnRenameData(self,event):
        '''Renames an existing histogram (not a phase); called from the Data menu.
Must be used before a histogram is used in a phase.
'''
name = self.GPXtree.GetItemText(self.PickId)
Histograms,Phases = self.GetUsedHistogramsAndPhasesfromTree()
if name in Histograms:
G2G.G2MessageBox(self,
'Histogram is used. You must remove it from all phases before it can be renamed',
'Rename not allowed')
return
if 'PWDR' in name or 'HKLF' in name or 'IMG' in name:
if 'Bank' in name:
names = name.split('Bank')
names[1] = ' Bank'+names[1]
elif 'Azm' in name:
names = name.split('Azm')
names[1] = ' Azm'+names[1]
else:
names = [name,'']
dataType = names[0][:names[0].index(' ')+1] #includes the ' '
dlg = G2G.SingleStringDialog(self,'Change tree name',
'Data name: '+name,names[0][names[0].index(' ')+1:])
#if dlg.ShowModal() == wx.ID_OK:
if dlg.Show():
name = dataType+dlg.GetValue().strip()+names[1]
self.GPXtree.SetItemText(self.PickId,name)
if 'PWDR' in name:
self.GPXtree.GetItemPyData(self.PickId)[2] = name
dlg.Destroy()
def GetFileList(self,fileType,skip=None): #potentially useful?
'Appears unused. Note routine of same name in GSASIIpwdGUI'
fileList = []
Source = ''
Id, cookie = self.GPXtree.GetFirstChild(self.root)
while Id:
name = self.GPXtree.GetItemText(Id)
if fileType in name:
if Id == skip:
Source = name
else:
fileList.append([False,name,Id])
Id, cookie = self.GPXtree.GetNextChild(self.root, cookie)
if skip:
return fileList,Source
else:
return fileList
def OnDataDelete(self, event):
'''Delete one or more histograms from data tree. Called by the
Data/DeleteData menu
'''
TextList = []
DelList = []
DelItemList = []
nItems = {'PWDR':0,'SASD':0,'REFD':0,'IMG':0,'HKLF':0,'PDF':0}
PDFnames = []
selItem = self.GPXtree.GetSelection()
Histograms,Phases = self.GetUsedHistogramsAndPhasesfromTree()
if not self.GPXtree.GetCount():
G2G.G2MessageBox(self,'No tree items to be deleted',
'Nothing to delete')
return
item, cookie = self.GPXtree.GetFirstChild(self.root)
used = False
while item:
name = self.GPXtree.GetItemText(item)
if name not in ['Notebook','Controls','Covariance','Constraints',
'Restraints','Phases','Rigid bodies'] and 'Sequential' not in name:
if 'PWDR' in name[:4]:
nItems['PWDR'] += 1
if name in Histograms:
used = True
item, cookie = self.GPXtree.GetNextChild(self.root, cookie)
continue
if 'SASD' in name[:4]: nItems['SASD'] += 1
if 'REFD' in name[:4]: nItems['REFD'] += 1
if 'IMG' in name[:3]: nItems['IMG'] += 1
if 'HKLF' in name[:4]:
nItems['HKLF'] += 1
if name in Histograms:
used = True
item, cookie = self.GPXtree.GetNextChild(self.root, cookie)
continue
if 'PDF' in name[:3]:
PDFnames.append(name)
nItems['PDF'] += 1
TextList.append(name)
item, cookie = self.GPXtree.GetNextChild(self.root, cookie)
for pdfName in PDFnames:
try:
TextList.remove('PWDR'+pdfName[4:])
except ValueError:
print (u'PWDR'+pdfName[4:]+u' for '+pdfName+u' not found')
if len(TextList) == 0 and used:
G2G.G2MessageBox(self,'All histograms are used. You must remove them from phases before they can be deleted',
'Nothing to delete')
return
elif len(TextList) == 0:
G2G.G2MessageBox(self,'None of the tree items are allowed to be deleted',
'Nothing to delete')
return
dlg = G2G.G2MultiChoiceDialog(self, 'Which data to delete?', 'Delete data', TextList, wx.CHOICEDLG_STYLE)
try:
if dlg.ShowModal() == wx.ID_OK:
result = dlg.GetSelections()
for i in result: DelList.append(TextList[i])
item, cookie = self.GPXtree.GetFirstChild(self.root)
while item:
itemName = self.GPXtree.GetItemText(item)
if itemName in DelList:
if 'PWDR' in itemName[:4]: nItems['PWDR'] -= 1
elif 'SASD' in itemName[:4]: nItems['SASD'] -= 1
elif 'REFD' in itemName[:4]: nItems['REFD'] -= 1
elif 'IMG' in itemName[:3]: nItems['IMG'] -= 1
elif 'HKLF' in itemName[:4]: nItems['HKLF'] -= 1
elif 'PDF' in itemName[:3]: nItems['PDF'] -= 1
DelItemList.append(item)
item, cookie = self.GPXtree.GetNextChild(self.root, cookie)
for item in DelItemList:
self.GPXtree.Delete(item)
if item == selItem: selItem = self.root
if DelList:
self.PickId = 0
self.PickIdText = None
self.PatternId = 0
if nItems['PWDR']:
wx.CallAfter(G2plt.PlotPatterns,self,True)
else:
self.G2plotNB.Delete('Powder Patterns')
self.lastPlotType = None
if not nItems['IMG']:
self.G2plotNB.Delete('2D Powder Image')
if not nItems['HKLF']:
self.G2plotNB.Delete('Structure Factors')
if '3D Structure Factors' in self.G2plotNB.plotList:
self.G2plotNB.Delete('3D Structure Factors')
finally:
dlg.Destroy()
if DelList:
SelectDataTreeItem(self,selItem)
wx.CallAfter(self.GPXtree.SelectItem,selItem)
def OnPlotDelete(self,event):
'''Delete one or more plots from plot window. Called by the
Data/DeletePlots menu
'''
plotNames = self.G2plotNB.plotList
if len(plotNames):
dlg = G2G.G2MultiChoiceDialog(self, 'Which plots to delete?', 'Delete plots', plotNames, wx.CHOICEDLG_STYLE)
try:
if dlg.ShowModal() == wx.ID_OK:
result = dlg.GetSelections()
result.sort(reverse=True)
for i in result:
self.G2plotNB.Delete(plotNames[i])
finally:
dlg.Destroy()
def OnFileReopen(self, event):
files = GSASIIpath.GetConfigValue('previous_GPX_files')
if not files:
print('no previous projects found')
return
sellist = []
for f in files:
dirname,filroot = os.path.split(f)
if os.path.exists(f) and '.gpx' in f:
sellist.append("{} from {}".format(filroot,dirname))
# else:
# sellist.append("not found: {}".format(f))
dlg = G2G.G2SingleChoiceDialog(self,
'Select previous project to open',
'Select project',sellist)
if dlg.ShowModal() == wx.ID_OK:
sel = dlg.GetSelection()
dlg.Destroy()
else:
dlg.Destroy()
return
filroot,dirname = sellist[sel].split(' from ')
f = os.path.join(dirname,filroot)
if os.path.exists(f):
self.OnFileOpen(event, filename=f)
self.LastGPXdir = dirname
else:
print('file not found',f)
def OnFileOpen(self, event, filename=None):
'''Gets a GSAS-II .gpx project file in response to the
File/Open Project menu button
'''
def SaveOld():
'''See if we should save current project and continue
to read another.
returns True if the project load should continue
'''
if self.dataWindow:
self.dataWindow.ClearData()
dlg = wx.MessageDialog(self,
'Do you want to save and replace the current project?\n(Use No to read without saving or Cancel to continue with current project)',
'Save & Overwrite?',
wx.YES|wx.NO|wx.CANCEL)
try:
result = dlg.ShowModal()
finally:
dlg.Destroy()
if result == wx.ID_NO:
result = True
elif result == wx.ID_CANCEL:
return False
else:
if not self.OnFileSave(None): return False
self.GPXtree.DeleteChildren(self.root)
self.GSASprojectfile = ''
self.HKL = []
if self.G2plotNB.plotList:
self.G2plotNB.clear()
return True
def GetGPX():
if self.LastGPXdir:
pth = self.LastGPXdir
else:
pth = '.'
#if GSASIIpath.GetConfigValue('debug'): print('debug: open from '+pth)
dlg = wx.FileDialog(self, 'Choose GSAS-II project file', pth,
wildcard='GSAS-II project file (*.gpx)|*.gpx',style=wx.FD_OPEN)
try:
if dlg.ShowModal() != wx.ID_OK: return
self.GSASprojectfile = dlg.GetPath()
self.GSASprojectfile = G2IO.FileDlgFixExt(dlg,self.GSASprojectfile)
self.LastGPXdir = dlg.GetDirectory()
finally:
dlg.Destroy()
self.EnablePlot = False
if self.GPXtree.GetChildrenCount(self.root,False):
if not SaveOld(): return
if not filename:
GetGPX()
filename = self.GSASprojectfile
else:
try:
self.GSASprojectfile = os.path.splitext(filename)[0]+u'.gpx'
except:
self.GSASprojectfile = os.path.splitext(filename)[0]+'.gpx'
self.dirname = os.path.split(filename)[0]
self.init_vars()
try:
self.StartProject() #open the file if possible
except:
print ('\nError opening file '+filename)
import traceback
print (traceback.format_exc())
def StartProject(self):
'''Opens a GSAS-II project file & selects the 1st available data set to
display (PWDR, HKLF, REFD or SASD)
'''
Id = 0
phaseId = None
G2IO.ProjFileOpen(self)
self.GPXtree.SetItemText(self.root,'Project: '+self.GSASprojectfile)
self.GPXtree.Expand(self.root)
self.HKL = []
item, cookie = self.GPXtree.GetFirstChild(self.root)
while item:
name = self.GPXtree.GetItemText(item)
if name[:4] in ['PWDR','HKLF','IMG ','PDF ','SASD','REFD']:
if not Id:
if name[:4] == 'IMG ':
Id = GetGPXtreeItemId(self,item,'Image Controls')
else:
Id = item
elif name == "Phases":
phaseId = item
elif name == 'Controls':
data = self.GPXtree.GetItemPyData(item)
if data:
for item in self.Refine: item.Enable(True)
item, cookie = self.GPXtree.GetNextChild(self.root, cookie)
if phaseId: # show all phases
self.GPXtree.Expand(phaseId)
if Id:
self.EnablePlot = True
self.GPXtree.Expand(Id)
SelectDataTreeItem(self,Id)
self.GPXtree.SelectItem(Id) # needed on OSX or item is not selected in tree; perhaps not needed elsewhere
elif phaseId:
Id = phaseId
# open 1st phase
Id, unused = self.GPXtree.GetFirstChild(phaseId)
SelectDataTreeItem(self,Id)
self.GPXtree.SelectItem(Id) # as before for OSX
self.CheckNotebook()
if self.dirname: os.chdir(self.dirname) # to get Mac/Linux to change directory!
pth = os.path.split(os.path.abspath(self.GSASprojectfile))[0]
if GSASIIpath.GetConfigValue('Save_paths'):
G2G.SaveGPXdirectory(pth,write=False)
config = G2G.GetConfigValsDocs()
GSASIIpath.addPrevGPX(self.GSASprojectfile,config)
G2G.SaveConfigVars(config)
self.LastGPXdir = pth
def OnFileClose(self, event):
'''Clears the data tree in response to the
File/New Project menu button. User is given option to save
the project.
'''
dlg = wx.MessageDialog(self,
'Do you want to save the current project and start with an empty one?\n(Use No to clear without saving or Cancel to continue with current project)',
'Save & Clear?',
wx.YES | wx.NO | wx.CANCEL)
try:
result = dlg.ShowModal()
            if result == wx.ID_YES:    # a YES|NO|CANCEL dialog returns wx.ID_YES, not wx.ID_OK
self.OnFileSaveMenu(event)
if result != wx.ID_CANCEL:
self.GSASprojectfile = ''
self.GPXtree.SetItemText(self.root,'Project: ')
self.GPXtree.DeleteChildren(self.root)
self.dataWindow.ClearData()
if len(self.HKL): self.HKL = []
if self.G2plotNB.plotList:
self.G2plotNB.clear()
self.SetTitleByGPX()
self.EnableRefineCommand()
self.init_vars()
finally:
dlg.Destroy()
def OnFileSave(self, event):
'''Save the current project in response to the
File/Save Project menu button
'''
if self.GSASprojectfile:
self.GPXtree.SetItemText(self.root,'Project: '+self.GSASprojectfile)
self.CheckNotebook()
G2IO.ProjFileSave(self)
return True
else:
return self.OnFileSaveas(event)
def OnNewGSASII(self, event):
'''Gets a GSAS-II .gpx project file in response to the
File/Open new window menu button. Runs only on Mac.
'''
if self.LastGPXdir:
pth = self.LastGPXdir
else:
pth = '.'
GSASprojectfile = ''
dlg = wx.FileDialog(self, 'Choose GSAS-II project file', pth,
wildcard='GSAS-II project file (*.gpx)|*.gpx',style=wx.FD_OPEN)
try:
if dlg.ShowModal() == wx.ID_OK:
GSASprojectfile = dlg.GetPath()
GSASprojectfile = G2IO.FileDlgFixExt(dlg,GSASprojectfile)
self.LastGPXdir = dlg.GetDirectory()
finally:
dlg.Destroy()
G2script = os.path.join(os.path.split(__file__)[0],'GSASII.py')
GSASIIpath.MacStartGSASII(G2script,GSASprojectfile)
def SetTitleByGPX(self):
'''Set the title for the two window frames
'''
projName = os.path.split(self.GSASprojectfile)[1]
if not projName: projName = "<unnamed project>"
if self.testSeqRefineMode():
s = u' (sequential refinement)'
else:
s = u''
self.SetTitle("GSAS-II project: "+projName + s)
self.plotFrame.SetTitle("GSAS-II plots: "+projName)
def OnFileSaveas(self, event):
'''Save the current project in response to the
File/Save as menu button
'''
if GSASIIpath.GetConfigValue('Starting_directory'):
pth = GSASIIpath.GetConfigValue('Starting_directory')
pth = os.path.expanduser(pth)
elif self.LastGPXdir:
pth = self.LastGPXdir
else:
pth = '.'
dlg = wx.FileDialog(self, 'Choose GSAS-II project file name', pth, self.newGPXfile,
'GSAS-II project file (*.gpx)|*.gpx',wx.FD_SAVE|wx.FD_OVERWRITE_PROMPT)
try:
if dlg.ShowModal() == wx.ID_OK: #TODO: what about Cancel?
self.GSASprojectfile = dlg.GetPath()
self.GSASprojectfile = G2IO.FileDlgFixExt(dlg,self.GSASprojectfile)
self.GPXtree.SetItemText(self.root,'Project: '+self.GSASprojectfile)
self.CheckNotebook()
G2IO.ProjFileSave(self)
self.SetTitleByGPX()
os.chdir(dlg.GetDirectory()) # to get Mac/Linux to change directory!
config = G2G.GetConfigValsDocs()
GSASIIpath.addPrevGPX(self.GSASprojectfile,config)
return True
else:
return False
finally:
dlg.Destroy()
def ExpandAll(self,event):
'''Expand all tree items or those of a single type
'''
txt = self.GetMenuBar().GetLabel(event.Id)
if txt == 'all':
self.ExpandingAll = True
try:
self.GPXtree.ExpandAll()
finally:
self.ExpandingAll = False
else:
self.ExpandingAll = True
try:
item, cookie = self.GPXtree.GetFirstChild(self.root)
while item:
name = self.GPXtree.GetItemText(item)
if name.startswith(txt+' '): self.GPXtree.Expand(item)
item, cookie = self.GPXtree.GetNextChild(self.root, cookie)
finally:
self.ExpandingAll = False
def MoveTreeItems(self,event):
'''Move tree items of a single type to the end of the tree
'''
txt = self.GetMenuBar().GetLabel(event.Id)
# make a list of items to copy
copyList = []
item, cookie = self.GPXtree.GetFirstChild(self.root)
while item:
if self.GPXtree.GetItemText(item).startswith(txt+' '):
copyList.append(item)
item, cookie = self.GPXtree.GetNextChild(self.root, cookie)
self.ExpandingAll = True
try:
for item in copyList:
name = self.GPXtree.GetItemText(item)
newId = self.GPXtree.AppendItem(self.root,name)
self.GPXtree.SetItemPyData(newId,self.GPXtree.GetItemPyData(item))
chld, chldcookie = self.GPXtree.GetFirstChild(item)
while chld:
chname = self.GPXtree.GetItemText(chld)
newCh = self.GPXtree.AppendItem(newId,chname)
self.GPXtree.SetItemPyData(newCh,self.GPXtree.GetItemPyData(chld))
chld, chldcookie = self.GPXtree.GetNextChild(item, chldcookie)
self.GPXtree.Delete(item)
finally:
self.ExpandingAll = False
SelectDataTreeItem(self,self.root)
def ExitMain(self, event):
        '''Called if exit is selected or the main window is closed;
        records the last position of the data & plot windows, saved to the config.py file.
NB: not called if console window closed
'''
if self.GPXtree.GetCount() > 1:
dlg = wx.MessageDialog(self,
'Do you want to save and exit?\n(Use No to exit without save or Cancel to prevent exiting)',
'Confirm exit/save?',
wx.YES|wx.NO|wx.CANCEL)
try:
result = dlg.ShowModal()
finally:
dlg.Destroy()
else:
result = wx.ID_NO
if result == wx.ID_NO:
pass
elif result == wx.ID_CANCEL:
return
else:
if not self.OnFileSave(event): return
FrameInfo = {'Main_Pos':tuple(self.GetPosition()),
'Main_Size':tuple(self.GetSize()),
'Plot_Pos':tuple(self.plotFrame.GetPosition()),
'Plot_Size':tuple(self.plotFrame.GetSize())}
GSASIIpath.SetConfigValue(FrameInfo)
# FramePos = {'Main_Pos':tuple(self.GetPosition()),'Plot_Pos':tuple(self.plotFrame.GetPosition())}
# GSASIIpath.SetConfigValue(FramePos)
config = G2G.GetConfigValsDocs()
G2G.SaveConfigVars(config)
if self.G2plotNB:
self.G2plotNB.Destroy()
if self.undofile:
os.remove(self.undofile)
sys.exit()
def OnExportPeakList(self,event):
pth = G2G.GetExportPath(self)
dlg = wx.FileDialog(self, 'Choose output peak list file name', pth, '',
'(*.*)|*.*',wx.FD_SAVE|wx.FD_OVERWRITE_PROMPT)
try:
if dlg.ShowModal() == wx.ID_OK:
self.peaklistfile = dlg.GetPath()
self.peaklistfile = G2IO.FileDlgFixExt(dlg,self.peaklistfile)
file = open(self.peaklistfile,'w')
item, cookie = self.GPXtree.GetFirstChild(self.root)
while item:
name = self.GPXtree.GetItemText(item)
if 'PWDR' in name:
item2, cookie2 = self.GPXtree.GetFirstChild(item)
wave = 0.0
while item2:
name2 = self.GPXtree.GetItemText(item2)
if name2 == 'Instrument Parameters':
Inst = self.GPXtree.GetItemPyData(item2)[0]
Type = Inst['Type'][0]
if 'T' not in Type:
wave = G2mth.getWave(Inst)
elif name2 == 'Peak List':
pkdata = self.GPXtree.GetItemPyData(item2)
peaks = pkdata['peaks']
sigDict = pkdata['sigDict']
item2, cookie2 = self.GPXtree.GetNextChild(item, cookie2)
file.write("#%s \n" % (name+' Peak List'))
if wave:
file.write('#wavelength = %10.6f\n'%(wave))
if 'T' in Type:
file.write('#%9s %10s %10s %12s %10s %10s %10s %10s %10s %10s\n'%('pos','dsp','esd','int','esd','alp','bet','sig','gam','FWHM'))
else:
file.write('#%9s %10s %10s %12s %10s %10s %10s %10s\n'%('pos','dsp','esd','int','esd','sig','gam','FWHM'))
for ip,peak in enumerate(peaks):
dsp = G2lat.Pos2dsp(Inst,peak[0])
if 'T' in Type: #TOF - more cols
esds = {'pos':0.,'int':0.,'alp':0.,'bet':0.,'sig':0.,'gam':0.}
for name in list(esds.keys()):
esds[name] = sigDict.get('%s%d'%(name,ip),0.)
sig = np.sqrt(peak[8])
gam = peak[10]
esddsp = abs(G2lat.Pos2dsp(Inst,peak[0]-esds['pos'])-G2lat.Pos2dsp(Inst,peak[0]+esds['pos']))/2.
FWHM = G2pwd.getgamFW(gam,sig) +(peak[4]+peak[6])*np.log(2.)/(peak[4]*peak[6]) #to get delta-TOF from Gam(peak)
file.write("%10.2f %10.5f %10.5f %12.2f%10.2f %10.3f %10.3f %10.3f %10.3f %10.3f\n" % \
(peak[0],dsp,esddsp,peak[2],esds['int'],peak[4],peak[6],peak[8],peak[10],FWHM))
else: #CW
#get esds from sigDict for each peak & put in output - esds for sig & gam from UVWXY?
esds = {'pos':0.,'int':0.,'sig':0.,'gam':0.}
for name in list(esds.keys()):
esds[name] = sigDict.get('%s%d'%(name,ip),0.)
sig = np.sqrt(peak[4]) #var -> sig
gam = peak[6]
esddsp = abs(G2lat.Pos2dsp(Inst,peak[0]-esds['pos'])-G2lat.Pos2dsp(Inst,peak[0]+esds['pos']))/2.
FWHM = G2pwd.getgamFW(gam,sig) #to get delta-2-theta in deg. from Gam(peak)
file.write("%10.4f %10.5f %10.5f %12.2f %10.2f %10.5f %10.5f %10.5f \n" % \
(peak[0],dsp,esddsp,peak[2],esds['int'],np.sqrt(max(0.0001,peak[4]))/100.,peak[6]/100.,FWHM/100.)) #convert to deg
item, cookie = self.GPXtree.GetNextChild(self.root, cookie)
file.close()
finally:
dlg.Destroy()
def OnExportHKL(self,event):
pth = G2G.GetExportPath(self)
dlg = wx.FileDialog(self, 'Choose output reflection list file name', pth, '',
'(*.*)|*.*',wx.FD_SAVE|wx.FD_OVERWRITE_PROMPT)
try:
if dlg.ShowModal() == wx.ID_OK:
self.peaklistfile = dlg.GetPath()
self.peaklistfile = G2IO.FileDlgFixExt(dlg,self.peaklistfile)
file = open(self.peaklistfile,'w')
item, cookie = self.GPXtree.GetFirstChild(self.root)
while item:
name = self.GPXtree.GetItemText(item)
if 'PWDR' in name:
item2, cookie2 = self.GPXtree.GetFirstChild(item)
while item2:
name2 = self.GPXtree.GetItemText(item2)
if name2 == 'Reflection Lists':
data = self.GPXtree.GetItemPyData(item2)
phases = data.keys()
for phase in phases:
peaks = data[phase]
I100 = peaks['RefList'].T[8]*np.array([refl[11] for refl in peaks['RefList']])
Imax = np.max(I100)
if Imax:
I100 *= 100.0/Imax
file.write("%s %s %s \n" % (name,phase,' Reflection List'))
if 'T' in peaks.get('Type','PXC'):
file.write('%s \n'%(' h k l m d-space TOF wid Fo**2 Fc**2 Icorr Prfo Trans ExtP I100'))
else:
file.write('%s \n'%(' h k l m d-space 2-theta wid Fo**2 Fc**2 Icorr Prfo Trans ExtP I100'))
for ipk,peak in enumerate(peaks['RefList']):
if 'T' in peaks.get('Type','PXC'):
sig = np.sqrt(peak[6])
gam = peak[7]
FWHM = G2pwd.getgamFW(gam,sig)
file.write(" %3d %3d %3d %3d%10.5f%10.2f%10.5f%10.3f%10.3f%10.3f%10.3f%10.3f%10.3f%10.3f\n" % \
(int(peak[0]),int(peak[1]),int(peak[2]),int(peak[3]),peak[4],peak[5],FWHM,peak[8],
peak[9],peak[11],peak[12],peak[13],peak[14],I100[ipk]))
else:
                                            sig = np.sqrt(peak[6])  # target API: numpy.sqrt
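# Hedged illustration (not GSAS-II code): for constant-wavelength (CW) data the
# d-spacing conversion done above by G2lat.Pos2dsp amounts to Bragg's law,
# d = lambda / (2 sin(theta)). The helper below is an assumption-labelled sketch of
# that relation, not the library routine itself.
import numpy as np

def pos2dsp_cw_sketch(two_theta_deg, wavelength):
    """Bragg's-law d-spacing for a CW peak position given in degrees 2-theta."""
    return wavelength / (2.0 * np.sin(np.radians(two_theta_deg) / 2.0))

# e.g. pos2dsp_cw_sketch(28.44, 1.5406) -> ~3.135 (Si 111 d-spacing in Angstrom, Cu K-alpha1)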
# -*- coding: utf-8 -*-
"""
Created on Fri Oct 29 19:24:42 2021
@author: 56153805
"""
import numpy, scipy.optimize
def fit_sin(tt, yy):
'''Fit sin to the input time sequence, and return fitting parameters "amp", "omega", "phase", "offset", "freq", "period" and "fitfunc"'''
tt = numpy.array(tt)
yy = numpy.array(yy)
ff = numpy.fft.fftfreq(len(tt), (tt[1]-tt[0])) # assume uniform spacing
    Fyy = abs(numpy.fft.fft(yy))  # target API: numpy.fft.fft
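    # Hedged sketch of how fit_sin might continue (assumed, not the original code):
    # use the dominant FFT frequency as an initial guess and refine amplitude,
    # angular frequency, phase and offset with scipy.optimize.curve_fit, returning
    # the dictionary promised in the docstring above.
    guess_freq = abs(ff[numpy.argmax(Fyy[1:]) + 1])   # skip the zero-frequency term
    guess_amp = numpy.std(yy) * 2.**0.5
    guess_offset = numpy.mean(yy)
    guess = numpy.array([guess_amp, 2.*numpy.pi*guess_freq, 0., guess_offset])

    def sinfunc(t, A, w, p, c):
        return A * numpy.sin(w*t + p) + c

    popt, pcov = scipy.optimize.curve_fit(sinfunc, tt, yy, p0=guess)
    A, w, p, c = popt
    return {"amp": A, "omega": w, "phase": p, "offset": c,
            "freq": w/(2.*numpy.pi), "period": 2.*numpy.pi/w,
            "fitfunc": lambda t: A * numpy.sin(w*t + p) + c}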
"""Validate the synapse input max rates found by check_max_synapse_rates
Bin the max rates into spike generator rates
Pair-by-pair check that synapses indeed satisfy the max rates predicted
"""
import os
from time import sleep
import argparse
import numpy as np
import matplotlib.pyplot as plt
from pystorm.hal import HAL
from pystorm.hal.neuromorph import graph
from pystorm.PyDriver import bddriver as bd
from utils.exp import clear_overflows, compute_spike_gen_rates
from utils.file_io import load_txt_data
np.set_printoptions(precision=2)
CORE = 0
NRN_N = 4096
SYN_N = 1024
RUN_TIME = 2. # time to sample
INTER_RUN_TIME = 0.1 # time between samples
RUN_TIME_NS = int(RUN_TIME*1E9)
INTER_RUN_TIME_NS = int(INTER_RUN_TIME*1E9)
SPIKE_GEN_TIME_UNIT_NS = 10000 # time unit of fpga spike generator
MAX_SPIKE_GEN = 256 # depends on SPIKE_GEN_TIME_UNIT_NS
MIN_RATE = 5000 # minimum rate to test
MAX_RATE = 100000 # maximum rate to test
SPIKE_GEN_RATES = compute_spike_gen_rates(MIN_RATE, MAX_RATE, SPIKE_GEN_TIME_UNIT_NS)
FIFO_BUFFER_SIZE = 512
CAUTION_THRESHOLD = 2 * FIFO_BUFFER_SIZE
GROUP_SIZES = [4, 2] # sizes of groups to test
SYN_PD_PU = 1024 # analog bias setting
DATA_DIR = "./data/" + os.path.basename(__file__)[:-3] + "/"
if not os.path.isdir(DATA_DIR):
os.makedirs(DATA_DIR, exist_ok=True)
MAX_RATE_MARGIN = 0.00 # margin for binning max rates
def parse_args():
"""Parse command line arguments"""
parser = argparse.ArgumentParser(description='Characterize the synapse max input firing rates')
parser.add_argument("-r", action="store_true", dest="use_saved_data", help='reuse cached data')
parser.add_argument("--max_rates", type=str, dest="max_rates",
help='Name of file containing max synapse rates')
args = parser.parse_args()
return args
def syn_to_soma_addr(syn_n):
"""Convert synapse flat address to soma flat address"""
soma_n = syn_n * 4
sqrt_syn_n = int(np.round(np.sqrt(syn_n)))
sqrt_soma_n = int(np.round(np.sqrt(soma_n)))
soma_syn_addrs = np.zeros(syn_n, dtype=int)
for syn_idx in range(syn_n):
syn_x = syn_idx % sqrt_syn_n
syn_y = syn_idx // sqrt_syn_n
soma_x = syn_x * 2
soma_y = syn_y * 2
soma_syn_addrs[syn_idx] = soma_y*sqrt_soma_n + soma_x
return soma_syn_addrs
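# Illustration (not part of the original script): for a 2x2 synapse grid the mapping
# above places each synapse on the even-(x, y) soma of the corresponding 4x4 grid,
# i.e. syn_to_soma_addr(4) -> array([0, 2, 8, 10]).
def _check_syn_to_soma_addr():
    assert list(syn_to_soma_addr(4)) == [0, 2, 8, 10]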
def set_analog():
"""Sets the synapse config bits and the bias currents"""
HAL.driver.SetDACCount(CORE, bd.bdpars.BDHornEP.DAC_SYN_PD, SYN_PD_PU)
HAL.driver.SetDACCount(CORE, bd.bdpars.BDHornEP.DAC_SYN_PU, SYN_PD_PU)
for n_idx in range(NRN_N):
HAL.driver.SetSomaEnableStatus(CORE, n_idx, bd.bdpars.SomaStatusId.DISABLED)
for s_idx in range(SYN_N):
HAL.driver.SetSynapseEnableStatus(CORE, s_idx, bd.bdpars.SynapseStatusId.ENABLED)
HAL.flush()
def build_net_groups(u_rates, binned_rates, syn_idxs, group_size):
"""Build a network for testing groups of synapses
Parameters
----------
u_rates: array
lists the rate bin values
    binned_rates: array of numbers
        array of synapse max rates binned to the rates in u_rates
    syn_idxs: array of ints
        indices of synapses corresponding to binned_rates entries
group_size: int
size of groups to bundle synapses into
"""
dim_rate_idxs = {}
encoder_dim = 0
for rate in u_rates:
idxs = syn_idxs[binned_rates == rate]
groups = len(idxs) // group_size
idxs = idxs[:groups*group_size] # clip off remainder
for g_idx in range(groups):
dim_rate_idxs[encoder_dim] = (rate, idxs[g_idx*group_size:(g_idx+1)*group_size])
encoder_dim += 1
    tap_matrix_syn = np.zeros((SYN_N, encoder_dim))  # target API: numpy.zeros
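    # Hedged sketch of how build_net_groups might continue (assumed, not the
    # original code): give every synapse in a group a unit tap in its own encoder
    # dimension, so each spike-generator dimension drives exactly one group.
    for dim, (rate, idxs) in dim_rate_idxs.items():
        tap_matrix_syn[idxs, dim] = 1
    return dim_rate_idxs, tap_matrix_syn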
import pandas_datareader.data as web
import pandas as pd
import time
from functools import lru_cache
import numpy as np
@lru_cache(maxsize=500)
def get_prices(ticker,n_obs,interval, max_attempts=10):
print('Getting '+ticker)
time.sleep(5)
success = False
attempts = 0
while not success:
try:
data = web.get_data_yahoo(ticker, interval=interval)[-n_obs - 1:]['Close'].values
success = True
except Exception as e:
print(str(e))
print('backing off')
time.sleep(10)
attempts += 1
if attempts>=max_attempts:
raise ValueError
return data
def live_equity_returns(tickers, n_obs=60, interval='m', k=1):
df = pd.DataFrame()
for ticker in tickers:
try:
data = get_prices(ticker=ticker, n_obs=n_obs, interval=interval)
assert len(data)==n_obs+1
            values = list(np.diff(np.log(data)))  # target API: numpy.log
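            # Hedged sketch of how the loop might continue (assumed, not the
            # original code): keep the log returns for this ticker and return the
            # assembled DataFrame once every ticker has been processed.
            df[ticker] = values
        except Exception as e:
            print('skipping ' + ticker + ': ' + str(e))
    return df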
import json
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
P_list = np.load('../processed_data/P_list.npy', allow_pickle=True)
arr_outcomes = np.load('../processed_data/arr_outcomes.npy', allow_pickle=True)
ts_params = np.load('../processed_data/ts_params.npy', allow_pickle=True)  # target API: numpy.load
# pytest test_ismember.py
import numpy as np
import pandas as pd
from ismember import ismember
def test_ismember():
# Test 1
a_vec = [1,2,3,None]
b_vec = [4,1,2]
[I,idx] = ismember(a_vec,b_vec)
assert np.all(np.array(a_vec)[I]==np.array(b_vec)[idx])
# Test 2
a_vec = pd.DataFrame(['aap','None','mies','aap','boom','mies',None,'mies','mies','pies',None])
b_vec = pd.DataFrame([None,'mies','mies','pies',None])
[I,idx] = ismember(a_vec,b_vec)
assert np.all(a_vec.values[I] == b_vec.values[idx].flatten())
# Test 3
a_vec = np.array([1,2,3,None])
b_vec = np.array([1,2,4])
[I,idx] = ismember(a_vec,b_vec)
assert np.all(a_vec[I]==b_vec[idx])
# Test 4
a_vec = np.array(['boom','aap','mies','aap'])
b_vec = np.array(['aap','boom','aap'])
[I,idx] = ismember(a_vec,b_vec)
assert np.all(a_vec[I]==b_vec[idx])
# Test 5: elements matrices
    a_vec = np.random.randint(0,10,(5,8))  # target API: numpy.random.randint
import cv2
from collections import deque
import numpy as np
Lower_black = np.array([20, 100, 100])
Upper_black = np.array([30, 255, 255])
cap = cv2.VideoCapture(0)
pts = deque(maxlen=512)
blackboard = np.zeros((480, 640, 3), dtype=np.uint8)  # target API: numpy.zeros
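# Hedged sketch (assumed, not the original script): the usual continuation of this
# kind of colour-tracking demo is a capture loop that masks the HSV range defined
# above, follows the largest contour's centroid through `pts`, and draws the trail
# onto `blackboard`.
while True:
    ret, frame = cap.read()
    if not ret:
        break
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    mask = cv2.inRange(hsv, Lower_black, Upper_black)
    cnts = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[-2]
    if cnts:
        c = max(cnts, key=cv2.contourArea)
        M = cv2.moments(c)
        if M["m00"] > 0:
            pts.appendleft((int(M["m10"] / M["m00"]), int(M["m01"] / M["m00"])))
    for i in range(1, len(pts)):
        cv2.line(blackboard, pts[i - 1], pts[i], (255, 255, 255), 4)
    cv2.imshow("Frame", frame)
    cv2.imshow("Blackboard", blackboard)
    if cv2.waitKey(1) & 0xFF == ord("q"):
        break
cap.release()
cv2.destroyAllWindows()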
import numpy as np
from sfepy.base.goptions import goptions
from sfepy.discrete.fem import Field
try:
from sfepy.discrete.fem import FEDomain as Domain
except ImportError:
from sfepy.discrete.fem import Domain
from sfepy.discrete import (FieldVariable, Material, Integral, Function,
Equation, Equations, Problem)
from sfepy.terms import Term
from sfepy.discrete.conditions import Conditions, EssentialBC, PeriodicBC
from sfepy.solvers.ls import ScipyDirect
from sfepy.solvers.nls import Newton
import sfepy.discrete.fem.periodic as per
from sfepy.discrete import Functions
from sfepy.mesh.mesh_generators import gen_block_mesh
from sfepy.mechanics.matcoefs import ElasticConstants
from sfepy.base.base import output
from sfepy.discrete.conditions import LinearCombinationBC
goptions['verbose'] = False
output.set_output(quiet=True)
class ElasticFESimulation(object):
"""
Use SfePy to solve a linear strain problem in 2D with a varying
microstructure on a rectangular grid. The rectangle (cube) is held
at the negative edge (plane) and displaced by 1 on the positive x
edge (plane). Periodic boundary conditions are applied to the
other boundaries.
The microstructure is of shape (n_samples, n_x, n_y) or (n_samples, n_x,
n_y, n_z).
>>> X = np.zeros((1, 3, 3), dtype=int)
>>> X[0, :, 1] = 1
>>> sim = ElasticFESimulation(elastic_modulus=(1.0, 10.0),
... poissons_ratio=(0., 0.))
>>> sim.run(X)
>>> y = sim.strain
y is the strain with components as follows
>>> exx = y[..., 0]
>>> eyy = y[..., 1]
>>> exy = y[..., 2]
In this example, the strain is only in the x-direction and has a
uniform value of 1 since the displacement is always 1 and the size
of the domain is 1.
>>> assert np.allclose(exx, 1)
>>> assert np.allclose(eyy, 0)
>>> assert np.allclose(exy, 0)
The following example is for a system with contrast. It tests the
left/right periodic offset and the top/bottom periodicity.
>>> X = np.array([[[1, 0, 0, 1],
... [0, 1, 1, 1],
... [0, 0, 1, 1],
... [1, 0, 0, 1]]])
>>> n_samples, N, N = X.shape
>>> macro_strain = 0.1
>>> sim = ElasticFESimulation((10.0,1.0), (0.3,0.3), macro_strain=0.1)
>>> sim.run(X)
>>> u = sim.displacement[0]
Check that the offset for the left/right planes is `N *
macro_strain`.
>>> assert np.allclose(u[-1,:,0] - u[0,:,0], N * macro_strain)
Check that the left/right side planes are periodic in y.
>>> assert np.allclose(u[0,:,1], u[-1,:,1])
Check that the top/bottom planes are periodic in both x and y.
>>> assert np.allclose(u[:,0], u[:,-1])
"""
def __init__(self, elastic_modulus, poissons_ratio, macro_strain=1.,):
"""Instantiate a ElasticFESimulation.
Args:
elastic_modulus (1D array): array of elastic moduli for phases
            poissons_ratio (1D array): array of Poisson's ratios for phases
macro_strain (float, optional): Scalar for macroscopic strain
"""
self.macro_strain = macro_strain
self.dx = 1.0
self.elastic_modulus = elastic_modulus
self.poissons_ratio = poissons_ratio
if len(elastic_modulus) != len(poissons_ratio):
raise RuntimeError(
'elastic_modulus and poissons_ratio must be the same length')
def _convert_properties(self, dim):
"""
Convert from elastic modulus and Poisson's ratio to the Lame
parameter and shear modulus
>>> model = ElasticFESimulation(elastic_modulus=(1., 2.),
... poissons_ratio=(1., 1.))
>>> result = model._convert_properties(2)
>>> answer = np.array([[-0.5, 1. / 6.], [-1., 1. / 3.]])
>>> assert(np.allclose(result, answer))
Args:
dim (int): Scalar value for the dimension of the microstructure.
Returns:
array with the Lame parameter and the shear modulus for each phase.
"""
def _convert(E, nu):
ec = ElasticConstants(young=E, poisson=nu)
mu = dim / 3. * ec.mu
lame = ec.lam
return lame, mu
return np.array([_convert(E, nu) for E,
nu in zip(self.elastic_modulus, self.poissons_ratio)])
def _get_property_array(self, X):
"""
Generate property array with elastic_modulus and poissons_ratio for
each phase.
Test case for 2D with 3 phases.
>>> X2D = np.array([[[0, 1, 2, 1],
... [2, 1, 0, 0],
... [1, 0, 2, 2]]])
>>> model2D = ElasticFESimulation(elastic_modulus=(1., 2., 3.),
... poissons_ratio=(1., 1., 1.))
>>> lame = lame0, lame1, lame2 = -0.5, -1., -1.5
>>> mu = mu0, mu1, mu2 = 1. / 6, 1. / 3, 1. / 2
        >>> lm = list(zip(lame, mu))
>>> X2D_property = np.array([[lm[0], lm[1], lm[2], lm[1]],
... [lm[2], lm[1], lm[0], lm[0]],
... [lm[1], lm[0], lm[2], lm[2]]])
>>> assert(np.allclose(model2D._get_property_array(X2D), X2D_property))
Test case for 3D with 2 phases.
>>> model3D = ElasticFESimulation(elastic_modulus=(1., 2.),
... poissons_ratio=(1., 1.))
>>> X3D = np.array([[[0, 1],
... [0, 0]],
... [[1, 1],
... [0, 1]]])
>>> X3D_property = np.array([[[lm[0], lm[1]],
... [lm[0], lm[0]]],
... [[lm[1], lm[1]],
... [lm[0], lm[1]]]])
>>> assert(np.allclose(model3D._get_property_array(X3D), X3D_property))
"""
dim = len(X.shape) - 1
n_phases = len(self.elastic_modulus)
if not issubclass(X.dtype.type, np.integer):
raise TypeError("X must be an integer array")
if np.max(X) >= n_phases or np.min(X) < 0:
raise RuntimeError(
"X must be between 0 and {N}.".format(N=n_phases - 1))
if not (2 <= dim <= 3):
raise RuntimeError("the shape of X is incorrect")
return self._convert_properties(dim)[X]
def run(self, X):
"""
Run the simulation.
Args:
X (ND array): microstructure with shape (n_samples, n_x, ...)
"""
X_property = self._get_property_array(X)
strain = []
displacement = []
stress = []
for x in X_property:
strain_, displacement_, stress_ = self._solve(x)
strain.append(strain_)
displacement.append(displacement_)
stress.append(stress_)
self.strain = np.array(strain)
self.displacement = np.array(displacement)
self.stress = np.array(stress)
@property
def response(self):
return self.strain[..., 0]
def _get_material(self, property_array, domain):
"""
Creates an SfePy material from the material property fields for the
quadrature points.
Args:
property_array: array of the properties with shape (n_x, n_y, n_z, 2)
Returns:
an SfePy material
"""
min_xyz = domain.get_mesh_bounding_box()[0]
dims = domain.get_mesh_bounding_box().shape[1]
def _material_func_(ts, coors, mode=None, **kwargs):
if mode == 'qp':
ijk_out = np.empty_like(coors, dtype=int)
ijk = np.floor((coors - min_xyz[None]) / self.dx,
ijk_out, casting="unsafe")
ijk_tuple = tuple(ijk.swapaxes(0, 1))
property_array_qp = property_array[ijk_tuple]
lam = property_array_qp[..., 0]
mu = property_array_qp[..., 1]
lam = np.ascontiguousarray(lam.reshape((lam.shape[0], 1, 1)))
mu = np.ascontiguousarray(mu.reshape((mu.shape[0], 1, 1)))
from sfepy.mechanics.matcoefs import stiffness_from_lame
stiffness = stiffness_from_lame(dims, lam=lam, mu=mu)
return {'lam': lam, 'mu': mu, 'D': stiffness}
else:
return
material_func = Function('material_func', _material_func_)
return Material('m', function=material_func)
def _subdomain_func(self, x=(), y=(), z=(), max_x=None):
"""
Creates a function to mask subdomains in Sfepy.
Args:
x: tuple of lines or points to be masked in the x-plane
y: tuple of lines or points to be masked in the y-plane
z: tuple of lines or points to be masked in the z-plane
Returns:
array of masked location indices
"""
eps = 1e-3 * self.dx
def _func(coords, domain=None):
flag_x = len(x) == 0
flag_y = len(y) == 0
flag_z = len(z) == 0
for x_ in x:
flag = (coords[:, 0] < (x_ + eps)) & \
(coords[:, 0] > (x_ - eps))
flag_x = flag_x | flag
for y_ in y:
flag = (coords[:, 1] < (y_ + eps)) & \
(coords[:, 1] > (y_ - eps))
flag_y = flag_y | flag
for z_ in z:
flag = (coords[:, 2] < (z_ + eps)) & \
(coords[:, 2] > (z_ - eps))
flag_z = flag_z | flag
flag = flag_x & flag_y & flag_z
if max_x is not None:
flag = flag & (coords[:, 0] < (max_x - eps))
            return np.where(flag)[0]  # target API: numpy.where ([0] gives the index array described in the docstring)
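# Hedged illustration (not part of the class above): the plain isotropic-elasticity
# relations that sfepy's ElasticConstants evaluates inside _convert_properties,
# with the dim/3 shear-modulus scaling used there. Reproduces that method's doctest.
def convert_properties_sketch(elastic_modulus, poissons_ratio, dim):
    E = np.asarray(elastic_modulus, dtype=float)
    nu = np.asarray(poissons_ratio, dtype=float)
    lam = E * nu / ((1.0 + nu) * (1.0 - 2.0 * nu))   # Lame's first parameter
    mu = dim / 3.0 * E / (2.0 * (1.0 + nu))          # scaled shear modulus
    return np.stack([lam, mu], axis=-1)

# convert_properties_sketch((1., 2.), (1., 1.), dim=2)
# -> array([[-0.5, 0.1667], [-1., 0.3333]]), matching _convert_properties' doctest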
#!/usr/bin/env python
"""
defoc.py -- evaluate defocalization probabilities for various diffusion models
"""
import os
import numpy as np
import pandas as pd
# Caching
from functools import lru_cache
# Cubic spline interpolation, for FBM defocalization function
from scipy import interpolate
# Names of likelihood function
from .constants import RBME, GAMMA, RBME_MARGINAL, FBME
# saspt module directory
PACKAGE_DIR = os.path.split(os.path.abspath(__file__))[0]
# Directory with spline coefficients for FBM defocalization
SPLINE_DIR = os.path.join(PACKAGE_DIR, "splines")
###############
## UTILITIES ##
###############
@lru_cache(maxsize=1)
def load_fbm_defoc_spline(dz=0.7):
"""
Given a focal depth, get a spline interpolator that enables calculation
of the fraction of FBMs that defocalize at various frame intervals.
args
----
dz : float, the focal depth in um
returns
-------
    list of 5-tuples, the *tck* arguments expected by scipy.interpolate's spline
    evaluators -- specifically scipy.interpolate.bisplev -- one per frame interval
"""
    # Available focal depths
avail_dz = np.array([0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0,
1.1, 1.2, 1.3, 1.4, 1.5, 1.6])
# Get the closest available focal depth
m = np.argmin(np.abs(avail_dz - dz))
sel_dz = avail_dz[m]
# Path to this file
path = os.path.join(SPLINE_DIR, "fbm_defoc_splines_dz-%.1f.csv" % sel_dz)
# Load the spline coefficients
tcks = load_spline_coefs_multiple_frame_interval(path)
return tcks
def load_spline_coefs_multiple_frame_interval(path):
"""
Load multiple sets of bivariate spline coefficients from a file.
These are in the format required by scipy.interpolate for
evaluation of bivariate splines.
The individual sets of spline coefficients are ;-delimited, while
the different parts of the coefficient 5-tuple are newline-delimited and
the individual numbers are ,-delimited.
args
----
path : str, path to a file of the type written by
save_spline_coefs_multiple()
returns
-------
list of 5-tuple, the bivariate spline coefficients for each
frame interval
"""
with open(path, "r") as f:
S = f.read().split(";")
S = [j.split("\n") for j in S]
result = []
for lines in S:
x = np.asarray([float(j) for j in lines[0].split(",")])
y = np.asarray([float(j) for j in lines[1].split(",")])
coefs = np.array([float(j) for j in lines[2].split(",")])
kx = int(lines[3])
ky = int(lines[4])
result.append((x, y, coefs, kx, ky))
return result
def eval_spline(x, y, tck):
"""
Evaluate a bivariate spline on the Cartesian product of a set of X points
and a set of Y points.
args
----
x : 1D ndarray, the array of unique x points
y : 1D ndarray, the array of unique y points
tck : 5-tuple, bivariate spline coefficients of the type
read by *load_spline_coefs*
returns
-------
2D ndarray of shape (y.shape[0], x.shape[0]), the evaluated
bivariate spline at each combination of the input points
"""
return interpolate.bisplev(x, y, tck).T
def f_remain_rbm(D, n_frames, frame_interval, dz):
"""
Calculate the fraction of regular Brownian particles that
remain in a microscope's depth of field after some number
of frames.
args
----
D : float, diffusion coefficient
in um^2 s^-1
n_frames : int, the number of frames
frame_interval : float, seconds
dz : float, depth of field in um
returns
-------
1D ndarray of shape (n_frames,), the probability
to remain at each frame interval
"""
if (dz is np.inf) or (dz is None) or (D <= 0.0):
        return np.ones(n_frames, dtype=np.float64)  # target API: numpy.ones
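# Hedged illustration (not the library's implementation): a brute-force Monte Carlo
# estimate of the same kind of quantity, useful as a sanity check against
# f_remain_rbm. Particles start uniformly inside the focal slab and are counted as
# "remaining" only while they are inside the slab at every observed frame; whether
# that boundary treatment matches the analytical routine exactly is an assumption.
def f_remain_rbm_mc(D, n_frames, frame_interval, dz, n_particles=100000, seed=0):
    rng = np.random.default_rng(seed)
    sigma = np.sqrt(2.0 * D * frame_interval)      # axial (1D) RMS step per frame
    z = rng.uniform(-dz / 2.0, dz / 2.0, size=n_particles)
    inside = np.ones(n_particles, dtype=bool)
    frac = np.zeros(n_frames, dtype=np.float64)
    for t in range(n_frames):
        z = z + rng.normal(0.0, sigma, size=n_particles)
        inside &= np.abs(z) <= dz / 2.0
        frac[t] = inside.mean()
    return frac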
#! /usr/bin/env python
import os
import logging
import re
import pathlib
from datetime import date, datetime
from collections import namedtuple
import time as timer
import scipy.io as spio
import numpy as np
import pandas as pd
import decimal
import warnings
import datajoint as dj
from pybpodgui_api.models.project import Project as BPodProject
from . import util, InvalidBehaviorTrialError
from pipeline import lab, experiment
from pipeline import get_schema_name, dict_to_hash
schema = dj.schema(get_schema_name('ingest_behavior'))
warnings.simplefilter(action='ignore', category=FutureWarning)
log = logging.getLogger(__name__)
# ================ PHOTOSTIM PROTOCOL ===============
photostim_duration = 0.5 # (s)
skull_ref = 'Bregma'
photostims = {
4: {'photo_stim': 4, 'photostim_device': 'OBIS470', 'duration': photostim_duration,
'locations': [{'skull_reference': skull_ref, 'brain_area': 'ALM',
'ap_location': 2500, 'ml_location': -1500, 'depth': 0,
'theta': 15, 'phi': 15}]},
5: {'photo_stim': 5, 'photostim_device': 'OBIS470', 'duration': photostim_duration,
'locations': [{'skull_reference': skull_ref, 'brain_area': 'ALM',
'ap_location': 2500, 'ml_location': 1500, 'depth': 0,
'theta': 15, 'phi': 15}]},
6: {'photo_stim': 6, 'photostim_device': 'OBIS470', 'duration': photostim_duration,
'locations': [{'skull_reference': skull_ref, 'brain_area': 'ALM',
'ap_location': 2500, 'ml_location': -1500, 'depth': 0,
'theta': 15, 'phi': 15},
{'skull_reference': skull_ref, 'brain_area': 'ALM',
'ap_location': 2500, 'ml_location': 1500, 'depth': 0,
'theta': 15, 'phi': 15}
]}}
def get_behavior_paths():
'''
retrieve behavior rig paths from dj.config
config should be in dj.config of the format:
dj.config = {
...,
'custom': {
'behavior_data_paths':
[
["RRig", "/path/string", 0],
["RRig2", "/path2/string2", 1]
],
}
...
}
    where 'behavior_data_paths' is a list of possible paths for behavior data, each in the format:
[rig name, rig full path, search order]
'''
paths = dj.config.get('custom', {}).get('behavior_data_paths', None)
if paths is None:
raise ValueError("Missing 'behavior_data_paths' in dj.config['custom']")
return sorted(paths, key=lambda x: x[-1])
def get_session_user():
'''
Determine desired 'session user' for a session.
- 1st, try dj.config['custom']['session.user']
- 2nd, try dj.config['database.user']
- else, use 'unknown'
TODO: multi-user / bulk ingest support
'''
session_user = dj.config.get('custom', {}).get('session.user', None)
session_user = (dj.config.get('database.user')
if not session_user else session_user)
if len(lab.Person() & {'username': session_user}):
return session_user
else:
return 'unknown'
@schema
class BehaviorIngest(dj.Imported):
definition = """
-> experiment.Session
"""
class BehaviorFile(dj.Part):
''' files in rig-specific storage '''
definition = """
-> master
behavior_file: varchar(255) # behavior file name
"""
class CorrectedTrialEvents(dj.Part):
''' TrialEvents containing auto-corrected data '''
definition = """
-> BehaviorIngest
-> experiment.TrialEvent
"""
@property
def key_source(self):
# 2 letters, anything, _, anything, 8 digits, _, 6 digits, .mat
# where:
# (2 letters, anything): water restriction
# (anything): task name
# (8 digits): date YYYYMMDD
# (6 digits): time HHMMSS
rexp = '^[a-zA-Z]{2}.*_.*_[0-9]{8}_[0-9]{6}.mat$'
# water_restriction_number -> subject
h2os = {k: v for k, v in zip(*lab.WaterRestriction().fetch(
'water_restriction_number', 'subject_id'))}
def buildrec(rig, rigpath, root, f):
if not re.match(rexp, f):
log.debug("{f} skipped - didn't match rexp".format(f=f))
return
log.debug('found file {f}'.format(f=f))
fullpath = pathlib.Path(root, f)
subpath = fullpath.relative_to(rigpath)
fsplit = subpath.stem.split('_')
h2o = fsplit[0]
ymd = fsplit[-2:-1][0]
if h2o not in h2os:
log.warning('{f} skipped - no animal for {h2o}'.format(
f=f, h2o=h2o))
return
animal = h2os[h2o]
log.debug('animal is {animal}'.format(animal=animal))
return {
'subject_id': animal,
'session_date': date(
int(ymd[0:4]), int(ymd[4:6]), int(ymd[6:8])),
'rig': rig,
'rig_data_path': rigpath.as_posix(),
'subpath': subpath.as_posix()
}
recs = []
found = set()
known = set(BehaviorIngest.BehaviorFile().fetch('behavior_file'))
rigs = get_behavior_paths()
for (rig, rigpath, _) in rigs:
rigpath = pathlib.Path(rigpath)
log.info('RigDataFile.make(): traversing {}'.format(rigpath))
for root, dirs, files in os.walk(rigpath):
log.debug('RigDataFile.make(): entering {}'.format(root))
for f in files:
log.debug('RigDataFile.make(): visiting {}'.format(f))
r = buildrec(rig, rigpath, root, f)
if not r:
continue
if f in set.union(known, found):
log.info('skipping already ingested file {}'.format(
r['subpath']))
else:
found.add(f) # block duplicate path conf
recs.append(r)
return recs
def populate(self, *args, **kwargs):
# 'populate' which won't require upstream tables
# 'reserve_jobs' not parallel, overloaded to mean "don't exit on error"
for k in self.key_source:
try:
with dj.conn().transaction:
self.make(k)
except Exception as e:
log.warning('session key {} error: {}'.format(k, repr(e)))
if not kwargs.get('reserve_jobs', False):
raise
def make(self, key):
log.info('BehaviorIngest.make(): key: {key}'.format(key=key))
# File paths conform to the pattern:
# dl7/TW_autoTrain/Session Data/dl7_TW_autoTrain_20180104_132813.mat
# which is, more generally:
# {h2o}/{training_protocol}/Session Data/{h2o}_{training protocol}_{YYYYMMDD}_{HHMMSS}.mat
path = pathlib.Path(key['rig_data_path'], key['subpath'])
# distinguishing "delay-response" task or "multi-target-licking" task
task_type = detect_task_type(path)
# skip too small behavior file (only for 'delay-response' task)
if task_type == 'delay-response' and os.stat(path).st_size / 1024 < 1000:
log.info('skipping file {} - too small'.format(path))
return
log.debug('loading file {}'.format(path))
# Read from behavior file and parse all trial info (the heavy lifting here)
skey, rows = BehaviorIngest._load(key, path, task_type)
# Session Insertion
log.info('BehaviorIngest.make(): adding session record')
experiment.Session.insert1(skey)
# Behavior Insertion
log.info('BehaviorIngest.make(): bulk insert phase')
log.info('BehaviorIngest.make(): saving ingest {d}'.format(d=skey))
self.insert1(skey, ignore_extra_fields=True, allow_direct_insert=True)
log.info('BehaviorIngest.make(): ... experiment.Session.Trial')
experiment.SessionTrial.insert(
rows['trial'], ignore_extra_fields=True, allow_direct_insert=True)
log.info('BehaviorIngest.make(): ... experiment.BehaviorTrial')
experiment.BehaviorTrial.insert(
rows['behavior_trial'], ignore_extra_fields=True,
allow_direct_insert=True)
log.info('BehaviorIngest.make(): ... experiment.TrialNote')
experiment.TrialNote.insert(
rows['trial_note'], ignore_extra_fields=True,
allow_direct_insert=True)
log.info('BehaviorIngest.make(): ... experiment.TrialEvent')
experiment.TrialEvent.insert(
rows['trial_event'], ignore_extra_fields=True,
allow_direct_insert=True, skip_duplicates=True)
log.info('BehaviorIngest.make(): ... experiment.ActionEvent')
experiment.ActionEvent.insert(
rows['action_event'], ignore_extra_fields=True,
allow_direct_insert=True)
# Photostim Insertion
photostim_ids = np.unique(
[r['photo_stim'] for r in rows['photostim_trial_event']])
unknown_photostims = np.setdiff1d(
photostim_ids, list(photostims.keys()))
if unknown_photostims:
raise ValueError(
'Unknown photostim protocol: {}'.format(unknown_photostims))
if photostim_ids.size > 0:
log.info('BehaviorIngest.make(): ... experiment.Photostim')
for stim in photostim_ids:
experiment.Photostim.insert1(
dict(skey, **photostims[stim]), ignore_extra_fields=True)
experiment.Photostim.PhotostimLocation.insert(
(dict(skey, **loc,
photo_stim=photostims[stim]['photo_stim'])
for loc in photostims[stim]['locations']),
ignore_extra_fields=True)
log.info('BehaviorIngest.make(): ... experiment.PhotostimTrial')
experiment.PhotostimTrial.insert(rows['photostim_trial'],
ignore_extra_fields=True,
allow_direct_insert=True)
log.info('BehaviorIngest.make(): ... experiment.PhotostimTrialEvent')
experiment.PhotostimEvent.insert(rows['photostim_trial_event'],
ignore_extra_fields=True,
allow_direct_insert=True)
if task_type == 'multi-target-licking':
# Multi-target-licking specifics
log.info('BehaviorIngest.make(): ... experiment.MultiTargetLickingSessionBlock')
experiment.MultiTargetLickingSessionBlock.insert(
rows['session_block'],
ignore_extra_fields=True,
allow_direct_insert=True)
log.info('BehaviorIngest.make(): ... experiment.MultiTargetLickingSessionBlock.WaterPort')
experiment.MultiTargetLickingSessionBlock.WaterPort.insert(
rows['session_block_waterport'],
ignore_extra_fields=True,
allow_direct_insert=True)
log.info('BehaviorIngest.make(): ... experiment.MultiTargetLickingSessionBlock.BlockTrial')
experiment.MultiTargetLickingSessionBlock.BlockTrial.insert(
rows['session_block_trial'],
ignore_extra_fields=True,
allow_direct_insert=True)
# Behavior Ingest Insertion
log.info('BehaviorIngest.make(): ... BehaviorIngest.BehaviorFile')
BehaviorIngest.BehaviorFile.insert1(
dict(skey, behavior_file=os.path.basename(key['subpath'])),
ignore_extra_fields=True, allow_direct_insert=True)
@classmethod
def _load(cls, key, path, task_type):
"""
Method to load the behavior file (.mat), parse trial info and prepare for insertion
(no table insertion is done here)
:param key: session_key
:param path: (str) filepath of the behavior file (.mat)
:param task_type: (str) "delay-response" or "multi-target-licking"
:return: skey, rows
+ skey: session_key
+ rows: a dictionary containing all per-trial information to be inserted
"""
path = pathlib.Path(path)
h2o = (lab.WaterRestriction() & {'subject_id': key['subject_id']}).fetch1(
'water_restriction_number')
ymd = key['session_date']
datestr = ymd.strftime('%Y%m%d')
log.info('h2o: {h2o}, date: {d}'.format(h2o=h2o, d=datestr))
# session key
skey = {}
skey['subject_id'] = key['subject_id']
skey['session_date'] = ymd
skey['username'] = get_session_user()
skey['rig'] = key['rig']
skey['h2o'] = h2o
# synthesizing session ID
log.debug('synthesizing session ID')
session = (dj.U().aggr(experiment.Session()
& {'subject_id': skey['subject_id']},
n='max(session)').fetch1('n') or 0) + 1
log.info('generated session id: {session}'.format(session=session))
skey['session'] = session
if task_type == 'multi-target-licking':
rows = load_multi_target_licking_matfile(skey, path)
elif task_type == 'delay-response':
rows = load_delay_response_matfile(skey, path)
else:
raise ValueError('Unknown task-type: {}'.format(task_type))
return skey, rows
@schema
class BehaviorBpodIngest(dj.Imported):
definition = """
-> experiment.Session
"""
class BehaviorFile(dj.Part):
''' files in rig-specific storage '''
definition = """
-> master
behavior_file: varchar(255) # behavior file name
"""
water_port_name_mapper = {'left': 'L', 'right': 'R', 'middle': 'M'}
@staticmethod
def get_bpod_projects():
projectdirs = dj.config.get('custom', {}).get('behavior_bpod', []).get('project_paths')
# construct a list of BPod Projects
projects = []
for projectdir in projectdirs:
projects.append(BPodProject())
projects[-1].load(projectdir)
return projects
@property
def key_source(self):
key_source = []
IDs = {k: v for k, v in zip(*lab.WaterRestriction().fetch(
'water_restriction_number', 'subject_id'))}
for subject_now, subject_id_now in IDs.items():
meta_dir = dj.config.get('custom', {}).get('behavior_bpod', []).get('meta_dir')
subject_csv = pathlib.Path(meta_dir) / '{}.csv'.format(subject_now)
if subject_csv.exists():
df_wr = pd.read_csv(subject_csv)
else:
log.info('No metadata csv found for {}'.format(subject_now))
continue
for r_idx, df_wr_row in df_wr.iterrows():
                # we use it when both start and end times are filled in and Water during training > 0; restriction, freewater and handling sessions are skipped
if (df_wr_row['Time'] and isinstance(df_wr_row['Time'], str)
and df_wr_row['Time-end'] and isinstance(df_wr_row['Time-end'], str)
and df_wr_row['Training type'] != 'restriction'
and df_wr_row['Training type'] != 'handling'
and df_wr_row['Training type'] != 'freewater'
and df_wr_row['Water during training'] > 0):
try:
date_now = datetime.strptime(df_wr_row.Date, '%Y-%m-%d').date()
except:
try:
date_now = datetime.strptime(df_wr_row.Date, '%Y/%m/%d').date()
except:
log.info('Unable to parse session date: {}. Skipping...'.format(
df_wr_row.Date))
continue
if not (experiment.Session & {'subject_id': subject_id_now,
'session_date': date_now}):
key_source.append({'subject_id': subject_id_now,
'session_date': date_now,
'session_comment': str(df_wr_row['Notes']),
'session_weight': df_wr_row['Weight'],
'session_water_earned': df_wr_row[
'Water during training'],
'session_water_extra': df_wr_row['Extra water']})
return key_source
def populate(self, *args, **kwargs):
# Load project info (just once)
self.projects = self.get_bpod_projects()
# 'populate' which won't require upstream tables
# 'reserve_jobs' not parallel, overloaded to mean "don't exit on error"
for k in self.key_source:
try:
with dj.conn().transaction:
self.make(k)
except Exception as e:
log.warning('session key {} error: {}'.format(k, repr(e)))
if not kwargs.get('reserve_jobs', False):
raise
def make(self, key):
log.info(
'----------------------\nBehaviorBpodIngest.make(): key: {key}'.format(key=key))
subject_id_now = key['subject_id']
subject_now = (lab.WaterRestriction() & {'subject_id': subject_id_now}).fetch1(
'water_restriction_number')
date_now_str = key['session_date'].strftime('%Y%m%d')
log.info('h2o: {h2o}, date: {d}'.format(h2o=subject_now, d=date_now_str))
# ---- Ingest information for BPod projects ----
sessions_now, session_start_times_now, experimentnames_now = [], [], []
for proj in self.projects: #
exps = proj.experiments
for exp in exps:
stps = exp.setups
for stp in stps:
for session in stp.sessions:
if (session.subjects and session.subjects[0].find(subject_now) > -1
and session.name.startswith(date_now_str)):
sessions_now.append(session)
session_start_times_now.append(session.started)
experimentnames_now.append(exp.name)
bpodsess_order = np.argsort(session_start_times_now)
# --- Handle missing BPod session ---
if len(bpodsess_order) == 0:
log.error('BPod session not found!')
return
# ---- Concatenate bpod sessions (and corresponding trials) into one datajoint session ----
tbls_2_insert = ('sess_trial', 'behavior_trial', 'trial_note',
'sess_block', 'sess_block_trial',
'trial_choice', 'trial_event', 'action_event',
'photostim', 'photostim_location', 'photostim_trial',
'photostim_trial_event',
'valve_setting', 'valve_open_dur', 'available_reward')
# getting started
concat_rows = {k: list() for k in tbls_2_insert}
sess_key = None
trial_num = 0 # trial numbering starts at 1
for s_idx, session_idx in enumerate(bpodsess_order):
session = sessions_now[session_idx]
experiment_name = experimentnames_now[session_idx]
csvfilename = (pathlib.Path(session.path) / (
pathlib.Path(session.path).name + '.csv'))
# ---- Special parsing for csv file ----
log.info('Load session file(s) ({}/{}): {}'.format(s_idx + 1, len(bpodsess_order),
csvfilename))
df_behavior_session = util.load_and_parse_a_csv_file(csvfilename)
# ---- Integrity check of the current bpodsess file ---
# It must have at least one 'trial start' and 'trial end'
trial_start_idxs = df_behavior_session[(df_behavior_session['TYPE'] == 'TRIAL') & (
df_behavior_session['MSG'] == 'New trial')].index
if not len(trial_start_idxs):
log.info('No "trial start" for {}. Skipping...'.format(csvfilename))
continue # Make sure 'start' exists, otherwise move on to try the next bpodsess file if exists
trial_end_idxs = df_behavior_session[
(df_behavior_session['TYPE'] == 'TRANSITION') & (
df_behavior_session['MSG'] == 'End')].index
if not len(trial_end_idxs):
log.info('No "trial end" for {}. Skipping...'.format(csvfilename))
continue # Make sure 'end' exists, otherwise move on to try the next bpodsess file if exists
# It must be a foraging session
# extracting task protocol - hard-code implementation
if 'foraging' in experiment_name.lower() or (
'bari' in experiment_name.lower() and 'cohen' in experiment_name.lower()):
if 'var:lickport_number' in df_behavior_session and \
df_behavior_session['var:lickport_number'][0] == 3:
task = 'foraging 3lp'
task_protocol = 101
lick_ports = ['left', 'right', 'middle']
else:
task = 'foraging'
task_protocol = 100
lick_ports = ['left', 'right']
else:
log.info('ERROR: unhandled task name {}. Skipping...'.format(experiment_name))
continue # Make sure this is a foraging bpodsess, otherwise move on to try the next bpodsess file if exists
# ---- New session - construct a session key (from the first bpodsess that passes the integrity check) ----
if sess_key is None:
session_time = df_behavior_session['PC-TIME'][trial_start_idxs[0]]
if session.setup_name.lower() in ['day1', 'tower-2', 'day2-7', 'day_1',
'real foraging']:
setupname = 'Training-Tower-2'
elif session.setup_name.lower() in ['tower-3', 'tower-3beh', ' tower-3', '+',
'tower 3']:
setupname = 'Training-Tower-3'
elif session.setup_name.lower() in ['tower-1']:
setupname = 'Training-Tower-1'
elif session.setup_name.lower() in ['ephys_han']:
setupname = 'Ephys-Han'
else:
log.info('ERROR: unhandled setup name {} (from {}). Skipping...'.format(
session.setup_name, session.path))
continue # Another integrity check here
log.debug('synthesizing session ID')
key['session'] = (dj.U().aggr(experiment.Session()
& {'subject_id': subject_id_now},
n='max(session)').fetch1('n') or 0) + 1
sess_key = {**key,
'session_time': session_time.time(),
'username': df_behavior_session['experimenter'][0],
'rig': setupname}
# ---- channel for water ports ----
water_port_channels = {}
for lick_port in lick_ports:
chn_varname = 'var:WaterPort_{}_ch_in'.format(
self.water_port_name_mapper[lick_port])
if chn_varname not in df_behavior_session:
log.error(
'Bpod CSV KeyError: {} - Available columns: {}'.format(chn_varname,
df_behavior_session.columns))
return
water_port_channels[lick_port] = df_behavior_session[chn_varname][0]
# ---- Ingestion of trials ----
# extracting trial data
session_start_time = datetime.combine(sess_key['session_date'], sess_key['session_time'])
trial_start_idxs = df_behavior_session[(df_behavior_session['TYPE'] == 'TRIAL') & (df_behavior_session['MSG'] == 'New trial')].index
trial_start_idxs -= 2 # To reflect the change that bitcode is moved before the "New trial" line
trial_start_idxs = pd.Index([0]).append(trial_start_idxs[1:]) # so the random seed will be present
trial_end_idxs = trial_start_idxs[1:].append(pd.Index([(max(df_behavior_session.index))]))
# trial_end_idxs = df_behavior_session[(df_behavior_session['TYPE'] == 'END-TRIAL')].index
prevtrialstarttime = np.nan
blocknum_local_prev = np.nan
# getting ready
rows = {k: list() for k in
tbls_2_insert} # lists of various records for batch-insert
for trial_start_idx, trial_end_idx in zip(trial_start_idxs, trial_end_idxs):
df_behavior_trial = df_behavior_session[trial_start_idx:trial_end_idx + 1]
# Trials without GoCue are skipped
if not len(
df_behavior_trial[(df_behavior_trial['MSG'] == 'GoCue') & (
df_behavior_trial['TYPE'] == 'STATE')]):
continue
# ---- session trial ----
trial_num += 1 # increment trial number
trial_uid = len(experiment.SessionTrial & {'subject_id': subject_id_now}) + trial_num # Fix trial_uid here
# Note that the following trial_start/stop_time SHOULD NEVER BE USED in ephys related analysis
# because they are PC-TIME, which is not accurate (4 ms average delay, sometimes up to several seconds!)!
# In fact, from bpod.csv, we can only accurately retrieve (local) trial-wise, but not (global) session-wise, times
# See comments below.
trial_start_time = df_behavior_session['PC-TIME'][
trial_start_idx].to_pydatetime() - session_start_time
trial_stop_time = df_behavior_session['PC-TIME'][
trial_end_idx].to_pydatetime() - session_start_time
sess_trial_key = {**sess_key,
'trial': trial_num,
'trial_uid': trial_uid,
'start_time': trial_start_time.total_seconds(),
'stop_time': trial_stop_time.total_seconds()}
rows['sess_trial'].append(sess_trial_key)
# ---- session block ----
if 'Block_number' in df_behavior_session:
if np.isnan(df_behavior_trial['Block_number'].to_list()[0]):
blocknum_local = 0 if np.isnan(
blocknum_local_prev) else blocknum_local_prev
else:
blocknum_local = int(
df_behavior_trial['Block_number'].to_list()[0]) - 1
blocknum_local_prev = blocknum_local
reward_probability = {}
for lick_port in lick_ports:
p_reward_varname = 'var:reward_probabilities_{}'.format(
self.water_port_name_mapper[lick_port])
reward_probability[lick_port] = decimal.Decimal(
df_behavior_session[p_reward_varname][0][blocknum_local]).quantize(
decimal.Decimal(
                                    '.001'))  # Note: reward probabilities never change during a **bpod** session
# determine if this is a new block: compare reward probability with the previous block
if rows['sess_block']:
itsanewblock = dict_to_hash(reward_probability) != dict_to_hash(
rows['sess_block'][-1]['reward_probability'])
else:
itsanewblock = True
if itsanewblock:
all_blocks = [b['block'] for b in
rows['sess_block'] + concat_rows['sess_block']]
block_num = (np.max(all_blocks) + 1 if all_blocks else 1)
rows['sess_block'].append({**sess_key,
'block': block_num,
'block_start_time': trial_start_time.total_seconds(),
'reward_probability': reward_probability})
else:
block_num = rows['sess_block'][-1]['block']
rows['sess_block_trial'].append({**sess_trial_key, 'block': block_num})
# ====== Event times ======
# Foraging trial structure: (*...*: events of interest in experiment.EventType; [...]: optional)
# -> (ITI) -> *bitcodestart* -> bitcode -> lickport movement -> *delay* (lickport in position)
# -> [effective delay period] -> *go* -> [*choice*] -> [*reward*] -> *trialend* -> (ITI) ->
# Notes:
# 1. Differs from the delay-task:
# (1) no sample and presample epoch
# (2) effective delay period could be zero (ITI as an inherent delay).
# Also note that if the delay_period in bpod protocol < lickport movement time (~100 ms), the effective delay period is also zero,
# where the go-cue sound actually appears BEFORE the lickport stops moving.
# (3) we are interested in not only go-cue aligned PSTH (ephys.Unit.TrialSpikes), but need more flexible event alignments, especially ITI firings.
# So we should use the session-wise untrialized spike times stored in ephys.Unit['spike_times']. See below.
# 2. Two "trial start"s:
# (1) *bitcodestart* = onset of the first bitcode = *sTrig* in NIDQ bitcode.mat
# (2) `trial_start_idx` in this for loop = the start of bpod-trial ('New Trial' in bpod csv file)
# = the reference point of BPOD-TIME = NIDQ bpod-trial channel
# They are far from each other because I start the bpod trial at the middle of ITI (Foraging_bpod: e9a8ffd6) to cover video recording during ITI.
# 3. In theory, *bitcodestart* = *delay* (since there's no sample period),
# but in practice, the bitcode (21*20=420 ms) and lickport movement (~100 ms) also take some time.
# Note that bpod doesn't know the exact time when lickports are in place, so we can get *delay* only from NIDQ zaber channel (ephys.TrialEvent.'zaberinposition').
# 4. In early lick trials, effective delay start should be the last 'DelayStart' (?)
# 5. Finally, to perform session-wise alignment between behavior and ephys, there are two ways, which could be cross-checked with each other:
# (1) (most straightforward) use all event markers directly from NIDQ bitcode.mat,
# then align them to ephys.Unit['spike_times'] by looking at the *sTrig* of the first trial of a session
# (2) (can be used as a sanity check) extract trial-wise BPOD-TIME from pybpod.csv,
# and then convert the local trial-wise times to global session-wise times by aligning
# the same events from pybpod.csv and bitcode.mat across all trials, e.g., *bitcodestart* <--> *sTrig*, or 0 <--> NIDQ bpod-"trial trigger" channel
# Note that one should NEVER use PC-TIME from the bpod csv files (at least for ephys-related alignment)!!!
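# Hedged sketch of alignment method (2) above (illustrative only; the variable names below are hypothetical and not part of this loader):
#   bitcodestart_bpod: per-trial *bitcodestart* in BPOD-TIME (relative to the bpod trial start)
#   s_trig_nidq: per-trial *sTrig* from bitcode.mat, already in session-wise NIDQ/ephys time
#   trial_offset = s_trig_nidq - bitcodestart_bpod        # one offset per trial
#   event_time_session = event_time_bpod + trial_offset   # any BPOD-TIME event moved to session time
# i.e., matching *bitcodestart* <--> *sTrig* per trial gives the offset that maps local BPOD-TIME to global session time; PC-TIME is never used.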
# ----- BPOD STATES (all events except licks) -----
bpod_states_this_trial = df_behavior_trial[(df_behavior_trial['TYPE'] == 'STATE') & (df_behavior_trial['BPOD-INITIAL-TIME'] > 0)] # All states of this trial
trial_event_count = 0
# Use BPOD-INITIAL-TIME and BPOD-FINAL-TIME (all relative to bpod-trialstart)
bpod_states_of_interest = { # experiment.TrialEventType: Bpod state name
'videostart': ['ITIBeforeVideoOn'],
'bitcodestart': ['Start'],
'delay': ['DelayStart'], # (1) in a non early lick trial, effective delay start = max(DelayStart, LickportInPosition).
# where LickportInPosition is only available from NIDQ
# (2) in an early lick trial, there are multiple DelayStarts, the last of which is the effective delay start
'go': ['GoCue'],
'choice': [f'Choice_{lickport}' for lickport in self.water_port_name_mapper.values()],
'reward': [f'Reward_{lickport}' for lickport in self.water_port_name_mapper.values()],
'doubledip': ['Double_dipped'], # Only for non-double-dipped trials, ITI = last lick + 1 sec (maybe I should not use double dipping punishment for ephys?)
'trialend': ['ITI'],
'videoend': ['ITIAfterVideoOff'],
}
for trial_event_type, bpod_state in bpod_states_of_interest.items():
_idx = bpod_states_this_trial.index[bpod_states_this_trial['MSG'].isin(bpod_state)] # One state could have multiple appearances, such as DelayStart in early-lick trials
if not len(_idx):
continue
initials, finals = bpod_states_this_trial.loc[_idx][['BPOD-INITIAL-TIME', 'BPOD-FINAL-TIME']].values.T.astype(float)
initials[initials > 9999] = 9999 # Workaround for bug #9: BPod protocol was paused and then resumed after an impossibly long period of time (> decimal(8, 4)).
finals[finals > 9999] = 9999
# cache event times
for idx, (initial, final) in enumerate(zip(initials, finals)):
rows['trial_event'].extend(
[{**sess_trial_key,
'trial_event_id': trial_event_count + idx,
'trial_event_type': trial_event_type,
'trial_event_time': initial,
'duration': final - initial}]) # list comprehension doesn't work here
trial_event_count += len(initials)
# save gocue time for early-lick below
if trial_event_type == 'go':
gocue_time = initials[0]
# ------ Licks (use EVENT instead of STATE because not all licks triggered a state change) -------
lick_times = {}
for lick_port in lick_ports:
lick_times[lick_port] = df_behavior_trial['BPOD-INITIAL-TIME'][(
df_behavior_trial['+INFO'] == water_port_channels[lick_port])].to_numpy()
# cache licks
all_lick_types = np.concatenate(
[[ltype] * len(ltimes) for ltype, ltimes in lick_times.items()])
all_lick_times = np.concatenate(
[ltimes for ltimes in lick_times.values()])
# sort by lick times
sorted_licks = sorted(zip(all_lick_types, all_lick_times), key=lambda x: x[-1])
rows['action_event'].extend([{**sess_trial_key, 'action_event_id': idx,
'action_event_type': '{} lick'.format(ltype),
'action_event_time': ltime} for
idx, (ltype, ltime)
in enumerate(sorted_licks)])
# ====== Trial facts (nontemporal) ======
# WaterPort Choice
trial_choice = {'water_port': None}
for lick_port in lick_ports:
if any((df_behavior_trial['MSG'] == 'Choice_{}'.format(
self.water_port_name_mapper[lick_port]))
& (df_behavior_trial['TYPE'] == 'TRANSITION')):
trial_choice['water_port'] = lick_port
break
rows['trial_choice'].append({**sess_trial_key, **trial_choice})
# early lick
early_lick = 'no early'
if any(all_lick_times < gocue_time):
early_lick = 'early'
# outcome
outcome = 'miss' if trial_choice['water_port'] else 'ignore'
for lick_port in lick_ports:
if any((df_behavior_trial['MSG'] == 'Reward_{}'.format(
self.water_port_name_mapper[lick_port]))
& (df_behavior_trial['TYPE'] == 'TRANSITION')):
outcome = 'hit'
break
# ---- accumulated reward ----
for lick_port in lick_ports:
reward_var_name = 'reward_{}_accumulated'.format(
self.water_port_name_mapper[lick_port])
if reward_var_name not in df_behavior_trial:
log.error('Bpod CSV KeyError: {} - Available columns: {}'.format(
reward_var_name, df_behavior_trial.columns))
return
reward = df_behavior_trial[reward_var_name].values[0]
rows['available_reward'].append({
**sess_trial_key, 'water_port': lick_port,
'reward_available': False if np.isnan(reward) else reward})
# ---- auto water and notes ----
auto_water = False
auto_water_times = {}
for lick_port in lick_ports:
auto_water_varname = 'Auto_Water_{}'.format(
self.water_port_name_mapper[lick_port])
auto_water_ind = (df_behavior_trial['TYPE'] == 'STATE') & (
df_behavior_trial['MSG'] == auto_water_varname)
if any(auto_water_ind):
auto_water = True
auto_water_times[lick_port] = float(
df_behavior_trial['+INFO'][auto_water_ind.idxmax()])
if auto_water_times:
auto_water_ports = [k for k, v in auto_water_times.items() if v > 0.001]
rows['trial_note'].append({**sess_trial_key,
'trial_note_type': 'autowater',
'trial_note': 'and '.join(auto_water_ports)})
# add random seed start note
if any(df_behavior_trial['MSG'] == 'Random seed:'):
seedidx = (df_behavior_trial['MSG'] == 'Random seed:').idxmax() + 1
rows['trial_note'].append({**sess_trial_key,
'trial_note_type': 'random_seed_start',
'trial_note': str(df_behavior_trial['MSG'][seedidx])})
# add randomID (TrialBitCode)
if any(df_behavior_trial['MSG'] == 'TrialBitCode: '):
bitcode_ind = (df_behavior_trial['MSG'] == 'TrialBitCode: ').idxmax() + 1
rows['trial_note'].append({**sess_trial_key,
'trial_note_type': 'bitcode',
'trial_note': str(df_behavior_trial['MSG'][bitcode_ind])})
# ---- Behavior Trial ----
rows['behavior_trial'].append({**sess_trial_key,
'task': task,
'task_protocol': task_protocol,
'trial_instruction': 'none',
'early_lick': early_lick,
'outcome': outcome,
'auto_water': auto_water,
'free_water': False}) # TODO: verify this
# ---- Water Valve Setting ----
valve_setting = {**sess_trial_key}
if 'var_motor:LickPort_Lateral_pos' in df_behavior_trial.keys():
valve_setting['water_port_lateral_pos'] = \
df_behavior_trial['var_motor:LickPort_Lateral_pos'].values[0]
if 'var_motor:LickPort_RostroCaudal_pos' in df_behavior_trial.keys():
valve_setting['water_port_rostrocaudal_pos'] = \
df_behavior_trial['var_motor:LickPort_RostroCaudal_pos'].values[0]
if 'var_motor:LickPort_DorsoVentral_pos' in df_behavior_trial.keys():
valve_setting['water_port_dorsoventral_pos'] = \
df_behavior_trial['var_motor:LickPort_DorsoVentral_pos'].values[0]
rows['valve_setting'].append(valve_setting)
for lick_port in lick_ports:
valve_open_varname = 'var:ValveOpenTime_{}'.format(
self.water_port_name_mapper[lick_port])
if valve_open_varname in df_behavior_trial:
rows['valve_open_dur'].append({
**sess_trial_key, 'water_port': lick_port,
'open_duration': df_behavior_trial[valve_open_varname].values[0]})
# add to the session-concat
for tbl in tbls_2_insert:
concat_rows[tbl].extend(rows[tbl])
# ---- The insertions to relevant tables ----
# Session, SessionComment, SessionDetails insert
log.info('BehaviorIngest.make(): adding session record')
experiment.Session.insert1(sess_key, ignore_extra_fields=True)
experiment.SessionComment.insert1(sess_key, ignore_extra_fields=True)
experiment.SessionDetails.insert1(sess_key, ignore_extra_fields=True)
# Behavior Insertion
insert_settings = {'ignore_extra_fields': True, 'allow_direct_insert': True}
log.info('BehaviorIngest.make(): bulk insert phase')
log.info('BehaviorIngest.make(): ... experiment.Session.Trial')
experiment.SessionTrial.insert(concat_rows['sess_trial'], **insert_settings)
log.info('BehaviorIngest.make(): ... experiment.BehaviorTrial')
experiment.BehaviorTrial.insert(concat_rows['behavior_trial'], **insert_settings)
log.info('BehaviorIngest.make(): ... experiment.WaterPortChoice')
experiment.WaterPortChoice.insert(concat_rows['trial_choice'], **insert_settings)
log.info('BehaviorIngest.make(): ... experiment.TrialNote')
experiment.TrialNote.insert(concat_rows['trial_note'], **insert_settings)
log.info('BehaviorIngest.make(): ... experiment.TrialEvent')
experiment.TrialEvent.insert(concat_rows['trial_event'], **insert_settings)
log.info('BehaviorIngest.make(): ... experiment.ActionEvent')
experiment.ActionEvent.insert(concat_rows['action_event'], **insert_settings)
log.info('BehaviorIngest.make(): ... experiment.SessionBlock')
experiment.SessionBlock.insert(concat_rows['sess_block'], **insert_settings)
experiment.SessionBlock.BlockTrial.insert(concat_rows['sess_block_trial'],
**insert_settings)
block_reward_prob = []
for block in concat_rows['sess_block']:
block_reward_prob.extend(
[{**block, 'water_port': water_port, 'reward_probability': reward_p}
for water_port, reward_p in block.pop('reward_probability').items()])
experiment.SessionBlock.WaterPortRewardProbability.insert(block_reward_prob,
**insert_settings)
log.info('BehaviorIngest.make(): ... experiment.TrialAvailableReward')
experiment.TrialAvailableReward.insert(concat_rows['available_reward'],
**insert_settings)
log.info('BehaviorIngest.make(): ... experiment.WaterValveSetting')
experiment.WaterPortSetting.insert(concat_rows['valve_setting'], **insert_settings)
experiment.WaterPortSetting.OpenDuration.insert(concat_rows['valve_open_dur'],
**insert_settings)
# Behavior Ingest Insertion
log.info('BehaviorBpodIngest.make(): saving ingest {}'.format(sess_key))
self.insert1(sess_key, **insert_settings)
self.BehaviorFile.insert(
[{**sess_key, 'behavior_file': pathlib.Path(s.path).as_posix()}
for s in sessions_now], **insert_settings)
# --------------------- HELPER LOADER FUNCTIONS -----------------
def detect_task_type(path):
"""
Method to detect if a behavior matlab file is for "delay-response" or "multi-target-licking" task
:param path: (pathlib.Path) path to the behavior file (.mat)
:return task_type: (str) "delay-response" or "multi-target-licking"
"""
# distinguishing "delay-response" task or "multi-target-licking" task
mat = spio.loadmat(path.as_posix(), squeeze_me=True, struct_as_record=False)
GUI_fields = set(mat['SessionData'].SettingsFile.GUI._fieldnames)
if ({'X_center', 'Y_center', 'Z_center'}.issubset(GUI_fields)
and not {'SamplePeriod', 'DelayPeriod'}.issubset(GUI_fields)):
task_type = 'multi-target-licking'
else:
task_type = 'delay-response'
return task_type
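# Illustrative usage sketch (added example; the filename below is hypothetical):
#   task_type = detect_task_type(pathlib.Path('SessionData_behavior.mat'))
#   # -> 'multi-target-licking' or 'delay-response', based on the GUI fields checked above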
def load_delay_response_matfile(skey, matlab_filepath):
"""
Loading routine for delay-response task - from .mat behavior data
:param skey: session_key
:param matlab_filepath: full-path to the .mat file containing the delay-response behavior data
:return: nested list of all rows to be inserted into the various experiment-related tables
"""
matlab_filepath = pathlib.Path(matlab_filepath)
h2o = skey.pop('h2o')
SessionData = spio.loadmat(matlab_filepath.as_posix(),
squeeze_me=True, struct_as_record=False)['SessionData']
# parse session datetime
session_datetime_str = str('').join((str(SessionData.Info.SessionDate), ' ',
str(SessionData.Info.SessionStartTime_UTC)))
session_datetime = datetime.strptime(
session_datetime_str, '%d-%b-%Y %H:%M:%S')
AllTrialTypes = SessionData.TrialTypes
AllTrialSettings = SessionData.TrialSettings
AllTrialStarts = SessionData.TrialStartTimestamp
AllTrialStarts = AllTrialStarts - AllTrialStarts[0] # real 1st trial
RawData = SessionData.RawData
AllStateNames = RawData.OriginalStateNamesByNumber
AllStateData = RawData.OriginalStateData
AllEventData = RawData.OriginalEventData
AllStateTimestamps = RawData.OriginalStateTimestamps
AllEventTimestamps = RawData.OriginalEventTimestamps
AllRawEvents = SessionData.RawEvents.Trial
# verify trial-related data arrays are all same length
assert (all((x.shape[0] == AllStateTimestamps.shape[0] for x in
(AllTrialTypes, AllTrialSettings,
AllStateNames, AllStateData, AllEventData,
AllEventTimestamps, AllTrialStarts, AllTrialStarts, AllRawEvents))))
# AllStimTrials optional special case
if 'StimTrials' in SessionData._fieldnames:
log.debug('StimTrials detected in session - will include')
AllStimTrials = SessionData.StimTrials
assert (AllStimTrials.shape[0] == AllStateTimestamps.shape[0])
else:
log.debug('StimTrials not detected in session - will skip')
AllStimTrials = np.array([
None for _ in enumerate(range(AllStateTimestamps.shape[0]))])
# AllFreeTrials optional special case
if 'FreeTrials' in SessionData._fieldnames:
log.debug('FreeTrials detected in session - will include')
AllFreeTrials = SessionData.FreeTrials
assert (AllFreeTrials.shape[0] == AllStateTimestamps.shape[0])
else:
log.debug('FreeTrials not detected in session - synthesizing')
AllFreeTrials = np.zeros(AllStateTimestamps.shape[0], dtype=np.uint8)
# Photostim Period: early-delay, late-delay (default is early-delay)
# Infer from filename for now, only applicable to Susu's sessions (i.e. "SC" in h2o)
# If RecordingRig3, then 'late-delay'
photostim_period = 'early-delay'
rig_name = re.search('Recording(Rig\d)_', matlab_filepath.name)
if re.match('SC', h2o) and rig_name:
rig_name = rig_name.groups()[0]
if rig_name == "Rig3":
photostim_period = 'late-delay'
log.info('Photostim Period: {}'.format(photostim_period))
trials = list(zip(AllTrialTypes, AllStimTrials, AllFreeTrials,
AllTrialSettings, AllStateTimestamps, AllStateNames,
AllStateData, AllEventData, AllEventTimestamps,
AllTrialStarts, AllRawEvents))
if not trials:
log.warning('skipping date {d}, no valid files'.format(d=skey['session_date']))
return
#
# Trial data seems valid; synthesize session id & add session record
# XXX: note - later breaks can result in Sessions without valid trials
#
assert skey['session_date'] == session_datetime.date()
skey['session_date'] = session_datetime.date()
skey['session_time'] = session_datetime.time()
#
# Actually load the per-trial data
#
log.info('BehaviorIngest.make(): trial parsing phase')
# lists of various records for batch-insert
rows = {k: list() for k in ('trial', 'behavior_trial', 'trial_note',
'trial_event', 'corrected_trial_event',
'action_event', 'photostim',
'photostim_location', 'photostim_trial',
'photostim_trial_event')}
trial = namedtuple( # simple structure to track per-trial vars
'trial', ('ttype', 'stim', 'free', 'settings', 'state_times',
'state_names', 'state_data', 'event_data',
'event_times', 'trial_start', 'trial_raw_events'))
trial_number = 0 # trial numbering starts at 1
for t in trials:
#
# Misc
#
t = trial(*t) # convert list of items to a 'trial' structure
trial_number += 1 # increment trial counter
log.debug('BehaviorIngest.make(): parsing trial {i}'.format(i=trial_number))
# convert state data names into a lookup dictionary
#
# names (seem to be? are?):
#
# Trigtrialstart, PreSamplePeriod, SamplePeriod, DelayPeriod
# EarlyLickDelay, EarlyLickSample, ResponseCue, GiveLeftDrop
# GiveRightDrop, GiveLeftDropShort, GiveRightDropShort
# AnswerPeriod, Reward, RewardConsumption, NoResponse
# TimeOut, StopLicking, StopLickingReturn, TrialEnd
#
states = {k: (v + 1) for v, k in enumerate(t.state_names)}
required_states = ('PreSamplePeriod', 'SamplePeriod',
'DelayPeriod', 'ResponseCue', 'StopLicking',
'TrialEnd')
missing = list(k for k in required_states if k not in states)
if len(missing):
log.warning('skipping trial {i}; missing {m}'
.format(i=trial_number, m=missing))
continue
gui = t.settings.GUI
# ProtocolType - only ingest protocol >= 3
#
# 1 Water-Valve-Calibration 2 Licking 3 Autoassist
# 4 No autoassist 5 DelayEnforce 6 SampleEnforce 7 Fixed
#
if 'ProtocolType' not in gui._fieldnames:
log.warning('skipping trial {i}; protocol undefined'
.format(i=trial_number))
continue
protocol_type = gui.ProtocolType
if gui.ProtocolType < 3:
log.warning('skipping trial {i}; protocol {n} < 3'
.format(i=trial_number, n=gui.ProtocolType))
continue
#
# Top-level 'Trial' record
#
tkey = dict(skey)
startindex = np.where(t.state_data == states['PreSamplePeriod'])[0]
endindex = np.where(t.state_data == states['TrialEnd'])[0]
log.debug('states\n' + str(states))
log.debug('state_data\n' + str(t.state_data))
log.debug('startindex\n' + str(startindex))
log.debug('endindex\n' + str(endindex))
if not (len(startindex) and len(endindex)):
log.warning('skipping {}: start/end mismatch: {}/{}'.format(
trial_number, str(startindex), str(endindex)))
continue
try:
tkey['trial'] = trial_number
tkey['trial_uid'] = trial_number
tkey['start_time'] = t.trial_start
tkey['stop_time'] = t.trial_start + t.state_times[endindex][0]
except IndexError:
log.warning('skipping {}: IndexError: {}/{} -> {}'.format(
trial_number, str(startindex), str(endindex), str(t.state_times)))
continue
log.debug('tkey' + str(tkey))
rows['trial'].append(tkey)
#
# Specific BehaviorTrial information for this trial
#
bkey = dict(tkey)
bkey['task'] = 'audio delay' # hard-coded here
bkey['task_protocol'] = 1 # hard-coded here
# determine trial instruction
trial_instruction = 'left' # hard-coded here
if gui.Reversal == 1:
if t.ttype == 1:
trial_instruction = 'left'
elif t.ttype == 0:
trial_instruction = 'right'
elif gui.Reversal == 2:
if t.ttype == 1:
trial_instruction = 'right'
elif t.ttype == 0:
trial_instruction = 'left'
bkey['trial_instruction'] = trial_instruction
# determine early lick
early_lick = 'no early'
if (protocol_type >= 5
and 'EarlyLickDelay' in states
and
|
np.any(t.state_data == states['EarlyLickDelay'])
|
numpy.any
|
'''Filter design
'''
import numpy as np
from scipy.linalg import solve_toeplitz
from scipy.signal import decimate as scipydecimate
from scipy.signal import firwin
from .fft import rawfft, rawifft
from .misc import primefactors
from .plt import figure
from .signal import fftwconvolve
def get_filterbankfilters(N, fc=0.25):
'''
Make filters for filterbank decomposition and recomposition
These are even order FIR filters
Parameters
----------
N : int
The filter length. Must be even number
fc : float
Normalized cutoff frequency <0.0, 0.5>
Returns
-------
f0, f1, h0, h1 : arrays of float
The filter kernels
'''
# N must be even to make the best filter!
assert N % 2 == 0
def spectral_flip(h):
g = np.copy(h)
g[0::2] = -g[0::2]
return g
h0 = firwin(N+1, fc, nyq=0.5)
h1 = spectral_flip(h0)
f0 = spectral_flip(h1)*2
f1 = - spectral_flip(h0)*2
return h0, h1, f0, f1
def get_filterbankfilters_kurtogram(N=16):
'''
Acquire the filterbank filters used in:
Antoni, Jerome. "Fast computation of the kurtogram for the detection of transient faults."
Mechanical Systems and Signal Processing 21.1 (2007): 108-124.
Parameters
----------
N : int
Number of filterbank coefficients
Returns
-------
h : float 1D array
Lowpass filter
g : float 1D array
Highpass filter
'''
fc = 0.4
h = firwin(N+1,fc)*np.exp(2*1j*np.pi*np.arange(0, N+1)*0.125)
n = np.arange(2, N+2, 1)
g = np.flipud(h)[1:]*(-1.0)**(1-n)
return h, g
def _fbdecompose(x, h0, h1):
x0 = fftwconvolve(x, h0, 'same')
x1 = fftwconvolve(x, h1, 'same')
v0 = np.copy(x0[0::2])
v1 = np.copy(x1[1::2])
xsize = x0.size
return v0, v1, xsize
def _fbcompose(x0, x1, f0, f1, xsize):
c0 = np.zeros(xsize)
c0[0::2] = np.copy(x0)
c1 = np.zeros(xsize)
c1[1::2] = np.copy(x1)
y0 = fftwconvolve(c0, f0, 'same')
y1 = fftwconvolve(c1, f1, 'same')
return y0+y1
def filterbank_decompose(x, h0, h1, level):
'''
Decompose a signal using supplied filters for a certain number of levels
Parameters
----------
x : float 1D array
Signal
h0, h1 : float 1D arrays
Filter kernels
h0 is low-pass, h1 is highpass
level : int
The filter decomposition level
Returns
-------
xbank : list of float 1D arrays
The filter-bank coefficients ranging from lowest frequency to highest frequency
xsizes : list of lists of integers
The sizes of signals before decomposing.
Only needed for recomposing using filterbank_compose()
See also
--------
get_filterbankfilters() : Makes the h0 and h1 filter kernel
filterbank_compose() : Re-composes an xbank into a signal
'''
xbank = [x,]
xsizes = []
for i in range(0, level):
xnew = []
xsizes.append([])
for j in range(0, len(xbank)):
v0, v1, xsize = _fbdecompose(xbank[j], h0, h1)
xnew.append(v0)
xnew.append(v1)
xsizes[i].append(xsize)
xbank = xnew
return xbank, xsizes
def filterbank_compose(xbank, f0, f1, xsizes):
'''
Recompose the filter bank to a single signal
Parameters
----------
xbank : float 1D array
The filterbank
f0, f1 : float 1D arrays
The filter kernels
xsizes : list of list of ints
The sizes of signals before decomposing
Returns
-------
x_hat : float array
The recomposed signal. Should be close to the original
signal x after applying the lag
lag : int
The lag of the recomposed signal
Should ideally use x_hat[lag:-lag] after recomposition
x_hat[lag:-lag] approximates x[0:-lag*2]
'''
level = int(np.log2(len(xbank)))
for i in range(0, level):
xbank_new = []
for j in range(0, len(xbank), 2):
xsize = xsizes[len(xsizes)-i-1][j//2]
y = _fbcompose(xbank[j], xbank[j+1], f0, f1, xsize)
xbank_new.append(y)
xbank = xbank_new
lag = int(2**level - 1)
return xbank[0], lag
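# Illustrative usage sketch (added example, not part of the original API; per the docstrings
# above, x_hat[lag:-lag] should approximate x[0:-2*lag]):
#   x = np.random.randn(1024)
#   h0, h1, f0, f1 = get_filterbankfilters(N=16, fc=0.25)
#   xbank, xsizes = filterbank_decompose(x, h0, h1, level=3)
#   x_hat, lag = filterbank_compose(xbank, f0, f1, xsizes)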
def waveletfilter(f0, sigma, Fs, N):
'''
Constructs the frequency transformed wavelet filter. Can be used to
filter a frequency transformed signal by taking Y*Ksi.
Parameters
----------
f0 : float
The center frequency for the bandpass filter in Hz
sigma : float
The width of the filter in Hz
Fs : float
The sampling frequency of the signal in Hz
N : int
The number of samples in the signal
Returns
-------
Ksi : float 1D array
Filter in the frequency domain.
'''
dt = 1.0/Fs
T = dt*float(N)
df = 1.0/T
f = np.arange(0, Fs/2.0, df)
Ksi = np.exp(-(np.pi**2.0/sigma**2.0)*(f - f0)**2.0)
Ksi = np.concatenate((Ksi, np.zeros(N - Ksi.size)))
return Ksi
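# Illustrative usage sketch (added example; assumes rawfft/rawifft imported above behave like
# plain FFT / inverse-FFT wrappers, and y is a 1-D signal sampled at Fs Hz):
#   Ksi = waveletfilter(f0=2000.0, sigma=300.0, Fs=Fs, N=y.size)
#   y_band = rawifft(rawfft(y) * Ksi)   # band-pass around f0 via the docstring's "Y*Ksi"
#   envelope = np.abs(y_band)           # envelope estimate, since negative frequencies are zeroed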
def blinddeconvolution(z, L, part=1.0, k=4.0, maxIter=1000, maxMu=2.0, stopCrit=0.01, debug=False):
'''
Iteratively identifies a filter g that deconvolves the filter h
originally applied to z to return the deconvolved signal x.
The iterator tries to maximize the kurtosis (impulsivity) of the
deconvolved signal.
The deconvolution is afterwards performed using:
x = pyvib.signal.fftwconvolve(z, gNew, 'valid')
Parameters
----------
z : float 1D array
Signal to deconvolve
L : int
Length of filter
part : float, optional
Percentage of the data to train the filter on.
Must be within <0, 1>
k : float, optional
Exponent of the objective. 4 gives kurtosis
maxIter : int, optional
Maximum number of iterations to run
maxMu : float, optional
Maximum training coefficient
stopCrit : float, optional
Stopping criterion
debug : boolean, optional
Print progression if true
Returns
-------
gNew : float 1D array
Filter kernel that deconvolves the signal
'''
temp = np.ones(L)
temp[::2] *= -1.0
gNew = temp*np.random.rand(L)
g = np.copy(gNew)
assert 0.0 < part <= 1.0
i1 = 0
i2 = i1 + int(part*z.size)
N = i2 - i1
Rxx = fftwconvolve(
|
np.flipud(z[i1:i2])
|
numpy.flipud
|
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import os
from datetime import datetime
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import f1_score
from time import mktime
from timeit import default_timer as dt
from .feature_selection import dropHighCorrs, getHighCorrs, naiveVarDrop
from .metrics import aicCalc, glm_regularized_AIC as glmAIC
from .modeling import crossVal, prettyConfMat
from .parallel import p_prog_simp
from .utils import print_time, updateProgBar
def calcAICsLasso(penalty, Xtrain, Ytrain, sample_size, unreg_full_mod=None,
random_state=123):
"""
Utility function for quick_analysis. Need to update this to include more
GLM models.
Parameters
----------
penalty : float
The value of C passed to LogisticRegression (inverse regularization strength for the L1 penalty).
Xtrain : numpy array or pandas dataframe
The design or feature matrix.
Ytrain : numpy array or pandas series
The target or response variable.
sample_size : int
The number of observations.
unreg_full_mod : sklearn object, or similar, optional
The unregularized full model. The default is None.
random_state : int, optional
Random seed for the process. The default is 123.
Returns
-------
nzc : list
A list indicating the non-zero coefficients.
aics : list
A list containing the AIC values.
"""
cur_mod_reg = LogisticRegression(C=penalty, max_iter=10000, penalty="l1",
solver='liblinear',
random_state=random_state,
n_jobs=1).fit(Xtrain, Ytrain)
Yprob = cur_mod_reg.predict_proba(Xtrain)[:, 1]
Ypred = cur_mod_reg.predict(Xtrain)
mod_coef = np.concatenate((cur_mod_reg.intercept_,
|
np.squeeze(cur_mod_reg.coef_)
|
numpy.squeeze
|
#pflee.py
# A parallelized implementation of FLEE (with original rules)
#example to run: mpiexec -n 4 python3 pflee.py 100
import numpy as np
import sys
import random
from flee.SimulationSettings import SimulationSettings
from flee import flee
from mpi4py import MPI
from mpi4py.MPI import ANY_SOURCE
class MPIManager:
def __init__(self):
if not MPI.Is_initialized():
print("Manual MPI_Init performed.")
MPI.Init()
self.comm = MPI.COMM_WORLD
self.rank = self.comm.Get_rank()
self.size = self.comm.Get_size()
def CalcCommWorldTotalSingle(self, i):
total = np.array([-1])
# If you want this number on rank 0, just use Reduce.
self.comm.Allreduce(
|
np.array([i])
|
numpy.array
|
# <EMAIL>
# Code: SIR-files tools
"""
This file include:
1.- lambda_mA, stokesIQUV, [nL,posi,nN] = lperfil(filename)
2.- wperfil(filename, numberLine, lambda_mA, stokes)
3.- [tau, todoPlot] = lmodel8(filename, verbose=True)
4.- wmodel8(modelo, filename, verbose=False)
5.- mapa = readSIRMap(resultadoSir, magnitud)
6.- [height, width, nlambda] = shapeSIRMap(resultadoSir)
7.- mapa = readSIRProfileMap(resultadoSir, Nstoke)
MODEL ATMOSPHERE FILES TO BE USED WITH SIR
Each model file contains the macroturbulent velocity (km/s), the
filling factor (only to be used with two-component models, ranging
from 0 to 1), and the stray light factor (in percent) in the first line.
Then, eleven columns follow:
Column 1: log tau_5 (logarithm of the continuum optical depth at 5000 A)
Column 2: Temperature (K)
Column 3: Electron pressures (dyn/cm^2)
Column 4: Microturbulent velocity (cm/s)
Column 5: Magnetic field strength (G)
Column 6: Line-of-sight velocity (cm/s)
Column 7: Inclination angle of the magnetic field vector in deg
from 0 (pointing to the observer) to 180 (pointing away from the
observer)
Column 8: Azimuthal angle of the magnetic field vector in deg.
Column 9: Geometrical scale (km)
Column 10: Gas pressure (dyn/cm^2)
Column 11: Gas density (gr/cm^3)
"""
# ====================================================================
def circular_mean(alpha):
import numpy as np
return np.arctan2(np.sum(np.sin(alpha*np.pi/180.)), np.sum(np.cos(alpha*np.pi/180.)))*180./np.pi
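# Worked example (added for illustration): angles straddling 0/360 average correctly,
# e.g. circular_mean(np.array([350., 10.])) is ~0 deg, whereas np.mean would give 180.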
# ====================================================================
def circular_map_smooth(mapa, cuanto=1):
import numpy as np
new_mapa = np.copy(mapa)
for i in range(new_mapa.shape[0]):
for j in range(new_mapa.shape[1]):
if i > cuanto and j>cuanto:
new_mapa[i,j] = (circular_mean(2*mapa[i-cuanto:i+cuanto,j-cuanto:j+cuanto])/2. +360. ) %180.
else:
new_mapa[i,j] = (circular_mean(2*mapa[i:i+cuanto,j:j+cuanto])/2. +360. ) %180.
return new_mapa
# ====================================================================
def vectorMapa(phiMap, sep, color, suma, difu,xscale=1.,yscale=1.):
import numpy as np
import matplotlib.pyplot as plt
import scipy.ndimage as sn
plt.autoscale(False)
newPhi = sn.filters.gaussian_filter(phiMap, difu)
for j in range(0, phiMap.shape[0], sep):
for i in range(0, phiMap.shape[1], sep):
plt.plot(np.array([i, i+1.*np.cos((newPhi[j, i]+suma)/180.*np.pi)])*xscale, np.array([j, j+1.*np.sin((newPhi[j, i]+suma)/180.*np.pi)])*yscale, color=color, lw=0.5)
# ====================================================================
def corrphi(mapa):
mapa[:] = (mapa[:]+360.) %180.
pass
# ====================================================================
def lperfil(filename, verbose=False):
"""Read SIR Stokes profile
Args:
filename (string)
Returns:
lambda_mA, stokesIQUV, [nL,posi,nN] = lperfil(filename)
"""
from numpy import array
fo = open(filename, 'r')
Nn=[]; NumeroLineas = 1; PosiNn0T= []
x0=[]
StokeI0=[]; StokeQ0=[]; StokeU0=[]; StokeV0=[]
x=[]
StokeI=[]; StokeQ=[]; StokeU=[]; StokeV=[]
for ii in fo:
linea_split = ii.split()
Nn.append(float(linea_split[0]))
x0.append(float(linea_split[1]))
StokeI0.append(float(linea_split[2]))
StokeQ0.append(float(linea_split[3]))
StokeU0.append(float(linea_split[4]))
StokeV0.append(float(linea_split[5]))
# Convert to arrays:
x0=array(x0)
StokeI0 = array(StokeI0)
StokeQ0 = array(StokeQ0)
StokeU0 = array(StokeU0)
StokeV0 = array(StokeV0)
lenNn = len(Nn)
# Positions of the different spectral lines within the file
PosiNn0T.append(0)
try:
NnInit = Nn[0]
PosiNn0 = 0
for NextI in range(0,lenNn-1):
if (Nn[NextI] != NnInit):
PosiNn0T.append(NextI)
NnInit = Nn[NextI]
except:
print('Only1Line') # TO REVIEW!!
PosiNn0T.append(lenNn-1)
NumeroLineas = len(PosiNn0T)-1
# Store each line's data in the arrays
for Index in range(NumeroLineas):
StokeI.append(StokeI0[PosiNn0T[Index]:PosiNn0T[Index+1]-1])
StokeQ.append(StokeQ0[PosiNn0T[Index]:PosiNn0T[Index+1]-1])
StokeU.append(StokeU0[PosiNn0T[Index]:PosiNn0T[Index+1]-1])
StokeV.append(StokeV0[PosiNn0T[Index]:PosiNn0T[Index+1]-1])
x.append(x0[PosiNn0T[Index]:PosiNn0T[Index+1]-1])
PosiNn0T = PosiNn0T[:-1]
# If there is only one line
if len(x) == 1:
x = x0; StokeI = StokeI0; StokeQ = StokeQ0;
StokeU = StokeU0; StokeV = StokeV0
if verbose:
print('NumeroLineas:'+str(NumeroLineas))
print('Info: lambda in mA')
print('lambda_mA, stokesIQUV, [nL,posi,nN]')
fo.close()
return [x, [StokeI, StokeQ, StokeU, StokeV], [NumeroLineas, PosiNn0T, Nn]]
# ====================================================================
def wperfil(filename, numberLine, lambda_mA, stokes):
"""Write SIR Stokes profile in a file
Args:
filename (TYPE): Description
numberLine (TYPE): Description
lambda_mA (TYPE): Description
stokes (TYPE): Description
Returns:
TYPE: Description
"""
si = stokes[0]
sq = stokes[1]
su = stokes[2]
sv = stokes[3]
fo = open(filename, 'w')
for i in range(len(lambda_mA)):
fo.write(' {0} {1:3.4f} {2:2.6E} {3:2.6e} {4:2.6e} {5:2.6e}\n'\
.format(numberLine,lambda_mA[i],si[i],sq[i],su[i],sv[i]))
fo.close()
return
# ====================================================================
def lmodel8(modelo, verbose=False):
from numpy import array
fo = open(modelo, 'r')
tau = []
temp = []
Pres = []
vmic = []
BMag = []
vlos = []
gamma = []
phi = []
c = 0
for ii in fo:
linea_split = ii.split()
if c == 0:
vmac = float(linea_split[0])
fill = float(linea_split[1])
stray = float(linea_split[2])
if c != 0:
tau.append(float(linea_split[0]))
temp.append(float(linea_split[1]))
Pres.append(float(linea_split[2]))
vmic.append(float(linea_split[3]))
BMag.append(float(linea_split[4]))
vlos.append(float(linea_split[5]))
gamma.append(float(linea_split[6]))
phi.append(float(linea_split[7]))
c += 1
# Convert to arrays:
tau = array(tau)
temp = array(temp)
Pres = array(Pres)
vmic = array(vmic)
BMag = array(BMag)
vlos = array(vlos)
gamma = array(gamma)
phi = array(phi)
lenTau = len(tau)
todoPlot = [temp/1000.,Pres,vmic*1E-5,BMag/1000.,vlos*1E-5,gamma,phi,vmac,fill,stray]
fo.close()
if verbose:
print('temp[kK], Pres[dyn cm^-3], vmic[km/s], BMag[kG], vlos[km/s], gamma[deg], phi[deg], vmac[km/s], fill, stray')
print('Out: {tau, magnitudes}')
return [tau, todoPlot]
# ====================================================================
def wmodel8(modelo, filename, verbose=False):
[tau, todoPlot] = modelo
temp = 1000.*todoPlot[0]
Pres = todoPlot[1]
vmic = todoPlot[2]/1E-5
Bmag = todoPlot[3]*1000.
vlos = todoPlot[4]/1E-5
gamma = todoPlot[5]
phi = todoPlot[6]
vmac = todoPlot[7]
fill = todoPlot[8]
stray = todoPlot[9]
fo = open(filename, 'w')
for i in range(-1,len(temp)):
if i == -1:
fo.write(' {0:3.4f} {1:3.4f} {2:3.4f}\n'.format(vmac, fill, stray))
if i != -1:
fo.write(' {0:2.4f} {1:2.6e} {2:2.6e} {3:2.6e} {4:2.6e} {5:2.6e} {6:2.6e} {7:2.6e}\n'
.format(tau[i], temp[i], Pres[i], vmic[i], Bmag[i], vlos[i], gamma[i], phi[i]))
fo.close()
return
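# Illustrative round-trip sketch (added example; 'model.mod' and 'model_copy.mod' are hypothetical filenames):
#   tau, todoPlot = lmodel8('model.mod')
#   wmodel8([tau, todoPlot], 'model_copy.mod')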
# ====================================================================
def lmodel12(modelo, verbose=False):
'''
MODEL ATMOSPHERE FILES TO BE USED WITH SIR
Each model file contains the macroturbulent velocity (km/s), the
filling factor (only to be used with two-component models, ranging
from 0 to 1), and the stray light factor (in percent) in the first line.
Then, eleven columns follow:
Column 1: log tau_5 (logarithm of the continuum optical depth at 5000 A)
Column 2: Temperature (K)
Column 3: Electron pressures (dyn/cm^2)
Column 4: Microturbulent velocity (cm/s)
Column 5: Magnetic field strength (G)
Column 6: Line-of-sight velocity (cm/s)
Column 7: Inclination angle of the magnetic field vector in deg
from 0 (pointing to the observer) to 180 (pointing away from the
observer)
Column 8: Azimuthal angle of the magnetic field vector in deg.
Column 9: Geometrical scale (km)
Column 10: Gas pressure (dyn/cm^2)
Column 11: Gas density (gr/cm^3)
'''
from numpy import array
fo = open(modelo, 'r')
tau = []
temp = []
Pres = []
vmic = []
BMag = []
vlos = []
gamma = []
phi = []
zz = []
pgas = []
rho = []
c = 0
for ii in fo:
linea_split = ii.split()
if c == 0:
vmac = float(linea_split[0])
fill = float(linea_split[1])
stray = float(linea_split[2])
if c != 0:
tau.append(float(linea_split[0]))
temp.append(float(linea_split[1]))
Pres.append(float(linea_split[2]))
vmic.append(float(linea_split[3]))
BMag.append(float(linea_split[4]))
vlos.append(float(linea_split[5]))
gamma.append(float(linea_split[6]))
phi.append(float(linea_split[7]))
zz.append(float(linea_split[8]))
pgas.append(float(linea_split[9]))
rho.append(float(linea_split[10]))
c += 1
# Convert to arrays:
tau = array(tau)
temp = array(temp)
Pres = array(Pres)
vmic =
|
array(vmic)
|
numpy.array
|
import numpy as np
from scipy.special import expit
def sigmoid(x):
"""
Sigmoid function (a thin wrapper around scipy.special.expit).
:param x:
:return:
"""
return expit(x)
def sigmoid_der(x):
"""
Derivative of the sigmoid function.
:param x:
:return:
"""
return sigmoid(x) * (1.0 - sigmoid(x))
def leaky_relu(x, alpha=0.01):
"""
Leaky rectified linear unit.
:param x:
:param float alpha: (optional) value of leak.
:return:
"""
return np.maximum(alpha * x, x)
def relu(x):
"""
Rectified linear unit.
:param x:
:return:
"""
return np.maximum(0.0, x)
def leaky_relu_der(x, alpha=0.01):
"""
Derivative of leaky relu.
:param x:
:param float alpha: (optional) value of leak.
:return:
"""
y = np.ones_like(x)
y[x < 0] = alpha  # slope is alpha on the negative side, 1 elsewhere
return y
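# Quick illustrative check (added example): compare against a central finite difference.
#   x = np.array([-2.0, -0.5, 0.5, 2.0])
#   eps = 1e-6
#   numerical = (leaky_relu(x + eps) - leaky_relu(x - eps)) / (2 * eps)
#   assert np.allclose(leaky_relu_der(x), numerical)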
def tanh(x):
"""
Hyperbolic tangent
:param x:
:return:
"""
return np.tanh(x)
def arctan(x):
"""
Tan^-1
:param x:
:return:
"""
return np.arctan(x)
def tanh_der(x):
"""
Derivative of the hyperbolic tangent function.
:param x:
:return:
"""
return 1.0 - np.power(tanh(x), 2)
def linear(x):
"""
Linear function.
:param x:
:return:
"""
return x
def linear_der(x):
"""
Derivate of the linear function.
:param x:
:return:
"""
return 1.0
def soft_plus(x):
"""
Soft plus function.
:param x:
:return:
"""
return np.log(1.0 + np.exp(x))
def soft_plus_der(x):
"""
Derivative of the soft plus function (the logistic sigmoid).
:param x:
:return:
"""
return np.power(1.0 + np.exp(-x), -1)
def selu(x, lambda_=1.0507, alpha=1.67326):
"""
Scaled exponential linear unit.
:param x:
:param float lambda_:
:param float alpha:
:return:
"""
a = np.copy(x)  # copy so the caller's array is not modified in place
a[x < 0.0] = alpha * (np.exp(a[x < 0.0]) - 1.0)
return lambda_ * a
def sinc(x):
"""
Sinc function.
:param x:
:return:
"""
return np.sinc(x)
def gaussian(x):
"""
:param x:
:return:
"""
return np.exp(- np.power(x, 2))
# Activation dict for Neural Network with their derivatives
nn_activation_dict = {
'sigmoid': {'activation': sigmoid,
'derivative': sigmoid_der},
'sin': {'activation': np.sin,
'derivative': np.cos},
# 'leaky_relu': {'activation': leaky_relu, # It works bad
# 'derivative': leaky_relu_der},
'tanh': {'activation': tanh,
'derivative': tanh_der},
'linear': {'activation': linear,
'derivative': linear_der},
'soft_plus': {'activation': soft_plus,
'derivative': soft_plus_der}
}
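# Illustrative lookup sketch (added example): each entry pairs an activation with its derivative.
#   act = nn_activation_dict['tanh']['activation']
#   der = nn_activation_dict['tanh']['derivative']
#   z = np.linspace(-2.0, 2.0, 5)
#   assert np.allclose(der(z), 1.0 - act(z) ** 2)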
# Activation dict for ELM
activation_dict = {
'sin': np.sin,
'cos': np.cos,
'relu': relu,
'leaky_relu': leaky_relu,
'hard': lambda x: np.array(x > 0.0, dtype=float),
'linear': linear,
'tanh': tanh,
'sigmoid': sigmoid,
'sinc': sinc,
'gaussian': gaussian,
'selu': selu,
'arctan': arctan,
'soft_plus': soft_plus,
}
def linear_kernel(gamma: float = 1.0, X=None, Y=None):
"""
:param gamma:
:param X:
:param Y:
:return:
"""
n = X.shape[0]
X = gamma * X
if Y is None:
XXh = np.dot(np.sum(X**2, 1, dtype=np.float64).reshape((n, 1)), np.ones((1, n), dtype=np.float64))
omega = XXh + XXh.transpose() - 2.0 * np.dot(X, X.transpose())
else:
m = Y.shape[0]
XXh = np.dot(np.sum(X**2, 1, dtype=np.float64).reshape((n, 1)), np.ones((1, m), dtype=np.float64))
YYh = np.dot(np.sum(Y**2, 1, dtype=np.float64).reshape((m, 1)),
|
np.ones((1, n), dtype=np.float64)
|
numpy.ones
|
import torch
import numpy as np
import math
import cv2  # needed for the cv2.resize calls in infer_fast below
from models.with_mobilenet import PoseEstimationWithMobileNet
from modules.keypoints import extract_keypoints, group_keypoints
from modules.pose import Pose, track_poses
import collections
def load_state(net, checkpoint):
source_state = checkpoint['state_dict']
target_state = net.state_dict()
new_target_state = collections.OrderedDict()
for target_key, target_value in target_state.items():
if target_key in source_state and source_state[target_key].size() == target_state[target_key].size():
new_target_state[target_key] = source_state[target_key]
else:
new_target_state[target_key] = target_state[target_key]
print('[WARNING] No pre-trained parameters found for {}'.format(target_key))
net.load_state_dict(new_target_state)
def normalize(img, img_mean, img_scale):
img = np.array(img, dtype=np.float32)
img = (img - img_mean) * img_scale
return img
def pad_width(img, stride, min_dims):
_, _, h, w = img.shape
h = min(min_dims[0], h)
min_dims[0] = math.ceil(min_dims[0] / float(stride)) * stride
min_dims[1] = max(min_dims[1], w)
min_dims[1] = math.ceil(min_dims[1] / float(stride)) * stride
pad = []
pad.append(int(math.floor((min_dims[1] - w) / 2.0)))
pad.append(int(min_dims[1] - w - pad[0]))
pad.append(int(math.floor((min_dims[0] - h) / 2.0)))
pad.append(int(min_dims[0] - h - pad[2]))
# padded_img = cv2.copyMakeBorder(img, pad[0], pad[2], pad[1], pad[3],
# cv2.BORDER_CONSTANT, value=pad_value)
padded_img = torch.nn.functional.pad(img, pad)
return padded_img, pad
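# Worked example (added for illustration): with stride=8, a scaled image of height 368 and
# width 361, and min_dims=[368, 368] (as computed in infer_fast below), the result is
# pad=[3, 4, 0, 0], i.e. the width is padded symmetrically from 361 up to 368.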
def init_pose(checkpoint_path):
net = PoseEstimationWithMobileNet()
checkpoint = torch.load(checkpoint_path, map_location='cpu')
load_state(net, checkpoint)
net.eval()
net = net.cuda()
env = [net, []]
return None, env
def infer_fast(net, img, net_input_height_size, stride, upsample_ratio):
_, _, height, width = img.shape
scale = net_input_height_size / height
# scaled_img = cv2.resize(img, (0, 0), fx=scale, fy=scale, interpolation=cv2.INTER_CUBIC)
# scaled_img = normalize(scaled_img, img_mean, img_scale)
scaled_img = torch.nn.functional.interpolate(img, scale_factor=scale, mode='bilinear', align_corners=False)
scaled_img -= 0.5
# print("SCALED", scaled_img.shape, scaled_img.min(), scaled_img.max())
min_dims = [net_input_height_size, max(scaled_img.shape[3], net_input_height_size)]
tensor_img, pad = pad_width(scaled_img, stride, min_dims)
stages_output = net(tensor_img)
stage2_heatmaps = stages_output[-2]
heatmaps = np.transpose(stage2_heatmaps.squeeze().cpu().data.numpy(), (1, 2, 0))
heatmaps = cv2.resize(heatmaps, (0, 0), fx=upsample_ratio, fy=upsample_ratio, interpolation=cv2.INTER_CUBIC)
# heatmaps = torch.nn.functional.interpolate(stage2_heatmaps, scale_factor=upsample_ratio, mode='bicubic', align_corners=False)
# heatmaps = np.transpose(heatmaps.squeeze().cpu().data.numpy(), (1, 2, 0))
stage2_pafs = stages_output[-1]
pafs = np.transpose(stage2_pafs.squeeze().cpu().data.numpy(), (1, 2, 0))
pafs = cv2.resize(pafs, (0, 0), fx=upsample_ratio, fy=upsample_ratio, interpolation=cv2.INTER_CUBIC)
# pafs = torch.nn.functional.interpolate(stage2_pafs, scale_factor=upsample_ratio, mode='bicubic', align_corners=False)
# pafs = np.transpose(pafs.squeeze().cpu().data.numpy(), (1, 2, 0))
return heatmaps, pafs, scale, pad
def anime_frame(rgb, env, size=None, useSigmod=False, useTwice=False):
if env is None:
# return init_pose("./checkpoint_iter_370000.pth")
# return init_pose("./default_checkpoints/R.pth")
return init_pose("./refine4_checkpoints/checkpoint_iter_14000.pth")
net, previous_poses = env
stride = 8
upsample_ratio = 4
heatmaps, pafs, scale, pad = infer_fast(net, rgb, 368, stride, upsample_ratio)
num_keypoints = Pose.num_kpts
total_keypoints_num = 0
all_keypoints_by_type = []
for kpt_idx in range(num_keypoints): # 19th for bg
total_keypoints_num += extract_keypoints(heatmaps[:, :, kpt_idx], all_keypoints_by_type,
total_keypoints_num)
pose_entries, all_keypoints = group_keypoints(all_keypoints_by_type, pafs, demo=True)
for kpt_id in range(all_keypoints.shape[0]):
all_keypoints[kpt_id, 0] = (all_keypoints[kpt_id, 0] * stride / upsample_ratio - pad[
1]) / scale
all_keypoints[kpt_id, 1] = (all_keypoints[kpt_id, 1] * stride / upsample_ratio - pad[
0]) / scale
current_poses = []
for n in range(len(pose_entries)):
if len(pose_entries[n]) == 0:
continue
pose_keypoints =
|
np.ones((num_keypoints, 2), dtype=np.int32)
|
numpy.ones
|
import pandas as pd
import numpy as np
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import roc_auc_score
from skopt import BayesSearchCV
train_X = np.load(r'X:\Hackathon\AV - AMEXPERT\train_amex\xgb\np_train_X_res.npy')
train_Y = np.load(r'X:\Hackathon\AV - AMEXPERT\train_amex\xgb\np_train_y_res.npy')
va_X = np.load(r'X:\Hackathon\AV - AMEXPERT\train_amex\xgb\np_va_X.npy')
va_Y = np.load(r'X:\Hackathon\AV - AMEXPERT\train_amex\xgb\np_va_y.npy')
test_X =
|
np.load(r'X:\Hackathon\AV - AMEXPERT\train_amex\xgb\np_test_X.npy')
|
numpy.load
|
from __future__ import absolute_import
import xarray as xr
import h5py
import numpy as np
import pandas as pd
import datetime
import scipy
import scipy.interpolate
import os
#turn off warnings so i can use the progressbar
import warnings
warnings.filterwarnings('ignore')
class GPMDPR():
"""
Author: <NAME>. This class is intended to help with the efficient processing of GPM-DPR radar files.
Currently, xarray cannot read NASA's HDF files directly (2A.GPM.DPR*). So here is an attempt to do so.
Once in xarray format, the efficient search functions can be used.
**NOTE 1: Currently, I do not have this function pass all variables through (there is quite the list of them).
Maybe in the future I will generalize it to do so. But right now it's a bit tedious to code up all the units and such.
**NOTE 2: Outerswath code not ready yet. Do not turn the flag on
Feel free to reach out to me on twitter (@dopplerchase) or email <EMAIL>
For your reference, please check out the ATBD: https://pps.gsfc.nasa.gov/GPMprelimdocs.html
"""
def __init__(self,filename=[],boundingbox=None,outer_swath=False,auto_run=True):
""" Initializes things.
filename: str, path to GPM-DPR file
boundingbox: list of floats, if you would like to cut the gpm to a lat lon box
send in a list of [lon_min,lon_max,lat_min,lat_max]
"""
self.filename = filename
self.xrds = None
self.datestr=None
self.height= None
self.corners = boundingbox
self.retrieval_flag = 0
self.interp_flag = 0
self.outer_swath = outer_swath
#determine whether the file uses the newer variable names
if (filename.find('X') >= 0):
self.legacy = False
self.v07 = False
elif (filename.find('V9') >= 0):
self.legacy = False
self.v07 = True
else:
self.legacy = True
if auto_run:
#this reads the hdf5 file
self.read()
#this calculates the range height for the 2D cross-sections
self.calc_heights()
#this will convert the hdf to an xarray dataset
self.toxr()
def read(self):
"""
This method simply reads the HDF file and gives it to the class.
"""
self.hdf = h5py.File(self.filename,'r')
if self.legacy:
###set some global parameters
#whats the common shape of the DPR files
if self.outer_swath:
shape = self.hdf['NS']['PRE']['zFactorMeasured'][:,:,:].shape
self.along_track = np.arange(0,shape[0])
self.cross_track = np.arange(0,shape[1])
self.range = np.arange(0,shape[2])
else:
shape = self.hdf['NS']['PRE']['zFactorMeasured'][:,12:37,:].shape
self.along_track = np.arange(0,shape[0])
self.cross_track = np.arange(0,shape[1])
self.range = np.arange(0,shape[2])
else:
shape = self.hdf['FS']['PRE']['zFactorMeasured'][:,:,:].shape
self.along_track = np.arange(0,shape[0])
self.cross_track = np.arange(0,shape[1])
self.range = np.arange(0,shape[2])
def calc_heights(self):
""" Here we calculate the atitude above mean sea level. Surprisingly this was not
provided in version 6, but is included in the new version. Please not there is a
difference between this method and the supplied heights in the new version. It
seems to be less than 200 m error. Just keep that in mind!"""
x2 = 2. * 17 #total scan is 34 degrees (from -17 to +17), split into 48 intervals (49 beams) below
re = 6378. #radius of the earth km
theta = -1 *(x2/2.) + (x2/48.)*np.arange(0,49) #break the -17 to 17 into equal degrees
theta2 = np.zeros(theta.shape[0]+1)
theta = theta - 0.70833333/2. #shift by half a beam width to get the left edge for pcolor plotting
theta2[:-1] = theta
theta2[-1] = theta[-1] + 0.70833333
theta = theta2 * (np.pi/180.) #convert to radians
prh = np.zeros([49,176]) #set up matrix
for i in np.arange(0,176): #loop over num range gates
for j in np.arange(0,49): #loop over scans
a = np.arcsin(((re+407)/re)*np.sin(theta[j]))-theta[j] #407 km is the orbit height, re radius of earth,
prh[j,i] = (176-(i))*0.125*np.cos(theta[j]+a) #more geometry
da = xr.DataArray(prh[:,:], dims=['cross_track','range'])
da.to_netcdf('./HEIGHTS_full.nc')
da = xr.DataArray(prh[12:37,:], dims=['cross_track','range'])
da.to_netcdf('./HEIGHTS.nc')
def toxr(self,ptype=None,clutter=False,echotop=False,precipflag=10):
"""
This is the main method of the package. It directly creates the xarray dataset from the HDF file.
To save computational time, it first checks to see if you set a box of interest.
Then it uses xarray efficient searching to make sure there are some profiles in that box.
"""
#set the precip type of interest. If none, give back all data...
self.ptype= ptype
self.snow = False
self.precip = False
if (self.ptype=='precip') or (self.ptype=='Precip') or \
(self.ptype=='PRECIP') or (self.ptype=='snow') or \
(self.ptype=='Snow') or (self.ptype=='SNOW'):
self.precip=True
if (self.ptype=='snow') or (self.ptype=='Snow') or (self.ptype=='SNOW'):
self.snow=True
#set the killflag to false. If this is True at the end, it means no points in the box were found.
self.killflag = False
#first things first, check to make sure there are points in the bounding box.
#cut points to make sure there are points in your box. This should save you time.
if self.corners is not None:
#load data out of hdf
if self.outer_swath:
if self.legacy:
lons = self.hdf['NS']['Longitude'][:,:]
lats = self.hdf['NS']['Latitude'][:,:]
else:
lons = self.hdf['FS']['Longitude'][:,:]
lats = self.hdf['FS']['Latitude'][:,:]
else:
lons = self.hdf['NS']['Longitude'][:,12:37]
lats = self.hdf['NS']['Latitude'][:,12:37]
#shove it into a dataarray
da = xr.DataArray(np.zeros(lons.shape), dims=['along_track', 'cross_track'],
coords={'lons': (['along_track','cross_track'],lons),
'lats': (['along_track','cross_track'],lats)})
#cut the the edges of the box
da = da.where((da.lons >= self.corners[0]) & \
(da.lons <= self.corners[1]) & \
(da.lats >= self.corners[2]) & \
(da.lats <= self.corners[3]),drop=False)
#okay, now drop nans
da = da.dropna(dim='along_track',how='all')
#if there are no profiles, the len is 0, and we will set the kill flag
if da.along_track.shape[0]==0:
self.killflag = True
#if there were no points it will not waste time with processing or io stuff
if self.killflag:
pass
else:
if self.datestr is None:
self.parse_dtime()
if self.height is None:
if self.legacy:
if self.outer_swath:
height = xr.open_dataarray('./HEIGHTS_full.nc')
height = height.values[np.newaxis,:,:]
height = np.tile(height,(self.hdf['NS']['Longitude'].shape[0],1,1))
self.height = height
else:
height = xr.open_dataarray('./HEIGHTS.nc')
height = height.values[np.newaxis,:,:]
height = np.tile(height,(self.hdf['NS']['Longitude'].shape[0],1,1))
self.height = height
else:
height = xr.open_dataarray('./HEIGHTS_full.nc')
height = height.values[np.newaxis,:,:]
height = np.tile(height,(self.hdf['FS']['Longitude'].shape[0],1,1))
self.height = height
if self.corners is None:
if self.legacy:
if self.outer_swath:
lons = self.hdf['NS']['Longitude'][:,:]
lats = self.hdf['NS']['Latitude'][:,:]
else:
lons = self.hdf['NS']['Longitude'][:,12:37]
lats = self.hdf['NS']['Latitude'][:,12:37]
else:
lons = self.hdf['FS']['Longitude'][:,:]
lats = self.hdf['FS']['Latitude'][:,:]
if self.legacy:
if self.outer_swath:
#need to fill the outerswath with nans
flagSurfaceSnowfall = np.ones([len(self.along_track),len(self.cross_track)],dtype=int)*255
flagSurfaceSnowfall[:,12:37] = self.hdf['MS']['Experimental']['flagSurfaceSnowfall'][:,:]
da = xr.DataArray(flagSurfaceSnowfall,
dims=['along_track', 'cross_track'],
coords={'lons': (['along_track','cross_track'],lons),
'lats': (['along_track','cross_track'],lats),
'time': (['along_track','cross_track'],self.datestr)})
da.fillna(value=255)
da.attrs['units'] = 'none'
da.attrs['standard_name'] = 'experimental flag to diagnose snow at surface'
#make xr dataset
self.xrds = da.to_dataset(name = 'flagSurfaceSnow')
#
#ADD BBtop and Bottom
da = xr.DataArray(self.hdf['NS']['CSF']['binBBTop'][:,:],
dims=['along_track', 'cross_track'],
coords={'lons': (['along_track','cross_track'],lons),
'lats': (['along_track','cross_track'],lats),
'time': (['along_track','cross_track'],self.datestr)})
da.fillna(value=-9999)
da.attrs['units'] = 'none'
da.attrs['standard_name'] = 'ind of BBtop'
self.xrds['binBBTop'] = da
da = xr.DataArray(self.hdf['NS']['CSF']['binBBBottom'][:,:],
dims=['along_track', 'cross_track'],
coords={'lons': (['along_track','cross_track'],lons),
'lats': (['along_track','cross_track'],lats),
'time': (['along_track','cross_track'],self.datestr)})
da.fillna(value=-9999)
da.attrs['units'] = 'none'
da.attrs['standard_name'] = 'ind of BBBottom'
self.xrds['binBBBottom'] = da
flagPrecip = np.ones([len(self.along_track),len(self.cross_track)],dtype=int)*-9999
flagPrecip[:,12:37] = self.hdf['MS']['PRE']['flagPrecip'][:,:]
da = xr.DataArray(flagPrecip,
dims=['along_track', 'cross_track'],
coords={'lons': (['along_track','cross_track'],lons),
'lats': (['along_track','cross_track'],lats),
'time': (['along_track','cross_track'],self.datestr)})
da.fillna(value=-9999)
da.attrs['units'] = 'none'
da.attrs['standard_name'] = 'flag to diagnose precip at surface' + \
'; 11 is precip from both, 10 is precip from just Ku-band'
#fill dataset
self.xrds['flagPrecip'] = da
#
typePrecip = np.ones([len(self.along_track),len(self.cross_track)],dtype=int)*-9999
typePrecip[:,12:37] = self.hdf['MS']['CSF']['typePrecip'][:]
typePrecip = np.asarray(typePrecip,dtype=float)
ind = np.where(typePrecip == -1111)
typePrecip[ind] = np.nan
ind = np.where(typePrecip == -9999)
typePrecip[ind] = np.nan
typePrecip = np.trunc(typePrecip/10000000)
typePrecip = np.asarray(typePrecip,dtype=int)
da = xr.DataArray(typePrecip, dims=['along_track', 'cross_track'],
coords={'lons': (['along_track','cross_track'],lons),
'lats': (['along_track','cross_track'],lats),
'time': (['along_track','cross_track'],self.datestr)})
da.fillna(value=-9999)
da.attrs['units'] = 'none'
da.attrs['standard_name'] = 'flag to diagnose raintype. If 1: Strat. If 2: Conv. If 3:other '
self.xrds['typePrecip'] = da
#Get the phaseNearSurface (0 is snow, 1 is mixed, 2 is rain, 2.55 is missing)
phaseNearSurface = self.hdf['NS']['SLV']['phaseNearSurface'][:,:]/100
phaseNearSurface[phaseNearSurface == 2.55] = -9999
phaseNearSurface =np.asarray(np.trunc(phaseNearSurface),dtype=int)
da = xr.DataArray(phaseNearSurface, dims=['along_track', 'cross_track'],
coords={'lons': (['along_track','cross_track'],lons),
'lats': (['along_track','cross_track'],lats),
'time': (['along_track','cross_track'],self.datestr)})
da.fillna(value=-9999)
da.attrs['units'] = 'none'
da.attrs['standard_name'] = 'flag to diagnose near surface phase.'+ \
'0 is snow, 1 is mixed, 2 is rain. This is included to compare to Skofronick-Jackson 2019'
self.xrds['phaseNearSurface'] = da
#Get the precipRateNearSurf (needed for skofronick-jackson 2019 comparison)
precipRateNearSurface = self.hdf['NS']['SLV']['precipRateNearSurface'][:,:]
da = xr.DataArray(precipRateNearSurface,
dims=['along_track', 'cross_track'],
coords={'lons': (['along_track','cross_track'],lons),
'lats': (['along_track','cross_track'],lats),
'time': (['along_track','cross_track'],self.datestr)})
da.fillna(value=-9999)
da.attrs['units'] = 'none'
da.attrs['standard_name'] = 'Near surface R from the GPM-DPR algo.'
self.xrds['precipRateNearSurface'] = da
if clutter:
self.get_highest_clutter_bin()
da = xr.DataArray(self.dummy,
dims=['along_track', 'cross_track','range'],
coords={'lons': (['along_track','cross_track'],lons),
'lats': (['along_track','cross_track'],lats),
'time': (['along_track','cross_track'],self.datestr),
'alt':(['along_track', 'cross_track','range'],self.height)})
da.attrs['units'] = 'none'
da.attrs['standard_name'] = 'flag to remove ground clutter'
self.xrds['clutter'] = da
da = xr.DataArray(self.hdf['NS']['SLV']['zFactorCorrectedNearSurface'][:,:],
dims=['along_track', 'cross_track'],
coords={'lons': (['along_track','cross_track'],lons),
'lats': (['along_track','cross_track'],lats),
'time': (['along_track','cross_track'],self.datestr)})
da.attrs['units'] = 'dBZ'
da.attrs['standard_name'] = 'near surface Ku'
da = da.where(da >= 12)
self.xrds['nearsurfaceKu'] = da
kanearsurf = np.ones([len(self.along_track),len(self.cross_track)],dtype=int)*-9999
kanearsurf[:,12:37] = self.hdf['MS']['SLV']['zFactorCorrectedNearSurface'][:,:]
da = xr.DataArray(kanearsurf,
dims=['along_track', 'cross_track'],
coords={'lons': (['along_track','cross_track'],lons),
'lats': (['along_track','cross_track'],lats),
'time': (['along_track','cross_track'],self.datestr)})
da.attrs['units'] = 'dBZ'
da.attrs['standard_name'] = 'near surface Ka'
da = da.where(da >= 15)
self.xrds['nearsurfaceKa'] = da
da = xr.DataArray(self.hdf['NS']['SLV']['zFactorCorrected'][:,:,:],
dims=['along_track', 'cross_track','range'],
coords={'lons': (['along_track','cross_track'],lons),
'lats': (['along_track','cross_track'],lats),
'time': (['along_track','cross_track'],self.datestr),
'alt':(['along_track', 'cross_track','range'],self.height)})
da.attrs['units'] = 'dBZ'
da.attrs['standard_name'] = 'corrected KuPR'
if clutter:
da = da.where(self.xrds.clutter==0)
da = da.where(da >= 12)
self.xrds['NSKu_c'] = da
da = xr.DataArray(self.hdf['NS']['SLV']['epsilon'][:,:,:],
dims=['along_track', 'cross_track','range'],
coords={'lons': (['along_track','cross_track'],lons),
'lats': (['along_track','cross_track'],lats),
'time': (['along_track','cross_track'],self.datestr),
'alt':(['along_track', 'cross_track','range'],self.height)})
da.fillna(value=-9999.9)
da = da.where(da >= 0)
da.attrs['units'] = 'none'
da.attrs['standard_name'] = 'epsilon value for retrieval'
self.xrds['epsilon'] = da
MSKa_c = np.ones([len(self.along_track),len(self.cross_track),len(self.range)],dtype=float)*-9999
MSKa_c[:,12:37,:] = self.hdf['MS']['SLV']['zFactorCorrected'][:,:,:]
da = xr.DataArray(MSKa_c,
dims=['along_track', 'cross_track','range'],
coords={'lons': (['along_track','cross_track'],lons),
'lats': (['along_track','cross_track'],lats),
'time': (['along_track','cross_track'],self.datestr),
'alt':(['along_track', 'cross_track','range'],self.height)})
da.attrs['units'] = 'dBZ'
da.attrs['standard_name'] = 'corrected KaPR, MS scan'
if clutter:
da = da.where(self.xrds.clutter==0)
da = da.where(da >= 15)
self.xrds['MSKa_c'] = da
if echotop:
self.echotop()
da = xr.DataArray(self.dummy2,
dims=['along_track', 'cross_track','range'],
coords={'lons': (['along_track','cross_track'],lons),
'lats': (['along_track','cross_track'],lats),
'time': (['along_track','cross_track'],self.datestr),
'alt':(['along_track', 'cross_track','range'],self.height)})
da.attrs['units'] = 'none'
da.attrs['standard_name'] = 'flag to remove noise outside cloud/precip top'
self.xrds['echotop'] = da
da = xr.DataArray(self.hdf['NS']['PRE']['zFactorMeasured'][:,:,:],
dims=['along_track', 'cross_track','range'],
coords={'lons': (['along_track','cross_track'],lons),
'lats': (['along_track','cross_track'],lats),
'time': (['along_track','cross_track'],self.datestr),
'alt':(['along_track', 'cross_track','range'],self.height)})
da.attrs['units'] = 'dBZ'
da.attrs['standard_name'] = 'measured KuPR'
if clutter:
da = da.where(self.xrds.clutter==0)
if echotop:
da = da.where(self.xrds.echotop==0)
da = da.where(da >= 0)
self.xrds['NSKu'] = da
MSKa = np.ones([len(self.along_track),len(self.cross_track),len(self.range)],dtype=float)*-9999
MSKa[:,12:37,:] = self.hdf['MS']['PRE']['zFactorMeasured'][:,:,:]
da = xr.DataArray(MSKa,
dims=['along_track', 'cross_track','range'],
coords={'lons': (['along_track','cross_track'],lons),
'lats': (['along_track','cross_track'],lats),
'time': (['along_track','cross_track'],self.datestr),
'alt':(['along_track', 'cross_track','range'],self.height)})
da.attrs['units'] = 'dBZ'
da.attrs['standard_name'] = 'measured KaPR, MS scan'
if clutter:
da = da.where(self.xrds.clutter==0)
if echotop:
da = da.where(self.xrds.echotop==0)
da = da.where(da >= 0)
self.xrds['MSKa'] = da
da = xr.DataArray(self.hdf['NS']['SLV']['precipRate'][:,:,:],
dims=['along_track', 'cross_track','range'],
coords={'lons': (['along_track','cross_track'],lons),
'lats': (['along_track','cross_track'],lats),
'time': (['along_track','cross_track'],self.datestr),
'alt':(['along_track', 'cross_track','range'],self.height)})
da.attrs['units'] = 'mm hr^-1'
da.attrs['standard_name'] = 'retrieved R, from DPR algo'
if clutter:
da = da.where(self.xrds.clutter==0)
if echotop:
da = da.where(self.xrds.echotop==0)
self.xrds['R'] = da
da = xr.DataArray(self.hdf['NS']['SLV']['paramDSD'][:,:,:,1],
dims=['along_track', 'cross_track','range'],
coords={'lons': (['along_track','cross_track'],lons),
'lats': (['along_track','cross_track'],lats),
'time': (['along_track','cross_track'],self.datestr),
'alt':(['along_track', 'cross_track','range'],self.height)})
da.attrs['units'] = 'mm'
da.attrs['standard_name'] = 'retrieved Dm, from DPR algo'
if clutter:
da = da.where(self.xrds.clutter==0)
if echotop:
da = da.where(self.xrds.echotop==0)
da = da.where(da >= 0)
self.xrds['Dm_dpr'] = da
da = xr.DataArray(self.hdf['NS']['SLV']['paramDSD'][:,:,:,0],
dims=['along_track', 'cross_track','range'],
coords={'lons': (['along_track','cross_track'],lons),
'lats': (['along_track','cross_track'],lats),
'time': (['along_track','cross_track'],self.datestr),
'alt':(['along_track', 'cross_track','range'],self.height)})
da.attrs['units'] = 'dBNw'
da.attrs['standard_name'] = 'retrieved Nw, from DPR algo'
if clutter:
da = da.where(self.xrds.clutter==0)
if echotop:
da = da.where(self.xrds.echotop==0)
da = da.where(da >= 0)
self.xrds['Nw_dpr'] = da
if self.precip:
            #change this to 10 if you want to relax the condition, because the Ka band has poor sensitivity
self.xrds = self.xrds.where(self.xrds.flagPrecip>=precipflag)
if self.corners is not None:
self.setboxcoords()
#as before, makes sure there is data...
if self.xrds.along_track.shape[0]==0:
self.killflag = True
else:
da = xr.DataArray(self.hdf['MS']['Experimental']['flagSurfaceSnowfall'][:,:],
dims=['along_track', 'cross_track'],
coords={'lons': (['along_track','cross_track'],lons),
'lats': (['along_track','cross_track'],lats),
'time': (['along_track','cross_track'],self.datestr)})
                da = da.fillna(value=255)
da.attrs['units'] = 'none'
da.attrs['standard_name'] = 'experimental flag to diagnose snow at surface'
#make xr dataset
self.xrds = da.to_dataset(name = 'flagSurfaceSnow')
#
#ADD BBtop and Bottom
da = xr.DataArray(self.hdf['NS']['CSF']['binBBTop'][:,12:37],
dims=['along_track', 'cross_track'],
coords={'lons': (['along_track','cross_track'],lons),
'lats': (['along_track','cross_track'],lats),
'time': (['along_track','cross_track'],self.datestr)})
                da = da.fillna(value=-9999)
da.attrs['units'] = 'none'
da.attrs['standard_name'] = 'ind of BBtop'
self.xrds['binBBTop'] = da
da = xr.DataArray(self.hdf['NS']['CSF']['binBBBottom'][:,12:37],
dims=['along_track', 'cross_track'],
coords={'lons': (['along_track','cross_track'],lons),
'lats': (['along_track','cross_track'],lats),
'time': (['along_track','cross_track'],self.datestr)})
                da = da.fillna(value=-9999)
da.attrs['units'] = 'none'
                da.attrs['standard_name'] = 'ind of BBbottom'
self.xrds['binBBBottom'] = da
da = xr.DataArray(self.hdf['MS']['PRE']['flagPrecip'][:,:],
dims=['along_track', 'cross_track'],
coords={'lons': (['along_track','cross_track'],lons),
'lats': (['along_track','cross_track'],lats),
'time': (['along_track','cross_track'],self.datestr)})
                da = da.fillna(value=-9999)
da.attrs['units'] = 'none'
                da.attrs['standard_name'] = 'flag to diagnose precip at surface. ' + \
                '11 is precip from both, 10 is precip from just Ku-band'
#fill dataset
self.xrds['flagPrecip'] = da
#
typePrecip = self.hdf['MS']['CSF']['typePrecip'][:]
typePrecip = np.asarray(typePrecip,dtype=float)
ind = np.where(typePrecip == -1111)
typePrecip[ind] = np.nan
ind = np.where(typePrecip == -9999)
typePrecip[ind] = np.nan
typePrecip = np.trunc(typePrecip/10000000)
typePrecip = np.asarray(typePrecip,dtype=int)
da = xr.DataArray(typePrecip, dims=['along_track', 'cross_track'],
coords={'lons': (['along_track','cross_track'],lons),
'lats': (['along_track','cross_track'],lats),
'time': (['along_track','cross_track'],self.datestr)})
                da = da.fillna(value=-9999)
da.attrs['units'] = 'none'
da.attrs['standard_name'] = 'flag to diagnose raintype. If 1: Strat. If 2: Conv. If 3:other '
self.xrds['typePrecip'] = da
                #Get the phaseNearSurface (0 is snow, 1 is mixed, 2 is rain, 2.55 is missing)
phaseNearSurface = self.hdf['NS']['SLV']['phaseNearSurface'][:,12:37]/100
phaseNearSurface[phaseNearSurface == 2.55] = -9999
phaseNearSurface =np.asarray(np.trunc(phaseNearSurface),dtype=int)
da = xr.DataArray(phaseNearSurface, dims=['along_track', 'cross_track'],
coords={'lons': (['along_track','cross_track'],lons),
'lats': (['along_track','cross_track'],lats),
'time': (['along_track','cross_track'],self.datestr)})
                da = da.fillna(value=-9999)
da.attrs['units'] = 'none'
                da.attrs['standard_name'] = 'flag to diagnose near surface phase. ' + \
                '0 is snow, 1 is mixed, 2 is rain. This is included to compare to Skofronick-Jackson 2019'
self.xrds['phaseNearSurface'] = da
#Get the precipRateNearSurf (needed for skofronick-jackson 2019 comparison)
precipRateNearSurface = self.hdf['NS']['SLV']['precipRateNearSurface'][:,12:37]
da = xr.DataArray(precipRateNearSurface,
dims=['along_track', 'cross_track'],
coords={'lons': (['along_track','cross_track'],lons),
'lats': (['along_track','cross_track'],lats),
'time': (['along_track','cross_track'],self.datestr)})
                da = da.fillna(value=-9999)
                da.attrs['units'] = 'mm hr^-1'
da.attrs['standard_name'] = 'Near surface R from the GPM-DPR algo.'
self.xrds['precipRateNearSurface'] = da
if clutter:
self.get_highest_clutter_bin()
da = xr.DataArray(self.dummy,
dims=['along_track', 'cross_track','range'],
coords={'lons': (['along_track','cross_track'],lons),
'lats': (['along_track','cross_track'],lats),
'time': (['along_track','cross_track'],self.datestr),
'alt':(['along_track', 'cross_track','range'],self.height)})
da.attrs['units'] = 'none'
da.attrs['standard_name'] = 'flag to remove ground clutter'
self.xrds['clutter'] = da
da = xr.DataArray(self.hdf['NS']['SLV']['zFactorCorrectedNearSurface'][:,12:37],
dims=['along_track', 'cross_track'],
coords={'lons': (['along_track','cross_track'],lons),
'lats': (['along_track','cross_track'],lats),
'time': (['along_track','cross_track'],self.datestr)})
da.attrs['units'] = 'dBZ'
da.attrs['standard_name'] = 'near surface Ku'
da = da.where(da >= 12)
self.xrds['nearsurfaceKu'] = da
da = xr.DataArray(self.hdf['MS']['SLV']['zFactorCorrectedNearSurface'][:,:],
dims=['along_track', 'cross_track'],
coords={'lons': (['along_track','cross_track'],lons),
'lats': (['along_track','cross_track'],lats),
'time': (['along_track','cross_track'],self.datestr)})
da.attrs['units'] = 'dBZ'
da.attrs['standard_name'] = 'near surface Ka'
da = da.where(da >= 15)
self.xrds['nearsurfaceKa'] = da
da = xr.DataArray(self.hdf['NS']['SLV']['zFactorCorrected'][:,12:37,:],
dims=['along_track', 'cross_track','range'],
coords={'lons': (['along_track','cross_track'],lons),
'lats': (['along_track','cross_track'],lats),
'time': (['along_track','cross_track'],self.datestr),
'alt':(['along_track', 'cross_track','range'],self.height)})
da.attrs['units'] = 'dBZ'
da.attrs['standard_name'] = 'corrected KuPR'
if clutter:
da = da.where(self.xrds.clutter==0)
da = da.where(da >= 12)
self.xrds['NSKu_c'] = da
da = xr.DataArray(self.hdf['NS']['SLV']['epsilon'][:,12:37,:],
dims=['along_track', 'cross_track','range'],
coords={'lons': (['along_track','cross_track'],lons),
'lats': (['along_track','cross_track'],lats),
'time': (['along_track','cross_track'],self.datestr),
'alt':(['along_track', 'cross_track','range'],self.height)})
                da = da.fillna(value=-9999.9)
da = da.where(da >= 0)
da.attrs['units'] = 'none'
da.attrs['standard_name'] = 'epsilon value for retrieval'
self.xrds['epsilon'] = da
da = xr.DataArray(self.hdf['MS']['SLV']['zFactorCorrected'][:,:,:],
dims=['along_track', 'cross_track','range'],
coords={'lons': (['along_track','cross_track'],lons),
'lats': (['along_track','cross_track'],lats),
'time': (['along_track','cross_track'],self.datestr),
'alt':(['along_track', 'cross_track','range'],self.height)})
da.attrs['units'] = 'dBZ'
da.attrs['standard_name'] = 'corrected KaPR, MS scan'
if clutter:
da = da.where(self.xrds.clutter==0)
da = da.where(da >= 15)
self.xrds['MSKa_c'] = da
if echotop:
self.echotop()
da = xr.DataArray(self.dummy2,
dims=['along_track', 'cross_track','range'],
coords={'lons': (['along_track','cross_track'],lons),
'lats': (['along_track','cross_track'],lats),
'time': (['along_track','cross_track'],self.datestr),
'alt':(['along_track', 'cross_track','range'],self.height)})
da.attrs['units'] = 'none'
da.attrs['standard_name'] = 'flag to remove noise outside cloud/precip top'
self.xrds['echotop'] = da
da = xr.DataArray(self.hdf['NS']['PRE']['zFactorMeasured'][:,12:37,:],
dims=['along_track', 'cross_track','range'],
coords={'lons': (['along_track','cross_track'],lons),
'lats': (['along_track','cross_track'],lats),
'time': (['along_track','cross_track'],self.datestr),
'alt':(['along_track', 'cross_track','range'],self.height)})
da.attrs['units'] = 'dBZ'
da.attrs['standard_name'] = 'measured KuPR'
if clutter:
da = da.where(self.xrds.clutter==0)
if echotop:
da = da.where(self.xrds.echotop==0)
da = da.where(da >= 0)
self.xrds['NSKu'] = da
da = xr.DataArray(self.hdf['MS']['PRE']['zFactorMeasured'][:,:,:],
dims=['along_track', 'cross_track','range'],
coords={'lons': (['along_track','cross_track'],lons),
'lats': (['along_track','cross_track'],lats),
'time': (['along_track','cross_track'],self.datestr),
'alt':(['along_track', 'cross_track','range'],self.height)})
da.attrs['units'] = 'dBZ'
da.attrs['standard_name'] = 'measured KaPR, MS scan'
if clutter:
da = da.where(self.xrds.clutter==0)
if echotop:
da = da.where(self.xrds.echotop==0)
da = da.where(da >= 0)
self.xrds['MSKa'] = da
da = xr.DataArray(self.hdf['NS']['SLV']['precipRate'][:,12:37,:],
dims=['along_track', 'cross_track','range'],
coords={'lons': (['along_track','cross_track'],lons),
'lats': (['along_track','cross_track'],lats),
'time': (['along_track','cross_track'],self.datestr),
'alt':(['along_track', 'cross_track','range'],self.height)})
da.attrs['units'] = 'mm hr^-1'
da.attrs['standard_name'] = 'retrieved R, from DPR algo'
if clutter:
da = da.where(self.xrds.clutter==0)
if echotop:
da = da.where(self.xrds.echotop==0)
self.xrds['R'] = da
da = xr.DataArray(self.hdf['NS']['SLV']['paramDSD'][:,12:37,:,1],
dims=['along_track', 'cross_track','range'],
coords={'lons': (['along_track','cross_track'],lons),
'lats': (['along_track','cross_track'],lats),
'time': (['along_track','cross_track'],self.datestr),
'alt':(['along_track', 'cross_track','range'],self.height)})
da.attrs['units'] = 'mm'
da.attrs['standard_name'] = 'retrieved Dm, from DPR algo'
if clutter:
da = da.where(self.xrds.clutter==0)
if echotop:
da = da.where(self.xrds.echotop==0)
da = da.where(da >= 0)
self.xrds['Dm_dpr'] = da
da = xr.DataArray(self.hdf['NS']['SLV']['paramDSD'][:,12:37,:,0],
dims=['along_track', 'cross_track','range'],
coords={'lons': (['along_track','cross_track'],lons),
'lats': (['along_track','cross_track'],lats),
'time': (['along_track','cross_track'],self.datestr),
'alt':(['along_track', 'cross_track','range'],self.height)})
da.attrs['units'] = 'dBNw'
da.attrs['standard_name'] = 'retrieved Nw, from DPR algo'
if clutter:
da = da.where(self.xrds.clutter==0)
if echotop:
da = da.where(self.xrds.echotop==0)
da = da.where(da >= 0)
self.xrds['Nw_dpr'] = da
if self.precip:
                #change this to 10 if you want to relax the condition, because the Ka band has poor sensitivity
self.xrds = self.xrds.where(self.xrds.flagPrecip>=precipflag)
# if self.snow:
# self.xrds = self.xrds.where(self.xrds.flagSurfaceSnow==1)
if self.corners is not None:
self.setboxcoords()
#to reduce size of data, drop empty cross-track sections
# self.xrds = self.xrds.dropna(dim='along_track',how='all')
#as before, makes sure there is data...
if self.xrds.along_track.shape[0]==0:
self.killflag = True
else:
da = xr.DataArray(self.hdf['FS']['Experimental']['flagSurfaceSnowfall'][:,:],
dims=['along_track', 'cross_track'],
coords={'lons': (['along_track','cross_track'],lons),
'lats': (['along_track','cross_track'],lats),
'time': (['along_track','cross_track'],self.datestr)})
                da = da.fillna(value=255)
da.attrs['units'] = 'none'
da.attrs['standard_name'] = 'experimental flag to diagnose snow at surface'
#make xr dataset
self.xrds = da.to_dataset(name = 'flagSurfaceSnow')
#
#ADD BBtop and Bottom
da = xr.DataArray(self.hdf['FS']['CSF']['binBBTop'][:,:],
dims=['along_track', 'cross_track'],
coords={'lons': (['along_track','cross_track'],lons),
'lats': (['along_track','cross_track'],lats),
'time': (['along_track','cross_track'],self.datestr)})
                da = da.fillna(value=-9999)
da.attrs['units'] = 'none'
da.attrs['standard_name'] = 'ind of BBtop'
self.xrds['binBBTop'] = da
da = xr.DataArray(self.hdf['FS']['CSF']['binBBBottom'][:,:],
dims=['along_track', 'cross_track'],
coords={'lons': (['along_track','cross_track'],lons),
'lats': (['along_track','cross_track'],lats),
'time': (['along_track','cross_track'],self.datestr)})
                da = da.fillna(value=-9999)
da.attrs['units'] = 'none'
                da.attrs['standard_name'] = 'ind of BBbottom'
self.xrds['binBBBottom'] = da
da = xr.DataArray(self.hdf['FS']['PRE']['flagPrecip'][:,:],
dims=['along_track', 'cross_track'],
coords={'lons': (['along_track','cross_track'],lons),
'lats': (['along_track','cross_track'],lats),
'time': (['along_track','cross_track'],self.datestr)})
                da = da.fillna(value=-9999)
da.attrs['units'] = 'none'
                da.attrs['standard_name'] = 'flag to diagnose precip at surface. ' + \
                '11 is precip from both, 10 is precip from just Ku-band'
#fill dataset
self.xrds['flagPrecip'] = da
#
typePrecip = self.hdf['FS']['CSF']['typePrecip'][:]
typePrecip = np.asarray(typePrecip,dtype=float)
ind = np.where(typePrecip == -1111)
typePrecip[ind] = np.nan
ind = np.where(typePrecip == -9999)
typePrecip[ind] = np.nan
typePrecip = np.trunc(typePrecip/10000000)
typePrecip = np.asarray(typePrecip,dtype=int)
da = xr.DataArray(typePrecip, dims=['along_track', 'cross_track'],
coords={'lons': (['along_track','cross_track'],lons),
'lats': (['along_track','cross_track'],lats),
'time': (['along_track','cross_track'],self.datestr)})
                da = da.fillna(value=-9999)
da.attrs['units'] = 'none'
da.attrs['standard_name'] = 'flag to diagnose raintype. If 1: Strat. If 2: Conv. If 3:other '
self.xrds['typePrecip'] = da
                #Get the phaseNearSurface (0 is snow, 1 is mixed, 2 is rain, 2.55 is missing)
phaseNearSurface = self.hdf['FS']['SLV']['phaseNearSurface'][:,:]/100
phaseNearSurface[phaseNearSurface == 2.55] = -9999
phaseNearSurface =np.asarray(np.trunc(phaseNearSurface),dtype=int)
da = xr.DataArray(phaseNearSurface, dims=['along_track', 'cross_track'],
coords={'lons': (['along_track','cross_track'],lons),
'lats': (['along_track','cross_track'],lats),
'time': (['along_track','cross_track'],self.datestr)})
                da = da.fillna(value=-9999)
da.attrs['units'] = 'none'
                da.attrs['standard_name'] = 'flag to diagnose near surface phase. ' + \
                '0 is snow, 1 is mixed, 2 is rain. This is included to compare to Skofronick-Jackson 2019'
self.xrds['phaseNearSurface'] = da
#Get the precipRateNearSurf (needed for skofronick-jackson 2019 comparison)
precipRateNearSurface = self.hdf['FS']['SLV']['precipRateNearSurface'][:,:]
da = xr.DataArray(precipRateNearSurface,
dims=['along_track', 'cross_track'],
coords={'lons': (['along_track','cross_track'],lons),
'lats': (['along_track','cross_track'],lats),
'time': (['along_track','cross_track'],self.datestr)})
                da = da.fillna(value=-9999)
                da.attrs['units'] = 'mm hr^-1'
da.attrs['standard_name'] = 'Near surface R from the GPM-DPR algo.'
self.xrds['precipRateNearSurface'] = da
if clutter:
self.get_highest_clutter_bin()
da = xr.DataArray(self.dummy,
dims=['along_track', 'cross_track','range'],
coords={'lons': (['along_track','cross_track'],lons),
'lats': (['along_track','cross_track'],lats),
'time': (['along_track','cross_track'],self.datestr),
'alt':(['along_track', 'cross_track','range'],self.height)})
da.attrs['units'] = 'none'
da.attrs['standard_name'] = 'flag to remove ground clutter'
self.xrds['clutter'] = da
#note, the v07 files use zFactorFinalNearSurf... have to adjust the key here
if self.v07:
temp_key = 'zFactorFinalNearSurface'
else:
temp_key = 'zFactorCorrectedNearSurface'
da = xr.DataArray(self.hdf['FS']['SLV'][temp_key][:,:,0],
dims=['along_track', 'cross_track'],
coords={'lons': (['along_track','cross_track'],lons),
'lats': (['along_track','cross_track'],lats),
'time': (['along_track','cross_track'],self.datestr)})
da.attrs['units'] = 'dBZ'
da.attrs['standard_name'] = 'near surface Ku'
da = da.where(da >= 12)
self.xrds['nearsurfaceKu'] = da
da = xr.DataArray(self.hdf['FS']['SLV'][temp_key][:,:,1],
dims=['along_track', 'cross_track'],
coords={'lons': (['along_track','cross_track'],lons),
'lats': (['along_track','cross_track'],lats),
'time': (['along_track','cross_track'],self.datestr)})
da.attrs['units'] = 'dBZ'
da.attrs['standard_name'] = 'near surface Ka'
da = da.where(da >= 15)
self.xrds['nearsurfaceKa'] = da
#note, the v07 files use zFactorFinal.. have to adjust the key here
if self.v07:
temp_key = 'zFactorFinal'
else:
temp_key = 'zFactorCorrected'
da = xr.DataArray(self.hdf['FS']['SLV'][temp_key][:,:,:,0],
dims=['along_track', 'cross_track','range'],
coords={'lons': (['along_track','cross_track'],lons),
'lats': (['along_track','cross_track'],lats),
'time': (['along_track','cross_track'],self.datestr),
'alt':(['along_track', 'cross_track','range'],self.height)})
da.attrs['units'] = 'dBZ'
da.attrs['standard_name'] = 'corrected KuPR'
if clutter:
da = da.where(self.xrds.clutter==0)
da = da.where(da >= 12)
self.xrds['NSKu_c'] = da
da = xr.DataArray(self.hdf['FS']['SLV']['epsilon'][:,:,:],
dims=['along_track', 'cross_track','range'],
coords={'lons': (['along_track','cross_track'],lons),
'lats': (['along_track','cross_track'],lats),
'time': (['along_track','cross_track'],self.datestr),
'alt':(['along_track', 'cross_track','range'],self.height)})
                da = da.fillna(value=-9999.9)
da = da.where(da >= 0)
da.attrs['units'] = 'none'
da.attrs['standard_name'] = 'epsilon value for retrieval'
self.xrds['epsilon'] = da
da = xr.DataArray(self.hdf['FS']['SLV'][temp_key][:,:,:,1],
dims=['along_track', 'cross_track','range'],
coords={'lons': (['along_track','cross_track'],lons),
'lats': (['along_track','cross_track'],lats),
'time': (['along_track','cross_track'],self.datestr),
'alt':(['along_track', 'cross_track','range'],self.height)})
da.attrs['units'] = 'dBZ'
da.attrs['standard_name'] = 'corrected KaPR, MS scan'
if clutter:
da = da.where(self.xrds.clutter==0)
da = da.where(da >= 15)
self.xrds['MSKa_c'] = da
if echotop:
self.echotop()
da = xr.DataArray(self.dummy2,
dims=['along_track', 'cross_track','range'],
coords={'lons': (['along_track','cross_track'],lons),
'lats': (['along_track','cross_track'],lats),
'time': (['along_track','cross_track'],self.datestr),
'alt':(['along_track', 'cross_track','range'],self.height)})
da.attrs['units'] = 'none'
da.attrs['standard_name'] = 'flag to remove noise outside cloud/precip top'
self.xrds['echotop'] = da
da = xr.DataArray(self.hdf['FS']['PRE']['zFactorMeasured'][:,:,:,0],
dims=['along_track', 'cross_track','range'],
coords={'lons': (['along_track','cross_track'],lons),
'lats': (['along_track','cross_track'],lats),
'time': (['along_track','cross_track'],self.datestr),
'alt':(['along_track', 'cross_track','range'],self.height)})
da.attrs['units'] = 'dBZ'
da.attrs['standard_name'] = 'measured KuPR'
if clutter:
da = da.where(self.xrds.clutter==0)
if echotop:
da = da.where(self.xrds.echotop==0)
da = da.where(da >= 0)
self.xrds['NSKu'] = da
da = xr.DataArray(self.hdf['FS']['PRE']['zFactorMeasured'][:,:,:,1],
dims=['along_track', 'cross_track','range'],
coords={'lons': (['along_track','cross_track'],lons),
'lats': (['along_track','cross_track'],lats),
'time': (['along_track','cross_track'],self.datestr),
'alt':(['along_track', 'cross_track','range'],self.height)})
da.attrs['units'] = 'dBZ'
da.attrs['standard_name'] = 'measured KaPR, MS scan'
if clutter:
da = da.where(self.xrds.clutter==0)
if echotop:
da = da.where(self.xrds.echotop==0)
da = da.where(da >= 0)
self.xrds['MSKa'] = da
da = xr.DataArray(self.hdf['FS']['SLV']['precipRate'][:,:,:],
dims=['along_track', 'cross_track','range'],
coords={'lons': (['along_track','cross_track'],lons),
'lats': (['along_track','cross_track'],lats),
'time': (['along_track','cross_track'],self.datestr),
'alt':(['along_track', 'cross_track','range'],self.height)})
da.attrs['units'] = 'mm hr^-1'
da.attrs['standard_name'] = 'retrieved R, from DPR algo'
if clutter:
da = da.where(self.xrds.clutter==0)
if echotop:
da = da.where(self.xrds.echotop==0)
self.xrds['R'] = da
da = xr.DataArray(self.hdf['FS']['SLV']['paramDSD'][:,:,:,1],
dims=['along_track', 'cross_track','range'],
coords={'lons': (['along_track','cross_track'],lons),
'lats': (['along_track','cross_track'],lats),
'time': (['along_track','cross_track'],self.datestr),
'alt':(['along_track', 'cross_track','range'],self.height)})
da.attrs['units'] = 'mm'
da.attrs['standard_name'] = 'retrieved Dm, from DPR algo'
if clutter:
da = da.where(self.xrds.clutter==0)
if echotop:
da = da.where(self.xrds.echotop==0)
da = da.where(da >= 0)
self.xrds['Dm_dpr'] = da
da = xr.DataArray(self.hdf['FS']['SLV']['paramDSD'][:,:,:,0],
dims=['along_track', 'cross_track','range'],
coords={'lons': (['along_track','cross_track'],lons),
'lats': (['along_track','cross_track'],lats),
'time': (['along_track','cross_track'],self.datestr),
'alt':(['along_track', 'cross_track','range'],self.height)})
da.attrs['units'] = 'dBNw'
da.attrs['standard_name'] = 'retrieved Nw, from DPR algo'
if clutter:
da = da.where(self.xrds.clutter==0)
if echotop:
da = da.where(self.xrds.echotop==0)
da = da.where(da >= 0)
self.xrds['Nw_dpr'] = da
if self.precip:
                #change this to 10 if you want to relax the condition, because the Ka band has poor sensitivity
self.xrds = self.xrds.where(self.xrds.flagPrecip>=precipflag)
# if self.snow:
# self.xrds = self.xrds.where(self.xrds.flagSurfaceSnow==1)
if self.corners is not None:
self.setboxcoords()
#to reduce size of data, drop empty cross-track sections
# self.xrds = self.xrds.dropna(dim='along_track',how='all')
#as before, makes sure there is data...
if self.xrds.along_track.shape[0]==0:
self.killflag = True
def get_highest_clutter_bin(self):
"""
This method makes us ground clutter conservative by supplying a clutter mask to apply to the fields.
        It is based on the algorithm output 'binClutterFreeBottom', which can be a bit conservative (~1 km)
"""
if self.legacy:
if self.outer_swath:
ku = self.hdf['NS']['PRE']['binClutterFreeBottom'][:,:]
ku = np.reshape(ku,[1,ku.shape[0],ku.shape[1]])
ka = np.ones([len(self.along_track),len(self.cross_track)],dtype=int)*-9999
ka[:,12:37] = self.hdf['MS']['PRE']['binClutterFreeBottom'][:]
ka = np.reshape(ka,[1,ka.shape[0],ka.shape[1]])
both = np.vstack([ku,ka])
pick_max = np.argmin(both,axis=0)
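                #argmin keeps, per footprint, the smaller (higher-altitude) clutter-free-bottom bin
                #of the two bands, so the combined mask stays conservative where both Ku and Ka exist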
ku = self.hdf['NS']['PRE']['binClutterFreeBottom'][:,:]
ka = np.ones([len(self.along_track),len(self.cross_track)],dtype=int)*-9999
ka[:,12:37] = self.hdf['MS']['PRE']['binClutterFreeBottom'][:]
inds_to_pick = np.zeros(ku.shape,dtype=int)
ind = np.where(pick_max == 0)
inds_to_pick[ind] = ku[ind]
ind = np.where(pick_max == 1)
inds_to_pick[ind] = ka[ind]
dummy_matrix = np.ma.zeros([inds_to_pick.shape[0],inds_to_pick.shape[1],176])
for i in np.arange(0,dummy_matrix.shape[0]):
for j in np.arange(0,dummy_matrix.shape[1]):
dummy_matrix[i,j,inds_to_pick[i,j]:] = 1
self.dummy = np.ma.asarray(dummy_matrix,dtype=int)
else:
ku = self.hdf['NS']['PRE']['binClutterFreeBottom'][:,12:37]
ku = np.reshape(ku,[1,ku.shape[0],ku.shape[1]])
ka = self.hdf['MS']['PRE']['binClutterFreeBottom'][:]
ka = np.reshape(ka,[1,ka.shape[0],ka.shape[1]])
both = np.vstack([ku,ka])
pick_max = np.argmin(both,axis=0)
ku = self.hdf['NS']['PRE']['binClutterFreeBottom'][:,12:37]
ka = self.hdf['MS']['PRE']['binClutterFreeBottom'][:]
inds_to_pick = np.zeros(ku.shape,dtype=int)
ind = np.where(pick_max == 0)
inds_to_pick[ind] = ku[ind]
ind = np.where(pick_max == 1)
inds_to_pick[ind] = ka[ind]
dummy_matrix = np.ma.zeros([inds_to_pick.shape[0],inds_to_pick.shape[1],176])
for i in np.arange(0,dummy_matrix.shape[0]):
for j in np.arange(0,dummy_matrix.shape[1]):
dummy_matrix[i,j,inds_to_pick[i,j]:] = 1
self.dummy = np.ma.asarray(dummy_matrix,dtype=int)
else:
ku = self.hdf['FS']['PRE']['binClutterFreeBottom'][:,:]
ku = np.reshape(ku,[1,ku.shape[0],ku.shape[1]])
ka = np.ones([len(self.along_track),len(self.cross_track)],dtype=int)*-9999
ka[:,12:37] = self.hdf['FS']['PRE']['binClutterFreeBottom'][:,12:37]
ka = np.reshape(ka,[1,ka.shape[0],ka.shape[1]])
both = np.vstack([ku,ka])
pick_max = np.argmin(both,axis=0)
ku = self.hdf['FS']['PRE']['binClutterFreeBottom'][:,:]
ka = np.ones([len(self.along_track),len(self.cross_track)],dtype=int)*-9999
ka[:,12:37] = self.hdf['FS']['PRE']['binClutterFreeBottom'][:,12:37]
inds_to_pick = np.zeros(ku.shape,dtype=int)
ind = np.where(pick_max == 0)
inds_to_pick[ind] = ku[ind]
ind = np.where(pick_max == 1)
inds_to_pick[ind] = ka[ind]
dummy_matrix = np.ma.zeros([inds_to_pick.shape[0],inds_to_pick.shape[1],176])
for i in np.arange(0,dummy_matrix.shape[0]):
for j in np.arange(0,dummy_matrix.shape[1]):
dummy_matrix[i,j,inds_to_pick[i,j]:] = 1
self.dummy = np.ma.asarray(dummy_matrix,dtype=int)
def echotop(self):
"""
This method takes the already clutter filtered data for the corrected reflectivity and cuts the
noisy uncorrected reflectivity to the same height. Again, the method is a bit conservative, but is
a good place to start.
"""
if self.legacy:
if self.outer_swath:
                #HEADS UP, will default to using Ku in the outer swath because there is no Ka
keeper = self.range
keeper = np.reshape(keeper,[1,keeper.shape[0]])
keeper = np.tile(keeper,(49,1))
keeper = np.reshape(keeper,[1,keeper.shape[0],keeper.shape[1]])
keeper = np.tile(keeper,(self.xrds.NSKu_c.values.shape[0],1,1))
keeper[np.isnan(self.xrds.NSKu_c)] = 9999
inds_to_pick = np.argmin(keeper,axis=2)
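                #with NaN gates pushed to 9999, argmin returns the first (highest) valid gate,
                #i.e. the echo top; every gate above it gets flagged for removal below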
dummy_matrix = np.ma.zeros([inds_to_pick.shape[0],inds_to_pick.shape[1],176])
for i in np.arange(0,dummy_matrix.shape[0]):
for j in np.arange(0,dummy_matrix.shape[1]):
dummy_matrix[i,j,:inds_to_pick[i,j]] = 1
self.dummy2 = np.ma.asarray(dummy_matrix,dtype=int)
else:
keeper = self.range
keeper = np.reshape(keeper,[1,keeper.shape[0]])
keeper = np.tile(keeper,(25,1))
keeper = np.reshape(keeper,[1,keeper.shape[0],keeper.shape[1]])
keeper = np.tile(keeper,(self.xrds.MSKa_c.values.shape[0],1,1))
keeper[np.isnan(self.xrds.MSKa_c)] = 9999
inds_to_pick = np.argmin(keeper,axis=2)
dummy_matrix = np.ma.zeros([inds_to_pick.shape[0],inds_to_pick.shape[1],176])
for i in np.arange(0,dummy_matrix.shape[0]):
for j in np.arange(0,dummy_matrix.shape[1]):
dummy_matrix[i,j,:inds_to_pick[i,j]] = 1
self.dummy2 = np.ma.asarray(dummy_matrix,dtype=int)
else:
            #HEADS UP, will default to using Ku in the outer swath because there is no Ka
keeper = self.range
keeper = np.reshape(keeper,[1,keeper.shape[0]])
keeper = np.tile(keeper,(49,1))
keeper = np.reshape(keeper,[1,keeper.shape[0],keeper.shape[1]])
keeper = np.tile(keeper,(self.xrds.NSKu_c.values.shape[0],1,1))
keeper[np.isnan(self.xrds.NSKu_c)] = 9999
inds_to_pick = np.argmin(keeper,axis=2)
dummy_matrix = np.ma.zeros([inds_to_pick.shape[0],inds_to_pick.shape[1],176])
for i in np.arange(0,dummy_matrix.shape[0]):
for j in np.arange(0,dummy_matrix.shape[1]):
dummy_matrix[i,j,:inds_to_pick[i,j]] = 1
self.dummy2 = np.ma.asarray(dummy_matrix,dtype=int)
def setboxcoords(self):
"""
This method sets all points outside the box to nan.
"""
if len(self.corners) > 0:
self.ll_lon = self.corners[0]
self.ur_lon = self.corners[1]
self.ll_lat = self.corners[2]
self.ur_lat = self.corners[3]
self.xrds = self.xrds.where((self.xrds.lons >= self.ll_lon) & (self.xrds.lons <= self.ur_lon) & (self.xrds.lats >= self.ll_lat) & (self.xrds.lats <= self.ur_lat),drop=False)
else:
            print('ERROR, no box coords set...did you mean to do this?')
def parse_dtime(self):
"""
        This method creates datetime objects from the hdf file in a timely manner.
        Typically run this after you have already filtered for precip/snow to save additional time.
"""
if self.legacy:
if self.outer_swath:
year = self.hdf['NS']['ScanTime']['Year'][:]
ind = np.where(year == -9999)[0]
year = np.asarray(year,dtype=str)
year = list(year)
month = self.hdf['NS']['ScanTime']['Month'][:]
month = np.asarray(month,dtype=str)
month = np.char.rjust(month, 2, fillchar='0')
month = list(month)
day = self.hdf['NS']['ScanTime']['DayOfMonth'][:]
day = np.asarray(day,dtype=str)
day = np.char.rjust(day, 2, fillchar='0')
day = list(day)
hour = self.hdf['NS']['ScanTime']['Hour'][:]
hour = np.asarray(hour,dtype=str)
hour = np.char.rjust(hour, 2, fillchar='0')
hour = list(hour)
minute = self.hdf['NS']['ScanTime']['Minute'][:]
minute = np.asarray(minute,dtype=str)
minute = np.char.rjust(minute, 2, fillchar='0')
minute = list(minute)
second = self.hdf['NS']['ScanTime']['Second'][:]
second = np.asarray(second,dtype=str)
second = np.char.rjust(second, 2, fillchar='0')
second = list(second)
datestr = [year[i] +"-"+ month[i]+ "-" + day[i] + \
' ' + hour[i] + ':' + minute[i] + ':' + second[i] for i in range(len(year))]
datestr = np.asarray(datestr,dtype=str)
datestr[ind] = '1970-01-01 00:00:00'
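                #scans with missing time stamps were set to the epoch placeholder above; now tile the
                #per-scan time across all 49 cross-track footprints so it matches the 2-D swath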
datestr = np.reshape(datestr,[len(datestr),1])
datestr = np.tile(datestr,(1,49))
self.datestr = np.asarray(datestr,dtype=np.datetime64)
else:
year = self.hdf['MS']['ScanTime']['Year'][:]
ind = np.where(year == -9999)[0]
year = np.asarray(year,dtype=str)
year = list(year)
month = self.hdf['MS']['ScanTime']['Month'][:]
month = np.asarray(month,dtype=str)
month = np.char.rjust(month, 2, fillchar='0')
month = list(month)
day = self.hdf['MS']['ScanTime']['DayOfMonth'][:]
day = np.asarray(day,dtype=str)
day = np.char.rjust(day, 2, fillchar='0')
day = list(day)
hour = self.hdf['MS']['ScanTime']['Hour'][:]
hour = np.asarray(hour,dtype=str)
hour = np.char.rjust(hour, 2, fillchar='0')
hour = list(hour)
minute = self.hdf['MS']['ScanTime']['Minute'][:]
minute = np.asarray(minute,dtype=str)
minute = np.char.rjust(minute, 2, fillchar='0')
minute = list(minute)
second = self.hdf['MS']['ScanTime']['Second'][:]
second = np.asarray(second,dtype=str)
second = np.char.rjust(second, 2, fillchar='0')
second = list(second)
datestr = [year[i] +"-"+ month[i]+ "-" + day[i] + \
' ' + hour[i] + ':' + minute[i] + ':' + second[i] for i in range(len(year))]
datestr = np.asarray(datestr,dtype=str)
datestr[ind] = '1970-01-01 00:00:00'
datestr = np.reshape(datestr,[len(datestr),1])
datestr = np.tile(datestr,(1,25))
self.datestr = np.asarray(datestr,dtype=np.datetime64)
else:
year = self.hdf['FS']['ScanTime']['Year'][:]
ind = np.where(year == -9999)[0]
year = np.asarray(year,dtype=str)
year = list(year)
month = self.hdf['FS']['ScanTime']['Month'][:]
month = np.asarray(month,dtype=str)
month = np.char.rjust(month, 2, fillchar='0')
month = list(month)
day = self.hdf['FS']['ScanTime']['DayOfMonth'][:]
day = np.asarray(day,dtype=str)
day = np.char.rjust(day, 2, fillchar='0')
day = list(day)
hour = self.hdf['FS']['ScanTime']['Hour'][:]
hour = np.asarray(hour,dtype=str)
hour = np.char.rjust(hour, 2, fillchar='0')
hour = list(hour)
minute = self.hdf['FS']['ScanTime']['Minute'][:]
minute = np.asarray(minute,dtype=str)
minute = np.char.rjust(minute, 2, fillchar='0')
minute = list(minute)
second = self.hdf['FS']['ScanTime']['Second'][:]
second = np.asarray(second,dtype=str)
second = np.char.rjust(second, 2, fillchar='0')
second = list(second)
datestr = [year[i] +"-"+ month[i]+ "-" + day[i] + \
' ' + hour[i] + ':' + minute[i] + ':' + second[i] for i in range(len(year))]
datestr = np.asarray(datestr,dtype=str)
datestr[ind] = '1970-01-01 00:00:00'
datestr = np.reshape(datestr,[len(datestr),1])
datestr = np.tile(datestr,(1,49))
self.datestr = np.asarray(datestr,dtype=np.datetime64)
def run_retrieval(self,path_to_models=None,old=False,notebook=False):
"""
        This method is a way to run our neural-network-trained retrieval to get Dm in snowfall.
        Please see this AMS presentation until the paper comes out: *LINK HERE*.
        This method requires tensorflow, so make sure it is installed.
"""
#load scalers
from pickle import load
import tensorflow as tf
from tensorflow.python.keras import losses
        #set number of threads = 1; TF threading was crashing my parallel code. Skipped when running in a notebook.
if notebook:
pass
else:
tf.config.threading.set_inter_op_parallelism_threads(1)
# tf.config.threading.set_intra_op_parallelism_threads(1)
# print('Number of threads set to {}'.format(tf.config.threading.get_inter_op_parallelism_threads()))
if old:
scaler_X = load(open('/data/gpm/a/randyjc2/DRpy/drpy/models/scaler_X.pkl', 'rb'))
scaler_y = load(open('/data/gpm/a/randyjc2/DRpy/drpy/models/scaler_y.pkl', 'rb'))
else:
scaler_X = load(open('/data/gpm/a/randyjc2/DRpy/drpy/models/scaler_X_V2.pkl', 'rb'))
scaler_y = load(open('/data/gpm/a/randyjc2/DRpy/drpy/models/scaler_y_V2.pkl', 'rb'))
        #suppress warnings; they screw up my progress bar when running in parallel
def warn(*args, **kwargs):
pass
import warnings
warnings.warn = warn
if path_to_models is None:
print('Please insert path to NN models')
else:
if old:
model = tf.keras.models.load_model(path_to_models + 'NN_4by8.h5',custom_objects=None,compile=True)
else:
model = tf.keras.models.load_model(path_to_models + 'NN_6by8.h5',custom_objects=None,compile=True)
#now we have to reshape things to make sure they are in the right shape for the NN model [n_samples,n_features]
Ku = self.xrds.NSKu.values
shape_step1 = Ku.shape
Ku = Ku.reshape([Ku.shape[0],Ku.shape[1]*Ku.shape[2]])
shape_step2 = Ku.shape
Ku = Ku.reshape([Ku.shape[0]*Ku.shape[1]])
Ka = self.xrds.MSKa.values
Ka = Ka.reshape([Ka.shape[0],Ka.shape[1]*Ka.shape[2]])
Ka = Ka.reshape([Ka.shape[0]*Ka.shape[1]])
T = self.xrds['T'].values - 273.15 #expects in degC
T = T.reshape([T.shape[0],T.shape[1]*T.shape[2]])
T = T.reshape([T.shape[0]*T.shape[1]])
            #Make sure we only run it on non-nan values.
ind_masked = np.isnan(Ku)
ind_masked2 = np.isnan(Ka)
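            #note: the zero-filling below keys off the Ku-based mask (ind_masked) only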
Ku_nomask = np.zeros(Ku.shape)
Ka_nomask = np.zeros(Ka.shape)
T_nomask = np.zeros(T.shape)
Ku_nomask[~ind_masked] = Ku[~ind_masked]
Ka_nomask[~ind_masked] = Ka[~ind_masked]
T_nomask[~ind_masked] = T[~ind_masked]
ind = np.where(Ku_nomask!=0)[0]
#scale the input vectors by the mean that it was trained with
X = np.zeros([Ku_nomask.shape[0],3])
X[:,0] = (Ku_nomask - scaler_X.mean_[0])/scaler_X.scale_[0] #ku
X[:,1] = ((Ku_nomask - Ka_nomask)- scaler_X.mean_[1])/scaler_X.scale_[1] #dfr
X[:,2] = (T_nomask - scaler_X.mean_[2])/scaler_X.scale_[2] #T
#
yhat = model.predict(X[ind,0:3],batch_size=len(X[ind,0]))
yhat = scaler_y.inverse_transform(yhat)
yhat[:,1] = 10**yhat[:,1] #unlog Dm liquid
yhat[:,2] = 10**yhat[:,2] #unlog Dm solid
            ind = np.where(Ku_nomask!=0)
|
numpy.where
|
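# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original file): run_retrieval above
# standardizes its [Ku, DFR, T] inputs with a pre-fitted sklearn StandardScaler
# (scaler_X) before calling the network. A minimal, self-contained version of
# that scaling step, with a scaler fitted on synthetic data purely for
# demonstration, could look like this:
import numpy as np
from sklearn.preprocessing import StandardScaler

rng = np.random.default_rng(0)
train = np.column_stack([rng.normal(20.0, 5.0, 1000),    # Ku reflectivity (dBZ)
                         rng.normal(2.0, 1.0, 1000),     # DFR = Ku - Ka (dB)
                         rng.normal(-10.0, 8.0, 1000)])  # temperature (deg C)
scaler_X = StandardScaler().fit(train)

ku, ka, t = 18.0, 15.5, -12.0
X = np.array([[ku, ku - ka, t]])
X_scaled = (X - scaler_X.mean_) / scaler_X.scale_   # equivalent to scaler_X.transform(X)
print(X_scaled)
# ---------------------------------------------------------------------------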
import scipy.io
import numpy as np
import scipy.misc
from matplotlib import pyplot as plt
from PIL import Image
from scipy import fftpack, signal
from scipy.linalg import circulant
import sklearn
from sklearn.decomposition import PCA
import sklearn.neighbors
import cv2
## shrinks matrix by ratio alpha
def downsample_shrink_matrix(mat, alpha):
# print(mat.shape)
(mat_shape_x, mat_shape_y) = mat.shape
new_size =(int(mat_shape_x / alpha), int(mat_shape_y / alpha))
downsampled = np.zeros(new_size)
for i in range(new_size[0]):
for j in range(new_size[1]):
downsampled[i, j] = mat[alpha * i, alpha * j]
return downsampled
def downsample_shrink_matrix_1d(mat, alpha):
(mat_shape_x, mat_shape_y) = mat.shape
new_size =(int(mat_shape_x / alpha), int(mat_shape_y))
downsampled = np.zeros(new_size)
for i in range(new_size[0]):
downsampled[i,:] = mat[alpha * i, :]
return downsampled
## only fills zeros wherever a point is going to be gone when shrinking
def downsample_zeros_matrix(mat, alpha):
(mat_shape_x, mat_shape_y) = mat.shape
for i in range(mat_shape_x):
if (i % alpha):
mat[i, :] = 0
mat[:, i] = 0
return mat
## upsample matrix by factor alpha, using cubic interpolation
def upsample_matrix(mat,alpha):
(mat_shape_x, mat_shape_y) = mat.shape
new_size = (int(mat_shape_y * alpha), int(mat_shape_x * alpha))
upsampled_filtered_image = cv2.resize(mat, dsize=new_size, interpolation=cv2.INTER_CUBIC)
return upsampled_filtered_image
## creates a gaussian matrix
def gaussian(window_size, range, mu, sigma):
z = np.linspace(-range, range, window_size)
x, y = np.meshgrid(z, z)
d = np.sqrt(x*x+y*y)
g = np.exp(-((d-mu)**2 / (2.0 * sigma**2)))
g = g / g.sum()
return g
## creates a sinc matrix
def sinc(window_size, range):
x = np.linspace(-range, range, window_size)
xx = np.outer(x, x)
s = np.sinc(xx)
s = s / s.sum()
return s
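## illustrative usage sketch (not part of the original code): the typical
## blur-then-decimate pipeline these helpers support. gaussian's second
## parameter shadows the builtin `range`, so it is passed positionally here.
if __name__ == '__main__':
    example_image = np.random.rand(64, 64)
    example_kernel = gaussian(9, 3, 0, 1.0)                      # 9x9 gaussian low-pass kernel
    example_blurred = signal.convolve2d(example_image, example_kernel,
                                        mode='same', boundary='symm')
    example_small = downsample_shrink_matrix(example_blurred, 2)  # 64x64 -> 32x32
    print(example_small.shape)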
def __sinc__(window_size):
#x = np.linspace(- filter_range, filter_range, window_size)
#xx = np.outer(x, x)
edge = window_size // 2
    x = np.linspace(-edge, edge, num=window_size)
|
numpy.linspace
|
from __future__ import (absolute_import, division, print_function)
"""
Module for plotting data on maps with matplotlib.
Contains the :class:`Basemap` class (which does most of the
heavy lifting), and the following functions:
:func:`interp`: bilinear interpolation between rectilinear grids.
:func:`maskoceans`: mask 'wet' points of an input array.
:func:`shiftgrid`: shifts global lat/lon grids east or west.
:func:`addcyclic`: Add cyclic (wraparound) point in longitude.
"""
from distutils.version import LooseVersion
try:
from urllib import urlretrieve
from urllib2 import urlopen
except ImportError:
from urllib.request import urlretrieve, urlopen
from matplotlib import __version__ as _matplotlib_version
try:
from inspect import cleandoc as dedent
except ImportError:
# Deprecated as of version 3.1. Not quite the same
# as textwrap.dedent.
from matplotlib.cbook import dedent
# check to make sure matplotlib is not too old.
_matplotlib_version = LooseVersion(_matplotlib_version)
_mpl_required_version = LooseVersion('0.98')
if _matplotlib_version < _mpl_required_version:
msg = dedent("""
your matplotlib is too old - basemap requires version %s or
higher, you have version %s""" %
(_mpl_required_version,_matplotlib_version))
raise ImportError(msg)
from matplotlib import rcParams, is_interactive
from matplotlib.collections import LineCollection, PolyCollection
from matplotlib.patches import Ellipse, Circle, Polygon, FancyArrowPatch
from matplotlib.lines import Line2D
from matplotlib.transforms import Bbox
import pyproj
from mpl_toolkits.axes_grid1 import make_axes_locatable
from matplotlib.image import imread
import sys, os, math
from .proj import Proj
import numpy as np
import numpy.ma as ma
import _geoslib
import functools
# basemap data files now installed in lib/matplotlib/toolkits/basemap/data
# check to see if environment variable BASEMAPDATA set to a directory,
# and if so look for the data there.
if 'BASEMAPDATA' in os.environ:
basemap_datadir = os.environ['BASEMAPDATA']
if not os.path.isdir(basemap_datadir):
raise RuntimeError('Path in environment BASEMAPDATA not a directory')
else:
from mpl_toolkits import basemap_data
basemap_datadir = os.path.abspath(list(basemap_data.__path__)[0])
__version__ = "1.3.1+dev"
# module variable that sets the default value for the 'latlon' kwarg.
# can be set to True by user so plotting functions can take lons,lats
# in degrees by default, instead of x,y (map projection coords in meters).
latlon_default = False
# supported map projections.
_projnames = {'cyl' : 'Cylindrical Equidistant',
'merc' : 'Mercator',
'tmerc' : 'Transverse Mercator',
'omerc' : 'Oblique Mercator',
'mill' : 'Miller Cylindrical',
'gall' : 'Gall Stereographic Cylindrical',
'cea' : 'Cylindrical Equal Area',
'lcc' : 'Lambert Conformal',
'laea' : 'Lambert Azimuthal Equal Area',
'nplaea' : 'North-Polar Lambert Azimuthal',
'splaea' : 'South-Polar Lambert Azimuthal',
'eqdc' : 'Equidistant Conic',
'aeqd' : 'Azimuthal Equidistant',
'npaeqd' : 'North-Polar Azimuthal Equidistant',
'spaeqd' : 'South-Polar Azimuthal Equidistant',
'aea' : 'Albers Equal Area',
'stere' : 'Stereographic',
'npstere' : 'North-Polar Stereographic',
'spstere' : 'South-Polar Stereographic',
'cass' : 'Cassini-Soldner',
'poly' : 'Polyconic',
'ortho' : 'Orthographic',
'geos' : 'Geostationary',
'nsper' : 'Near-Sided Perspective',
'sinu' : 'Sinusoidal',
'moll' : 'Mollweide',
'hammer' : 'Hammer',
'robin' : 'Robinson',
'kav7' : 'Kavrayskiy VII',
'eck4' : 'Eckert IV',
'vandg' : 'van der Grinten',
'mbtfpq' : 'McBryde-Thomas Flat-Polar Quartic',
'gnom' : 'Gnomonic',
'rotpole' : 'Rotated Pole',
}
supported_projections = []
for _items in _projnames.items():
supported_projections.append(" %-17s%-40s\n" % (_items))
supported_projections = ''.join(supported_projections)
_cylproj = ['cyl','merc','mill','gall','cea']
_pseudocyl = ['moll','robin','eck4','kav7','sinu','mbtfpq','vandg','hammer']
_dg2rad = math.radians(1.)
_rad2dg = math.degrees(1.)
# projection specific parameters.
projection_params = {'cyl' : 'corners only (no width/height)',
'merc' : 'corners plus lat_ts (no width/height)',
'tmerc' : 'lon_0,lat_0,k_0',
'omerc' : 'lon_0,lat_0,lat_1,lat_2,lon_1,lon_2,no_rot,k_0',
'mill' : 'corners only (no width/height)',
'gall' : 'corners only (no width/height)',
'cea' : 'corners only plus lat_ts (no width/height)',
'lcc' : 'lon_0,lat_0,lat_1,lat_2,k_0',
'laea' : 'lon_0,lat_0',
'nplaea' : 'bounding_lat,lon_0,lat_0,no corners or width/height',
'splaea' : 'bounding_lat,lon_0,lat_0,no corners or width/height',
'eqdc' : 'lon_0,lat_0,lat_1,lat_2',
'aeqd' : 'lon_0,lat_0',
'npaeqd' : 'bounding_lat,lon_0,lat_0,no corners or width/height',
'spaeqd' : 'bounding_lat,lon_0,lat_0,no corners or width/height',
'aea' : 'lon_0,lat_0,lat_1',
'stere' : 'lon_0,lat_0,lat_ts,k_0',
'npstere' : 'bounding_lat,lon_0,lat_0,no corners or width/height',
'spstere' : 'bounding_lat,lon_0,lat_0,no corners or width/height',
'cass' : 'lon_0,lat_0',
'poly' : 'lon_0,lat_0',
'ortho' : 'lon_0,lat_0,llcrnrx,llcrnry,urcrnrx,urcrnry,no width/height',
'geos' : 'lon_0,satellite_height,llcrnrx,llcrnry,urcrnrx,urcrnry,no width/height',
'nsper' : 'lon_0,satellite_height,llcrnrx,llcrnry,urcrnrx,urcrnry,no width/height',
'sinu' : 'lon_0,lat_0,no corners or width/height',
'moll' : 'lon_0,lat_0,no corners or width/height',
'hammer' : 'lon_0,lat_0,no corners or width/height',
'robin' : 'lon_0,lat_0,no corners or width/height',
'eck4' : 'lon_0,lat_0,no corners or width/height',
'kav7' : 'lon_0,lat_0,no corners or width/height',
'vandg' : 'lon_0,lat_0,no corners or width/height',
'mbtfpq' : 'lon_0,lat_0,no corners or width/height',
'gnom' : 'lon_0,lat_0',
'rotpole' : 'lon_0,o_lat_p,o_lon_p,corner lat/lon or corner x,y (no width/height)'
}
# create dictionary that maps epsg codes to Basemap kwargs.
epsgf = open(os.path.join(basemap_datadir, 'epsg'))
epsg_dict={}
for line in epsgf:
if line.startswith("#"):
continue
l = line.split()
code = l[0].strip("<>")
parms = ' '.join(l[1:-1])
_kw_args={}
for s in l[1:-1]:
try:
k,v = s.split('=')
except:
pass
k = k.strip("+")
if k=='proj':
if v == 'longlat': v = 'cyl'
if v not in _projnames:
continue
k='projection'
if k=='k':
k='k_0'
if k in ['projection','lat_1','lat_2','lon_0','lat_0',\
'a','b','k_0','lat_ts','ellps','datum']:
if k not in ['projection','ellps','datum']:
v = float(v)
_kw_args[k]=v
if 'projection' in _kw_args:
if 'a' in _kw_args:
if 'b' in _kw_args:
_kw_args['rsphere']=(_kw_args['a'],_kw_args['b'])
del _kw_args['b']
else:
_kw_args['rsphere']=_kw_args['a']
del _kw_args['a']
if 'datum' in _kw_args:
if _kw_args['datum'] == 'NAD83':
_kw_args['ellps'] = 'GRS80'
elif _kw_args['datum'] == 'NAD27':
_kw_args['ellps'] = 'clrk66'
elif _kw_args['datum'] == 'WGS84':
_kw_args['ellps'] = 'WGS84'
del _kw_args['datum']
# supported epsg projections.
# omerc not supported yet, since we can't handle
# alpha,gamma and lonc keywords.
if _kw_args['projection'] != 'omerc':
epsg_dict[code]=_kw_args
epsgf.close()
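# epsg_dict now maps each EPSG code (as a string) to the Basemap keyword arguments
# that reproduce that coordinate reference system; it is what lets a caller request
# a projection via the epsg keyword instead of spelling out projection parameters.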
# The __init__ docstring is pulled out here because it is so long;
# Having it in the usual place makes it hard to get from the
# __init__ argument list to the code that uses the arguments.
_Basemap_init_doc = """
Sets up a basemap with specified map projection.
and creates the coastline data structures in map projection
coordinates.
Calling a Basemap class instance with the arguments lon, lat will
convert lon/lat (in degrees) to x/y map projection coordinates
(in meters). The inverse transformation is done if the optional keyword
``inverse`` is set to True.
The desired projection is set with the projection keyword. Default is ``cyl``.
Supported values for the projection keyword are:
============== ====================================================
Value Description
============== ====================================================
%(supported_projections)s
============== ====================================================
For most map projections, the map projection region can either be
specified by setting these keywords:
.. tabularcolumns:: |l|L|
============== ====================================================
Keyword Description
============== ====================================================
llcrnrlon longitude of lower left hand corner of the desired map
domain (degrees).
llcrnrlat latitude of lower left hand corner of the desired map
domain (degrees).
urcrnrlon longitude of upper right hand corner of the desired map
domain (degrees).
urcrnrlat latitude of upper right hand corner of the desired map
domain (degrees).
============== ====================================================
or these
.. tabularcolumns:: |l|L|
============== ====================================================
Keyword Description
============== ====================================================
width width of desired map domain in projection coordinates
(meters).
height height of desired map domain in projection coordinates
(meters).
lon_0 center of desired map domain (in degrees).
lat_0 center of desired map domain (in degrees).
============== ====================================================
For ``sinu``, ``moll``, ``hammer``, ``npstere``, ``spstere``, ``nplaea``, ``splaea``,
``npaeqd``, ``spaeqd``, ``robin``, ``eck4``, ``kav7``, or ``mbtfpq``, the values of
llcrnrlon, llcrnrlat, urcrnrlon, urcrnrlat, width and height are ignored
(because either they are computed internally, or entire globe is
always plotted).
For the cylindrical projections (``cyl``, ``merc``, ``mill``, ``cea`` and ``gall``),
the default is to use
llcrnrlon=-180,llcrnrlat=-90, urcrnrlon=180 and urcrnrlat=90). For all other
projections except ``ortho``, ``geos`` and ``nsper``, either the lat/lon values of the
corners or width and height must be specified by the user.
For ``ortho``, ``geos`` and ``nsper``, the lat/lon values of the corners may be specified,
or the x/y values of the corners (llcrnrx,llcrnry,urcrnrx,urcrnry) in the
coordinate system of the global projection (with x=0,y=0 at the center
of the global projection). If the corners are not specified,
the entire globe is plotted.
For ``rotpole``, the lat/lon values of the corners on the unrotated sphere
may be provided as llcrnrlon,llcrnrlat,urcrnrlon,urcrnrlat, or the lat/lon
values of the corners on the rotated sphere can be given as
llcrnrx,llcrnry,urcrnrx,urcrnry.
Other keyword arguments:
.. tabularcolumns:: |l|L|
============== ====================================================
Keyword Description
============== ====================================================
resolution resolution of boundary database to use. Can be ``c``
(crude), ``l`` (low), ``i`` (intermediate), ``h``
(high), ``f`` (full) or None.
If None, no boundary data will be read in (and
class methods such as drawcoastlines will raise an
                   exception if invoked).
Resolution drops off by roughly 80%% between datasets.
Higher res datasets are much slower to draw.
Default ``c``. Coastline data is from the GSHHS
(http://www.soest.hawaii.edu/wessel/gshhs/gshhs.html).
State, country and river datasets from the Generic
Mapping Tools (http://gmt.soest.hawaii.edu).
area_thresh coastline or lake with an area smaller than
area_thresh in km^2 will not be plotted.
Default 10000,1000,100,10,1 for resolution
``c``, ``l``, ``i``, ``h``, ``f``.
rsphere radius of the sphere used to define map projection
(default 6370997 meters, close to the arithmetic mean
radius of the earth). If given as a sequence, the
first two elements are interpreted as the radii
of the major and minor axes of an ellipsoid.
Note: sometimes an ellipsoid is specified by the
major axis and an inverse flattening parameter (if).
The minor axis (b) can be computed from the major
axis (a) and the inverse flattening parameter using
the formula if = a/(a-b).
ellps string describing ellipsoid ('GRS80' or 'WGS84',
for example). If both rsphere and ellps are given,
rsphere is ignored. Default None. See pyproj.pj_ellps
for allowed values.
suppress_ticks suppress automatic drawing of axis ticks and labels
in map projection coordinates. Default True,
so parallels and meridians can be labelled instead.
If parallel or meridian labelling is requested
(using drawparallels and drawmeridians methods),
                   automatic tick labelling will be suppressed even if
suppress_ticks=False. suppress_ticks=False
is useful if you want to use your own custom tick
formatter, or if you want to let matplotlib label
the axes in meters using map projection
coordinates.
fix_aspect fix aspect ratio of plot to match aspect ratio
of map projection region (default True).
anchor determines how map is placed in axes rectangle
(passed to axes.set_aspect). Default is ``C``,
which means map is centered.
Allowed values are
``C``, ``SW``, ``S``, ``SE``, ``E``, ``NE``,
``N``, ``NW``, and ``W``.
celestial use astronomical conventions for longitude (i.e.
negative longitudes to the east of 0). Default False.
Implies resolution=None.
ax set default axes instance
(default None - matplotlib.pyplot.gca() may be used
to get the current axes instance).
If you do not want matplotlib.pyplot to be imported,
you can either set this to a pre-defined axes
instance, or use the ``ax`` keyword in each Basemap
method call that does drawing. In the first case,
all Basemap method calls will draw to the same axes
instance. In the second case, you can draw to
different axes with the same Basemap instance.
You can also use the ``ax`` keyword in individual
method calls to selectively override the default
axes instance.
============== ====================================================
The following keywords are map projection parameters which all default to
None. Not all parameters are used by all projections, some are ignored.
The module variable ``projection_params`` is a dictionary which
lists which parameters apply to which projections.
.. tabularcolumns:: |l|L|
================ ====================================================
Keyword Description
================ ====================================================
lat_ts latitude of true scale. Optional for stereographic,
cylindrical equal area and mercator projections.
default is lat_0 for stereographic projection.
default is 0 for mercator and cylindrical equal area
projections.
lat_1 first standard parallel for lambert conformal,
albers equal area and equidistant conic.
Latitude of one of the two points on the projection
centerline for oblique mercator. If lat_1 is not given, but
lat_0 is, lat_1 is set to lat_0 for lambert
conformal, albers equal area and equidistant conic.
lat_2 second standard parallel for lambert conformal,
albers equal area and equidistant conic.
Latitude of one of the two points on the projection
centerline for oblique mercator. If lat_2 is not
given it is set to lat_1 for lambert conformal,
albers equal area and equidistant conic.
lon_1 Longitude of one of the two points on the projection
centerline for oblique mercator.
lon_2 Longitude of one of the two points on the projection
centerline for oblique mercator.
k_0 Scale factor at natural origin (used
by 'tmerc', 'omerc', 'stere' and 'lcc').
no_rot only used by oblique mercator.
If set to True, the map projection coordinates will
not be rotated to true North. Default is False
(projection coordinates are automatically rotated).
lat_0 central latitude (y-axis origin) - used by all
projections.
lon_0 central meridian (x-axis origin) - used by all
projections.
o_lat_p latitude of rotated pole (only used by 'rotpole')
o_lon_p longitude of rotated pole (only used by 'rotpole')
boundinglat bounding latitude for pole-centered projections
(npstere,spstere,nplaea,splaea,npaeqd,spaeqd).
These projections are square regions centered
on the north or south pole.
The longitude lon_0 is at 6-o'clock, and the
latitude circle boundinglat is tangent to the edge
of the map at lon_0.
round cut off pole-centered projection at boundinglat
(so plot is a circle instead of a square). Only
relevant for npstere,spstere,nplaea,splaea,npaeqd
or spaeqd projections. Default False.
satellite_height height of satellite (in m) above equator -
only relevant for geostationary
and near-sided perspective (``geos`` or ``nsper``)
projections. Default 35,786 km.
================ ====================================================
Useful instance variables:
.. tabularcolumns:: |l|L|
================ ====================================================
Variable Name Description
================ ====================================================
projection map projection. Print the module variable
``supported_projections`` to see a list of allowed
values.
epsg EPSG code defining projection (see
http://spatialreference.org for a list of
EPSG codes and their definitions).
aspect map aspect ratio
(size of y dimension / size of x dimension).
llcrnrlon longitude of lower left hand corner of the
selected map domain.
llcrnrlat latitude of lower left hand corner of the
selected map domain.
urcrnrlon longitude of upper right hand corner of the
selected map domain.
urcrnrlat latitude of upper right hand corner of the
selected map domain.
llcrnrx x value of lower left hand corner of the
selected map domain in map projection coordinates.
llcrnry y value of lower left hand corner of the
selected map domain in map projection coordinates.
urcrnrx x value of upper right hand corner of the
selected map domain in map projection coordinates.
urcrnry y value of upper right hand corner of the
selected map domain in map projection coordinates.
rmajor equatorial radius of ellipsoid used (in meters).
rminor polar radius of ellipsoid used (in meters).
resolution resolution of boundary dataset being used (``c``
for crude, ``l`` for low, etc.).
If None, no boundary dataset is associated with the
Basemap instance.
proj4string the string describing the map projection that is
used by PROJ.4.
================ ====================================================
**Converting from Geographic (lon/lat) to Map Projection (x/y) Coordinates**
Calling a Basemap class instance with the arguments lon, lat will
convert lon/lat (in degrees) to x/y map projection
coordinates (in meters). If optional keyword ``inverse`` is
True (default is False), the inverse transformation from x/y
to lon/lat is performed.
For cylindrical equidistant projection (``cyl``), this
does nothing (i.e. x,y == lon,lat).
For non-cylindrical projections, the inverse transformation
always returns longitudes between -180 and 180 degrees. For
cylindrical projections (self.projection == ``cyl``, ``mill``,
``cea``, ``gall`` or ``merc``)
the inverse transformation will return longitudes between
self.llcrnrlon and self.llcrnrlat.
Input arguments lon, lat can be either scalar floats, sequences
or numpy arrays.
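As a quick illustrative sketch of the forward and inverse transform
(assuming a simple global cylindrical instance named ``m``; the values
shown are arbitrary):

>>> m = Basemap(projection='cyl')      # hypothetical global map
>>> x, y = m(-105.0, 40.0)             # lon/lat (degrees) to x/y
>>> lon, lat = m(x, y, inverse=True)   # x/y back to lon/lat

For ``cyl`` the two coordinate systems coincide, so x,y equal lon,lat.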
**Example Usage:**
>>> from mpl_toolkits.basemap import Basemap
>>> import numpy as np
>>> import matplotlib.pyplot as plt
>>> # read in topo data (on a regular lat/lon grid)
>>> etopo = np.loadtxt('etopo20data.gz')
>>> lons = np.loadtxt('etopo20lons.gz')
>>> lats = np.loadtxt('etopo20lats.gz')
>>> # create Basemap instance for Robinson projection.
>>> m = Basemap(projection='robin',lon_0=0.5*(lons[0]+lons[-1]))
>>> # compute map projection coordinates for lat/lon grid.
>>> x, y = m(*np.meshgrid(lons,lats))
>>> # make filled contour plot.
>>> cs = m.contourf(x,y,etopo,30,cmap=plt.cm.jet)
>>> m.drawcoastlines() # draw coastlines
>>> m.drawmapboundary() # draw a line around the map region
>>> m.drawparallels(np.arange(-90.,120.,30.),labels=[1,0,0,0]) # draw parallels
>>> m.drawmeridians(np.arange(0.,420.,60.),labels=[0,0,0,1]) # draw meridians
>>> plt.title('Robinson Projection') # add a title
>>> plt.show()
[this example (simpletest.py), plus many others, can be found in the
examples directory of the source distribution. The "OO" version of this
example (which does not use matplotlib.pyplot) is called "simpletest_oo.py".]
""" % locals()
# unsupported projection error message.
_unsupported_projection = ["'%s' is an unsupported projection.\n"]
_unsupported_projection.append("The supported projections are:\n")
_unsupported_projection.append(supported_projections)
_unsupported_projection = ''.join(_unsupported_projection)
def _validated_ll(param, name, minval, maxval):
param = float(param)
if param > maxval or param < minval:
raise ValueError('%s must be between %f and %f degrees' %
(name, minval, maxval))
return param
def _validated_or_none(param, name, minval, maxval):
if param is None:
return None
return _validated_ll(param, name, minval, maxval)
def _insert_validated(d, param, name, minval, maxval):
if param is not None:
d[name] = _validated_ll(param, name, minval, maxval)
def _transform(plotfunc):
# shift data and longitudes to map projection region, then compute
# transformation to map projection coordinates.
@functools.wraps(plotfunc)
def with_transform(self,x,y,data,*args,**kwargs):
# input coordinates are latitude/longitude, not map projection coords.
if kwargs.pop('latlon', latlon_default):
# shift data to map projection region for
# cylindrical and pseudo-cylindrical projections.
if self.projection in _cylproj or self.projection in _pseudocyl:
x, data = self.shiftdata(x, data,
fix_wrap_around=plotfunc.__name__ not in ["scatter"])
# convert lat/lon coords to map projection coords.
x, y = self(x,y)
return plotfunc(self,x,y,data,*args,**kwargs)
return with_transform
def _transform1d(plotfunc):
# shift data and longitudes to map projection region, then compute
# transformation to map projection coordinates.
@functools.wraps(plotfunc)
def with_transform(self,x,y,*args,**kwargs):
x = np.asarray(x)
# input coordinates are latitude/longitude, not map projection coords.
if kwargs.pop('latlon', latlon_default):
# shift data to map projection region for
# cylindrical and pseudo-cylindrical projections.
if self.projection in _cylproj or self.projection in _pseudocyl:
if x.ndim == 1:
x = self.shiftdata(x, fix_wrap_around=plotfunc.__name__ not in ["scatter"])
elif x.ndim == 0:
if x > 180:
x = x - 360.
# convert lat/lon coords to map projection coords.
x, y = self(x,y)
return plotfunc(self,x,y,*args,**kwargs)
return with_transform
def _transformuv(plotfunc):
# shift data and longitudes to map projection region, then compute
# transformation to map projection coordinates. Works when call
# signature has two data arrays instead of one.
@functools.wraps(plotfunc)
def with_transform(self,x,y,u,v,*args,**kwargs):
# input coordinates are latitude/longitude, not map projection coords.
if kwargs.pop('latlon', latlon_default):
# shift data to map projection region for
# cylindrical and pseudo-cylindrical projections.
if self.projection in _cylproj or self.projection in _pseudocyl:
x1, u = self.shiftdata(x, u)
x, v = self.shiftdata(x, v)
# convert lat/lon coords to map projection coords.
x, y = self(x,y)
return plotfunc(self,x,y,u,v,*args,**kwargs)
return with_transform
class Basemap(object):
def __init__(self, llcrnrlon=None, llcrnrlat=None,
urcrnrlon=None, urcrnrlat=None,
llcrnrx=None, llcrnry=None,
urcrnrx=None, urcrnry=None,
width=None, height=None,
projection='cyl', resolution='c',
area_thresh=None, rsphere=6370997.0,
ellps=None, lat_ts=None,
lat_1=None, lat_2=None,
lat_0=None, lon_0=None,
lon_1=None, lon_2=None,
o_lon_p=None, o_lat_p=None,
k_0=None,
no_rot=False,
suppress_ticks=True,
satellite_height=35786000,
boundinglat=None,
fix_aspect=True,
anchor='C',
celestial=False,
round=False,
epsg=None,
ax=None):
# docstring is added after __init__ method definition
# set epsg code if given, set to 4326 for projection='cyl':
if epsg is not None:
self.epsg = epsg
elif projection == 'cyl':
self.epsg = 4326
# replace kwarg values with those implied by epsg code,
# if given.
if hasattr(self,'epsg'):
if str(self.epsg) not in epsg_dict:
raise ValueError('%s is not a supported EPSG code' %
self.epsg)
epsg_params = epsg_dict[str(self.epsg)]
for k in epsg_params:
if k == 'projection':
projection = epsg_params[k]
elif k == 'rsphere':
rsphere = epsg_params[k]
elif k == 'ellps':
ellps = epsg_params[k]
elif k == 'lat_1':
lat_1 = epsg_params[k]
elif k == 'lat_2':
lat_2 = epsg_params[k]
elif k == 'lon_0':
lon_0 = epsg_params[k]
elif k == 'lat_0':
lat_0 = epsg_params[k]
elif k == 'lat_ts':
lat_ts = epsg_params[k]
elif k == 'k_0':
k_0 = epsg_params[k]
# fix aspect ratio to match the aspect ratio of the map projection
# region
self.fix_aspect = fix_aspect
# where to put plot in figure (default is 'C' or center)
self.anchor = anchor
# geographic or celestial coords?
self.celestial = celestial
# map projection.
self.projection = projection
# bounding lat (for pole-centered plots)
self.boundinglat = boundinglat
# is a round pole-centered plot desired?
self.round = round
# full disk projection?
self._fulldisk = False # default value
# set up projection parameter dict.
projparams = {}
projparams['proj'] = projection
# if ellps keyword specified, it over-rides rsphere.
if ellps is not None:
try:
elldict = pyproj.pj_ellps[ellps]
except KeyError:
raise ValueError(
'illegal ellps definition, allowed values are %s' %
pyproj.pj_ellps.keys())
projparams['a'] = elldict['a']
if 'b' in elldict:
projparams['b'] = elldict['b']
else:
projparams['b'] = projparams['a']*(1.0-(1.0/elldict['rf']))
else:
try:
if rsphere[0] > rsphere[1]:
projparams['a'] = rsphere[0]
projparams['b'] = rsphere[1]
else:
projparams['a'] = rsphere[1]
projparams['b'] = rsphere[0]
except:
if projection == 'tmerc':
# use bR_a instead of R because of obscure bug
# in proj4 for tmerc projection.
projparams['bR_a'] = rsphere
else:
projparams['R'] = rsphere
# set units to meters.
projparams['units']='m'
# check for sane values of lon_0, lat_0, lat_ts, lat_1, lat_2
lat_0 = _validated_or_none(lat_0, 'lat_0', -90, 90)
lat_1 = _validated_or_none(lat_1, 'lat_1', -90, 90)
lat_2 = _validated_or_none(lat_2, 'lat_2', -90, 90)
lat_ts = _validated_or_none(lat_ts, 'lat_ts', -90, 90)
lon_0 = _validated_or_none(lon_0, 'lon_0', -360, 720)
lon_1 = _validated_or_none(lon_1, 'lon_1', -360, 720)
lon_2 = _validated_or_none(lon_2, 'lon_2', -360, 720)
llcrnrlon = _validated_or_none(llcrnrlon, 'llcrnrlon', -360, 720)
urcrnrlon = _validated_or_none(urcrnrlon, 'urcrnrlon', -360, 720)
llcrnrlat = _validated_or_none(llcrnrlat, 'llcrnrlat', -90, 90)
urcrnrlat = _validated_or_none(urcrnrlat, 'urcrnrlat', -90, 90)
_insert_validated(projparams, lat_0, 'lat_0', -90, 90)
_insert_validated(projparams, lat_1, 'lat_1', -90, 90)
_insert_validated(projparams, lat_2, 'lat_2', -90, 90)
_insert_validated(projparams, lat_ts, 'lat_ts', -90, 90)
_insert_validated(projparams, lon_0, 'lon_0', -360, 720)
_insert_validated(projparams, lon_1, 'lon_1', -360, 720)
_insert_validated(projparams, lon_2, 'lon_2', -360, 720)
if projection in ['geos','nsper']:
projparams['h'] = satellite_height
# check for sane values of projection corners.
using_corners = (None not in [llcrnrlon,llcrnrlat,urcrnrlon,urcrnrlat])
if using_corners:
self.llcrnrlon = _validated_ll(llcrnrlon, 'llcrnrlon', -360, 720)
self.urcrnrlon = _validated_ll(urcrnrlon, 'urcrnrlon', -360, 720)
self.llcrnrlat = _validated_ll(llcrnrlat, 'llcrnrlat', -90, 90)
self.urcrnrlat = _validated_ll(urcrnrlat, 'urcrnrlat', -90, 90)
# for each of the supported projections,
# compute lat/lon of domain corners
# and set values in projparams dict as needed.
if projection in ['lcc', 'eqdc', 'aea']:
if projection == 'lcc' and k_0 is not None:
projparams['k_0']=k_0
# if lat_0 is given, but not lat_1,
# set lat_1=lat_0
if lat_1 is None and lat_0 is not None:
lat_1 = lat_0
projparams['lat_1'] = lat_1
if lat_1 is None or lon_0 is None:
raise ValueError('must specify lat_1 or lat_0 and lon_0 for %s basemap (lat_2 is optional)' % _projnames[projection])
if lat_2 is None:
projparams['lat_2'] = lat_1
if not using_corners:
using_cornersxy = (None not in [llcrnrx,llcrnry,urcrnrx,urcrnry])
if using_cornersxy:
llcrnrlon,llcrnrlat,urcrnrlon,urcrnrlat = _choosecornersllur(llcrnrx,llcrnry,urcrnrx,urcrnry,**projparams)
self.llcrnrlon = llcrnrlon; self.llcrnrlat = llcrnrlat
self.urcrnrlon = urcrnrlon; self.urcrnrlat = urcrnrlat
else:
if width is None or height is None:
raise ValueError('must either specify lat/lon values of corners (llcrnrlon,llcrnrlat,urcrnrlon,urcrnrlat) in degrees or width and height in meters')
if lon_0 is None or lat_0 is None:
raise ValueError('must specify lon_0 and lat_0 when using width, height to specify projection region')
llcrnrlon,llcrnrlat,urcrnrlon,urcrnrlat = _choosecorners(width,height,**projparams)
self.llcrnrlon = llcrnrlon; self.llcrnrlat = llcrnrlat
self.urcrnrlon = urcrnrlon; self.urcrnrlat = urcrnrlat
elif projection == 'stere':
if k_0 is not None:
projparams['k_0']=k_0
if lat_0 is None or lon_0 is None:
raise ValueError('must specify lat_0 and lon_0 for Stereographic basemap (lat_ts is optional)')
if not using_corners:
if width is None or height is None:
raise ValueError('must either specify lat/lon values of corners (llcrnrlon,llcrnrlat,urcrnrlon,urcrnrlat) in degrees or width and height in meters')
if lon_0 is None or lat_0 is None:
raise ValueError('must specify lon_0 and lat_0 when using width, height to specify projection region')
llcrnrlon,llcrnrlat,urcrnrlon,urcrnrlat = _choosecorners(width,height,**projparams)
self.llcrnrlon = llcrnrlon; self.llcrnrlat = llcrnrlat
self.urcrnrlon = urcrnrlon; self.urcrnrlat = urcrnrlat
elif projection in ['spstere', 'npstere',
'splaea', 'nplaea',
'spaeqd', 'npaeqd']:
if (projection == 'splaea' and boundinglat >= 0) or\
(projection == 'nplaea' and boundinglat <= 0):
msg='boundinglat cannot extend into opposite hemisphere'
raise ValueError(msg)
if boundinglat is None or lon_0 is None:
raise ValueError('must specify boundinglat and lon_0 for %s basemap' % _projnames[projection])
if projection[0] == 's':
sgn = -1
else:
sgn = 1
rootproj = projection[2:]
projparams['proj'] = rootproj
if rootproj == 'stere':
projparams['lat_ts'] = sgn * 90.
projparams['lat_0'] = sgn * 90.
self.llcrnrlon = lon_0 - sgn*45.
self.urcrnrlon = lon_0 + sgn*135.
proj = pyproj.Proj(projparams)
x,y = proj(lon_0,boundinglat)
lon,self.llcrnrlat = proj(math.sqrt(2.)*y,0.,inverse=True)
self.urcrnrlat = self.llcrnrlat
if width is not None or height is not None:
sys.stdout.write('warning: width and height keywords ignored for %s projection' % _projnames[projection])
elif projection == 'laea':
if lat_0 is None or lon_0 is None:
raise ValueError('must specify lat_0 and lon_0 for Lambert Azimuthal basemap')
if not using_corners:
if width is None or height is None:
raise ValueError('must either specify lat/lon values of corners (llcrnrlon,llcrnrlat,urcrnrlon,urcrnrlat) in degrees or width and height in meters')
if lon_0 is None or lat_0 is None:
raise ValueError('must specify lon_0 and lat_0 when using width, height to specify projection region')
llcrnrlon,llcrnrlat,urcrnrlon,urcrnrlat = _choosecorners(width,height,**projparams)
self.llcrnrlon = llcrnrlon; self.llcrnrlat = llcrnrlat
self.urcrnrlon = urcrnrlon; self.urcrnrlat = urcrnrlat
elif projection in ['tmerc','gnom','cass','poly'] :
if projection == 'tmerc' and k_0 is not None:
projparams['k_0']=k_0
if projection == 'gnom' and 'R' not in projparams:
raise ValueError('gnomonic projection only works for perfect spheres - not ellipsoids')
if lat_0 is None or lon_0 is None:
raise ValueError('must specify lat_0 and lon_0 for Transverse Mercator, Gnomonic, Cassini-Soldner and Polyconic basemap')
if not using_corners:
if width is None or height is None:
raise ValueError('must either specify lat/lon values of corners (llcrnrlon,llcrnrlat,urcrnrlon,urcrnrlat) in degrees or width and height in meters')
if lon_0 is None or lat_0 is None:
raise ValueError('must specify lon_0 and lat_0 when using width, height to specify projection region')
llcrnrlon,llcrnrlat,urcrnrlon,urcrnrlat = _choosecorners(width,height,**projparams)
self.llcrnrlon = llcrnrlon; self.llcrnrlat = llcrnrlat
self.urcrnrlon = urcrnrlon; self.urcrnrlat = urcrnrlat
elif projection == 'ortho':
if 'R' not in projparams:
raise ValueError('orthographic projection only works for perfect spheres - not ellipsoids')
if lat_0 is None or lon_0 is None:
raise ValueError('must specify lat_0 and lon_0 for Orthographic basemap')
if (lat_0 == 90 or lat_0 == -90) and\
None in [llcrnrx,llcrnry,urcrnrx,urcrnry]:
# for ortho plot centered on pole, set boundinglat to equator.
# (so meridian labels can be drawn in this special case).
self.boundinglat = 0
self.round = True
if width is not None or height is not None:
sys.stdout.write('warning: width and height keywords ignored for %s projection' % _projnames[self.projection])
if not using_corners:
llcrnrlon = -180.
llcrnrlat = -90.
urcrnrlon = 180
urcrnrlat = 90.
self._fulldisk = True
else:
self._fulldisk = False
self.llcrnrlon = llcrnrlon; self.llcrnrlat = llcrnrlat
self.urcrnrlon = urcrnrlon; self.urcrnrlat = urcrnrlat
# FIXME: won't work for points exactly on equator??
if np.abs(lat_0) < 1.e-2: lat_0 = 1.e-2
projparams['lat_0'] = lat_0
elif projection == 'geos':
if lat_0 is not None and lat_0 != 0:
raise ValueError('lat_0 must be zero for Geostationary basemap')
if lon_0 is None:
raise ValueError('must specify lon_0 for Geostationary basemap')
if width is not None or height is not None:
sys.stdout.write('warning: width and height keywords ignored for %s projection' % _projnames[self.projection])
if not using_corners:
llcrnrlon = -180.
llcrnrlat = -90.
urcrnrlon = 180
urcrnrlat = 90.
self._fulldisk = True
else:
self._fulldisk = False
self.llcrnrlon = llcrnrlon; self.llcrnrlat = llcrnrlat
self.urcrnrlon = urcrnrlon; self.urcrnrlat = urcrnrlat
elif projection == 'nsper':
if 'R' not in projparams:
raise ValueError('near-sided perspective projection only works for perfect spheres - not ellipsoids')
if lat_0 is None or lon_0 is None:
msg='must specify lon_0 and lat_0 for near-sided perspective Basemap'
raise ValueError(msg)
if width is not None or height is not None:
sys.stdout.write('warning: width and height keywords ignored for %s projection' % _projnames[self.projection])
if not using_corners:
llcrnrlon = -180.
llcrnrlat = -90.
urcrnrlon = 180
urcrnrlat = 90.
self._fulldisk = True
else:
self._fulldisk = False
self.llcrnrlon = llcrnrlon; self.llcrnrlat = llcrnrlat
self.urcrnrlon = urcrnrlon; self.urcrnrlat = urcrnrlat
elif projection in _pseudocyl:
if lon_0 is None:
raise ValueError('must specify lon_0 for %s projection' % _projnames[self.projection])
if width is not None or height is not None:
sys.stdout.write('warning: width and height keywords ignored for %s projection' % _projnames[self.projection])
llcrnrlon = lon_0-180.
llcrnrlat = -90.
urcrnrlon = lon_0+180
urcrnrlat = 90.
self.llcrnrlon = llcrnrlon; self.llcrnrlat = llcrnrlat
self.urcrnrlon = urcrnrlon; self.urcrnrlat = urcrnrlat
elif projection == 'omerc':
if k_0 is not None:
projparams['k_0']=k_0
if lat_1 is None or lon_1 is None or lat_2 is None or lon_2 is None:
raise ValueError('must specify lat_1,lon_1 and lat_2,lon_2 for Oblique Mercator basemap')
projparams['lat_1'] = lat_1
projparams['lon_1'] = lon_1
projparams['lat_2'] = lat_2
projparams['lon_2'] = lon_2
projparams['lat_0'] = lat_0
if no_rot:
projparams['no_rot']=''
#if not using_corners:
# raise ValueError, 'cannot specify map region with width and height keywords for this projection, please specify lat/lon values of corners'
if not using_corners:
if width is None or height is None:
raise ValueError('must either specify lat/lon values of corners (llcrnrlon,llcrnrlat,urcrnrlon,urcrnrlat) in degrees or width and height in meters')
if lon_0 is None or lat_0 is None:
raise ValueError('must specify lon_0 and lat_0 when using width, height to specify projection region')
llcrnrlon,llcrnrlat,urcrnrlon,urcrnrlat = _choosecorners(width,height,**projparams)
self.llcrnrlon = llcrnrlon; self.llcrnrlat = llcrnrlat
self.urcrnrlon = urcrnrlon; self.urcrnrlat = urcrnrlat
elif projection == 'aeqd':
if lat_0 is None or lon_0 is None:
raise ValueError('must specify lat_0 and lon_0 for Azimuthal Equidistant basemap')
if not using_corners:
if width is None or height is None:
self._fulldisk = True
llcrnrlon = -180.
llcrnrlat = -90.
urcrnrlon = 180
urcrnrlat = 90.
else:
self._fulldisk = False
if lon_0 is None or lat_0 is None:
raise ValueError('must specify lon_0 and lat_0 when using width, height to specify projection region')
if not self._fulldisk:
llcrnrlon,llcrnrlat,urcrnrlon,urcrnrlat = _choosecorners(width,height,**projparams)
self.llcrnrlon = llcrnrlon; self.llcrnrlat = llcrnrlat
self.urcrnrlon = urcrnrlon; self.urcrnrlat = urcrnrlat
elif projection in _cylproj:
if projection == 'merc' or projection == 'cea':
if lat_ts is None:
lat_ts = 0.
projparams['lat_ts'] = lat_ts
if not using_corners:
llcrnrlat = -90.
urcrnrlat = 90.
if lon_0 is not None:
llcrnrlon = lon_0-180.
urcrnrlon = lon_0+180.
else:
llcrnrlon = -180.
urcrnrlon = 180
if projection == 'merc':
# clip plot region to latitudes between -89.99 and 89.99
# (mercator is singular at poles)
if llcrnrlat < -89.99: llcrnrlat = -89.99
if llcrnrlat > 89.99: llcrnrlat = 89.99
if urcrnrlat < -89.99: urcrnrlat = -89.99
if urcrnrlat > 89.99: urcrnrlat = 89.99
self.llcrnrlon = llcrnrlon; self.llcrnrlat = llcrnrlat
self.urcrnrlon = urcrnrlon; self.urcrnrlat = urcrnrlat
if width is not None or height is not None:
sys.stdout.write('warning: width and height keywords ignored for %s projection' % _projnames[self.projection])
if lon_0 is not None:
projparams['lon_0'] = lon_0
else:
projparams['lon_0']=0.5*(llcrnrlon+urcrnrlon)
elif projection == 'rotpole':
if lon_0 is None or o_lon_p is None or o_lat_p is None:
msg='must specify lon_0,o_lat_p,o_lon_p for rotated pole Basemap'
raise ValueError(msg)
if width is not None or height is not None:
sys.stdout.write('warning: width and height keywords ignored for %s projection' % _projnames[self.projection])
projparams['lon_0']=lon_0
projparams['o_lon_p']=o_lon_p
projparams['o_lat_p']=o_lat_p
projparams['o_proj']='longlat'
projparams['proj']='ob_tran'
if not using_corners and None in [llcrnrx,llcrnry,urcrnrx,urcrnry]:
raise ValueError('must specify lat/lon values of corners in degrees')
if None not in [llcrnrx,llcrnry,urcrnrx,urcrnry]:
p = pyproj.Proj(projparams)
llcrnrx = _dg2rad*llcrnrx; llcrnry = _dg2rad*llcrnry
urcrnrx = _dg2rad*urcrnrx; urcrnry = _dg2rad*urcrnry
llcrnrlon, llcrnrlat = p(llcrnrx,llcrnry,inverse=True)
urcrnrlon, urcrnrlat = p(urcrnrx,urcrnry,inverse=True)
self.llcrnrlon = llcrnrlon; self.llcrnrlat = llcrnrlat
self.urcrnrlon = urcrnrlon; self.urcrnrlat = urcrnrlat
else:
raise ValueError(_unsupported_projection % projection)
# initialize proj4
proj = Proj(projparams,self.llcrnrlon,self.llcrnrlat,self.urcrnrlon,self.urcrnrlat)
# make sure axis ticks are suppressed.
self.noticks = suppress_ticks
# map boundary not yet drawn.
self._mapboundarydrawn = False
# make Proj instance a Basemap instance variable.
self.projtran = proj
# copy some Proj attributes.
atts = ['rmajor','rminor','esq','flattening','ellipsoid','projparams']
for att in atts:
self.__dict__[att] = proj.__dict__[att]
# these only exist for full-disk projections (e.g. geos, ortho, nsper).
if hasattr(proj,'_width'):
self.__dict__['_width'] = proj.__dict__['_width']
if hasattr(proj,'_height'):
self.__dict__['_height'] = proj.__dict__['_height']
# spatial reference string (useful for georeferencing output
# images with gdal_translate).
if hasattr(self,'_proj4'):
#self.srs = proj._proj4.srs
self.srs = proj._proj4.pjinitstring
else:
pjargs = []
for key,value in self.projparams.items():
# 'cyl' projection translates to 'eqc' in PROJ.4
if projection == 'cyl' and key == 'proj':
value = 'eqc'
# ignore x_0 and y_0 settings for 'cyl' projection
# (they are not consistent with what PROJ.4 uses)
elif projection == 'cyl' and key in ['x_0','y_0']:
continue
pjargs.append('+'+key+"="+str(value)+' ')
self.srs = ''.join(pjargs)
self.proj4string = self.srs
# set instance variables defining map region.
self.xmin = proj.xmin
self.xmax = proj.xmax
self.ymin = proj.ymin
self.ymax = proj.ymax
if projection == 'cyl':
self.aspect = (self.urcrnrlat-self.llcrnrlat)/(self.urcrnrlon-self.llcrnrlon)
else:
self.aspect = (proj.ymax-proj.ymin)/(proj.xmax-proj.xmin)
if projection in ['geos','ortho','nsper'] and \
None not in [llcrnrx,llcrnry,urcrnrx,urcrnry]:
self.llcrnrx = llcrnrx+0.5*proj.xmax
self.llcrnry = llcrnry+0.5*proj.ymax
self.urcrnrx = urcrnrx+0.5*proj.xmax
self.urcrnry = urcrnry+0.5*proj.ymax
self._fulldisk = False
else:
self.llcrnrx = proj.llcrnrx
self.llcrnry = proj.llcrnry
self.urcrnrx = proj.urcrnrx
self.urcrnry = proj.urcrnry
if self.projection == 'rotpole':
lon0,lat0 = self(0.5*(self.llcrnrx + self.urcrnrx),\
0.5*(self.llcrnry + self.urcrnry),\
inverse=True)
self.projparams['lat_0']=lat0
# if ax == None, pyplot.gca may be used.
self.ax = ax
self.lsmask = None
# This will record hashes of Axes instances.
self._initialized_axes = set()
# set defaults for area_thresh.
self.resolution = resolution
# celestial=True implies resolution=None (no coastlines).
if self.celestial:
self.resolution=None
if area_thresh is None and self.resolution is not None:
if resolution == 'c':
area_thresh = 10000.
elif resolution == 'l':
area_thresh = 1000.
elif resolution == 'i':
area_thresh = 100.
elif resolution == 'h':
area_thresh = 10.
elif resolution == 'f':
area_thresh = 1.
else:
raise ValueError("boundary resolution must be one of 'c','l','i','h' or 'f'")
self.area_thresh = area_thresh
# define map boundary polygon (in lat/lon coordinates)
blons, blats, self._boundarypolyll, self._boundarypolyxy = self._getmapboundary()
self.boundarylats = blats
self.boundarylons = blons
# set min/max lats for projection domain.
if self.projection in _cylproj:
self.latmin = self.llcrnrlat
self.latmax = self.urcrnrlat
self.lonmin = self.llcrnrlon
self.lonmax = self.urcrnrlon
elif self.projection in ['ortho','geos','nsper'] + _pseudocyl:
self.latmin = -90.
self.latmax = 90.
self.lonmin = self.llcrnrlon
self.lonmax = self.urcrnrlon
else:
lons, lats = self.makegrid(1001,1001)
lats = ma.masked_where(lats > 1.e20,lats)
lons = ma.masked_where(lons > 1.e20,lons)
self.latmin = lats.min()
self.latmax = lats.max()
self.lonmin = lons.min()
self.lonmax = lons.max()
NPole = _geoslib.Point(self(0.,90.))
SPole = _geoslib.Point(self(0.,-90.))
if lat_0 is None:
lon_0, lat_0 =\
self(0.5*(self.xmin+self.xmax),
0.5*(self.ymin+self.ymax),inverse=True)
Dateline = _geoslib.Point(self(180.,lat_0))
Greenwich = _geoslib.Point(self(0.,lat_0))
hasNP = NPole.within(self._boundarypolyxy)
hasSP = SPole.within(self._boundarypolyxy)
hasPole = hasNP or hasSP
hasDateline = Dateline.within(self._boundarypolyxy)
hasGreenwich = Greenwich.within(self._boundarypolyxy)
# projection crosses dateline (and not Greenwich or pole).
if not hasPole and hasDateline and not hasGreenwich:
if self.lonmin < 0 and self.lonmax > 0.:
lons = np.where(lons < 0, lons+360, lons)
self.lonmin = lons.min()
self.lonmax = lons.max()
# read in coastline polygons, only keeping those that
# intersect map boundary polygon.
if self.resolution is not None:
self.coastsegs, self.coastpolygontypes =\
self._readboundarydata('gshhs',as_polygons=True)
# reformat for use in matplotlib.patches.Polygon.
self.coastpolygons = []
for seg in self.coastsegs:
x, y = list(zip(*seg))
self.coastpolygons.append((x,y))
# replace coastsegs with line segments (instead of polygons)
self.coastsegs, types =\
self._readboundarydata('gshhs',as_polygons=False)
# create geos Polygon structures for land areas.
# currently only used in is_land method.
self.landpolygons=[]
self.lakepolygons=[]
if self.resolution is not None and len(self.coastpolygons) > 0:
#self.islandinlakepolygons=[]
#self.lakeinislandinlakepolygons=[]
x, y = list(zip(*self.coastpolygons))
for x,y,typ in zip(x,y,self.coastpolygontypes):
b = np.asarray([x,y]).T
if typ == 1: self.landpolygons.append(_geoslib.Polygon(b))
if typ == 2: self.lakepolygons.append(_geoslib.Polygon(b))
#if typ == 3: self.islandinlakepolygons.append(_geoslib.Polygon(b))
#if typ == 4: self.lakeinislandinlakepolygons.append(_geoslib.Polygon(b))
# set __init__'s docstring
__init__.__doc__ = _Basemap_init_doc
def __call__(self,x,y,inverse=False):
"""
Calling a Basemap class instance with the arguments lon, lat will
convert lon/lat (in degrees) to x/y map projection
coordinates (in meters). If optional keyword ``inverse`` is
True (default is False), the inverse transformation from x/y
to lon/lat is performed.
For cylindrical equidistant projection (``cyl``), this
does nothing (i.e. x,y == lon,lat).
For non-cylindrical projections, the inverse transformation
always returns longitudes between -180 and 180 degrees. For
cylindrical projections (self.projection == ``cyl``,
``cea``, ``mill``, ``gall`` or ``merc``)
the inverse transformation will return longitudes between
self.llcrnrlon and self.urcrnrlon.
Input arguments lon, lat can be either scalar floats,
sequences, or numpy arrays.
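A minimal sketch with array input (the points chosen here are
arbitrary, for illustration only):

>>> import numpy as np
>>> m = Basemap(projection='moll', lon_0=0)     # hypothetical instance
>>> lons, lats = np.array([0., 90.]), np.array([0., 45.])
>>> x, y = m(lons, lats)                        # forward transform
>>> lons2, lats2 = m(x, y, inverse=True)        # inverse transform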
"""
if self.celestial:
# don't assume center of map is at greenwich
# (only relevant for cyl or pseudo-cyl projections)
if self.projection in _pseudocyl or self.projection in _cylproj:
lon_0=self.projparams['lon_0']
else:
lon_0 = 0.
if self.celestial and not inverse:
try:
x = 2.*lon_0-x
except TypeError:
x = [2*lon_0-xx for xx in x]
if self.projection == 'rotpole' and inverse:
try:
x = _dg2rad*x
except TypeError:
x = [_dg2rad*xx for xx in x]
try:
y = _dg2rad*y
except TypeError:
y = [_dg2rad*yy for yy in y]
xout,yout = self.projtran(x,y,inverse=inverse)
if self.celestial and inverse:
try:
xout = -2.*lon_0-xout
except:
xout = [-2.*lon_0-xx for xx in xout]
if self.projection == 'rotpole' and not inverse:
try:
xout = _rad2dg*xout
xout = np.where(xout < 0., xout+360, xout)
except TypeError:
xout = [_rad2dg*xx for xx in xout]
xout = [xx+360. if xx < 0 else xx for xx in xout]
try:
yout = _rad2dg*yout
except TypeError:
yout = [_rad2dg*yy for yy in yout]
return xout,yout
def makegrid(self,nx,ny,returnxy=False):
"""
return arrays of shape (ny,nx) containing lon,lat coordinates of
an equally spaced native projection grid.
If ``returnxy = True``, the x,y values of the grid are returned also.
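A small illustrative sketch (grid size chosen arbitrarily):

>>> m = Basemap(projection='robin', lon_0=0)      # hypothetical instance
>>> lons, lats = m.makegrid(72, 36)               # lons,lats have shape (36,72)
>>> lons, lats, x, y = m.makegrid(72, 36, returnxy=True)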
"""
return self.projtran.makegrid(nx,ny,returnxy=returnxy)
def _readboundarydata(self,name,as_polygons=False):
"""
read boundary data, clip to map projection region.
"""
msg = dedent("""
Unable to open boundary dataset file. Only the 'crude', 'low' and
'intermediate' resolution datasets are installed by default. If you
are requesting a 'high' or 'full' resolution dataset, you need to
install the `basemap-data-hires` package.""")
# only gshhs coastlines can be polygons.
if name != 'gshhs': as_polygons=False
try:
bdatfile = open(os.path.join(basemap_datadir,name+'_'+self.resolution+'.dat'),'rb')
bdatmetafile = open(os.path.join(basemap_datadir,name+'meta_'+self.resolution+'.dat'),'r')
except:
raise IOError(msg)
polygons = []
polygon_types = []
# coastlines are polygons, other boundaries are line segments.
if name == 'gshhs':
Shape = _geoslib.Polygon
else:
Shape = _geoslib.LineString
# see if map projection region polygon contains a pole.
NPole = _geoslib.Point(self(0.,90.))
SPole = _geoslib.Point(self(0.,-90.))
boundarypolyxy = self._boundarypolyxy
boundarypolyll = self._boundarypolyll
hasNP = NPole.within(boundarypolyxy)
hasSP = SPole.within(boundarypolyxy)
containsPole = hasNP or hasSP
# these projections cannot cross pole.
if containsPole and\
self.projection in _cylproj + _pseudocyl + ['geos']:
raise ValueError('%s projection cannot cross pole'%(self.projection))
# make sure some projections have containsPole=True:
# we will compute the intersections in stereographic
# coordinates, then transform back. This is
# because these projections are only defined on a hemisphere, and
# some boundary features (like Eurasia) would be undefined otherwise.
tostere =\
['omerc','ortho','gnom','nsper','nplaea','npaeqd','splaea','spaeqd']
if self.projection in tostere and name == 'gshhs':
containsPole = True
lon_0=self.projparams['lon_0']
lat_0=self.projparams['lat_0']
re = self.projparams['R']
# center of stereographic projection restricted to be
# nearest one of 6 points on the sphere (every 90 deg lat/lon).
lon0 = 90.*(np.around(lon_0/90.))
lat0 = 90.*(np.around(lat_0/90.))
if np.abs(int(lat0)) == 90: lon0=0.
maptran = pyproj.Proj(proj='stere',lon_0=lon0,lat_0=lat0,R=re)
# boundary polygon for ortho/gnom/nsper projection
# in stereographic coordinates.
b = self._boundarypolyll.boundary
blons = b[:,0]; blats = b[:,1]
b[:,0], b[:,1] = maptran(blons, blats)
boundarypolyxy = _geoslib.Polygon(b)
for line in bdatmetafile:
linesplit = line.split()
area = float(linesplit[1])
south = float(linesplit[3])
north = float(linesplit[4])
crossdatelineE=False; crossdatelineW=False
if name == 'gshhs':
id = linesplit[7]
if id.endswith('E'):
crossdatelineE = True
elif id.endswith('W'):
crossdatelineW = True
# make sure south/north limits of dateline crossing polygons
# (Eurasia) are the same, since they will be merged into one.
# (this avoids having one filtered out and not the other).
if crossdatelineE:
south_save=south
north_save=north
if crossdatelineW:
south=south_save
north=north_save
if area < 0.: area = 1.e30
useit = self.latmax>=south and self.latmin<=north and area>self.area_thresh
if useit:
typ = int(linesplit[0])
npts = int(linesplit[2])
offsetbytes = int(linesplit[5])
bytecount = int(linesplit[6])
bdatfile.seek(offsetbytes,0)
# read in binary string, convert into an npts by 2
# numpy array (first column is lons, second is lats).
polystring = bdatfile.read(bytecount)
# binary data is little endian.
b = np.array(np.frombuffer(polystring,dtype='<f4'),'f8')
b.shape = (npts,2)
b2 = b.copy()
# merge polygons that cross dateline.
poly = Shape(b)
# hack to try to avoid having the Antarctica filled polygon
# cover the entire map (if skipAntart = False, this happens
# for ortho lon_0=-120, lat_0=60, for example).
skipAntart = self.projection in tostere and south < -89 and \
not hasSP
if crossdatelineE and not skipAntart:
if not poly.is_valid(): poly=poly.fix()
polyE = poly
continue
elif crossdatelineW and not skipAntart:
if not poly.is_valid(): poly=poly.fix()
b = poly.boundary
b[:,0] = b[:,0]+360.
poly = Shape(b)
poly = poly.union(polyE)
if not poly.is_valid(): poly=poly.fix()
b = poly.boundary
b2 = b.copy()
# fix Antarctica.
if name == 'gshhs' and south < -89:
b = b[4:,:]
b2 = b.copy()
poly = Shape(b)
# if map boundary polygon is a valid one in lat/lon
# coordinates (i.e. it does not contain either pole),
# the intersections of the boundary geometries
# and the map projection region can be computed before
# transforming the boundary geometry to map projection
# coordinates (this saves time, especially for small map
# regions and high-resolution boundary geometries).
if not containsPole:
# close Antarctica.
if name == 'gshhs' and south < -89:
lons2 = b[:,0]
lats = b[:,1]
lons1 = lons2 - 360.
lons3 = lons2 + 360.
lons = lons1.tolist()+lons2.tolist()+lons3.tolist()
lats = lats.tolist()+lats.tolist()+lats.tolist()
lonstart,latstart = lons[0], lats[0]
lonend,latend = lons[-1], lats[-1]
lons.insert(0,lonstart)
lats.insert(0,-90.)
lons.append(lonend)
lats.append(-90.)
b = np.empty((len(lons),2),np.float64)
b[:,0] = lons; b[:,1] = lats
poly = Shape(b)
if not poly.is_valid(): poly=poly.fix()
# if polygon intersects map projection
# region, process it.
if poly.intersects(boundarypolyll):
if name != 'gshhs' or as_polygons:
geoms = poly.intersection(boundarypolyll)
else:
# convert polygons to line segments
poly = _geoslib.LineString(poly.boundary)
geoms = poly.intersection(boundarypolyll)
# iterate over geometries in intersection.
for psub in geoms:
b = psub.boundary
blons = b[:,0]; blats = b[:,1]
bx, by = self(blons, blats)
polygons.append(list(zip(bx,by)))
polygon_types.append(typ)
else:
# create duplicate polygons shifted by -360 and +360
# (so as to properly treat polygons that cross
# Greenwich meridian).
b2[:,0] = b[:,0]-360
poly1 = Shape(b2)
b2[:,0] = b[:,0]+360
poly2 = Shape(b2)
polys = [poly1,poly,poly2]
for poly in polys:
# try to fix "non-noded intersection" errors.
if not poly.is_valid(): poly=poly.fix()
# if polygon intersects map projection
# region, process it.
if poly.intersects(boundarypolyll):
if name != 'gshhs' or as_polygons:
geoms = poly.intersection(boundarypolyll)
else:
# convert polygons to line segments
# note: use fix method here or Eurasia
# line segments sometimes disappear.
poly = _geoslib.LineString(poly.fix().boundary)
geoms = poly.intersection(boundarypolyll)
# iterate over geometries in intersection.
for psub in geoms:
b = psub.boundary
blons = b[:,0]; blats = b[:,1]
# transformation from lat/lon to
# map projection coordinates.
bx, by = self(blons, blats)
if not as_polygons or len(bx) > 4:
polygons.append(list(zip(bx,by)))
polygon_types.append(typ)
# if map boundary polygon is not valid in lat/lon
# coordinates, compute intersection between map
# projection region and boundary geometries in map
# projection coordinates.
else:
# transform coordinates from lat/lon
# to map projection coordinates.
# special case for ortho/gnom/nsper, compute coastline polygon
# vertices in stereographic coords.
if name == 'gshhs' and as_polygons and self.projection in tostere:
b[:,0], b[:,1] = maptran(b[:,0], b[:,1])
else:
b[:,0], b[:,1] = self(b[:,0], b[:,1])
goodmask = np.logical_and(b[:,0]<1.e20,b[:,1]<1.e20)
# if less than two points are valid in
# map proj coords, skip this geometry.
if np.sum(goodmask) <= 1: continue
if name != 'gshhs' or (name == 'gshhs' and not as_polygons):
# if not a polygon,
# just remove parts of geometry that are undefined
# in this map projection.
bx = np.compress(goodmask, b[:,0])
by = np.compress(goodmask, b[:,1])
# split coastline segments that jump across entire plot.
xd = (bx[1:]-bx[0:-1])**2
yd = (by[1:]-by[0:-1])**2
dist = np.sqrt(xd+yd)
split = dist > 0.1*(self.xmax-self.xmin)
if np.sum(split) and self.projection not in _cylproj:
ind = (np.compress(split,np.squeeze(split*np.indices(xd.shape)))+1).tolist()
iprev = 0
ind.append(len(xd))
for i in ind:
# don't add empty lists.
if len(list(range(iprev,i))):
polygons.append(list(zip(bx[iprev:i],by[iprev:i])))
iprev = i
else:
polygons.append(list(zip(bx,by)))
polygon_types.append(typ)
continue
# create a GEOS geometry object.
if name == 'gshhs' and not as_polygons:
# convert polygons to line segments
poly = _geoslib.LineString(poly.boundary)
else:
# this is a workaround to avoid
# GEOS_ERROR: CGAlgorithmsDD::orientationIndex encountered NaN/Inf numbers
b[np.isposinf(b)] = 1e20
b[np.isneginf(b)] = -1e20
poly = Shape(b)
# this is a workaround to avoid
# "GEOS_ERROR: TopologyException:
# found non-noded intersection between ..."
if not poly.is_valid(): poly=poly.fix()
# if geometry intersects map projection
# region, and doesn't have any invalid points, process it.
if goodmask.all() and poly.intersects(boundarypolyxy):
# if geometry intersection calculation fails,
# just move on.
try:
geoms = poly.intersection(boundarypolyxy)
except:
continue
# iterate over geometries in intersection.
for psub in geoms:
b = psub.boundary
# if projection in ['ortho','gnom','nsper'],
# transform polygon from stereographic
# to ortho/gnom/nsper coordinates.
if self.projection in tostere:
# if coastline polygon covers more than 99%
# of map region for fulldisk projection,
# it's probably bogus, so skip it.
#areafrac = psub.area()/boundarypolyxy.area()
#if self.projection == ['ortho','nsper']:
# if name == 'gshhs' and\
# self._fulldisk and\
# areafrac > 0.99: continue
# inverse transform from stereographic
# to lat/lon.
b[:,0], b[:,1] = maptran(b[:,0], b[:,1], inverse=True)
# orthographic/gnomonic/nsper.
b[:,0], b[:,1]= self(b[:,0], b[:,1])
if not as_polygons or len(b) > 4:
polygons.append(list(zip(b[:,0],b[:,1])))
polygon_types.append(typ)
bdatfile.close()
bdatmetafile.close()
return polygons, polygon_types
def _getmapboundary(self):
"""
create map boundary polygon (in lat/lon and x/y coordinates)
"""
nx = 100; ny = 100
maptran = self
if self.projection in ['ortho','geos','nsper']:
# circular region.
thetas = np.linspace(0.,2.*np.pi,2*nx*ny)[:-1]
rminor = self._height
rmajor = self._width
x = rmajor*np.cos(thetas) + rmajor
y = rminor*np.sin(thetas) + rminor
b = np.empty((len(x),2),np.float64)
b[:,0]=x; b[:,1]=y
boundaryxy = _geoslib.Polygon(b)
# compute proj instance for full disk, if necessary.
if not self._fulldisk:
projparms = self.projparams.copy()
del projparms['x_0']
del projparms['y_0']
if self.projection == 'ortho':
llcrnrx = -self.rmajor
llcrnry = -self.rmajor
urcrnrx = -llcrnrx
urcrnry = -llcrnry
else:
llcrnrx = -self._width
llcrnry = -self._height
urcrnrx = -llcrnrx
urcrnry = -llcrnry
projparms['x_0']=-llcrnrx
projparms['y_0']=-llcrnry
maptran = pyproj.Proj(projparms)
elif self.projection == 'aeqd' and self._fulldisk:
# circular region.
thetas = np.linspace(0.,2.*np.pi,2*nx*ny)[:-1]
rminor = self._height
rmajor = self._width
x = rmajor*np.cos(thetas) + rmajor
y = rminor*np.sin(thetas) + rminor
b = np.empty((len(x),2),np.float64)
b[:,0]=x; b[:,1]=y
boundaryxy = _geoslib.Polygon(b)
elif self.projection in _pseudocyl:
nx = 10*nx; ny = 10*ny
# quasi-elliptical region.
lon_0 = self.projparams['lon_0']
# left side
lats1 = np.linspace(-89.9999,89.9999,ny).tolist()
lons1 = len(lats1)*[lon_0-179.9]
# top.
lons2 = np.linspace(lon_0-179.9,lon_0+179.9,nx).tolist()
lats2 = len(lons2)*[89.9999]
# right side
lats3 = np.linspace(89.9999,-89.9999,ny).tolist()
lons3 = len(lats3)*[lon_0+179.9]
# bottom.
lons4 = np.linspace(lon_0+179.9,lon_0-179.9,nx).tolist()
lats4 = len(lons4)*[-89.9999]
lons = np.array(lons1+lons2+lons3+lons4,np.float64)
lats = np.array(lats1+lats2+lats3+lats4,np.float64)
x, y = maptran(lons,lats)
b = np.empty((len(x),2),np.float64)
b[:,0]=x; b[:,1]=y
boundaryxy = _geoslib.Polygon(b)
else: # all other projections are rectangular.
nx = 100*nx; ny = 100*ny
# left side (x = xmin, ymin <= y <= ymax)
yy = np.linspace(self.ymin, self.ymax, ny)[:-1]
x = len(yy)*[self.xmin]; y = yy.tolist()
# top (y = ymax, xmin <= x <= xmax)
xx = np.linspace(self.xmin, self.xmax, nx)[:-1]
x = x + xx.tolist()
y = y + len(xx)*[self.ymax]
# right side (x = xmax, ymin <= y <= ymax)
yy = np.linspace(self.ymax, self.ymin, ny)[:-1]
x = x + len(yy)*[self.xmax]; y = y + yy.tolist()
# bottom (y = ymin, xmin <= x <= xmax)
xx = np.linspace(self.xmax, self.xmin, nx)[:-1]
x = x + xx.tolist()
y = y + len(xx)*[self.ymin]
x = np.array(x,np.float64)
y = np.array(y,np.float64)
b = np.empty((4,2),np.float64)
b[:,0]=[self.xmin,self.xmin,self.xmax,self.xmax]
b[:,1]=[self.ymin,self.ymax,self.ymax,self.ymin]
boundaryxy = _geoslib.Polygon(b)
if self.projection in _cylproj:
# make sure map boundary doesn't quite include pole.
if self.urcrnrlat > 89.9999:
urcrnrlat = 89.9999
else:
urcrnrlat = self.urcrnrlat
if self.llcrnrlat < -89.9999:
llcrnrlat = -89.9999
else:
llcrnrlat = self.llcrnrlat
lons = [self.llcrnrlon, self.llcrnrlon, self.urcrnrlon, self.urcrnrlon]
lats = [llcrnrlat, urcrnrlat, urcrnrlat, llcrnrlat]
self.boundarylonmin = min(lons)
self.boundarylonmax = max(lons)
x, y = self(lons, lats)
b = np.empty((len(x),2),np.float64)
b[:,0]=x; b[:,1]=y
boundaryxy = _geoslib.Polygon(b)
else:
if self.projection not in _pseudocyl:
lons, lats = maptran(x,y,inverse=True)
# fix lons so there are no jumps.
n = 1
lonprev = lons[0]
for lon,lat in zip(lons[1:],lats[1:]):
if np.abs(lon-lonprev) > 90.:
if lonprev < 0:
lon = lon - 360.
else:
lon = lon + 360
lons[n] = lon
lonprev = lon
n = n + 1
self.boundarylonmin = lons.min()
self.boundarylonmax = lons.max()
# for circular full disk projections where boundary is
# a latitude circle, set boundarylonmax and boundarylonmin
# to cover entire world (so parallels will be drawn).
if self._fulldisk and \
np.abs(self.boundarylonmax-self.boundarylonmin) < 1.:
self.boundarylonmin = -180.
self.boundarylonmax = 180.
b = np.empty((len(lons),2),np.float64)
b[:,0] = lons; b[:,1] = lats
boundaryll = _geoslib.Polygon(b)
return lons, lats, boundaryll, boundaryxy
def drawmapboundary(self,color='k',linewidth=1.0,fill_color=None,\
zorder=None,ax=None):
"""
draw boundary around map projection region, optionally
filling interior of region.
.. tabularcolumns:: |l|L|
============== ====================================================
Keyword Description
============== ====================================================
linewidth line width for boundary (default 1.)
color color of boundary line (default black)
fill_color fill the map region background with this
color (default is to fill with axis
background color). If set to the string
'none', no filling is done.
zorder sets the zorder for filling map background
(default 0).
ax axes instance to use
(default None, use default axes instance).
============== ====================================================
returns the matplotlib.patches.Patch instance representing the map boundary.
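An illustrative sketch (colors are arbitrary):

>>> m = Basemap(projection='ortho', lon_0=0, lat_0=45)  # hypothetical instance
>>> limb = m.drawmapboundary(fill_color='aqua')         # fill map background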
"""
# get current axes instance (if none specified).
ax = ax or self._check_ax()
# if no fill_color given, use axes background color.
# if fill_color is string 'none', really don't fill.
if fill_color is None:
if _matplotlib_version >= '2.0':
fill_color = ax.get_facecolor()
else:
fill_color = ax.get_axis_bgcolor()
elif fill_color == 'none' or fill_color == 'None':
fill_color = None
limb = None
if self.projection in ['ortho','geos','nsper'] or (self.projection=='aeqd' and\
self._fulldisk):
limb = Ellipse((self._width,self._height),2.*self._width,2.*self._height)
if self.projection in ['ortho','geos','nsper','aeqd'] and self._fulldisk:
# elliptical region.
ax.set_frame_on(False)
elif self.projection in _pseudocyl: # elliptical region.
ax.set_frame_on(False)
nx = 100; ny = 100
if self.projection == 'vandg':
nx = 10*nx; ny = 10*ny
# quasi-elliptical region.
lon_0 = self.projparams['lon_0']
# left side
lats1 = np.linspace(-89.9999,89.99999,ny).tolist()
lons1 = len(lats1)*[lon_0-179.9]
# top.
lons2 = np.linspace(lon_0-179.9999,lon_0+179.9999,nx).tolist()
lats2 = len(lons2)*[89.9999]
# right side
lats3 = np.linspace(89.9999,-89.9999,ny).tolist()
lons3 = len(lats3)*[lon_0+179.9999]
# bottom.
lons4 = np.linspace(lon_0+179.9999,lon_0-179.9999,nx).tolist()
lats4 = len(lons4)*[-89.9999]
lons = np.array(lons1+lons2+lons3+lons4,np.float64)
lats = np.array(lats1+lats2+lats3+lats4,np.float64)
x, y = self(lons,lats)
xy = list(zip(x,y))
limb = Polygon(xy)
elif self.round:
ax.set_frame_on(False)
limb = Circle((0.5*(self.xmax+self.xmin),0.5*(self.ymax+self.ymin)),
radius=0.5*(self.xmax-self.xmin),fc='none')
else: # all other projections are rectangular.
ax.set_frame_on(True)
for spine in ax.spines.values():
spine.set_linewidth(linewidth)
spine.set_edgecolor(color)
if zorder is not None:
spine.set_zorder(zorder)
if self.projection not in ['geos','ortho','nsper']:
limb = ax.patch
if limb is not None:
if limb is not ax.patch:
ax.add_patch(limb)
self._mapboundarydrawn = limb
if fill_color is None:
limb.set_fill(False)
else:
limb.set_facecolor(fill_color)
limb.set_zorder(0)
limb.set_edgecolor(color)
limb.set_linewidth(linewidth)
if zorder is not None:
limb.set_zorder(zorder)
limb.set_clip_on(True)
# set axes limits to fit map region.
self.set_axes_limits(ax=ax)
return limb
def fillcontinents(self,color='0.8',lake_color=None,ax=None,zorder=None,alpha=None):
"""
Fill continents.
.. tabularcolumns:: |l|L|
============== ====================================================
Keyword Description
============== ====================================================
color color to fill continents (default gray).
lake_color color to fill inland lakes (default axes background).
ax axes instance (overrides default axes instance).
zorder sets the zorder for the continent polygons (if not
specified, uses default zorder for a Polygon patch).
Set to zero if you want to paint over the filled
continents.
alpha sets alpha transparency for continent polygons
============== ====================================================
After filling continents, lakes are re-filled with the
axis background color (or with ``lake_color``, if given).
returns a list of matplotlib.patches.Polygon objects.
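An illustrative sketch (colors are arbitrary):

>>> m = Basemap(projection='robin', lon_0=0)               # hypothetical instance
>>> m.drawmapboundary(fill_color='lightblue')
>>> polys = m.fillcontinents(color='coral', lake_color='lightblue')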
"""
if self.resolution is None:
raise AttributeError('there are no boundary datasets associated with this Basemap instance')
# get current axes instance (if none specified).
ax = ax or self._check_ax()
# get axis background color.
if _matplotlib_version >= '2.0':
axisbgc = ax.get_facecolor()
else:
axisbgc = ax.get_axis_bgcolor()
npoly = 0
polys = []
for x,y in self.coastpolygons:
xa = np.array(x,np.float32)
ya = np.array(y,np.float32)
# check to see if all four corners of domain in polygon (if so,
# don't draw since it will just fill in the whole map).
# ** turn this off for now since it prevents continents that
# fill the whole map from being filled **
#delx = 10; dely = 10
#if self.projection in ['cyl']:
# delx = 0.1
# dely = 0.1
#test1 = np.fabs(xa-self.urcrnrx) < delx
#test2 = np.fabs(xa-self.llcrnrx) < delx
#test3 = np.fabs(ya-self.urcrnry) < dely
#test4 = np.fabs(ya-self.llcrnry) < dely
#hasp1 = np.sum(test1*test3)
#hasp2 = np.sum(test2*test3)
#hasp4 = np.sum(test2*test4)
#hasp3 = np.sum(test1*test4)
#if not hasp1 or not hasp2 or not hasp3 or not hasp4:
if 1:
xy = list(zip(xa.tolist(),ya.tolist()))
if self.coastpolygontypes[npoly] not in [2,4]:
poly = Polygon(xy,facecolor=color,edgecolor=color,linewidth=0)
else: # lakes filled with background color by default
if lake_color is None:
poly = Polygon(xy,facecolor=axisbgc,edgecolor=axisbgc,linewidth=0)
else:
poly = Polygon(xy,facecolor=lake_color,edgecolor=lake_color,linewidth=0)
if zorder is not None:
poly.set_zorder(zorder)
if alpha is not None:
poly.set_alpha(alpha)
ax.add_patch(poly)
polys.append(poly)
npoly = npoly + 1
# set axes limits to fit map region.
self.set_axes_limits(ax=ax)
# clip continent polygons to map limbs
polys,c = self._cliplimb(ax,polys)
return polys
def _cliplimb(self,ax,coll):
if not self._mapboundarydrawn:
return coll, None
c = self._mapboundarydrawn
if c not in ax.patches:
p = ax.add_patch(c)
#p.set_clip_on(False)
try:
coll.set_clip_path(c)
except:
for item in coll:
item.set_clip_path(c)
return coll,c
def drawcoastlines(self,linewidth=1.,linestyle='solid',color='k',antialiased=1,ax=None,zorder=None):
"""
Draw coastlines.
.. tabularcolumns:: |l|L|
============== ====================================================
Keyword Description
============== ====================================================
linewidth coastline width (default 1.)
linestyle coastline linestyle (default solid)
color coastline color (default black)
antialiased antialiasing switch for coastlines (default True).
ax axes instance (overrides default axes instance)
zorder sets the zorder for the coastlines (if not specified,
uses default zorder for
matplotlib.collections.LineCollection).
============== ====================================================
returns a matplotlib.collections.LineCollection object.
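An illustrative sketch (the region chosen below is arbitrary):

>>> m = Basemap(projection='merc', llcrnrlon=-100, llcrnrlat=20,
...             urcrnrlon=-60, urcrnrlat=50)      # hypothetical regional map
>>> coasts = m.drawcoastlines(linewidth=0.5)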
"""
if self.resolution is None:
raise AttributeError('there are no boundary datasets associated with this Basemap instance')
# get current axes instance (if none specified).
ax = ax or self._check_ax()
coastlines = LineCollection(self.coastsegs,antialiaseds=(antialiased,))
coastlines.set_color(color)
coastlines.set_linestyle(linestyle)
coastlines.set_linewidth(linewidth)
coastlines.set_label('_nolabel_')
if zorder is not None:
coastlines.set_zorder(zorder)
ax.add_collection(coastlines)
# set axes limits to fit map region.
self.set_axes_limits(ax=ax)
# clip to map limbs
coastlines,c = self._cliplimb(ax,coastlines)
return coastlines
def drawcountries(self,linewidth=0.5,linestyle='solid',color='k',antialiased=1,ax=None,zorder=None):
"""
Draw country boundaries.
.. tabularcolumns:: |l|L|
============== ====================================================
Keyword Description
============== ====================================================
linewidth country boundary line width (default 0.5)
linestyle country boundary linestyle (default solid)
color country boundary line color (default black)
antialiased antialiasing switch for country boundaries (default
True).
ax axes instance (overrides default axes instance)
zorder sets the zorder for the country boundaries (if not
specified, uses default zorder for
matplotlib.collections.LineCollection).
============== ====================================================
returns a matplotlib.collections.LineCollection object.
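An illustrative sketch (assumes an existing Basemap instance ``m``):

>>> borders = m.drawcountries(linewidth=0.25, color='gray')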
"""
if self.resolution is None:
raise AttributeError('there are no boundary datasets associated with this Basemap instance')
# read in country line segments, only keeping those that
# intersect map boundary polygon.
if not hasattr(self,'cntrysegs'):
self.cntrysegs, types = self._readboundarydata('countries')
# get current axes instance (if none specified).
ax = ax or self._check_ax()
countries = LineCollection(self.cntrysegs,antialiaseds=(antialiased,))
countries.set_color(color)
countries.set_linestyle(linestyle)
countries.set_linewidth(linewidth)
countries.set_label('_nolabel_')
if zorder is not None:
countries.set_zorder(zorder)
ax.add_collection(countries)
# set axes limits to fit map region.
self.set_axes_limits(ax=ax)
# clip countries to map limbs
countries,c = self._cliplimb(ax,countries)
return countries
def drawstates(self,linewidth=0.5,linestyle='solid',color='k',antialiased=1,ax=None,zorder=None):
"""
Draw state boundaries in Americas.
.. tabularcolumns:: |l|L|
============== ====================================================
Keyword Description
============== ====================================================
linewidth state boundary line width (default 0.5)
linestyle state boundary linestyle (default solid)
color state boundary line color (default black)
antialiased antialiasing switch for state boundaries
(default True).
ax axes instance (overrides default axes instance)
zorder sets the zorder for the state boundaries (if not
specified, uses default zorder for
matplotlib.collections.LineCollection).
============== ====================================================
returns a matplotlib.collections.LineCollection object.
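An illustrative sketch (assumes an existing Basemap instance ``m``
covering part of the Americas):

>>> states = m.drawstates(linewidth=0.5, color='0.5')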
"""
if self.resolution is None:
raise AttributeError('there are no boundary datasets associated with this Basemap instance')
# read in state line segments, only keeping those that
# intersect map boundary polygon.
if not hasattr(self,'statesegs'):
self.statesegs, types = self._readboundarydata('states')
# get current axes instance (if none specified).
ax = ax or self._check_ax()
states = LineCollection(self.statesegs,antialiaseds=(antialiased,))
states.set_color(color)
states.set_linestyle(linestyle)
states.set_linewidth(linewidth)
states.set_label('_nolabel_')
if zorder is not None:
states.set_zorder(zorder)
ax.add_collection(states)
# set axes limits to fit map region.
self.set_axes_limits(ax=ax)
# clip states to map limbs
states,c = self._cliplimb(ax,states)
return states
def drawcounties(self,linewidth=0.1,linestyle='solid',color='k',antialiased=1,
facecolor='none',ax=None,zorder=None,drawbounds=False):
"""
Draw county boundaries in US. The county boundary shapefile
originates with the NOAA Coastal Geospatial Data Project
(http://coastalgeospatial.noaa.gov/data_gis.html).
.. tabularcolumns:: |l|L|
============== ====================================================
Keyword Description
============== ====================================================
linewidth county boundary line width (default 0.1)
linestyle county boundary linestyle (default solid)
color county boundary line color (default black)
facecolor fill color of county (default is no fill)
antialiased antialiasing switch for county boundaries
(default True).
ax axes instance (overrides default axes instance)
zorder sets the zorder for the county boundaries (if not
specified, uses default zorder for
matplotlib.collections.PolyCollection).
drawbounds draw county boundary lines (default False).
============== ====================================================
returns a matplotlib.collections.PolyCollection object.
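An illustrative sketch (assumes an existing Basemap instance ``m``
covering part of the US, and that the UScounties shapefile shipped
with the basemap data is installed):

>>> counties = m.drawcounties(linewidth=0.1, color='gray')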
"""
ax = ax or self._check_ax()
gis_file = os.path.join(basemap_datadir,'UScounties')
county_info = self.readshapefile(gis_file,'counties',\
default_encoding='latin-1',drawbounds=drawbounds)
counties = [coords for coords in self.counties]
counties = PolyCollection(counties)
counties.set_linestyle(linestyle)
counties.set_linewidth(linewidth)
counties.set_edgecolor(color)
counties.set_facecolor(facecolor)
counties.set_label('counties')
if zorder:
counties.set_zorder(zorder)
ax.add_collection(counties)
return counties
def drawrivers(self,linewidth=0.5,linestyle='solid',color='k',antialiased=1,ax=None,zorder=None):
"""
Draw major rivers.
.. tabularcolumns:: |l|L|
============== ====================================================
Keyword Description
============== ====================================================
linewidth river boundary line width (default 0.5)
linestyle river linestyle (default solid)
color river boundary line color (default black)
antialiased antialiasing switch for river boundaries (default
True).
ax axes instance (overrides default axes instance)
zorder sets the zorder for the rivers (if not
specified, uses default zorder for
matplotlib.collections.LineCollection).
============== ====================================================
returns a matplotlib.collections.LineCollection object.
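An illustrative sketch (assumes an existing Basemap instance ``m``):

>>> rivers = m.drawrivers(color='b', linewidth=0.5)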
"""
if self.resolution is None:
raise AttributeError('there are no boundary datasets associated with this Basemap instance')
# read in river line segments, only keeping those that
# intersect map boundary polygon.
if not hasattr(self,'riversegs'):
self.riversegs, types = self._readboundarydata('rivers')
# get current axes instance (if none specified).
ax = ax or self._check_ax()
rivers = LineCollection(self.riversegs,antialiaseds=(antialiased,))
rivers.set_color(color)
rivers.set_linestyle(linestyle)
rivers.set_linewidth(linewidth)
rivers.set_label('_nolabel_')
if zorder is not None:
rivers.set_zorder(zorder)
ax.add_collection(rivers)
# set axes limits to fit map region.
self.set_axes_limits(ax=ax)
# clip rivers to map limbs
rivers,c = self._cliplimb(ax,rivers)
return rivers
def is_land(self,xpt,ypt):
"""
Returns True if the given x,y point (in projection coordinates) is
over land, False otherwise. The definition of land is based upon
the GSHHS coastline polygons associated with the class instance.
Points over lakes inside land regions are not counted as land points.
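An illustrative sketch (note that the arguments are map projection
coordinates; for ``cyl`` these coincide with lon/lat):

>>> m = Basemap(projection='cyl', resolution='c')   # hypothetical instance
>>> m.is_land(-100.0, 40.0)    # a point in the central US -> True
>>> m.is_land(-150.0, 30.0)    # a point in the Pacific    -> False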
"""
if self.resolution is None: return None
landpt = False
for poly in self.landpolygons:
landpt = _geoslib.Point((xpt,ypt)).within(poly)
if landpt: break
lakept = False
for poly in self.lakepolygons:
lakept = _geoslib.Point((xpt,ypt)).within(poly)
if lakept: break
return landpt and not lakept
def readshapefile(self,shapefile,name,drawbounds=True,zorder=None,
linewidth=0.5,color='k',antialiased=1,ax=None,
default_encoding='utf-8'):
"""
Read in shape file, optionally draw boundaries on map.
.. note::
- Assumes shapes are 2D
- only works for Point, MultiPoint, Polyline and Polygon shapes.
- vertices/points must be in geographic (lat/lon) coordinates.
Mandatory Arguments:
.. tabularcolumns:: |l|L|
============== ====================================================
Argument Description
============== ====================================================
shapefile path to shapefile components. Example:
shapefile='/home/jeff/esri/world_borders' assumes
that world_borders.shp, world_borders.shx and
world_borders.dbf live in /home/jeff/esri.
name name for Basemap attribute to hold the shapefile
vertices or points in map projection
coordinates. Class attribute name+'_info' is a list
of dictionaries, one for each shape, containing
attributes of each shape from the dbf file. For
example, if name='counties', self.counties
will be a list of x,y vertices for each shape in
map projection coordinates and self.counties_info
will be a list of dictionaries with shape
attributes. Rings in individual Polygon
shapes are split out into separate polygons, and
additional keys 'RINGNUM' and 'SHAPENUM' are added
to the shape attribute dictionary.
============== ====================================================
The following optional keyword arguments are only relevant for Polyline
and Polygon shape types, for Point and MultiPoint shapes they are
ignored.
.. tabularcolumns:: |l|L|
============== ====================================================
Keyword Description
============== ====================================================
drawbounds draw boundaries of shapes (default True).
zorder shape boundary zorder (if not specified,
default for matplotlib.collections.LineCollection
is used).
linewidth shape boundary line width (default 0.5)
color shape boundary line color (default black)
antialiased antialiasing switch for shape boundaries
(default True).
ax axes instance (overrides default axes instance)
============== ====================================================
A tuple (num_shapes, type, min, max) containing shape file info
is returned.
num_shapes is the number of shapes, type is the type code (one of
the SHPT* constants defined in the shapelib module, see
http://shapelib.maptools.org/shp_api.html) and min and
max are 4-element lists with the minimum and maximum values of the
vertices. If ``drawbounds=True`` a
matplotlib.collections.LineCollection object is appended to the tuple.
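Example (an illustrative sketch; the shapefile path is hypothetical and
assumes ``world_borders.shp/.shx/.dbf`` exist at that location)::

    # '/path/to/world_borders' is a hypothetical shapefile basename
    info = m.readshapefile('/path/to/world_borders', 'borders', drawbounds=True)
    print(info[0], 'shapes read')   # number of shapes
    print(m.borders_info[0])        # attribute dict of the first shape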
"""
import shapefile as shp
from shapefile import Reader
shp.default_encoding = default_encoding
if not os.path.exists('%s.shp'%shapefile):
raise IOError('cannot locate %s.shp'%shapefile)
if not os.path.exists('%s.shx'%shapefile):
raise IOError('cannot locate %s.shx'%shapefile)
if not os.path.exists('%s.dbf'%shapefile):
raise IOError('cannot locate %s.dbf'%shapefile)
# open shapefile, read vertices for each object, convert
# to map projection coordinates (only works for 2D shape types).
try:
shf = Reader(shapefile, encoding=default_encoding)
except:
raise IOError('error reading shapefile %s.shp' % shapefile)
fields = shf.fields
coords = []; attributes = []
msg=dedent("""
shapefile must have lat/lon vertices - it looks like this one has vertices
in map projection coordinates. You can convert the shapefile to geographic
coordinates using the shpproj utility from the shapelib tools
(http://shapelib.maptools.org/shapelib-tools.html)""")
shptype = shf.shapes()[0].shapeType
bbox = shf.bbox.tolist()
info = (shf.numRecords,shptype,bbox[0:2]+[0.,0.],bbox[2:]+[0.,0.])
npoly = 0
for shprec in shf.shapeRecords():
shp = shprec.shape; rec = shprec.record
npoly = npoly + 1
if shptype != shp.shapeType:
raise ValueError('readshapefile can only handle a single shape type per file')
if shptype not in [1,3,5,8]:
raise ValueError('readshapefile can only handle 2D shape types')
verts = shp.points
if shptype in [1,8]: # a Point or MultiPoint shape.
lons, lats = list(zip(*verts))
if max(lons) > 721. or min(lons) < -721. or max(lats) > 90.01 or min(lats) < -90.01:
raise ValueError(msg)
# if latitude is slightly greater than 90, truncate to 90
lats = [max(min(lat, 90.0), -90.0) for lat in lats]
if len(verts) > 1: # MultiPoint
x,y = self(lons, lats)
coords.append(list(zip(x,y)))
else: # single Point
x,y = self(lons[0], lats[0])
coords.append((x,y))
attdict={}
for r,key in zip(rec,fields[1:]):
attdict[key[0]]=r
attributes.append(attdict)
else: # a Polyline or Polygon shape.
parts = shp.parts.tolist()
ringnum = 0
for indx1,indx2 in zip(parts,parts[1:]+[len(verts)]):
ringnum = ringnum + 1
lons, lats = list(zip(*verts[indx1:indx2]))
if max(lons) > 721. or min(lons) < -721. or max(lats) > 90.01 or min(lats) < -90.01:
raise ValueError(msg)
# if latitude is slightly greater than 90, truncate to 90
lats = [max(min(lat, 90.0), -90.0) for lat in lats]
x, y = self(lons, lats)
coords.append(list(zip(x,y)))
attdict={}
for r,key in zip(rec,fields[1:]):
attdict[key[0]]=r
# add information about ring number to dictionary.
attdict['RINGNUM'] = ringnum
attdict['SHAPENUM'] = npoly
attributes.append(attdict)
# draw shape boundaries for polylines, polygons using LineCollection.
if shptype not in [1,8] and drawbounds:
# get current axes instance (if none specified).
ax = ax or self._check_ax()
# make LineCollections for each polygon.
lines = LineCollection(coords,antialiaseds=(1,))
lines.set_color(color)
lines.set_linewidth(linewidth)
lines.set_label('_nolabel_')
if zorder is not None:
lines.set_zorder(zorder)
ax.add_collection(lines)
# set axes limits to fit map region.
self.set_axes_limits(ax=ax)
# clip boundaries to map limbs
lines,c = self._cliplimb(ax,lines)
info = info + (lines,)
self.__dict__[name]=coords
self.__dict__[name+'_info']=attributes
return info
def drawparallels(self,circles,color='k',textcolor='k',linewidth=1.,zorder=None, \
dashes=[1,1],labels=[0,0,0,0],labelstyle=None, \
fmt='%g',xoffset=None,yoffset=None,ax=None,latmax=None,
**text_kwargs):
"""
Draw and label parallels (latitude lines) for values (in degrees)
given in the sequence ``circles``.
.. tabularcolumns:: |l|L|
============== ====================================================
Keyword Description
============== ====================================================
color color to draw parallels (default black).
textcolor color to draw labels (default black).
linewidth line width for parallels (default 1.)
zorder sets the zorder for parallels (if not specified,
uses default zorder for matplotlib.lines.Line2D
objects).
dashes dash pattern for parallels (default [1,1], i.e.
1 pixel on, 1 pixel off).
labels list of 4 values (default [0,0,0,0]) that control
whether parallels are labelled where they intersect
the left, right, top or bottom of the plot. For
example labels=[1,0,0,1] will cause parallels
to be labelled where they intersect the left and
bottom of the plot, but not the right and top.
labelstyle if set to "+/-", north and south latitudes are
labelled with "+" and "-", otherwise they are
labelled with "N" and "S".
fmt a format string to format the parallel labels
(default '%g') **or** a function that takes a
latitude value in degrees as its only argument
and returns a formatted string.
xoffset label offset from edge of map in x-direction
(default is 0.01 times width of map in map
projection coordinates).
yoffset label offset from edge of map in y-direction
(default is 0.01 times height of map in map
projection coordinates).
ax axes instance (overrides default axes instance)
latmax absolute value of latitude up to which parallels are drawn
(default is 80).
\**text_kwargs additional keyword arguments controlling text
for labels that are passed on to
the text method of the axes instance (see
matplotlib.pyplot.text documentation).
============== ====================================================
returns a dictionary whose keys are the parallel values, and
whose values are tuples containing lists of the
matplotlib.lines.Line2D and matplotlib.text.Text instances
associated with each parallel. Deleting an item from the
dictionary removes the corresponding parallel from the plot.
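Example (an illustrative sketch; assumes a Basemap instance ``m`` and
``import numpy as np``)::

    # label parallels where they intersect the left and bottom edges
    parallels = m.drawparallels(np.arange(-80., 81., 20.), labels=[1, 0, 0, 1])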
"""
text_kwargs['color']=textcolor # pass textcolor kwarg on to ax.text
# if celestial=True, don't use "N" and "S" labels.
if labelstyle is None and self.celestial:
labelstyle="+/-"
# get current axes instance (if none specified).
ax = ax or self._check_ax()
# don't draw meridians past latmax, always draw parallel at latmax.
if latmax is None: latmax = 80.
# offset for labels.
if yoffset is None:
yoffset = (self.urcrnry-self.llcrnry)/100.
if self.aspect > 1:
yoffset = self.aspect*yoffset
else:
yoffset = yoffset/self.aspect
if xoffset is None:
xoffset = (self.urcrnrx-self.llcrnrx)/100.
if self.projection in _cylproj + _pseudocyl:
lons = np.linspace(self.llcrnrlon, self.urcrnrlon, 10001)
elif self.projection in ['tmerc']:
lon_0 = self.projparams['lon_0']
# tmerc only defined within +/- 90 degrees of lon_0
lons = np.linspace(lon_0-90,lon_0+90,100001)
else:
lonmin = self.boundarylonmin; lonmax = self.boundarylonmax
lons = np.linspace(lonmin, lonmax, 10001)
# make sure latmax degree parallel is drawn if projection not merc or cyl or miller
try:
circlesl = list(circles)
except:
circlesl = circles
if self.projection not in _cylproj + _pseudocyl:
if max(circlesl) > 0 and latmax not in circlesl:
circlesl.append(latmax)
if min(circlesl) < 0 and -latmax not in circlesl:
circlesl.append(-latmax)
xdelta = 0.01*(self.xmax-self.xmin)
ydelta = 0.01*(self.ymax-self.ymin)
linecolls = {}
for circ in circlesl:
lats = circ*np.ones(len(lons),np.float32)
x,y = self(lons,lats)
# remove points outside domain.
# leave a little slop around edges (3*xdelta)
# don't really know why, but this appears to be needed,
# or lines sometimes don't reach edge of plot.
testx = np.logical_and(x>=self.xmin-3*xdelta,x<=self.xmax+3*xdelta)
x = np.compress(testx, x)
y = np.compress(testx, y)
testy = np.logical_and(y>=self.ymin-3*ydelta,y<=self.ymax+3*ydelta)
x = np.compress(testy, x)
y = np.compress(testy, y)
lines = []
if len(x) > 1 and len(y) > 1:
# split into separate line segments if necessary.
# (not necessary for cylindrical or pseudocylindrical projections)
xd = (x[1:]-x[0:-1])**2
yd = (y[1:]-y[0:-1])**2
dist = np.sqrt(xd+yd)
if self.projection not in ['cyl','rotpole']:
split = dist > self.rmajor/10.
else:
split = dist > 1.
if np.sum(split) and self.projection not in _cylproj:
ind = (np.compress(split,np.squeeze(split*np.indices(xd.shape)))+1).tolist()
xl = []
yl = []
iprev = 0
ind.append(len(xd))
for i in ind:
xl.append(x[iprev:i])
yl.append(y[iprev:i])
iprev = i
else:
xl = [x]
yl = [y]
# draw each line segment.
for x,y in zip(xl,yl):
# skip if only a point.
if len(x) > 1 and len(y) > 1:
l = Line2D(x,y,linewidth=linewidth)
l.set_color(color)
l.set_dashes(dashes)
l.set_label('_nolabel_')
if zorder is not None:
l.set_zorder(zorder)
ax.add_line(l)
lines.append(l)
linecolls[circ] = (lines,[])
# draw labels for parallels
# parallels not labelled for fulldisk orthographic or geostationary
if self.projection in ['ortho','geos','nsper','vandg','aeqd'] and max(labels):
if self.projection == 'vandg' or self._fulldisk:
sys.stdout.write('Warning: Cannot label parallels on %s basemap' % _projnames[self.projection])
labels = [0,0,0,0]
# search along edges of map to see if parallels intersect.
# if so, find x,y location of intersection and draw a label there.
dx = (self.xmax-self.xmin)/1000.
dy = (self.ymax-self.ymin)/1000.
if self.projection in _pseudocyl:
lon_0 = self.projparams['lon_0']
for dolab,side in zip(labels,['l','r','t','b']):
if not dolab: continue
# for cylindrical projections, don't draw parallels on top or bottom.
if self.projection in _cylproj + _pseudocyl and side in ['t','b']: continue
if side in ['l','r']:
nmax = int((self.ymax-self.ymin)/dy+1)
yy = np.linspace(self.llcrnry,self.urcrnry,nmax)
if side == 'l':
if self.projection in _pseudocyl:
lats = np.linspace(-89.99,89.99,nmax)
if self.celestial:
lons = (self.projparams['lon_0']+180.)*np.ones(len(lats),lats.dtype)
else:
lons = (self.projparams['lon_0']-180.)*np.ones(len(lats),lats.dtype)
xx, yy = self(lons, lats)
else:
xx = self.llcrnrx*np.ones(yy.shape,yy.dtype)
lons,lats = self(xx,yy,inverse=True)
lons = lons.tolist(); lats = lats.tolist()
else:
if self.projection in _pseudocyl:
lats = np.linspace(-89.99,89.99,nmax)
if self.celestial:
lons = (self.projparams['lon_0']-180.)*np.ones(len(lats),lats.dtype)
else:
lons = (self.projparams['lon_0']+180.)*np.ones(len(lats),lats.dtype)
xx, yy = self(lons, lats)
else:
xx = self.urcrnrx*np.ones(yy.shape,yy.dtype)
lons,lats = self(xx,yy,inverse=True)
lons = lons.tolist(); lats = lats.tolist()
if max(lons) > 1.e20 or max(lats) > 1.e20:
raise ValueError('inverse transformation undefined - please adjust the map projection region')
# adjust so 0 <= lons < 360
lons = [(lon+360) % 360 for lon in lons]
else:
nmax = int((self.xmax-self.xmin)/dx+1)
xx = np.linspace(self.llcrnrx,self.urcrnrx,nmax)
if side == 'b':
lons,lats = self(xx,self.llcrnry*np.ones(xx.shape,np.float32),inverse=True)
lons = lons.tolist(); lats = lats.tolist()
else:
lons,lats = self(xx,self.urcrnry*np.ones(xx.shape,np.float32),inverse=True)
lons = lons.tolist(); lats = lats.tolist()
if max(lons) > 1.e20 or max(lats) > 1.e20:
raise ValueError('inverse transformation undefined - please adjust the map projection region')
# adjust so 0 <= lons < 360
lons = [(lon+360) % 360 for lon in lons]
for lat in circles:
# don't label parallels for round polar plots
if self.round: continue
# find index of parallel (there may be two, so
# search from left and right).
nl = _searchlist(lats,lat)
nr = _searchlist(lats[::-1],lat)
if nr != -1: nr = len(lons)-nr-1
latlab = _setlatlab(fmt,lat,labelstyle)
# parallels can intersect each map edge twice.
for i,n in enumerate([nl,nr]):
# don't bother if close to the first label.
if i and abs(nr-nl) < 100: continue
if n >= 0:
t = None
if side == 'l':
if self.projection in _pseudocyl:
if self.celestial:
xlab,ylab = self(lon_0+179.9,lat)
else:
xlab,ylab = self(lon_0-179.9,lat)
else:
xlab = self.llcrnrx
xlab = xlab-xoffset
if self.projection in _pseudocyl:
if lat>0:
t=ax.text(xlab,yy[n],latlab,horizontalalignment='right',verticalalignment='bottom',**text_kwargs)
elif lat<0:
t=ax.text(xlab,yy[n],latlab,horizontalalignment='right',verticalalignment='top',**text_kwargs)
else:
t=ax.text(xlab,yy[n],latlab,horizontalalignment='right',verticalalignment='center',**text_kwargs)
else:
t=ax.text(xlab,yy[n],latlab,horizontalalignment='right',verticalalignment='center',**text_kwargs)
elif side == 'r':
if self.projection in _pseudocyl:
if self.celestial:
xlab,ylab = self(lon_0-179.9,lat)
else:
xlab,ylab = self(lon_0+179.9,lat)
else:
xlab = self.urcrnrx
xlab = xlab+xoffset
if self.projection in _pseudocyl:
if lat>0:
t=ax.text(xlab,yy[n],latlab,horizontalalignment='left',verticalalignment='bottom',**text_kwargs)
elif lat<0:
t=ax.text(xlab,yy[n],latlab,horizontalalignment='left',verticalalignment='top',**text_kwargs)
else:
t=ax.text(xlab,yy[n],latlab,horizontalalignment='left',verticalalignment='center',**text_kwargs)
else:
t=ax.text(xlab,yy[n],latlab,horizontalalignment='left',verticalalignment='center',**text_kwargs)
elif side == 'b':
t = ax.text(xx[n],self.llcrnry-yoffset,latlab,horizontalalignment='center',verticalalignment='top',**text_kwargs)
else:
t = ax.text(xx[n],self.urcrnry+yoffset,latlab,horizontalalignment='center',verticalalignment='bottom',**text_kwargs)
if t is not None: linecolls[lat][1].append(t)
# set axes limits to fit map region.
self.set_axes_limits(ax=ax)
keys = list(linecolls.keys()); vals = list(linecolls.values())
for k,v in zip(keys,vals):
if v == ([], []):
del linecolls[k]
# add a remove method to each tuple.
else:
linecolls[k] = _tup(linecolls[k])
# override __delitem__ in dict to call remove() on values.
pardict = _dict(linecolls)
# clip parallels for round polar plots (and delete labels).
for lines, _ in pardict.values():
self._cliplimb(ax, lines)
return pardict
def drawmeridians(self,meridians,color='k',textcolor='k',linewidth=1., zorder=None,\
dashes=[1,1],labels=[0,0,0,0],labelstyle=None,\
fmt='%g',xoffset=None,yoffset=None,ax=None,latmax=None,
**text_kwargs):
"""
Draw and label meridians (longitude lines) for values (in degrees)
given in the sequence ``meridians``.
.. tabularcolumns:: |l|L|
============== ====================================================
Keyword Description
============== ====================================================
color color to draw meridians (default black).
textcolor color to draw labels (default black).
linewidth line width for meridians (default 1.)
zorder sets the zorder for meridians (if not specified,
uses default zorder for matplotlib.lines.Line2D
objects).
dashes dash pattern for meridians (default [1,1], i.e.
1 pixel on, 1 pixel off).
labels list of 4 values (default [0,0,0,0]) that control
whether meridians are labelled where they intersect
the left, right, top or bottom of the plot. For
example labels=[1,0,0,1] will cause meridians
to be labelled where they intersect the left and
bottom of the plot, but not the right and top.
labelstyle if set to "+/-", east and west longitudes are
labelled with "+" and "-", otherwise they are
labelled with "E" and "W".
fmt a format string to format the meridian labels
(default '%g') **or** a function that takes a
longitude value in degrees as its only argument
and returns a formatted string.
xoffset label offset from edge of map in x-direction
(default is 0.01 times width of map in map
projection coordinates).
yoffset label offset from edge of map in y-direction
(default is 0.01 times height of map in map
projection coordinates).
ax axes instance (overrides default axes instance)
latmax absolute value of latitude to which meridians are drawn
(default is 80).
\**text_kwargs additional keyword arguments controlling text
for labels that are passed on to
the text method of the axes instance (see
matplotlib.pyplot.text documentation).
============== ====================================================
returns a dictionary whose keys are the meridian values, and
whose values are tuples containing lists of the
matplotlib.lines.Line2D and matplotlib.text.Text instances
associated with each meridian. Deleting an item from the
dictionary removes the corresponding meridian from the plot.
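Example (an illustrative sketch; assumes a Basemap instance ``m`` and
``import numpy as np``)::

    # label meridians along the bottom edge only
    meridians = m.drawmeridians(np.arange(0., 361., 30.), labels=[0, 0, 0, 1])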
"""
text_kwargs['color']=textcolor # pass textcolor kwarg on to ax.text
# for cylindrical projections, try to handle wraparound (i.e. if
# projection is defined in -180 to 0 and user asks for meridians from
# 180 to 360 to be drawn, it should work)
if self.projection in _cylproj or self.projection in _pseudocyl:
def addlon(meridians,madd):
minside = (madd >= self.llcrnrlon and madd <= self.urcrnrlon)
if minside and madd not in meridians: meridians.append(madd)
return meridians
merids = list(meridians)
meridians = []
for m in merids:
meridians = addlon(meridians,m)
meridians = addlon(meridians,m+360)
meridians = addlon(meridians,m-360)
meridians.sort()
# if celestial=True, don't use "E" and "W" labels.
if labelstyle is None and self.celestial:
labelstyle="+/-"
# get current axes instance (if none specified).
ax = ax or self._check_ax()
# don't draw meridians past latmax, always draw parallel at latmax.
if latmax is None: latmax = 80. # unused w/ cyl, merc or miller proj.
# offset for labels.
if yoffset is None:
yoffset = (self.urcrnry-self.llcrnry)/100.
if self.aspect > 1:
yoffset = self.aspect*yoffset
else:
yoffset = yoffset/self.aspect
if xoffset is None:
xoffset = (self.urcrnrx-self.llcrnrx)/100.
lats = np.linspace(self.latmin,self.latmax,10001)
if self.projection not in _cylproj + _pseudocyl:
testlat = np.logical_and(lats>-latmax,lats<latmax)
lats = np.compress(testlat,lats)
xdelta = 0.01*(self.xmax-self.xmin)
ydelta = 0.01*(self.ymax-self.ymin)
linecolls = {}
for merid in meridians:
lons = merid*np.ones(len(lats),np.float32)
x,y = self(lons,lats)
# remove points outside domain.
# leave a little slop around edges (3*xdelta)
# don't really know why, but this appears to be needed,
# or lines sometimes don't reach edge of plot.
testx = np.logical_and(x>=self.xmin-3*xdelta,x<=self.xmax+3*xdelta)
x = np.compress(testx, x)
y = np.compress(testx, y)
testy = np.logical_and(y>=self.ymin-3*ydelta,y<=self.ymax+3*ydelta)
x = np.compress(testy, x)
y = np.compress(testy, y)
lines = []
if len(x) > 1 and len(y) > 1:
# split into separate line segments if necessary.
# (not necessary for mercator or cylindrical or miller).
xd = (x[1:]-x[0:-1])**2
yd = (y[1:]-y[0:-1])**2
dist = np.sqrt(xd+yd)
if self.projection not in ['cyl','rotpole']:
split = dist > self.rmajor/10.
else:
split = dist > 1.
if np.sum(split) and self.projection not in _cylproj:
ind = (np.compress(split,np.squeeze(split*np.indices(xd.shape)))+1).tolist()
xl = []
yl = []
iprev = 0
ind.append(len(xd))
for i in ind:
xl.append(x[iprev:i])
yl.append(y[iprev:i])
iprev = i
else:
xl = [x]
yl = [y]
# draw each line segment.
for x,y in zip(xl,yl):
# skip if only a point.
if len(x) > 1 and len(y) > 1:
l = Line2D(x,y,linewidth=linewidth)
l.set_color(color)
l.set_dashes(dashes)
l.set_label('_nolabel_')
if zorder is not None:
l.set_zorder(zorder)
ax.add_line(l)
lines.append(l)
linecolls[merid] = (lines,[])
# draw labels for meridians.
# meridians not labelled for sinusoidal, hammer, mollweide,
# VanDerGrinten or full-disk orthographic/geostationary.
if self.projection in ['sinu','moll','hammer','vandg'] and max(labels):
sys.stdout.write('Warning: Cannot label meridians on %s basemap' % _projnames[self.projection])
labels = [0,0,0,0]
if self.projection in ['ortho','geos','nsper','aeqd'] and max(labels):
if self._fulldisk and self.boundinglat is None:
sys.stdout.write(dedent(
"""'Warning: Cannot label meridians on full-disk
Geostationary, Orthographic or Azimuthal equidistant basemap
"""))
labels = [0,0,0,0]
# search along edges of map to see if parallels intersect.
# if so, find x,y location of intersection and draw a label there.
dx = (self.xmax-self.xmin)/1000.
dy = (self.ymax-self.ymin)/1000.
if self.projection in _pseudocyl:
lon_0 = self.projparams['lon_0']
xmin,ymin = self(lon_0-179.9,-90)
xmax,ymax = self(lon_0+179.9,90)
for dolab,side in zip(labels,['l','r','t','b']):
if not dolab or self.round: continue
# for cylindrical projections, don't draw meridians on left or right.
if self.projection in _cylproj + _pseudocyl and side in ['l','r']: continue
if side in ['l','r']:
nmax = int((self.ymax-self.ymin)/dy+1)
yy = np.linspace(self.llcrnry,self.urcrnry,nmax)
if side == 'l':
lons,lats = self(self.llcrnrx*np.ones(yy.shape,np.float32),yy,inverse=True)
lons = lons.tolist(); lats = lats.tolist()
else:
lons,lats = self(self.urcrnrx*np.ones(yy.shape,np.float32),yy,inverse=True)
lons = lons.tolist(); lats = lats.tolist()
if max(lons) > 1.e20 or max(lats) > 1.e20:
raise ValueError('inverse transformation undefined - please adjust the map projection region')
# adjust so 0 <= lons < 360
lons = [(lon+360) % 360 for lon in lons]
else:
nmax = int((self.xmax-self.xmin)/dx+1)
if self.projection in _pseudocyl:
xx = np.linspace(xmin,xmax,nmax)
else:
xx = np.linspace(self.llcrnrx,self.urcrnrx,nmax)
if side == 'b':
lons,lats = self(xx,self.llcrnry*np.ones(xx.shape,np.float32),inverse=True)
lons = lons.tolist(); lats = lats.tolist()
else:
lons,lats = self(xx,self.urcrnry*np.ones(xx.shape,np.float32),inverse=True)
lons = lons.tolist(); lats = lats.tolist()
if max(lons) > 1.e20 or max(lats) > 1.e20:
raise ValueError('inverse transformation undefined - please adjust the map projection region')
# adjust so 0 <= lons < 360
lons = [(lon+360) % 360 for lon in lons]
for lon in meridians:
# adjust so 0 <= lon < 360
lon2 = (lon+360) % 360
# find index of meridian (there may be two, so
# search from left and right).
nl = _searchlist(lons,lon2)
nr = _searchlist(lons[::-1],lon2)
if nr != -1: nr = len(lons)-nr-1
lonlab = _setlonlab(fmt,lon2,labelstyle)
# meridians can intersect each map edge twice.
for i,n in enumerate([nl,nr]):
lat = lats[n]/100.
# no meridians > latmax for projections other than merc,cyl,miller.
if self.projection not in _cylproj and lat > latmax: continue
# don't bother if close to the first label.
if i and abs(nr-nl) < 100: continue
if n >= 0:
t = None
if side == 'l':
t = ax.text(self.llcrnrx-xoffset,yy[n],lonlab,horizontalalignment='right',verticalalignment='center',**text_kwargs)
elif side == 'r':
t = ax.text(self.urcrnrx+xoffset,yy[n],lonlab,horizontalalignment='left',verticalalignment='center',**text_kwargs)
elif side == 'b':
t = ax.text(xx[n],self.llcrnry-yoffset,lonlab,horizontalalignment='center',verticalalignment='top',**text_kwargs)
else:
t = ax.text(xx[n],self.urcrnry+yoffset,lonlab,horizontalalignment='center',verticalalignment='bottom',**text_kwargs)
if t is not None: linecolls[lon][1].append(t)
# set axes limits to fit map region.
self.set_axes_limits(ax=ax)
# remove empty values from linecolls dictionary
keys = list(linecolls.keys()); vals = list(linecolls.values())
for k,v in zip(keys,vals):
if v == ([], []):
del linecolls[k]
else:
# add a remove method to each tuple.
linecolls[k] = _tup(linecolls[k])
# override __delitem__ in dict to call remove() on values.
meridict = _dict(linecolls)
# for round polar plots, clip meridian lines and label them.
if self.round:
# label desired?
label = False
for lab in labels:
if lab: label = True
for merid in meridict:
if not label: continue
# label
lonlab = _setlonlab(fmt,merid,labelstyle)
x,y = self(merid,self.boundinglat)
r = np.sqrt((x-0.5*(self.xmin+self.xmax))**2+
(y-0.5*(self.ymin+self.ymax))**2)
r = r + np.sqrt(xoffset**2+yoffset**2)
if self.projection.startswith('np'):
pole = 1
elif self.projection.startswith('sp'):
pole = -1
elif self.projection == 'ortho' and self.round:
pole = 1
if pole == 1:
theta = (np.pi/180.)*(merid-self.projparams['lon_0']-90)
if self.projection == 'ortho' and\
self.projparams['lat_0'] == -90:
theta = (np.pi/180.)*(-merid+self.projparams['lon_0']+90)
x = r*np.cos(theta)+0.5*(self.xmin+self.xmax)
y = r*np.sin(theta)+0.5*(self.ymin+self.ymax)
if x > 0.5*(self.xmin+self.xmax)+xoffset:
horizalign = 'left'
elif x < 0.5*(self.xmin+self.xmax)-xoffset:
horizalign = 'right'
else:
horizalign = 'center'
if y > 0.5*(self.ymin+self.ymax)+yoffset:
vertalign = 'bottom'
elif y < 0.5*(self.ymin+self.ymax)-yoffset:
vertalign = 'top'
else:
vertalign = 'center'
# labels [l,r,t,b]
if labels[0] and not labels[1] and x >= 0.5*(self.xmin+self.xmax)+xoffset: continue
if labels[1] and not labels[0] and x <= 0.5*(self.xmin+self.xmax)-xoffset: continue
if labels[2] and not labels[3] and y <= 0.5*(self.ymin+self.ymax)-yoffset: continue
if labels[3] and not labels[2] and y >= 0.5*(self.ymin+self.ymax)+yoffset: continue
elif pole == -1:
theta = (np.pi/180.)*(-merid+self.projparams['lon_0']+90)
x = r*np.cos(theta)+0.5*(self.xmin+self.xmax)
y = r*np.sin(theta)+0.5*(self.ymin+self.ymax)
if x > 0.5*(self.xmin+self.xmax)-xoffset:
horizalign = 'right'
elif x < 0.5*(self.xmin+self.xmax)+xoffset:
horizalign = 'left'
else:
horizalign = 'center'
if y > 0.5*(self.ymin+self.ymax)-yoffset:
vertalign = 'top'
elif y < 0.5*(self.ymin+self.ymax)+yoffset:
vertalign = 'bottom'
else:
vertalign = 'center'
# labels [l,r,t,b]
if labels[0] and not labels[1] and x <= 0.5*(self.xmin+self.xmax)+xoffset: continue
if labels[1] and not labels[0] and x >= 0.5*(self.xmin+self.xmax)-xoffset: continue
if labels[2] and not labels[3] and y >= 0.5*(self.ymin+self.ymax)-yoffset: continue
if labels[3] and not labels[2] and y <= 0.5*(self.ymin+self.ymax)+yoffset: continue
t=ax.text(x,y,lonlab,horizontalalignment=horizalign,verticalalignment=vertalign,**text_kwargs)
meridict[merid][1].append(t)
for lines, _ in meridict.values():
self._cliplimb(ax, lines)
return meridict
def tissot(self,lon_0,lat_0,radius_deg,npts,ax=None,**kwargs):
"""
Draw a polygon centered at ``lon_0,lat_0``. The polygon
approximates a circle on the surface of the earth with radius
``radius_deg`` degrees latitude along longitude ``lon_0``,
made up of ``npts`` vertices.
The polygon represents a Tissot's indicatrix
(http://en.wikipedia.org/wiki/Tissot's_Indicatrix),
which when drawn on a map shows the distortion
inherent in the map projection.
.. note::
Cannot handle situations in which the polygon intersects
the edge of the map projection domain, and then re-enters the domain.
Extra keyword ``ax`` can be used to override the default axis instance.
Other \**kwargs passed on to matplotlib.patches.Polygon.
returns a matplotlib.patches.Polygon object."""
# get current axes instance (if none specified); honor the explicit ``ax`` argument.
ax = ax or self._check_ax()
g = pyproj.Geod(a=self.rmajor,b=self.rminor)
az12,az21,dist = g.inv(lon_0,lat_0,lon_0,lat_0+radius_deg)
seg = [self(lon_0,lat_0+radius_deg)]
delaz = 360./npts
az = az12
for n in range(npts):
az = az+delaz
lon, lat, az21 = g.fwd(lon_0, lat_0, az, dist)
x,y = self(lon,lat)
# add segment if it is in the map projection region.
if x < 1.e20 and y < 1.e20:
seg.append((x,y))
poly = Polygon(seg,**kwargs)
ax.add_patch(poly)
# set axes limits to fit map region.
self.set_axes_limits(ax=ax)
# clip polygons to map limbs
poly,c = self._cliplimb(ax,poly)
return poly
def gcpoints(self,lon1,lat1,lon2,lat2,npoints):
"""
compute ``npoints`` points along a great circle with endpoints
``(lon1,lat1)`` and ``(lon2,lat2)``.
Returns arrays x,y with map projection coordinates.
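Example (an illustrative sketch; the endpoint coordinates are approximate
values for New York and London)::

    x, y = m.gcpoints(-73.8, 40.6, -0.1, 51.5, 100)
    m.plot(x, y, color='r')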
"""
gc = pyproj.Geod(a=self.rmajor,b=self.rminor)
lonlats = gc.npts(lon1,lat1,lon2,lat2,npoints-2)
lons=[lon1];lats=[lat1]
for lon,lat in lonlats:
lons.append(lon); lats.append(lat)
lons.append(lon2); lats.append(lat2)
x, y = self(lons, lats)
return x,y
def drawgreatcircle(self,lon1,lat1,lon2,lat2,del_s=100.,**kwargs):
"""
Draw a great circle on the map from the longitude-latitude
pair ``lon1,lat1`` to ``lon2,lat2``
.. tabularcolumns:: |l|L|
============== =======================================================
Keyword Description
============== =======================================================
del_s points on great circle computed every del_s kilometers
(default 100).
\**kwargs other keyword arguments are passed on to :meth:`plot`
method of Basemap instance.
============== =======================================================
Returns a list with a single ``matplotlib.lines.Line2D`` object like a
call to ``pyplot.plot()``.
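Example (an illustrative sketch; the endpoint coordinates are approximate
values for New York and London)::

    m.drawgreatcircle(-73.8, 40.6, -0.1, 51.5, linewidth=2, color='b')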
"""
# use great circle formula for a perfect sphere.
gc = pyproj.Geod(a=self.rmajor,b=self.rminor)
az12,az21,dist = gc.inv(lon1,lat1,lon2,lat2)
npoints = int((dist+0.5*1000.*del_s)/(1000.*del_s))
lonlats = gc.npts(lon1,lat1,lon2,lat2,npoints)
lons = [lon1]; lats = [lat1]
for lon, lat in lonlats:
lons.append(lon)
lats.append(lat)
lons.append(lon2); lats.append(lat2)
x, y = self(lons, lats)
# Correct wrap around effect of great circles
# get points
_p = self.plot(x,y,**kwargs)
p = _p[0].get_path()
# since we know the difference between any two points, we can use this to find wrap arounds on the plot
max_dist = 1000*del_s*2
# calculate distances and compare with max allowable distance
dists = np.abs(np.diff(p.vertices[:,0]))
cuts = np.where( dists > max_dist )[0]
# if there are any cut points, cut them and begin again at the next point
for i,k in enumerate(cuts):
# vertex to cut at
cut_point = cuts[i]
# create new vertices with a nan in between and set those as the path's vertices
verts = np.concatenate(
[p.vertices[:cut_point, :],
[[np.nan, np.nan]],
p.vertices[cut_point+1:, :]]
)
p.codes = None
p.vertices = verts
return _p
def transform_scalar(self,datin,lons,lats,nx,ny,returnxy=False,checkbounds=False,order=1,masked=False):
"""
Interpolate a scalar field (``datin``) from a lat/lon grid with
longitudes = ``lons`` and latitudes = ``lats`` to a ``ny`` by ``nx``
map projection grid. Typically used to transform data to
map projection coordinates for plotting on a map with
the :meth:`imshow` method.
.. tabularcolumns:: |l|L|
============== ====================================================
Argument Description
============== ====================================================
datin input data on a lat/lon grid.
lons, lats rank-1 arrays containing longitudes and latitudes
(in degrees) of input data in increasing order.
For non-cylindrical projections (those other than
``cyl``, ``merc``, ``cea``, ``gall`` and ``mill``) lons
must fit within range -180 to 180.
nx, ny The size of the output regular grid in map
projection coordinates
============== ====================================================
.. tabularcolumns:: |l|L|
============== ====================================================
Keyword Description
============== ====================================================
returnxy If True, the x and y values of the map
projection grid are also returned (Default False).
checkbounds If True, values of lons and lats are checked to see
that they lie within the map projection region.
Default is False, and data outside map projection
region is clipped to values on boundary.
masked If True, interpolated data is returned as a masked
array with values outside map projection region
masked (Default False).
order 0 for nearest-neighbor interpolation, 1 for
bilinear, 3 for cubic spline (Default 1).
Cubic spline interpolation requires scipy.ndimage.
============== ====================================================
Returns ``datout`` (data on map projection grid).
If returnxy=True, returns ``data,x,y``.
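Example (an illustrative sketch; ``dat``, ``lons`` and ``lats`` are assumed
to be a 2-D field and increasing 1-D coordinate arrays on a lat/lon grid)::

    # choose an output grid spacing of roughly 40 km in projection coordinates
    nx = int((m.xmax - m.xmin) / 40000.) + 1
    ny = int((m.ymax - m.ymin) / 40000.) + 1
    dat_proj = m.transform_scalar(dat, lons, lats, nx, ny)
    im = m.imshow(dat_proj)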
"""
# check that lons, lats increasing
delon = lons[1:]-lons[0:-1]
delat = lats[1:]-lats[0:-1]
if min(delon) < 0. or min(delat) < 0.:
raise ValueError('lons and lats must be increasing!')
# check that lons in -180,180 for non-cylindrical projections.
if self.projection not in _cylproj:
lonsa = np.array(lons)
count = np.sum(lonsa < -180.00001) + np.sum(lonsa > 180.00001)
if count > 1:
raise ValueError('grid must be shifted so that lons are monotonically increasing and fit in range -180,+180 (see shiftgrid function)')
# allow for wraparound point to be outside.
elif count == 1 and math.fabs(lons[-1]-lons[0]-360.) > 1.e-4:
raise ValueError('grid must be shifted so that lons are monotonically increasing and fit in range -180,+180 (see shiftgrid function)')
if returnxy:
lonsout, latsout, x, y = self.makegrid(nx,ny,returnxy=True)
else:
lonsout, latsout = self.makegrid(nx,ny)
datout = interp(datin,lons,lats,lonsout,latsout,checkbounds=checkbounds,order=order,masked=masked)
if returnxy:
return datout, x, y
else:
return datout
def transform_vector(self,uin,vin,lons,lats,nx,ny,returnxy=False,checkbounds=False,order=1,masked=False):
"""
Rotate and interpolate a vector field (``uin,vin``) from a
lat/lon grid with longitudes = ``lons`` and latitudes = ``lats``
to a ``ny`` by ``nx`` map projection grid.
The input vector field is defined in spherical coordinates (it
has eastward and northward components) while the output
vector field is rotated to map projection coordinates (relative
to x and y). The magnitude of the vector is preserved.
.. tabularcolumns:: |l|L|
============== ====================================================
Arguments Description
============== ====================================================
uin, vin input vector field on a lat/lon grid.
lons, lats rank-1 arrays containing longitudes and latitudes
(in degrees) of input data in increasing order.
For non-cylindrical projections (those other than
``cyl``, ``merc``, ``cea``, ``gall`` and ``mill``) lons
must fit within range -180 to 180.
nx, ny The size of the output regular grid in map
projection coordinates
============== ====================================================
.. tabularcolumns:: |l|L|
============== ====================================================
Keyword Description
============== ====================================================
returnxy If True, the x and y values of the map
projection grid are also returned (Default False).
checkbounds If True, values of lons and lats are checked to see
that they lie within the map projection region.
Default is False, and data outside map projection
region is clipped to values on boundary.
masked If True, interpolated data is returned as a masked
array with values outside map projection region
masked (Default False).
order 0 for nearest-neighbor interpolation, 1 for
bilinear, 3 for cubic spline (Default 1).
Cubic spline interpolation requires scipy.ndimage.
============== ====================================================
Returns ``uout, vout`` (vector field on map projection grid).
If returnxy=True, returns ``uout,vout,x,y``.
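Example (an illustrative sketch; ``u``, ``v``, ``lons`` and ``lats`` are
assumed to be eastward/northward vector components and increasing 1-D
coordinate arrays on a lat/lon grid)::

    uproj, vproj, xx, yy = m.transform_vector(u, v, lons, lats, 31, 31,
                                              returnxy=True)
    q = m.quiver(xx, yy, uproj, vproj)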
"""
# check that lons, lats increasing
delon = lons[1:]-lons[0:-1]
delat = lats[1:]-lats[0:-1]
if min(delon) < 0. or min(delat) < 0.:
raise ValueError('lons and lats must be increasing!')
# check that lons in -180,180 for non-cylindrical projections.
if self.projection not in _cylproj:
lonsa = np.array(lons)
count = np.sum(lonsa < -180.00001) + np.sum(lonsa > 180.00001)
if count > 1:
raise ValueError('grid must be shifted so that lons are monotonically increasing and fit in range -180,+180 (see shiftgrid function)')
# allow for wraparound point to be outside.
elif count == 1 and math.fabs(lons[-1]-lons[0]-360.) > 1.e-4:
raise ValueError('grid must be shifted so that lons are monotonically increasing and fit in range -180,+180 (see shiftgrid function)')
lonsout, latsout, x, y = self.makegrid(nx,ny,returnxy=True)
# interpolate to map projection coordinates.
uin = interp(uin,lons,lats,lonsout,latsout,checkbounds=checkbounds,order=order,masked=masked)
vin = interp(vin,lons,lats,lonsout,latsout,checkbounds=checkbounds,order=order,masked=masked)
# rotate from geographic to map coordinates.
return self.rotate_vector(uin,vin,lonsout,latsout,returnxy=returnxy)
def rotate_vector(self,uin,vin,lons,lats,returnxy=False):
"""
Rotate a vector field (``uin,vin``) on a rectilinear grid
with longitudes = ``lons`` and latitudes = ``lats`` from
geographical (lat/lon) into map projection (x/y) coordinates.
Differs from transform_vector in that no interpolation is done.
The vector is returned on the same grid, but rotated into
x,y coordinates.
The input vector field is defined in spherical coordinates (it
has eastward and northward components) while the output
vector field is rotated to map projection coordinates (relative
to x and y). The magnitude of the vector is preserved.
.. tabularcolumns:: |l|L|
============== ====================================================
Arguments Description
============== ====================================================
uin, vin input vector field on a lat/lon grid.
lons, lats Arrays containing longitudes and latitudes
(in degrees) of input data in increasing order.
For non-cylindrical projections (those other than
``cyl``, ``merc``, ``cea``, ``gall`` and ``mill``) lons
must fit within range -180 to 180.
============== ====================================================
Returns ``uout, vout`` (rotated vector field).
If the optional keyword argument
``returnxy`` is True (default is False),
returns ``uout,vout,x,y`` (where ``x,y`` are the map projection
coordinates of the grid defined by ``lons,lats``).
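Example (an illustrative sketch; ``u`` and ``v`` are assumed to be 2-D
eastward/northward components and ``lons``, ``lats`` the matching coordinate
arrays, so no interpolation is needed)::

    urot, vrot, x, y = m.rotate_vector(u, v, lons, lats, returnxy=True)
    m.quiver(x, y, urot, vrot)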
"""
# if lons,lats are 1d and uin,vin are 2d, and
# lats describes 1st dim of uin,vin, and
# lons describes 2nd dim of uin,vin, make lons,lats 2d
# with meshgrid.
if lons.ndim == lats.ndim == 1 and uin.ndim == vin.ndim == 2 and\
uin.shape[1] == vin.shape[1] == lons.shape[0] and\
uin.shape[0] == vin.shape[0] == lats.shape[0]:
lons, lats = np.meshgrid(lons, lats)
else:
if not lons.shape == lats.shape == uin.shape == vin.shape:
raise TypeError("shapes of lons,lats and uin,vin don't match")
x, y = self(lons, lats)
# rotate from geographic to map coordinates.
if ma.isMaskedArray(uin):
mask = ma.getmaskarray(uin)
masked = True
uin = uin.filled(1)
vin = vin.filled(1)
else:
masked = False
# Map the (lon, lat) vector in the complex plane.
uvc = uin + 1j*vin
uvmag = np.abs(uvc)
theta = np.angle(uvc)
# Define a displacement (dlon, dlat) that moves all
# positions (lons, lats) a small distance in the
# direction of the original vector.
dc = 1E-5 * np.exp(theta*1j)
dlat = dc.imag * np.cos(np.radians(lats))
dlon = dc.real
# Deal with displacements that overshoot the North or South Pole.
farnorth = np.abs(lats+dlat) >= 90.0
somenorth = farnorth.any()
if somenorth:
dlon[farnorth] *= -1.0
dlat[farnorth] *= -1.0
# Add displacement to original location and find the native coordinates.
lon1 = lons + dlon
lat1 = lats + dlat
xn, yn = self(lon1, lat1)
# Determine the angle of the displacement in the native coordinates.
vecangle = np.arctan2(yn-y, xn-x)
if somenorth:
vecangle[farnorth] += np.pi
# Compute the x-y components of the original vector.
uvcout = uvmag * np.exp(1j*vecangle)
uout = uvcout.real
vout = uvcout.imag
if masked:
uout = ma.array(uout, mask=mask)
vout = ma.array(vout, mask=mask)
if returnxy:
return uout,vout,x,y
else:
return uout,vout
def set_axes_limits(self,ax=None):
"""
Final step in Basemap method wrappers of Axes plotting methods:
Set axis limits, fix aspect ratio for map domain using current
or specified axes instance. This is done only once per axes
instance.
In interactive mode, this method always calls draw_if_interactive
before returning.
"""
# get current axes instance (if none specified).
ax = ax or self._check_ax()
# If we have already set the axes limits, and if the user
# has not defeated this by turning autoscaling back on,
# then all we need to do is plot if interactive.
if (hash(ax) in self._initialized_axes
and not ax.get_autoscalex_on()
and not ax.get_autoscaley_on()):
if is_interactive():
import matplotlib.pyplot as plt
plt.draw_if_interactive()
return
self._initialized_axes.add(hash(ax))
# Take control of axis scaling:
ax.set_autoscale_on(False)
# update data limits for map domain.
corners = ((self.llcrnrx, self.llcrnry), (self.urcrnrx, self.urcrnry))
ax.update_datalim(corners)
ax.set_xlim((self.llcrnrx, self.urcrnrx))
ax.set_ylim((self.llcrnry, self.urcrnry))
# if map boundary not yet drawn for elliptical maps, draw it with default values.
if not self._mapboundarydrawn or self._mapboundarydrawn not in ax.patches:
# elliptical map, draw boundary manually.
if ((self.projection in ['ortho', 'geos', 'nsper', 'aeqd'] and
self._fulldisk) or self.round or
self.projection in _pseudocyl):
# first draw boundary, no fill
limb1 = self.drawmapboundary(fill_color='none', ax=ax)
# draw another filled patch, with no boundary.
limb2 = self.drawmapboundary(linewidth=0, ax=ax)
self._mapboundarydrawn = limb2
# for elliptical map, always turn off axis_frame.
if ((self.projection in ['ortho', 'geos', 'nsper', 'aeqd'] and
self._fulldisk) or self.round or
self.projection in _pseudocyl):
# turn off axes frame.
ax.set_frame_on(False)
# make sure aspect ratio of map preserved.
# plot is re-centered in bounding rectangle.
# (anchor instance var determines where plot is placed)
if self.fix_aspect:
ax.set_aspect('equal',anchor=self.anchor)
else:
ax.set_aspect('auto',anchor=self.anchor)
# make sure axis ticks are turned off.
if self.noticks:
ax.set_xticks([])
ax.set_yticks([])
# force draw if in interactive mode.
if is_interactive():
import matplotlib.pyplot as plt
plt.draw_if_interactive()
def _save_use_hold(self, ax, kwargs):
h = kwargs.pop('hold', None)
if hasattr(ax, '_hold'):
self._tmp_hold = ax._hold
if h is not None:
ax._hold = h
def _restore_hold(self, ax):
if hasattr(ax, '_hold'):
ax._hold = self._tmp_hold
@_transform1d
def scatter(self, *args, **kwargs):
"""
Plot points with markers on the map
(see matplotlib.pyplot.scatter documentation).
If ``latlon`` keyword is set to True, x,y are interpreted as
longitude and latitude in degrees. Data and longitudes are
automatically shifted to match map projection region for cylindrical
and pseudocylindrical projections, and x,y are transformed to map
projection coordinates. If ``latlon`` is False (default), x and y
are assumed to be map projection coordinates.
Extra keyword ``ax`` can be used to override the default axes instance.
Other \**kwargs passed on to matplotlib.pyplot.scatter.
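Example (an illustrative sketch; ``stlons`` and ``stlats`` are assumed to
be sequences of station longitudes and latitudes in degrees)::

    m.scatter(stlons, stlats, s=30, c='r', marker='o', latlon=True)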
"""
ax, plt = self._ax_plt_from_kw(kwargs)
self._save_use_hold(ax, kwargs)
try:
ret = ax.scatter(*args, **kwargs)
finally:
self._restore_hold(ax)
# reset current active image (only if pyplot is imported).
if plt:
plt.sci(ret)
# set axes limits to fit map region.
self.set_axes_limits(ax=ax)
# clip to map limbs
ret,c = self._cliplimb(ax,ret)
return ret
@_transform1d
def plot(self, *args, **kwargs):
"""
Draw lines and/or markers on the map
(see matplotlib.pyplot.plot documentation).
If ``latlon`` keyword is set to True, x,y are interpreted as
longitude and latitude in degrees. Data and longitudes are
automatically shifted to match map projection region for cylindrical
and pseudocylindrical projections, and x,y are transformed to map
projection coordinates. If ``latlon`` is False (default), x and y
are assumed to be map projection coordinates.
Extra keyword ``ax`` can be used to override the default axis instance.
Other \**kwargs passed on to matplotlib.pyplot.plot.
"""
ax = kwargs.pop('ax', None) or self._check_ax()
self._save_use_hold(ax, kwargs)
try:
ret = ax.plot(*args, **kwargs)
finally:
self._restore_hold(ax)
# set axes limits to fit map region.
self.set_axes_limits(ax=ax)
# clip to map limbs
ret,c = self._cliplimb(ax,ret)
return ret
def imshow(self, *args, **kwargs):
"""
Display an image over the map
(see matplotlib.pyplot.imshow documentation).
``extent`` and ``origin`` keywords set automatically so image
will be drawn over map region.
Extra keyword ``ax`` can be used to override the default axis instance.
Other \**kwargs passed on to matplotlib.pyplot.imshow.
returns a matplotlib.image.AxesImage instance.
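Example (an illustrative sketch; ``dat_proj`` is assumed to be a 2-D array
already on the map projection grid, e.g. output of transform_scalar)::

    im = m.imshow(dat_proj, cmap='jet')
    cb = m.colorbar(im)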
"""
ax, plt = self._ax_plt_from_kw(kwargs)
kwargs['extent']=(self.llcrnrx,self.urcrnrx,self.llcrnry,self.urcrnry)
# use origin='lower', unless overridden.
if 'origin' not in kwargs:
kwargs['origin']='lower'
self._save_use_hold(ax, kwargs)
try:
ret = ax.imshow(*args, **kwargs)
finally:
self._restore_hold(ax)
# reset current active image (only if pyplot is imported).
if plt:
plt.sci(ret)
# set axes limits to fit map region.
self.set_axes_limits(ax=ax)
# clip image to map limbs
ret,c = self._cliplimb(ax,ret)
return ret
@_transform
def pcolor(self,x,y,data,**kwargs):
"""
Make a pseudo-color plot over the map
(see matplotlib.pyplot.pcolor documentation).
If ``latlon`` keyword is set to True, x,y are interpreted as
longitude and latitude in degrees. Data and longitudes are
automatically shifted to match map projection region for cylindrical
and pseudocylindrical projections, and x,y are transformed to map
projection coordinates. If ``latlon`` is False (default), x and y
are assumed to be map projection coordinates.
If x or y are outside projection limb (i.e. they have values > 1.e20)
they will be converted to masked arrays with those values masked.
As a result, those values will not be plotted.
If ``tri`` is set to ``True``, an unstructured grid is assumed
(x,y,data must be 1-d) and matplotlib.pyplot.tripcolor is used.
Extra keyword ``ax`` can be used to override the default axis instance.
Other \**kwargs passed on to matplotlib.pyplot.pcolor (or tripcolor if
``tri=True``).
Note: (taken from matplotlib.pyplot.pcolor documentation)
Ideally the dimensions of x and y should be one greater than those of data;
if the dimensions are the same, then the last row and column of data will be ignored.
"""
ax, plt = self._ax_plt_from_kw(kwargs)
self._save_use_hold(ax, kwargs)
try:
if kwargs.pop('tri', False):
try:
import matplotlib.tri as tri
except:
msg='need matplotlib > 0.99.1 to plot on unstructured grids'
raise ImportError(msg)
# for unstructured grids, toss out points outside
# projection limb (don't use those points in triangulation).
if ma.isMA(data):
data = data.filled(fill_value=1.e30)
masked=True
else:
masked=False
mask = np.logical_or(x<1.e20,y<1.e20)
x = np.compress(mask,x)
y = np.compress(mask,y)
data = np.compress(mask,data)
if masked:
triang = tri.Triangulation(x, y)
z = data[triang.triangles]
mask = (z > 1.e20).sum(axis=-1)
triang.set_mask(mask)
ret = ax.tripcolor(triang,data,**kwargs)
else:
ret = ax.tripcolor(x,y,data,**kwargs)
else:
# make x,y masked arrays
# (masked where data is outside of projection limb)
x = ma.masked_values(np.where(x > 1.e20,1.e20,x), 1.e20)
y = ma.masked_values(np.where(y > 1.e20,1.e20,y), 1.e20)
ret = ax.pcolor(x,y,data,**kwargs)
finally:
self._restore_hold(ax)
# reset current active image (only if pyplot is imported).
if plt:
plt.sci(ret)
# set axes limits to fit map region.
self.set_axes_limits(ax=ax)
# clip to map limbs
ret,c = self._cliplimb(ax,ret)
if self.round:
# for some reason, frame gets turned on.
ax.set_frame_on(False)
return ret
@_transform
def pcolormesh(self,x,y,data,**kwargs):
"""
Make a pseudo-color plot over the map
(see matplotlib.pyplot.pcolormesh documentation).
If ``latlon`` keyword is set to True, x,y are interpreted as
longitude and latitude in degrees. Data and longitudes are
automatically shifted to match map projection region for cylindrical
and pseudocylindrical projections, and x,y are transformed to map
projection coordinates. If ``latlon`` is False (default), x and y
are assumed to be map projection coordinates.
Extra keyword ``ax`` can be used to override the default axis instance.
Other \**kwargs passed on to matplotlib.pyplot.pcolormesh.
Note: (taken from matplotlib.pyplot.pcolor documentation)
Ideally the dimensions of x and y should be one greater than those of data;
if the dimensions are the same, then the last row and column of data will be ignored.
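Example (an illustrative sketch; ``lon2d``, ``lat2d`` and ``data`` are
assumed to be 2-D arrays on a lat/lon grid)::

    cs = m.pcolormesh(lon2d, lat2d, data, latlon=True)
    m.colorbar(cs)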
"""
ax, plt = self._ax_plt_from_kw(kwargs)
# fix for invalid grid points
if ((np.any(x > 1e20) or np.any(y > 1e20)) and
x.ndim == 2 and y.ndim == 2):
if x.shape != y.shape:
raise ValueError('pcolormesh: x and y need same dimension')
nx,ny = x.shape
if nx < data.shape[0] or ny < data.shape[1]:
raise ValueError('pcolormesh: data dimension needs to be at least that of x and y.')
mask = (
(x[:-1,:-1] > 1e20) |
(x[1:,:-1] > 1e20) |
(x[:-1,1:] > 1e20) |
(x[1:,1:] > 1e20) |
(y[:-1,:-1] > 1e20) |
(y[1:,:-1] > 1e20) |
(y[:-1,1:] > 1e20) |
(y[1:,1:] > 1e20)
)
# we do not want to overwrite original array
data = data[:nx-1,:ny-1].copy()
data[mask] = np.nan
self._save_use_hold(ax, kwargs)
try:
ret = ax.pcolormesh(x,y,data,**kwargs)
finally:
self._restore_hold(ax)
# reset current active image (only if pyplot is imported).
if plt:
plt.sci(ret)
# set axes limits to fit map region.
self.set_axes_limits(ax=ax)
# clip to map limbs
ret,c = self._cliplimb(ax,ret)
if self.round:
# for some reason, frame gets turned on.
ax.set_frame_on(False)
return ret
def hexbin(self,x,y,**kwargs):
"""
Make a hexagonal binning plot of x versus y, where x, y are 1-D
sequences of the same length, N. If C is None (the default), this is a
histogram of the number of occurrences of the observations at
(x[i],y[i]).
If C is specified, it specifies values at the coordinate (x[i],y[i]).
These values are accumulated for each hexagonal bin and then reduced
according to reduce_C_function, which defaults to the numpy mean function
(np.mean). (If C is specified, it must also be a 1-D sequence of the
same length as x and y.)
x, y and/or C may be masked arrays, in which case only unmasked points
will be plotted.
(see matplotlib.pyplot.hexbin documentation).
Extra keyword ``ax`` can be used to override the default axis instance.
Other \**kwargs passed on to matplotlib.pyplot.hexbin
"""
ax, plt = self._ax_plt_from_kw(kwargs)
self._save_use_hold(ax, kwargs)
try:
# make x,y masked arrays
# (masked where data is outside of projection limb)
x = ma.masked_values(np.where(x > 1.e20,1.e20,x), 1.e20)
y = ma.masked_values(np.where(y > 1.e20,1.e20,y), 1.e20)
ret = ax.hexbin(x,y,**kwargs)
finally:
self._restore_hold(ax)
# reset current active image (only if pyplot is imported).
if plt:
plt.sci(ret)
# set axes limits to fit map region.
self.set_axes_limits(ax=ax)
# clip to map limbs
ret,c = self._cliplimb(ax,ret)
return ret
@_transform
def contour(self,x,y,data,*args,**kwargs):
"""
Make a contour plot over the map
(see matplotlib.pyplot.contour documentation).
If ``latlon`` keyword is set to True, x,y are interpreted as
longitude and latitude in degrees. Data and longitudes are
automatically shifted to match map projection region for cylindrical
and pseudocylindrical projections, and x,y are transformed to map
projection coordinates. If ``latlon`` is False (default), x and y
are assumed to be map projection coordinates.
Extra keyword ``ax`` can be used to override the default axis instance.
If ``tri`` is set to ``True``, an unstructured grid is assumed
(x,y,data must be 1-d) and matplotlib.pyplot.tricontour is used.
Other \*args and \**kwargs passed on to matplotlib.pyplot.contour
(or tricontour if ``tri=True``).
"""
ax, plt = self._ax_plt_from_kw(kwargs)
self._save_use_hold(ax, kwargs)
try:
if kwargs.pop('tri', False):
try:
import matplotlib.tri as tri
except:
msg='need matplotlib > 0.99.1 to plot on unstructured grids'
raise ImportError(msg)
# for unstructured grids, toss out points outside
# projection limb (don't use those points in triangulation).
if ma.isMA(data):
data = data.filled(fill_value=1.e30)
masked=True
else:
masked=False
mask = np.logical_or(x<self.xmin,y<self.xmin) +\
np.logical_or(x>self.xmax,y>self.xmax)
x = np.compress(mask,x)
y = np.compress(mask,y)
data = np.compress(mask,data)
if masked:
triang = tri.Triangulation(x, y)
z = data[triang.triangles]
mask = (z > 1.e20).sum(axis=-1)
triang.set_mask(mask)
CS = ax.tricontour(triang,data,*args,**kwargs)
else:
CS = ax.tricontour(x,y,data,*args,**kwargs)
else:
# make sure x is monotonically increasing - if not,
# print warning suggesting that the data be shifted in longitude
# with the shiftgrid function.
# only do this check for global projections.
if self.projection in _cylproj + _pseudocyl:
xx = x[x.shape[0]//2,:]
condition = (xx >= self.xmin) & (xx <= self.xmax)
xl = xx.compress(condition).tolist()
xs = xl[:]
xs.sort()
if xl != xs:
sys.stdout.write(dedent("""
WARNING: x coordinate not monotonically increasing - contour plot
may not be what you expect. If it looks odd, you can either
adjust the map projection region to be consistent with your data, or
(if your data is on a global lat/lon grid) use the shiftdata
method to adjust the data to be consistent with the map projection
region (see examples/shiftdata.py)."""))
# mask for points more than one grid length outside projection limb.
xx = ma.masked_where(x > 1.e20, x)
yy = ma.masked_where(y > 1.e20, y)
epsx = np.abs(xx[:,1:]-xx[:,0:-1]).max()
epsy = np.abs(yy[1:,:]-yy[0:-1,:]).max()
xymask = \
np.logical_or(np.greater(x,self.xmax+epsx),np.greater(y,self.ymax+epsy))
xymask = xymask + \
np.logical_or(np.less(x,self.xmin-epsx),np.less(y,self.ymin-epsy))
data = ma.asarray(data)
# combine with data mask.
mask = np.logical_or(ma.getmaskarray(data),xymask)
data = ma.masked_array(data,mask=mask)
CS = ax.contour(x,y,data,*args,**kwargs)
finally:
self._restore_hold(ax)
# reset current active image (only if pyplot is imported).
if plt and CS.get_array() is not None:
plt.sci(CS)
# set axes limits to fit map region.
self.set_axes_limits(ax=ax)
# clip to map limbs
CS.collections,c = self._cliplimb(ax,CS.collections)
return CS
@_transform
def contourf(self,x,y,data,*args,**kwargs):
"""
Make a filled contour plot over the map
(see matplotlib.pyplot.contourf documentation).
If ``latlon`` keyword is set to True, x,y are interpreted as
longitude and latitude in degrees. Data and longitudes are
automatically shifted to match map projection region for cylindrical
and pseudocylindrical projections, and x,y are transformed to map
projection coordinates. If ``latlon`` is False (default), x and y
are assumed to be map projection coordinates.
If x or y are outside projection limb (i.e. they have values > 1.e20),
the corresponding data elements will be masked.
Extra keyword 'ax' can be used to override the default axis instance.
If ``tri`` is set to ``True``, an unstructured grid is assumed
(x,y,data must be 1-d) and matplotlib.pyplot.tricontourf is used.
Other \*args and \**kwargs passed on to matplotlib.pyplot.contourf
(or tricontourf if ``tri=True``).
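Example (an illustrative sketch; ``lon2d``, ``lat2d`` and ``data`` are
assumed to be 2-D arrays on a lat/lon grid)::

    cs = m.contourf(lon2d, lat2d, data, 15, latlon=True)
    cbar = m.colorbar(cs, location='bottom', pad='5%')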
"""
ax, plt = self._ax_plt_from_kw(kwargs)
self._save_use_hold(ax, kwargs)
try:
if kwargs.get('tri', False):
try:
import matplotlib.tri as tri
except:
msg='need matplotlib > 0.99.1 to plot on unstructured grids'
raise ImportError(msg)
# for unstructured grids, toss out points outside
# projection limb (don't use those points in triangulation).
if ma.isMA(data):
data = data.filled(fill_value=1.e30)
masked=True
else:
masked=False
mask = np.logical_or(x<1.e20,y<1.e20)
x = np.compress(mask,x)
y = np.compress(mask,y)
data = np.compress(mask,data)
if masked:
triang = tri.Triangulation(x, y)
z = data[triang.triangles]
mask = (z > 1.e20).sum(axis=-1)
triang.set_mask(mask)
CS = ax.tricontourf(triang,data,*args,**kwargs)
else:
CS = ax.tricontourf(x,y,data,*args,**kwargs)
else:
# make sure x is monotonically increasing - if not,
# print warning suggesting that the data be shifted in longitude
# with the shiftgrid function.
# only do this check for global projections.
if self.projection in _cylproj + _pseudocyl:
xx = x[x.shape[0]//2,:]
condition = (xx >= self.xmin) & (xx <= self.xmax)
xl = xx.compress(condition).tolist()
xs = xl[:]
xs.sort()
if xl != xs:
sys.stdout.write(dedent("""
WARNING: x coordinate not monotonically increasing - contour plot
may not be what you expect. If it looks odd, you can either
adjust the map projection region to be consistent with your data, or
(if your data is on a global lat/lon grid) use the shiftgrid
function to adjust the data to be consistent with the map projection
region (see examples/contour_demo.py)."""))
# mask for points more than one grid length outside projection limb.
xx = ma.masked_where(x > 1.e20, x)
yy = ma.masked_where(y > 1.e20, y)
if self.projection != 'omerc':
epsx = np.abs(xx[:,1:]-xx[:,0:-1]).max()
epsy = np.abs(yy[1:,:]-yy[0:-1,:]).max()
else: # doesn't work for omerc (FIXME)
epsx = 0.; epsy = 0
xymask = \
np.logical_or(np.greater(x,self.xmax+epsx),np.greater(y,self.ymax+epsy))
xymask = xymask + \
np.logical_or(np.less(x,self.xmin-epsx),np.less(y,self.ymin-epsy))
data = ma.asarray(data)
# combine with data mask.
mask = np.logical_or(ma.getmaskarray(data),xymask)
data = ma.masked_array(data,mask=mask)
CS = ax.contourf(x,y,data,*args,**kwargs)
finally:
self._restore_hold(ax)
# reset current active image (only if pyplot is imported).
if plt and CS.get_array() is not None:
plt.sci(CS)
# set axes limits to fit map region.
self.set_axes_limits(ax=ax)
# clip to map limbs
CS.collections,c = self._cliplimb(ax,CS.collections)
return CS
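# Usage sketch (illustrative only, not part of the Basemap source): given a
# Basemap instance ``m`` and 2-D ``lons``, ``lats``, ``data`` arrays, a filled
# contour plot in geographic coordinates would be drawn with
#     cs = m.contourf(lons, lats, data, 15, latlon=True)
#     plt.colorbar(cs)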
@_transformuv
def quiver(self, x, y, u, v, *args, **kwargs):
"""
Make a vector plot (u, v) with arrows on the map.
Arguments may be 1-D or 2-D arrays or sequences
(see matplotlib.pyplot.quiver documentation for details).
If ``latlon`` keyword is set to True, x,y are interpreted as
longitude and latitude in degrees. Data and longitudes are
automatically shifted to match map projection region for cylindrical
and pseudocylindrical projections, and x,y are transformed to map
projection coordinates. If ``latlon`` is False (default), x and y
are assumed to be map projection coordinates.
Extra keyword ``ax`` can be used to override the default axis instance.
Other \*args and \**kwargs passed on to matplotlib.pyplot.quiver.
"""
ax, plt = self._ax_plt_from_kw(kwargs)
self._save_use_hold(ax, kwargs)
try:
ret = ax.quiver(x,y,u,v,*args,**kwargs)
finally:
self._restore_hold(ax)
if plt is not None and ret.get_array() is not None:
plt.sci(ret)
# set axes limits to fit map region.
self.set_axes_limits(ax=ax)
# clip to map limbs
ret,c = self._cliplimb(ax,ret)
return ret
@_transformuv
def streamplot(self, x, y, u, v, *args, **kwargs):
"""
Draws streamlines of a vector flow.
(see matplotlib.pyplot.streamplot documentation).
If ``latlon`` keyword is set to True, x,y are interpreted as
longitude and latitude in degrees. Data and longitudes are
automatically shifted to match map projection region for cylindrical
and pseudocylindrical projections, and x,y are transformed to map
projection coordinates. If ``latlon`` is False (default), x and y
are assumed to be map projection coordinates.
Extra keyword ``ax`` can be used to override the default axis instance.
Other \*args and \**kwargs passed on to matplotlib.pyplot.streamplot.
"""
if _matplotlib_version < '1.2':
msg = dedent("""
streamplot method requires matplotlib 1.2 or higher,
you have %s""" % _matplotlib_version)
raise NotImplementedError(msg)
ax, plt = self._ax_plt_from_kw(kwargs)
self._save_use_hold(ax, kwargs)
try:
ret = ax.streamplot(x,y,u,v,*args,**kwargs)
finally:
self._restore_hold(ax)
if plt is not None and ret.lines.get_array() is not None:
plt.sci(ret.lines)
# set axes limits to fit map region.
self.set_axes_limits(ax=ax)
# clip to map limbs
ret.lines,c = self._cliplimb(ax,ret.lines)
ret.arrows,c = self._cliplimb(ax,ret.arrows)
# streamplot arrows not returned in matplotlib 1.1.1, so clip all
# FancyArrow patches attached to axes instance.
if c is not None:
for p in ax.patches:
if isinstance(p,FancyArrowPatch): p.set_clip_path(c)
return ret
@_transformuv
def barbs(self, x, y, u, v, *args, **kwargs):
"""
Make a wind barb plot (u, v) on the map.
(see matplotlib.pyplot.barbs documentation).
If ``latlon`` keyword is set to True, x,y are interpreted as
longitude and latitude in degrees. Data and longitudes are
automatically shifted to match map projection region for cylindrical
and pseudocylindrical projections, and x,y are transformed to map
projection coordinates. If ``latlon`` is False (default), x and y
are assumed to be map projection coordinates.
Extra keyword ``ax`` can be used to override the default axis instance.
Other \*args and \**kwargs passed on to matplotlib.pyplot.barbs
Returns two matplotlib.axes.Barbs instances, one for the Northern
Hemisphere and one for the Southern Hemisphere.
"""
if _matplotlib_version < '0.98.3':
msg = dedent("""
barb method requires matplotlib 0.98.3 or higher,
you have %s""" % _matplotlib_version)
raise NotImplementedError(msg)
ax, plt = self._ax_plt_from_kw(kwargs)
lons, lats = self(x, y, inverse=True)
unh = ma.masked_where(lats <= 0, u)
vnh = ma.masked_where(lats <= 0, v)
ush = ma.masked_where(lats > 0, u)
vsh = ma.masked_where(lats > 0, v)
self._save_use_hold(ax, kwargs)
try:
retnh = ax.barbs(x,y,unh,vnh,*args,**kwargs)
kwargs['flip_barb']=True
retsh = ax.barbs(x,y,ush,vsh,*args,**kwargs)
finally:
self._restore_hold(ax)
# Because there are two collections returned in general,
# we can't set the current image...
#if plt is not None and ret.get_array() is not None:
# plt.sci(retnh)
# set axes limits to fit map region.
self.set_axes_limits(ax=ax)
# clip to map limbs
retnh,c = self._cliplimb(ax,retnh)
retsh,c = self._cliplimb(ax,retsh)
return retnh,retsh
def drawlsmask(self,land_color="0.8",ocean_color="w",lsmask=None,
lsmask_lons=None,lsmask_lats=None,lakes=True,resolution='l',grid=5,**kwargs):
"""
Draw land-sea mask image.
.. note::
The land-sea mask image cannot be overlaid on top
of other images, due to limitations in matplotlib image handling
(you can't specify the zorder of an image).
.. tabularcolumns:: |l|L|
============== ====================================================
Keywords Description
============== ====================================================
land_color desired land color (color name or rgba tuple).
Default gray ("0.8").
ocean_color desired water color (color name or rgba tuple).
Default white.
lsmask An array of 0's for ocean pixels, 1's for
land pixels and 2's for lake/pond pixels.
Default is None
(default 5-minute resolution land-sea mask is used).
lakes Plot lakes and ponds (Default True)
lsmask_lons 1d array of longitudes for lsmask (ignored
if lsmask is None). Longitudes must be ordered
from -180 W eastward.
lsmask_lats 1d array of latitudes for lsmask (ignored
if lsmask is None). Latitudes must be ordered
from -90 S northward.
resolution gshhs coastline resolution used to define land/sea
mask (default 'l', available 'c','l','i','h' or 'f')
grid land/sea mask grid spacing in minutes (Default 5;
10, 2.5 and 1.25 are also available).
\**kwargs extra keyword arguments passed on to
:meth:`imshow`
============== ====================================================
If any of the lsmask, lsmask_lons or lsmask_lats keywords are not
set, the built in GSHHS land-sea mask datasets are used.
Extra keyword ``ax`` can be used to override the default axis instance.
returns a matplotlib.image.AxesImage instance.
"""
# convert land and water colors to integer rgba tuples with
# values between 0 and 255.
from matplotlib.colors import ColorConverter
c = ColorConverter()
# if conversion fails, assume it's because the color
# given is already an rgba tuple with values between 0 and 255.
try:
cl = c.to_rgba(land_color)
rgba_land = tuple([int(255*x) for x in cl])
except:
rgba_land = land_color
try:
co = c.to_rgba(ocean_color)
rgba_ocean = tuple([int(255*x) for x in co])
except:
rgba_ocean = ocean_color
# look for axes instance (as keyword, an instance variable
# or from plt.gca().
ax = kwargs.pop('ax', None) or self._check_ax()
# Clear saved lsmask if new lsmask is passed
if lsmask is not None or lsmask_lons is not None \
or lsmask_lats is not None:
# Make sure passed lsmask is not the same as cached mask
if lsmask is not self.lsmask:
self.lsmask = None
# if lsmask,lsmask_lons,lsmask_lats keywords not given,
# read default land-sea mask in from file.
if lsmask is None or lsmask_lons is None or lsmask_lats is None:
# if lsmask instance variable already set, data already
# read in.
if self.lsmask is None:
# read in land/sea mask.
lsmask_lons, lsmask_lats, lsmask =\
_readlsmask(lakes=lakes,resolution=resolution,grid=grid)
# instance variable lsmask is set on first invocation,
# it contains the land-sea mask interpolated to the native
# projection grid. Further calls to drawlsmask will not
# redo the interpolation (unless a new land-sea mask is passed
# in via the lsmask, lsmask_lons, lsmask_lats keywords).
# is it a cylindrical projection whose limits lie
# outside the limits of the image?
cylproj = self.projection in _cylproj and \
(self.urcrnrlon > lsmask_lons[-1] or \
self.llcrnrlon < lsmask_lons[0])
if cylproj:
# stack grids side-by-side (in longitudinal direction), so
# any range of longitudes may be plotted on a world map.
# in versions of NumPy later than 1.10.0, concatenate will
# not stack these arrays as expected. If axis 1 is outside
# the dimensions of the array, concatenate will now raise
# an IndexError. Using hstack instead.
lsmask_lons = \
np.hstack((lsmask_lons,lsmask_lons[1:] + 360))
lsmask = \
np.hstack((lsmask,lsmask[:,1:]))
else:
if lakes: lsmask = np.where(lsmask==2,np.array(0,np.uint8),lsmask)
# transform mask to nx x ny regularly spaced native projection grid
# nx and ny chosen to have roughly the same horizontal
# resolution as mask.
if self.lsmask is None:
nlons = len(lsmask_lons)
nlats = len(lsmask_lats)
if self.projection == 'cyl':
dx = lsmask_lons[1]-lsmask_lons[0]
else:
dx = (np.pi/180.)*(lsmask_lons[1]-lsmask_lons[0])*self.rmajor
nx = int((self.xmax-self.xmin)/dx)+1; ny = int((self.ymax-self.ymin)/dx)+1
# interpolate rgba values from proj='cyl' (geographic coords)
# to a rectangular map projection grid.
mask,x,y = self.transform_scalar(lsmask,lsmask_lons,\
lsmask_lats,nx,ny,returnxy=True,order=0,masked=255)
# for these projections, points outside the projection
# limb have to be set to transparent manually.
if self.projection in _pseudocyl:
lons, lats = self(x, y, inverse=True)
lon_0 = self.projparams['lon_0']
lats = lats[:,nx//2]
lons1 = (lon_0+180.)*np.ones(lons.shape[0],np.float64)
lons2 = (lon_0-180.)*np.ones(lons.shape[0],np.float64)
xmax,ytmp = self(lons1,lats)
xmin,ytmp = self(lons2,lats)
for j in range(lats.shape[0]):
xx = x[j,:]
mask[j,:]=np.where(np.logical_or(xx<xmin[j],xx>xmax[j]),\
255,mask[j,:])
self.lsmask = mask
ny, nx = self.lsmask.shape
rgba = np.ones((ny,nx,4),np.uint8)
rgba_land = np.array(rgba_land,np.uint8)
rgba_ocean = np.array(rgba_ocean,np.uint8)
for k in range(4):
rgba[:,:,k] =
|
np.where(self.lsmask,rgba_land[k],rgba_ocean[k])
|
numpy.where
|
import os
import cv2
import math
import random
import argparse
import numpy as np
from tqdm import tqdm
from glob import glob
from multiprocessing import Pool
ORIGINAL_RADIUS = 10
ORIGINAL_CENTER = (10, 0)
ORIGINAL_SIZE = 512
TARGET_SIZE = 128
def arg_boolean(v):
if v.lower() in ('yes', 'true', 't', 'y', '1'):
return True
elif v.lower() in ('no', 'false', 'f', 'n', '0'):
return False
else:
raise argparse.ArgumentTypeError('Boolean value expected.')
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument("--input_dir", type=str)
parser.add_argument("--output_dir", type=str)
parser.add_argument("--num_workers", type=int, default=8)
parser.add_argument("--fit_movement", type=arg_boolean, default=True)
parser.add_argument("--use_symlinks", type=arg_boolean, default=True)
parser.add_argument("--subsample_10k", type=arg_boolean, default=True)
parser.add_argument("--trajectory_inpoints", type=int, default=4)
parser.add_argument("--trajectory_cycles", type=int, default=6)
parser.add_argument("--trajectory_duplicates", type=arg_boolean, default=False)
args = parser.parse_args()
return args
def round(v):
return int(np.round(v))
def int_abs(v):
return int(math.fabs(v))
def resize(img, size=TARGET_SIZE,
interpolation=cv2.INTER_CUBIC):
h, w, _ = img.shape
assert h == w
if not isinstance(size, (list, tuple)):
size = (size, size)
return cv2.resize(img, size, interpolation=interpolation)
def scale_radius(orig_radius=ORIGINAL_RADIUS,
orig_size=ORIGINAL_SIZE,
target_size=TARGET_SIZE):
scale = target_size / orig_size
return scale * orig_radius
def scale_center(orig_center=ORIGINAL_CENTER,
orig_size=ORIGINAL_SIZE,
target_size=TARGET_SIZE):
scale = target_size / orig_size
xc, yc = orig_center
return scale * xc, scale * yc
def fit_size_movement(size, trajectory):
min_x, max_x = trajectory[:, 0].min(), trajectory[:, 0].max()
min_y, max_y = trajectory[:, 1].min(), trajectory[:, 1].max()
border_x = max_x - min_x
border_y = max_y - min_y
fit_x = int(border_x) + int_abs(min_x) + size
fit_y = int(border_y) + int_abs(min_y) + size
return fit_x, fit_y
def get_trajectory(center=ORIGINAL_CENTER, radius=ORIGINAL_RADIUS,
inpoints=4, cycles=6, duplicates=True):
xc, yc = center
r = radius
# The 4 points of the square centered in (xc, yc)
p0 = (xc - r, yc)
p1 = (xc, yc - r)
p2 = (xc + r, yc)
p3 = (xc, yc + r)
# The end points of the paths
points = [p0, p1, p2, p3, p0]
final_points = []
alphas = np.linspace(1, 0, 2 + inpoints)
if duplicates is False:
alphas = alphas[1:]
# Interpolate points in each path
for p in range(len(points) - 1):
x_start, y_start = points[p]
x_end, y_end = points[p+1]
final_points += [(a * x_start + (1 - a) * x_end,
a * y_start + (1 - a) * y_end)
for a in alphas]
# Replicate the path for the number of cycles
final_points = np.array(final_points * cycles)
return final_points
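# A minimal sketch (not part of the original script) of what get_trajectory
# produces with the module defaults ORIGINAL_CENTER=(10, 0) and ORIGINAL_RADIUS=10:
# the path traces the square (0, 0) -> (10, -10) -> (20, 0) -> (10, 10) -> (0, 0).
def _demo_trajectory():
    path = get_trajectory(inpoints=1, cycles=1, duplicates=False)
    # 4 edges x (1 interpolated point + 1 endpoint) = 8 points per cycle
    assert path.shape == (8, 2)
    return path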
def translate_image(img, path, crop_border=True):
h, w, c = img.shape
n = path.shape[0]
x_max_offset = path[:, 0].max()
x_min_offset = path[:, 0].min()
border_x = x_max_offset - x_min_offset
y_max_offset = path[:, 1].max()
y_min_offset = path[:, 1].min()
border_y = y_max_offset - y_min_offset
new_imgs = np.zeros((n, round(h + border_y),
round(w + border_x), c),
dtype=img.dtype)
for i, (x, y) in enumerate(path):
xs = x - x_min_offset
ys = y - y_min_offset
# Compute the affine transformation matrix
M =
|
np.float32([[1, 0, xs], [0, 1, ys]])
|
numpy.float32
|
# extract_data.py
import numpy as np
import matplotlib.pyplot as plt
import astropy.constants as ac
import astropy.units as au
import pyathena as pa
from pyathena.classic import cc_arr
from ..load_sim import LoadSim
class ExtractData:
@LoadSim.Decorators.check_pickle
def read_VFF_Peters17(self, num, savdir=None, force_override=False):
r = dict()
ds = self.load_vtk(num, load_method='pyathena_classic')
x1d, y1d, z1d = cc_arr(ds.domain)
z, _, _ = np.meshgrid(z1d, y1d, x1d, indexing='ij')
idx_z = np.abs(z) < 100.0
tot = idx_z.sum()
T = ds.read_all_data('temperature')
xn = ds.read_all_data('xn')
idx_c = (T[idx_z] <= 300.0)
idx_wi = ((T[idx_z] > 300.0) & (T[idx_z] <= 8.0e3) & (xn[idx_z] < 0.1))
idx_wn = ((T[idx_z] > 300.0) & (T[idx_z] <= 8.0e3) & (xn[idx_z] > 0.1))
idx_whn = ((T[idx_z] > 8000.0) & (T[idx_z] < 5.0e5) & (xn[idx_z] > 0.1))
idx_whi = ((T[idx_z] > 8000.0) & (T[idx_z] < 5.0e5) & (xn[idx_z] < 0.1))
idx_h = (T[idx_z] > 5e5)
r['time'] = ds.domain['time']
r['f_c'] = idx_c.sum()/tot
r['f_wi'] = idx_wi.sum()/tot
r['f_wn'] = idx_wn.sum()/tot
r['f_whi'] = idx_whi.sum()/tot
r['f_whn'] = idx_whn.sum()/tot
r['f_h'] = idx_h.sum()/tot
return r
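# Note (illustrative, not part of the original analysis): each filling fraction
# above is simply the count of cells in a given phase divided by the number of
# cells in the |z| < 100 pc slab, e.g. f_c = (cells with T <= 300 K) / (all slab cells).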
@LoadSim.Decorators.check_pickle
def read_EM_pdf(self, num, savdir=None, force_override=False):
ds = self.load_vtk(num)
nH = ds.get_field(field='density')
xn = ds.get_field(field='specific_scalar[0]')
nesq = ((1.0 - xn)*nH)**2
z2 = 200.0
bins = np.linspace(-8, 5, 100)
dz = ds.domain['dx'][0]
id0 = 0
id1 = ds.domain['Nx'][2] // 2
# Calculate EM integrated from z = 200pc
id2 = id1 + int(z2/dz)
EM0 = nesq[id0:,:,:].sum(axis=0)*dz
EM1 = nesq[id1:,:,:].sum(axis=0)*dz
EM2 = nesq[id2:,:,:].sum(axis=0)*dz
h0, b0, _ = plt.hist(np.log10(EM0.flatten()), bins=bins, histtype='step', color='C0');
h1, b1, _ = plt.hist(np.log10(EM1.flatten()), bins=bins, histtype='step', color='C1');
h2, b2, _ = plt.hist(np.log10(EM2.flatten()), bins=bins, histtype='step', color='C2');
return dict(EM0=EM0, EM1=EM1, EM2=EM2, bins=bins, h0=h0, h1=h1, h2=h2)
@LoadSim.Decorators.check_pickle
def read_phot_dust_U_pdf(self, num, z0=200.0,
ifreq_ion=0, savdir=None, force_override=False):
s = self
sigmapi = s.par['radps']['sigma_ph[0]']
sigmad = s.par['radps']['kappa_dust[0]']*s.u.density.value
c = ac.c.cgs.value
# mean energy of ionizing photons
hnu = s.par['radps']['hnu[{0:1d}]'.format(ifreq_ion)]*((1.0*au.eV).cgs.value)
ds = s.load_vtk(num=num, load_method='pyathena_classic')
#print(ds.domain)
bins_nH = np.linspace(-5, 4, 61)
bins_U = np.linspace(-6, 1, 61)
bins_z = np.linspace(ds.domain['left_edge'][2], ds.domain['right_edge'][2], ds.domain['Nx'][2]//16 + 1)
#print(bins_nH, bins_U, bins_z)
nH = ds.read_all_data('density').flatten()
Erad0 = ds.read_all_data('Erad0').flatten() # cgs unit
xn = ds.read_all_data('xn').flatten()
T = ds.read_all_data('temperature').flatten()
ne = nH*(1.0 - xn)
nHI = nH*xn
Erad0ph = Erad0/hnu # photon number density
U = Erad0ph/nH # ionization parameter
x1d, y1d, z1d = pa.classic.cc_arr(ds.domain)
z, _, _ = np.meshgrid(z1d, y1d, x1d, indexing='ij')
# Warm phase indices
w = ((T > 5050.0) & (T < 2.0e4) & (xn < 0.1))
dvol = ds.domain['dx'].prod()*ac.pc.cgs.value**3
zw = z.flatten()[w]
Uw = U[w]
nesqw = (ne**2)[w]
nHw = nH[w]
# Tw = T[w]
# Local photoionization/dust absorption rate in a cell
ph_rate = nHI[w]*c*sigmapi*Erad0ph[w]*dvol
di_rate = nH[w]*c*sigmad*Erad0ph[w]*dvol
# print('phrate, dirate',ph_rate.sum(),di_rate.sum())
q = di_rate/(ph_rate + di_rate)
qma = np.ma.masked_invalid(q)
ma = qma.mask
wlz = np.abs(zw) < z0
bins = np.linspace(-5, 3, 80)
# nH_warm PDF weighted by ph_rate or di_rate (all, at |z| < 200pc, above 200 pc)
hdi, bedi, _ = plt.hist(np.log10(nHw[~ma]), bins=bins_nH, weights=di_rate[~ma], alpha=0.3, color='C0');
hph, beph, _ = plt.hist(
|
np.log10(nHw[~ma])
|
numpy.log10
|
import os
import logging
import numpy as np
from numpy.testing import assert_allclose
import trackpy as tp
from trackpy.artificial import (draw_features_brightfield,
gen_nonoverlapping_locations,
gen_connected_locations)
from trackpy.tests.common import sort_positions, StrictTestCase
from trackpy.feature import locate
from trackpy.locate_functions.brightfield_ring import locate_brightfield_ring
from trackpy.refine.brightfield_ring import (_min_edge, _fit_circle)
path, _ = os.path.split(os.path.abspath(__file__))
# we need to use a low value for min_percentile because the artificial
# edge is very sharp
MIN_PERC = 0.5
def draw_artificial_image(shape, pos, radius, noise_level, dip=False,
traditional=False, **kwargs):
radius = tp.utils.validate_tuple(radius, len(shape))
# tp.locate ignores a margin of size radius, take 1 px more to be safe
diameter = tuple([(r * 2) + 1 for r in radius])
size = [d / 2 for d in diameter]
cols = ['x', 'y', 'z'][:len(shape)][::-1]
image = draw_features_brightfield(shape, pos, size, noise_level, dip=dip)
if not traditional:
kwargs.update({'min_percentile': MIN_PERC})
result = locate_brightfield_ring(image, diameter, **kwargs)
else:
result = locate(image, diameter, **kwargs)
# For some reason, sorting the DataFrame gives wrong orders in some cases
result = np.sort(result[cols].astype(float).values, axis=0)
expected = np.sort(pos, axis=0)
return result, expected
def artificial_image(shape, count, radius, noise_level, dip=False,
traditional=False, **kwargs):
radius = tp.utils.validate_tuple(radius, len(shape))
margin = tuple([r + 1 for r in radius])
separation = tuple([2.5*r for r in radius])
pos = gen_nonoverlapping_locations(shape, count, separation, margin)
return draw_artificial_image(shape, pos, radius, noise_level, dip,
traditional, **kwargs)
def artificial_cluster(shape, count, radius, noise_level, dip=False,
traditional=False, **kwargs):
radius = tp.utils.validate_tuple(radius, len(shape))
margin = tuple([r + 1 for r in radius])
separation = tuple([1.4*r for r in radius])
pos = gen_connected_locations(shape, count, separation, margin)
return draw_artificial_image(shape, pos, radius, noise_level, dip,
traditional, **kwargs)
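# A minimal usage sketch (not part of the original test module): draw a noiseless
# image containing 5 artificial bright-field rings of radius 15 and locate them.
def _demo_artificial_image():
    result, expected = artificial_image((300, 300), 5, 15, noise_level=0)
    return result, expected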
def generate_random_circle(r, x, y, num_samples=500, noise=0):
np.random.seed(1)
theta = np.random.rand(num_samples) * (2 * np.pi)
if noise > 0:
mini = r-noise
maxi = r+noise
r_rand = np.random.rand(num_samples) * (maxi-mini) + mini
else:
r_rand = r
xc = r_rand * np.cos(theta) + x
yc = r_rand * np.sin(theta) + y
return np.squeeze(
|
np.dstack((xc, yc))
|
numpy.dstack
|
import numpy as np
class StraightLineModel(object):
def __init__(self, x, y, y_err, model):
"""
We store the data as attributes of the object so we don't have to
keep passing it in to the methods that compute the probabilities.
"""
self.x = np.asarray(x)
self.y = np.asarray(y)
self.y_err = np.asarray(y_err)
self.model = model
def ln_likelihood(self, pars):
"""
We don't need to pass in the data because we can access it from the
attributes. This is basically the same as the weighted squared
deviation function, but includes the constant normalizations for the
Gaussian likelihood.
"""
N = len(self.y)
dy = self.y - self.model(pars, self.x)
ivar = 1 / self.y_err**2 # inverse-variance
return -0.5 * (N*np.log(2*np.pi) + np.sum(2*np.log(self.y_err)) + np.sum(dy**2 * ivar))
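# The expression above is the standard Gaussian log-likelihood written with the
# constant terms pulled out of the sum:
#   ln L = -0.5 * sum_i [ ln(2*pi*sigma_i^2) + (y_i - model(pars, x_i))^2 / sigma_i^2 ]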
def ln_prior(self, pars):
"""
The prior only depends on the parameters, so we don't need to touch
the data at all. We're going to implement a flat (uniform) prior
over the ranges:
a : [-5, 5]
b : [-0.5, 0.5]
"""
a, b = pars # unpack parameters
ln_prior_val = 0. # we'll add to this
if b < -0.5 or b > 0.5:
return -np.inf
else:
ln_prior_val += np.log(1E-2) # normalization, log(1/100)
if a < -5 or a > 5.:
return -np.inf
else:
ln_prior_val += np.log(1E-2) # normalization, log(1/100)
return ln_prior_val
def ln_posterior(self, pars):
"""
Up to a normalization constant, the log of the posterior pdf is just
the sum of the log likelihood plus the log prior.
"""
lnp = self.ln_prior(pars)
if np.isinf(lnp): # short-circuit if the prior is infinite (don't bother computing likelihood)
return lnp
lnL = self.ln_likelihood(pars)
lnprob = lnp + lnL
if
|
np.isnan(lnprob)
|
numpy.isnan
|
import numpy as np
from scipy import ndimage
def getCmpFaces(erpPatch, H=None, W=None):
'''
<h3>EquiRectangular Projection to CubeMap Projection Function</h3>
<b>Parameters:</b><ul>
<li><b>erpPatch :</b> Equirectangular Projected Image (OpenCV BGR Format)</li>
<li><b>H :</b> Height of CMP Faces (default : patch.shape[0] // 2)</li>
<li><b>W :</b> Width of CMP Faces (default : patch.shape[1] // 4)</b></li></ul>
<b>Returns:</b><ul>
<li><b>cmpFaces :</b> CubeMap Projected Faces:
front, right, back, left, top, bottom
with shape : [6, H, W, 3]</li></ul>
'''
V = [0, 0, 0, 0, -np.pi/2, np.pi/2]
U = [0, np.pi/2, np.pi, -np.pi/2, 0, 0]
fov = np.pi/2
if H is None:
H = erpPatch.shape[0] // 2
if W is None:
W = erpPatch.shape[1] // 4
cmpFaces = np.ndarray((6, H, W, 3))
for i, (u, v) in enumerate(zip(U, V)):
cmpFaces[i] = eqToPers(erpPatch, fov, u, v, H, W)
return cmpFaces
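# A minimal usage sketch (not part of the original module): build the six CMP
# faces from a synthetic equirectangular patch; it relies on the module's
# eqToPers helper defined below.
def _demo_cmp_faces():
    erp = np.zeros((256, 512, 3), dtype=np.float32)
    faces = getCmpFaces(erp)  # shape (6, 128, 128, 3) with the default H and W
    return faces.shape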
def genXYZ(fov, u, v, outH, outW):
''' Source : https://github.com/pepepor123/equirectangular-to-cubemap '''
out = np.ones((outH, outW, 3), np.float32)
xRng = np.linspace(-np.tan(fov / 2), np.tan(fov / 2),
num=outW, dtype=np.float32)
yRng = np.linspace(-np.tan(fov / 2), np.tan(fov / 2),
num=outH, dtype=np.float32)
out[:, :, :2] = np.stack(np.meshgrid(xRng, -yRng), -1)
Rx = np.array([[1, 0, 0], [0, np.cos(v), -
|
np.sin(v)
|
numpy.sin
|
'''
Created on Dec 9, 2016
@author: husensofteng
'''
import sys, os
import pandas as pd
import statsmodels.api as sm
import statsmodels.formula.api as smf
import numpy as np
import pickle
import json
from pybedtools import BedTool, set_tempdir, cleanup
from scipy import stats
from statsmodels.sandbox.stats.multicomp import multipletests
from scipy.stats import binom, hypergeom
from scipy.stats import binom
import shutil
from shutil import copyfile
from multiprocessing import Pool
from itertools import product
import csv
import math
import glob
#from score_motifs_tissuepertable import open_connection, close_connection
#import matplotlib.backends.backend_pdf
#matplotlib.style.use('ggplot')
#temp_dir = 'tmp_pybedtoos'
#if not os.path.exists(temp_dir):
# os.mkdir(temp_dir)
#use scratch
#temp_dir = tmp_dir
def unify_muts(annotated_mutations_input_file, output_extension, filter_mut_motifs=True, filter_cond = "", operation_on_unify='mean'):
annotated_mutations_grouped_file=annotated_mutations_input_file + output_extension + "_groupedbymut"
annotated_mutations_grouped_output_file=annotated_mutations_grouped_file+"withmotifinfo"
fsep = '\t'
vsep = '#'
if not os.path.exists(annotated_mutations_grouped_output_file):
awk_stmt = """awk 'BEGIN{{FS=OFS="{fsep}"}}{{{filter_cond} {{gsub("X","23",$1); gsub("Y","24",$1);gsub("MT","25",$1);gsub("M","25",$1);gsub("chr","",$1); $2=($2/1)*1; $3=($3/1)*1;
print $1,$2,$3,$4,$5,$6,$7,$8,$9,$10+$11,$10+$11"{vsep}"$10"{vsep}"$11"{vsep}"$12"{vsep}"$13"{vsep}"$14"{vsep}"$15"{vsep}"$16"{vsep}"$17"{vsep}"$18"{vsep}"$19"{vsep}"$20"{vsep}"$21"{vsep}"$22"{vsep}"$23"{vsep}"$24"{vsep}"$25"{vsep}"$26"{vsep}"$27"{vsep}"$28"{vsep}"$29"{vsep}"$30"{vsep}"$31"{vsep}"$32}}}}' {infile} |
sort -k1,1n -k2,2n -k3,3n -k4 -k5 -k6 -k7 -k8 -k9 |
groupBy -g 1-9 -c 10,11 -o {operation_on_unify},collapse > {grouped_file}""".format(
fsep=fsep, vsep=vsep, filter_cond=filter_cond, infile=annotated_mutations_input_file, operation_on_unify=operation_on_unify, grouped_file=annotated_mutations_grouped_file)
#print(awk_stmt)
os.system(awk_stmt)
#print("groupBy mutation is done")
return annotated_mutations_grouped_file
def get_max_motif_in_grouped_muts(annotated_mutations_grouped_file):
annotated_mutations_grouped_output_file=annotated_mutations_grouped_file+"withmotifinfo"
fsep = '\t'
vsep = '#'
if os.path.exists(annotated_mutations_grouped_output_file):
return annotated_mutations_grouped_output_file
#print("Reading the grouped mutations file")
score_index_in_grouped_file = 9
motif_index_in_grouped_file = 10
score_index_in_motif_info = 0
ref_alt_index_in_motif_info = 4
mutpos_index_in_motif_info = 5
motifname_index_in_motif_info = 9
with open(annotated_mutations_grouped_file, 'r') as grouped_file, open(annotated_mutations_grouped_output_file, 'w') as annotated_mutations_grouped_outfile_bed12:
l = grouped_file.readline()
while l:
sl = l.strip().split(fsep)
max_mut_score = float(sl[score_index_in_grouped_file])
motifs_info = [s.strip().split(vsep) for s in sl[motif_index_in_grouped_file].split(',')]
for motif_info in motifs_info:
mut_motif_score = float(motif_info[score_index_in_motif_info])
if mut_motif_score>=max_mut_score:#append the max score to the chromatin status of the overlapping motif
annotated_mutations_grouped_outfile_bed12.write(
l.strip() + '\t' + motif_info[ref_alt_index_in_motif_info]+ '\t' +
motif_info[mutpos_index_in_motif_info]+ '\t' +
motif_info[motifname_index_in_motif_info] + '\t' +
vsep.join(motif_info) + '\n')
break
l = grouped_file.readline()
return annotated_mutations_grouped_output_file
def get_scores(annotated_mutations_grouped_file):
fsep = '\t'
vsep = '#'
overall_scores_file = annotated_mutations_grouped_file+"_overallscores"
overall_scores_chromatin_status_file = annotated_mutations_grouped_file+"_chromatinscores"
overall_scores_phenotype_file = annotated_mutations_grouped_file+"_phenotypescores"
mut_scores_overall_combined_file = annotated_mutations_grouped_file+"_combined"
if (os.path.exists(overall_scores_file) and os.path.exists(overall_scores_chromatin_status_file) and os.path.exists(overall_scores_phenotype_file)
and os.path.exists(mut_scores_overall_combined_file)):
print('Loading scores')
with open(overall_scores_file, 'r') as gp:
mut_scores_overall = pickle.load(gp)
with open(overall_scores_chromatin_status_file, 'r') as gp:
mut_scores_overall_per_chromatin_status = pickle.load(gp)
with open(overall_scores_phenotype_file, 'r') as gp:
mut_scores_overall_per_phenotype = pickle.load(gp)
with open(mut_scores_overall_combined_file, 'r') as gp:
mut_scores_overall_combined = pickle.load(gp)
print(mut_scores_overall_per_chromatin_status.keys())
print(mut_scores_overall_per_phenotype.keys())
print(mut_scores_overall_combined.keys())
return mut_scores_overall_combined, mut_scores_overall, mut_scores_overall_per_chromatin_status, mut_scores_overall_per_phenotype
print("Reading the grouped file")
motif_index_in_grouped_file = 11
chromatin_status_index_in_motif_info = 13
phenotype_index_in_grouped_file = 5
score_index_in_motif_info = 0
mut_scores_overall = []
mut_scores_overall_combined = {'scores':[], 'chromatin_status': [], 'cancer_types':[]}
mut_scores_overall_per_chromatin_status = {}
mut_scores_overall_per_phenotype = {}
with open(annotated_mutations_grouped_file) as grouped_file:
l = grouped_file.readline()
while l:
sl = l.strip().split(fsep)
motif_info = sl[motif_index_in_grouped_file].split(vsep)
max_mut_score = float(motif_info[score_index_in_motif_info])
mut_scores_overall.append(max_mut_score)
phenotype = sl[phenotype_index_in_grouped_file]
try:
mut_scores_overall_per_phenotype[phenotype].append(max_mut_score)
except KeyError:
mut_scores_overall_per_phenotype[phenotype] = [max_mut_score]
chromatin_status = motif_info[chromatin_status_index_in_motif_info]
try:
mut_scores_overall_per_chromatin_status[chromatin_status].append(max_mut_score)
except KeyError:
mut_scores_overall_per_chromatin_status[chromatin_status] = [max_mut_score]
mut_scores_overall_combined['scores'].append(max_mut_score)
mut_scores_overall_combined['chromatin_status'].append(chromatin_status)
mut_scores_overall_combined['cancer_types'].append(phenotype)
l = grouped_file.readline()
#print(mut_scores_overall)
with open(overall_scores_file, 'w') as gp:
pickle.dump(mut_scores_overall, gp)
with open(overall_scores_chromatin_status_file, 'w') as gp:
pickle.dump(mut_scores_overall_per_chromatin_status, gp)
with open(overall_scores_phenotype_file, 'w') as gp:
pickle.dump(mut_scores_overall_per_phenotype, gp)
with open(mut_scores_overall_combined_file, 'w') as gp:
pickle.dump(mut_scores_overall_combined, gp)
return mut_scores_overall_combined, mut_scores_overall, mut_scores_overall_per_chromatin_status, mut_scores_overall_per_phenotype
def get_mean_and_sd_from_file(simulated_input_file, scores_index = 9, index_mut_type = 6, report_overlall_score = False):
if os.path.exists(simulated_input_file+"_statsdict"):
with open(simulated_input_file+"_statsdict", 'r') as simulated_infile:
d = simulated_infile.readline().strip()
return json.loads(d)
scores_SNPs = []
scores_MNPs = []
scores = []
with open(simulated_input_file, 'r') as simulated_infile:
l = simulated_infile.readline()
while l:
sl = l.strip().split('\t')
scores.append(float(sl[scores_index]))
if not report_overlall_score:
if sl[index_mut_type]=="SNP":
scores_SNPs.append(float(sl[scores_index]))
else:
scores_MNPs.append(float(sl[scores_index]))
l = simulated_infile.readline()
stats_dict = {'scores':scores,'avg': np.mean(scores), 'std': np.std(scores)}
if not report_overlall_score:
if len(scores_SNPs)>0:
stats_dict['avgSNPs'] = np.mean(scores_SNPs)
stats_dict['stdSNPs'] = np.std(scores_SNPs)
if len(scores_MNPs)>0:
stats_dict['avgMNPs'] = np.mean(scores_MNPs)
stats_dict['stdMNPs'] = np.std(scores_MNPs)
with open(simulated_input_file+"_statsdict", 'w') as simulated_outfile:
json.dump(stats_dict, simulated_outfile)
return stats_dict
def assess_stat_muts(muts_input_file, simulated_input_file, observed_output_file, observed_onlysig_output_file, score_index_observed_elements=9, score_index_sim_elements=9, index_mut_type = 6, mut_sig_threshold=0.05, stats_dict = {}):
if os.path.exists(observed_output_file) and os.path.exists(observed_onlysig_output_file):
return observed_output_file, observed_onlysig_output_file, 'NA'
if len(stats_dict.keys())==0:
stats_dict = get_mean_and_sd_from_file(simulated_input_file, scores_index=score_index_sim_elements, index_mut_type = index_mut_type)
print("getting pval for muts in {} using {}: {}".format(muts_input_file, simulated_input_file, stats_dict))
n_sig = 0
with open(muts_input_file, 'r') as observed_infile, open(observed_output_file, 'w') as observed_outfile, open(observed_onlysig_output_file, 'w') as observed_onlysig_outfile:
l = observed_infile.readline()
p_values = []
lines_to_write = []
while l:
sl = l.strip().split('\t')
p_value = 0.0
if 'avgSNPs' in stats_dict.keys() and 'avgMNPs' in stats_dict.keys():
if sl[index_mut_type]=='SNP':
p_value = get_pval(float(sl[score_index_observed_elements]), stats_dict['avgSNPs'], stats_dict['stdSNPs'])
else:
p_value = get_pval(float(sl[score_index_observed_elements]), stats_dict['avgMNPs'], stats_dict['stdMNPs'])
else:
p_value = get_pval(float(sl[score_index_observed_elements]), stats_dict['avg'], stats_dict['std'])
p_values.append(p_value)
lines_to_write.append(l.strip())
l = observed_infile.readline()
p_values_adjusted = adjust_pvales(p_values)
for i,lo in enumerate(lines_to_write):
observed_outfile.write(lo + '\t' + str(p_values[i]) + '\t' + str(p_values_adjusted[i]) + '\n')
if p_values[i]<mut_sig_threshold:
observed_onlysig_outfile.write(lo + '\t' + str(p_values[i]) + '\t' + str(p_values_adjusted[i]) + '\n')
#observed_onlysig_outfile.write(l.strip() + '\t' + str(p_value) + '\n')
return observed_output_file, observed_onlysig_output_file, n_sig
def merge_muts(muts_input_file, merged_muts_output_ext, filter_mut_motifs=False, filter_col_index=15, filter_value=0.05, mut_score_index=9, motifs_col_index =10, ref_alt_col_index=11, mutpos_col_index=12, motifname_col_index=13, motif_col_index=14, distance_to_merge=20):
merged_muts_output_file = muts_input_file + merged_muts_output_ext
fsep = '\t'
vsep = '#'
msep = '*'
MotifInfo, matching_motifs_sep, MaxMotif_sep = 'MotifInfo', 'MatchingMotifs', 'MaxMotif'
if os.path.exists(merged_muts_output_file):
return merged_muts_output_file
#selected_regions_file = 'analysis/encode_merge_cdsAndSpliceSitesSubtracted.bed3'
cond = ""
if filter_mut_motifs:
cond = 'if({}<{})'.format(filter_col_index, filter_value)
''''cols info:
awk print: 1 chr, 2 start, 3 end, 4 score, 5 phenotype, 6 ref_alt, 7 mutpos, 8 motifname,
10 donorID, 11 mutinfo*motifsinfo*maxmotifinfo
merge (-c -o) mean(score), count(mutinfo), count(donorID), distinct: phenotype,
collapse: ref_alt, mutpos, motifname, score, phenotype, distinct(donorID), donorID, mut_motifs_info'''
awk_stmt = """awk 'BEGIN{{FS=OFS="{fsep}"}}{{{cond}{{ gsub(",", "MotifInfo"); print $1,$2,$3,$10,$6,$12,$13,$14,$9,$1"{vsep}"$2"{vsep}"$3"{vsep}"$4"{vsep}"$5"{vsep}"$6"{vsep}"$7"{vsep}"$8"{vsep}"$9"{vsep}"$10"{matching_motifs_sep}"$11"{MaxMotif_sep}"$15}}}}' {muts_input_file} | sort -k1,1n -k2,2n -k3,3n | mergeBed -i stdin -d {distance_to_merge} -c 4,10,9,5,6,7,8,4,5,9,9,10 -o mean,count_distinct,count_distinct,distinct,collapse,collapse,collapse,collapse,collapse,distinct,collapse,collapse > {merged_muts_output_file}""".format(**locals()) # | awk 'BEGIN{{FS=OFS="\t"}}{{if($2!=$3){{$2=$2+1; $3=$3-1}}; if($2>$3){{$2=$2-1; $3=$3+1}}; print}}
#awk_stmt = """awk 'BEGIN{{FS=OFS="{fsep}"}}{{{cond}{{ gsub(",", "MotifInfo"); print $1,$2,$3,$10,$6,$12,$13,$14,$9,$1"{vsep}"$2"{vsep}"$3"{vsep}"$4"{vsep}"$5"{vsep}"$6"{vsep}"$7"{vsep}"$8"{vsep}"$9"{vsep}"$10"{matching_motifs_sep}"$11"{MaxMotif_sep}"$15}}}}' {muts_input_file} | sort -k1,1n -k2,2n -k3,3n | intersectBed -split -wo -a stdin -b {selected_regions_file} | sort -k11,11n -k12,12n -k13,13n | groupBy -g 11,12,13 -c 4,10,9,5,6,7,8,4,5,9,9,10 -o sum,count_distinct,count_distinct,distinct,collapse,collapse,collapse,collapse,collapse,distinct,collapse,collapse > {merged_muts_output_file}""".format(**locals())
#print(awk_stmt)
os.system(awk_stmt)
#print("merge mutations process is done")
return merged_muts_output_file
def get_chr_lengths(chr_lengths_file):
chr_lengths = {}
with open(chr_lengths_file, 'r') as chr_min_max_ifile:
lines = chr_min_max_ifile.readlines()
chr_names = range(1,26)
for l in lines:
if not l.startswith('chr') and l!="" and not l.startswith('//') and not l.startswith('#'):
sl = l.strip().split('\t')
chr_name = sl[0].replace('X', '23').replace('Y','24').replace('MT','25').replace('M','25')
if int(chr_name) in chr_names:
chr_lengths[int(chr_name)] = int(sl[1])
return chr_lengths
def sum_fscore_motif_breaking_score(feature,fscore_index, motif_breaking_score_index):
if(feature[fscore_index] != '.'):
sums = float(feature[fscore_index]) + float(feature[motif_breaking_score_index])
feature[fscore_index] = str(sums)
return feature
def empirical_pval(sl, stats_dict_scores):
p_values=[]
for score in sl:
scores_higher_than_observed = [i for i in stats_dict_scores if i >= score]
p_value= len(scores_higher_than_observed)/(len(stats_dict_scores))
if p_value==0.0:
p_value=1/103
p_values.append(p_value)
return p_values
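# A minimal sketch (not part of the original module): the empirical p-value of an
# observed score is the fraction of simulated scores that are >= it, floored at
# 1/103 so no element gets a p-value of exactly zero.
def _demo_empirical_pval():
    simulated_scores = [0.1, 0.2, 0.5, 0.9, 1.5]
    return empirical_pval([1.0], simulated_scores)  # one of five scores is >= 1.0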
def empirical_pval_global(dict_lines_observed_split, stats_dict_scores, pval_file):
with open(pval_file, 'a') as pval_ifile:
for index in dict_lines_observed_split:
score = dict_lines_observed_split[index]
scores_higher_than_observed = [i for i in stats_dict_scores if i >= score]
p_value= len(scores_higher_than_observed)/(len(stats_dict_scores))
if p_value==0.0:
p_value=1/103
pval_ifile.write(str(index) + '\t' + str(p_value)+'\n')
return pval_file
def empirical_pval_local_window(dict_lines_observed_split, pval_file):
# dict_p_values={}
with open(pval_file, 'a') as pval_ifile:
for index in dict_lines_observed_split:
simulated_score_vec=dict_lines_observed_split[index][1]
scores_len=len(simulated_score_vec)
element_score = float(dict_lines_observed_split[index][0])
scores_higher_than_observed = [i for i in simulated_score_vec if i >= element_score]
p_value= len(scores_higher_than_observed)/scores_len
if p_value==0.0:
p_value=1/103
pval_ifile.write(str(index) + '\t' + str(p_value)+ '\n')
return pval_file
def split_dict_equally(input_dict, chunks=2):
# prep with empty dicts
return_list = [dict() for idx in range(chunks)]
idx = 0
for k,v in input_dict.items():
return_list[idx][k] = v
if idx < chunks-1: # indexes start at 0
idx += 1
else:
idx = 0
return return_list
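# A minimal sketch (not part of the original module): keys are dealt out
# round-robin, so chunk sizes differ by at most one.
def _demo_split_dict_equally():
    chunks = split_dict_equally({'a': 1, 'b': 2, 'c': 3}, chunks=2)
    return [len(c) for c in chunks]  # -> [2, 1]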
def assess_stat_elements_local_domain(observed_input_file, simulated_input_file, merged_elements_statspvalues, merged_elements_statspvaluesonlysig,
chr_lengths_file, local_domain_window=25000,
merged_mut_sig_threshold = 0.05, score_index_observed_elements=4, score_index_sim_elements=4, p_value_on_score=False):
if os.path.exists(merged_elements_statspvalues) and os.path.exists(merged_elements_statspvaluesonlysig):
return merged_elements_statspvalues, merged_elements_statspvaluesonlysig, 'NA'
simulated_input_file_sort=simulated_input_file+'_sort'
if not os.path.exists(simulated_input_file_sort):
os.system("""sort -k1,1n -k2,2n {} > {}""".format(simulated_input_file,simulated_input_file_sort))
#extend elements size
"replace chr, X, Y, add Line Number to use as window ID and sort by chr,start"
observed_input_file_temp_file = observed_input_file+"_temp"
cmd = """awk 'BEGIN{{OFS="\t"}}{{gsub("chr","",$1); gsub("X", 23, $1); gsub("Y", 24, $1); print $1,$2,$3,NR,$4}}' {} | bedtools slop -g /proj/snic2020-16-50/nobackup/pancananalysis/pancan12Feb2020/cancer_datafiles/chr_order_hg19.txt -b {}| sort -k1,1n -k2,2n > {}""".format(
observed_input_file, local_domain_window,observed_input_file_temp_file)
os.system(cmd)
os.system("""awk '{{print $0>"{}""_"$1".bed"}}' {}""".format(
observed_input_file_temp_file, observed_input_file_temp_file))
print(glob.glob(observed_input_file_temp_file+'*.bed'))
pval_files=[]
for observed_input_file_temp_file_per_chr in glob.glob(observed_input_file_temp_file+'_*.bed'):
print(observed_input_file_temp_file_per_chr)
#chr_lengths = get_chr_lengths(chr_lengths_file)
dict_lines_observed = {}
line_number = 1
with open(observed_input_file_temp_file_per_chr, 'r') as observed_infile: #, open(observed_input_file_temp_file, 'w') as observed_input_file_temp_ofile:
l = observed_infile.readline().strip().split('\t')
while l and len(l)>2:
dict_lines_observed[int(float(l[3]))] = [l[4],[]]
# extended_element_start = (int(l[1])-local_domain_window)
# extended_element_end = int(l[2])+local_domain_window
# if extended_element_start<0:
# extended_element_start = 0
# extended_element_end += 0 - (int(l[1])-local_domain_window)
# if extended_element_end>chr_lengths[int(l[0])]:
# extended_element_end = chr_lengths[int(l[0])]
# extended_element_start -= (int(l[2])+local_domain_window) - chr_lengths[int(l[0])]
#
# observed_input_file_temp_ofile.write(l[0] + '\t' + str(extended_element_start) + '\t' + str(extended_element_end) + '\t' + str(line_number) + '\n')
line_number+=1
l = observed_infile.readline().strip().split('\t')
print('observed_input_file: ', observed_input_file_temp_file_per_chr)
observed_input_file_temp_file_per_chr_sort=observed_input_file_temp_file_per_chr+'_sort'
if not os.path.exists(observed_input_file_temp_file_per_chr_sort):
os.system("""sort -k1,1n -k2,2n {} > {}""".format(observed_input_file_temp_file_per_chr, observed_input_file_temp_file_per_chr_sort))
observed_input_file_obj = BedTool(observed_input_file_temp_file_per_chr_sort)
simulated_input_file_temp = simulated_input_file+"_temp"
observed_input_file_obj.map(BedTool(simulated_input_file_sort), c=4, o=['collapse'], g='/proj/snic2020-16-50/nobackup/pancananalysis/pancan12Feb2020/cancer_datafiles/chr_order_hg19.txt').saveas(simulated_input_file_temp)
with open(simulated_input_file_temp, 'r') as simulated_input_file_temp_ifile:
l = simulated_input_file_temp_ifile.readline().strip().split('\t')
while l and len(l)>1:
sim_scores = []
for x in l[5].split(','):
try:
sim_scores.append(float(x))
except ValueError:
sim_scores.append(0.0)
dict_lines_observed[int(float(l[3]))][1].extend(sim_scores)
l = simulated_input_file_temp_ifile.readline().strip().split('\t')
#split dictionery into chunks
dict_lines_observed_chunks=split_dict_equally(dict_lines_observed, 100)
pval_file=observed_input_file_temp_file_per_chr+'_elem_pval_local'+str(local_domain_window)
if os.path.exists(pval_file):
os.remove(pval_file)
#print('p-value on score local')
pm = Pool(15)
pm.starmap(empirical_pval_local_window, product(dict_lines_observed_chunks, [pval_file]))
pm.close()
pm.join()
os.remove(simulated_input_file_temp)
os.remove(observed_input_file_temp_file_per_chr_sort)
pval_files.append(pval_file)
combined_pval_file=observed_input_file+'_elem_pval_local'+str(local_domain_window)
dict_pvals={}
p_values=[]
if not os.path.exists(combined_pval_file):
with open(combined_pval_file, 'w') as combined_pval_outfile:
for pval_file in pval_files:
with open(pval_file, 'r') as pval_ifile:
combined_pval_outfile.write(pval_ifile.read())
os.remove(pval_file)
with open(combined_pval_file, 'r') as combined_pval_ifile:
l = combined_pval_ifile.readline().strip().split('\t')
#
while l and len(l)>1:
p_values.append(float(l[1]))
dict_pvals[int(float(l[0]))]=float(l[1])
l = combined_pval_ifile.readline().strip().split('\t')
#l=1
# p_values = []
# pvalues_adjusted = []
# n_sig = 0
# lines = []
# dict_pvals={}
# p_values=pval_df[1]
# with open(pval_file, 'r') as pval_ifile:
# l = pval_ifile.readline().strip().split('\t')
#
# while l and len(l)>1:
# p_values.append(float(l[1]))
# dict_pvals[int(float(l[0]))]=float(l[1])
# l = pval_ifile.readline().strip().split('\t')
pvalues_adjusted = p_values
lambda_factor=np.median(stats.chi2.isf(p_values,1))/stats.chi2.ppf(0.5, 1)
lambda_values_file=observed_input_file+"_lambda_values_local_window_"+str(local_domain_window)+'.txt'
lambda_file=open(lambda_values_file, "w")
lambda_file.write(observed_input_file.split('/')[-1] + '\t' + str(lambda_factor)+'\n')
lambda_file.close()
#if os.path.exists(pval_file):
# os.remove(pval_file)
#l=1
n_sig = 0
with open(observed_input_file, 'r') as observed_infile, open(merged_elements_statspvalues, 'w') as merged_elements_statspvalues_outfile, open(merged_elements_statspvaluesonlysig, 'w') as merged_elements_statspvaluesonlysig_outfile:
l = observed_infile.readline()
l_number=1
while l:
sl = l.strip().split('\t')
#print(dict_pvals[l_number])
#print(l.strip())
merged_elements_statspvalues_outfile.write(l.strip() + '\t' + str(dict_pvals[l_number]) + '\t' + str(dict_pvals[l_number]) + '\n')
if dict_pvals[l_number]<merged_mut_sig_threshold:
n_sig+=1
merged_elements_statspvaluesonlysig_outfile.write(l.strip() + '\t' + str(dict_pvals[l_number]) + '\t' + str(dict_pvals[l_number]) + '\n')
l = observed_infile.readline()
l_number+=1
cleanup()
return merged_elements_statspvalues, merged_elements_statspvaluesonlysig, n_sig
def assess_stat_elements(observed_input_file, simulated_input_file,
merged_elements_statspvalues,
merged_elements_statspvaluesonlysig,
merged_mut_sig_threshold = 0.05,
score_index_observed_elements=4,
score_index_sim_elements=4, p_value_on_score=False):
if os.path.exists(merged_elements_statspvalues) and os.path.exists(merged_elements_statspvaluesonlysig):
return merged_elements_statspvalues, merged_elements_statspvaluesonlysig, 'NA'
stats_dict = get_mean_and_sd_from_file(simulated_input_file, scores_index=score_index_sim_elements, report_overlall_score=True)
#print("getting pval for elements in {} using {}: {}".format(observed_input_file, simulated_input_file))
#extend each merged region by wbp and combine all columns into one; intersect the entire list with the combine simulated list of elemenets, group by the column ID, take average and std from the grouping;
if p_value_on_score:
dict_lines_observed_score = {}
line_number = 1
with open(observed_input_file, 'r') as observed_infile: #, open(observed_input_file_temp_file, 'w') as observed_input_file_temp_ofile:
l = observed_infile.readline().strip().split('\t')
while l and len(l)>3:
dict_lines_observed_score[line_number] = float(l[3])
line_number+=1
l = observed_infile.readline().strip().split('\t')
dict_lines_observed_chunks=split_dict_equally(dict_lines_observed_score, 100)
pval_file=observed_input_file+'_elem_pval'
if os.path.exists(pval_file):
os.remove(pval_file)
pm = Pool(15)
pm.starmap(empirical_pval_global, product(dict_lines_observed_chunks, [stats_dict['scores']], [pval_file]))
pm.close()
pm.join()
dict_pvals={}
p_values=[]
with open(pval_file, 'r') as pval_ifile:
l = pval_ifile.readline().strip().split('\t')
while l and len(l)>1:
p_values.append(float(l[1]))
dict_pvals[int(float(l[0]))]=float(l[1])
l = pval_ifile.readline().strip().split('\t')
pvalues_adjusted = p_values
lambda_factor=np.median(stats.chi2.isf(p_values,1))/stats.chi2.ppf(0.5, 1)
#print(observed_input_file+ "_lambda_values_whole_genome.txt")
lambda_file=open(observed_input_file+ "_lambda_values_whole_genome.txt", "w")
lambda_file.write(observed_input_file.split('/')[-1] + '\t' + str(lambda_factor)+'\n')
lambda_file.close()
lines = []
n_sig = 0
with open(observed_input_file, 'r') as observed_infile, open(merged_elements_statspvalues, 'w') as merged_elements_statspvalues_outfile, open(merged_elements_statspvaluesonlysig, 'w') as merged_elements_statspvaluesonlysig_outfile:
l = observed_infile.readline()
l_number=1
while l:
sl = l.strip().split('\t')
#print(dict_pvals[l_number])
#print(l.strip())
merged_elements_statspvalues_outfile.write(l.strip() + '\t' + str(dict_pvals[l_number]) + '\t' + str(dict_pvals[l_number]) + '\n')
if dict_pvals[l_number]<merged_mut_sig_threshold:
n_sig+=1
merged_elements_statspvaluesonlysig_outfile.write(l.strip() + '\t' + str(dict_pvals[l_number]) + '\t' + str(dict_pvals[l_number]) + '\n')
l = observed_infile.readline()
l_number+=1
if os.path.exists(pval_file):
os.remove(pval_file)
else:
p_values = []
lines = []
pvalues_adjusted = []
n_sig = 0
with open(observed_input_file, 'r') as observed_infile, open(merged_elements_statspvalues, 'w') as merged_elements_statspvalues_outfile, open(merged_elements_statspvaluesonlysig, 'w') as merged_elements_statspvaluesonlysig_outfile:
l = observed_infile.readline()
while l:
sl = l.strip().split('\t')
#get avg and std from simulated merged elements located within w bps of this region
p_value = get_pval(float(sl[score_index_observed_elements]), stats_dict['avg'], stats_dict['std'])
p_values.append(p_value)
lines.append(l.strip())
l = observed_infile.readline()
if len(p_values)>0:
pvalues_adjusted = adjust_pvales(p_values)
for i,l in enumerate(lines):
merged_elements_statspvalues_outfile.write(l.strip() + '\t' + str(p_values[i]) + '\t' + str(pvalues_adjusted[i]) + '\n')
if pvalues_adjusted[i]<merged_mut_sig_threshold:
n_sig+=1
merged_elements_statspvaluesonlysig_outfile.write(l.strip() + '\t' + str(p_values[i]) + '\t' + str(pvalues_adjusted[i]) + '\n')
return merged_elements_statspvalues, merged_elements_statspvaluesonlysig, n_sig
def get_tf_pval(cohort, sig_muts_per_tf_mutation_input_files, p_value_on_score, motif_name_index,
f_score_index, motif_breaking_score_index,
filter_cond, fsep, sig_tfs_file, sig_tfpos_file,
filter_on_signal = True, dnase_index = 24, fantom_index = 25,
num_other_tfs_index = 27):
print('sig_muts_per_tf_mutation_input_files: ', sig_muts_per_tf_mutation_input_files)
observed_mut_motifs = sig_muts_per_tf_mutation_input_files[0]
if os.path.isfile(sig_tfs_file) and os.path.isfile(sig_tfpos_file):
return sig_tfs_file, sig_tfpos_file
observed_mut_motifs_temp = observed_mut_motifs+'_sigTFs_mintfscores_temp'
print('Calculating pval for TFs in ', cohort)
'''filter the mutations by motif-breaking score and gene-expression as given in filter_cond
the mutations in the input file are already checked for significance (or TF binding>0)'''
os.system("""awk 'BEGIN{{FS=OFS="{fsep}"}}{{{filter_cond}{{print ${motif_name_index},${f_score_index}+${mut_break_score_index}}}}}' {observed_mut_motifs} | sort -k1 | groupBy -g 1 -c 2 -o min,mean,stdev,median > {observed_mut_motifs_temp}""".format(
filter_cond=filter_cond, observed_mut_motifs=observed_mut_motifs,
motif_name_index=motif_name_index+1, f_score_index=f_score_index+1,
mut_break_score_index=motif_breaking_score_index+1,
observed_mut_motifs_temp=observed_mut_motifs_temp, fsep=fsep))
tf_min_scores_in_sig_obs_motifs = {}
with open(observed_mut_motifs_temp, 'r') as i_observed_mut_motifs_temp:
lines = i_observed_mut_motifs_temp.readlines()
for l in lines:
#alternative: use the per-motif minimum score
#tf_min_scores_in_sig_obs_motifs[l.strip().split('\t')[0]] = float(l.strip().split('\t')[1])
#use (mean - stdev) of the observed scores for this motif
tf_min_scores_in_sig_obs_motifs[l.strip().split('\t')[0]] = float(float(l.strip().split('\t')[2])-float(l.strip().split('\t')[3]))
print('tf_min_scores_in_sig_obs_motifs: ', tf_min_scores_in_sig_obs_motifs)
gene_expression_index = 31
tf_binding_index = 30
mut_motif_pos_index = 13
breaking_score_threshold = 0.3
tf_counts_in_sim_sets = {}
tfpos_counts_in_sim_sets = {}
tf_counts_in_this_sim_set = {}
tfpos_counts_in_this_sim_set = {}
sim_file=sig_muts_per_tf_mutation_input_files[0]
with open(sim_file) as i_sim_file:
l = i_sim_file.readline().strip().split('\t')
while l and len(l)>gene_expression_index:
if ((l[gene_expression_index]=='nan' or float(l[gene_expression_index])>0) and
float(l[motif_breaking_score_index])>=breaking_score_threshold):
try:
tf_counts_in_this_sim_set[l[motif_name_index]] +=1
except KeyError:
tf_counts_in_this_sim_set[l[motif_name_index]] = 1
try:
tfpos_counts_in_this_sim_set[l[motif_name_index]+"#"+l[mut_motif_pos_index]] +=1
except KeyError:
tfpos_counts_in_this_sim_set[l[motif_name_index]+"#"+l[mut_motif_pos_index]] = 1
l = i_sim_file.readline().strip().split('\t')
for tf in tf_counts_in_this_sim_set.keys():
try:
tf_counts_in_sim_sets[tf].append(tf_counts_in_this_sim_set[tf])
except KeyError:
tf_counts_in_sim_sets[tf] = [tf_counts_in_this_sim_set[tf]]
for tf in tfpos_counts_in_this_sim_set.keys():
try:
tfpos_counts_in_sim_sets[tf].append(tfpos_counts_in_this_sim_set[tf])
except KeyError:
tfpos_counts_in_sim_sets[tf] = [tfpos_counts_in_this_sim_set[tf]]
for sim_file in sig_muts_per_tf_mutation_input_files[1:]: #count for all files incl. observed
tf_counts_in_this_sim_set = {}
tfpos_counts_in_this_sim_set = {}
with open(sim_file) as i_sim_file:
l = i_sim_file.readline().strip().split('\t')
while l and len(l)>gene_expression_index:
if ((l[gene_expression_index]=='nan' or float(l[gene_expression_index])>0) and
float(l[motif_breaking_score_index])>=breaking_score_threshold):
'''no FDR or signal check has been applied to these sets, therefore only keep motifs where
f_score+breaking_score >= the minimum score obtained for the same motif in the obs set
(instead of FDR calculations, to ensure only reasonable mutations are counted)
and there is a DNase signal
or there is a TF binding signal.
This is usually the case for sim sets (the idea is to get mutations similar to those in the obs set).'''
try:
min_obs_score_this_motif = tf_min_scores_in_sig_obs_motifs[l[motif_name_index]]
except KeyError:
min_obs_score_this_motif = None
if min_obs_score_this_motif:
if ((float(l[f_score_index])) >= min_obs_score_this_motif) :
if((float(l[dnase_index])>0.0) or# or float(l[fantom_index])>0.0 or float(l[num_other_tfs_index])>0.0
(float(l[tf_binding_index])>0 and l[tf_binding_index]!="nan")):
try:
tf_counts_in_this_sim_set[l[motif_name_index]] +=1
except KeyError:
tf_counts_in_this_sim_set[l[motif_name_index]] = 1
try:
tfpos_counts_in_this_sim_set[l[motif_name_index]+"#"+l[mut_motif_pos_index]] +=1
except KeyError:
tfpos_counts_in_this_sim_set[l[motif_name_index]+"#"+l[mut_motif_pos_index]] = 1
l = i_sim_file.readline().strip().split('\t')
for tf in tf_counts_in_this_sim_set.keys():
try:
tf_counts_in_sim_sets[tf].append(tf_counts_in_this_sim_set[tf])
except KeyError:
tf_counts_in_sim_sets[tf] = [tf_counts_in_this_sim_set[tf]]
for tf in tfpos_counts_in_this_sim_set.keys():
try:
tfpos_counts_in_sim_sets[tf].append(tfpos_counts_in_this_sim_set[tf])
except KeyError:
tfpos_counts_in_sim_sets[tf] = [tfpos_counts_in_this_sim_set[tf]]
tf_p_values = []
tf_names = []
for tf in sorted(tf_counts_in_sim_sets.keys()):
if len(tf_counts_in_sim_sets[tf])<len(sig_muts_per_tf_mutation_input_files):
for i in range(len(tf_counts_in_sim_sets[tf]), len(sig_muts_per_tf_mutation_input_files)):
tf_counts_in_sim_sets[tf].append(0)
num_tf_obs = tf_counts_in_sim_sets[tf][0]
num_tf_sim = tf_counts_in_sim_sets[tf][1:]
tf_names.append(tf)
num_tf_sim_mean = np.mean(num_tf_sim)
num_tf_sim_sd = np.std(num_tf_sim)
if num_tf_sim_sd==0.0:
num_tf_sim_sd = 1.0
if p_value_on_score:
tf_p_values.append(empirical_pval((num_tf_obs,), num_tf_sim))
#adjusted_tf_p_values = []
#adjusted_tf_p_values = tf_p_values
else:
tf_p_values.append(get_pval(num_tf_obs, num_tf_sim_mean, num_tf_sim_sd))
adjusted_tf_p_values = []
if len(tf_p_values)>0:
adjusted_tf_p_values = adjust_pvales(tf_p_values)
else:
print('tf_p_values nothing:', tf_p_values)
tf_p_values_vec = [j for i in tf_p_values for j in (i if isinstance(i, list) else [i])]  # flatten: empirical_pval returns a list, get_pval a scalar
with open(sig_tfs_file, 'w') as ofile:
for i,tf in enumerate(tf_names):
ofile.write(tf + '\t' + str(tf_p_values_vec[i]) + '\t' + str(tf_p_values_vec[i]) + '\t' + str(tf_counts_in_sim_sets[tf][0]) + '\t' + str(np.mean(tf_counts_in_sim_sets[tf][1:])) + '\t' + ','.join([str(x) for x in tf_counts_in_sim_sets[tf][1:]])+ '\n')
tfpos_p_values = []
tfpos_names = []
for tfpos in sorted(tfpos_counts_in_sim_sets.keys()):
if len(tfpos_counts_in_sim_sets[tfpos])<len(sig_muts_per_tf_mutation_input_files):
for i in range(len(tfpos_counts_in_sim_sets[tfpos]), len(sig_muts_per_tf_mutation_input_files)):
tfpos_counts_in_sim_sets[tfpos].append(0)
num_tfpos_obs = tfpos_counts_in_sim_sets[tfpos][0]
num_tfpos_sim = tfpos_counts_in_sim_sets[tfpos][1:]
tfpos_names.append(tfpos)
num_tfpos_sim_mean = np.mean(num_tfpos_sim)
num_tfpos_sim_sd = np.std(num_tfpos_sim)
if num_tfpos_sim_sd==0.0:
num_tfpos_sim_sd = 1.0
if p_value_on_score:
tfpos_p_values.append(empirical_pval((num_tfpos_obs,), num_tfpos_sim))
else:
tfpos_p_values.append(get_pval(num_tfpos_obs, num_tfpos_sim_mean, num_tfpos_sim_sd))
adjusted_tfpos_p_values = []
if len(tfpos_p_values)>0:
adjusted_tfpos_p_values = adjust_pvales(tfpos_p_values)
tfpos_p_values_vec = [j for i in tfpos_p_values for j in (i if isinstance(i, list) else [i])]  # flatten: empirical_pval returns a list, get_pval a scalar
with open(sig_tfpos_file, 'w') as ofile:
for i,tfpos in enumerate(tfpos_names):
ofile.write(tfpos + '\t' + str(tfpos_p_values_vec[i]) + '\t' + str(tfpos_p_values_vec[i]) + '\t' + str(tfpos_counts_in_sim_sets[tfpos][0]) + '\t' + str(np.mean(tfpos_counts_in_sim_sets[tfpos][1:])) + '\t' + ','.join([str(x) for x in tfpos_counts_in_sim_sets[tfpos][1:]])+ '\n')
if os.path.exists(observed_mut_motifs_temp):
os.remove(observed_mut_motifs_temp)
return sig_tfs_file, sig_tfpos_file
def get_pval(element_score, avg, sd):
try:
z_score = (element_score - avg)/sd
except ZeroDivisionError: #in case sd is zero
z_score = (element_score - avg)
p_value = stats.norm.sf(z_score)
return p_value
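# A minimal sketch (not part of the original module): the p-value is the
# upper-tail probability of the z-score under a standard normal.
def _demo_get_pval():
    return get_pval(element_score=3.0, avg=1.0, sd=1.0)  # stats.norm.sf(2.0) ~ 0.023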
def adjust_pvales(pvalues):
significant_bool_report, corrected_p_values_array, alphacSidak, alphacBonf = multipletests(pvalues, alpha=0.05, method='fdr_bh', returnsorted=False) #returns 4 values: a boolean array indicating whether each corrected value is significant at the given alpha, an array of the corrected p-values, the corrected alpha for the Sidak method, and the corrected alpha for the Bonferroni method
return corrected_p_values_array.tolist()
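# A minimal sketch (not part of the original module): Benjamini-Hochberg FDR
# adjustment of a small list of p-values via statsmodels' multipletests.
def _demo_adjust_pvales():
    return adjust_pvales([0.001, 0.02, 0.04, 0.5])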
def process_input_file(observed_input_file, simulated_input_files,
combined_simulated_muts_output_file,
combined_simulated_muts_merged_output_file,
output_extension, distance_to_merge,
filter_cond, mut_sig_threshold):
if os.path.exists(combined_simulated_muts_merged_output_file) and os.path.exists(combined_simulated_muts_output_file):
pass
else:
with open(combined_simulated_muts_output_file, 'w') as combined_simulated_muts_outfile, open(combined_simulated_muts_merged_output_file, 'w') as combined_simulated_muts_merged_oufile:
for simulated_input_file in simulated_input_files:
unified_muts_file = simulated_input_file + output_extension + "_groupedbymut"
print(unified_muts_file)
unified_muts_file = unify_muts(simulated_input_file, output_extension, filter_mut_motifs=True, filter_cond=filter_cond)
with open(unified_muts_file, 'r') as unified_muts_readfile:
combined_simulated_muts_outfile.write(unified_muts_readfile.read())
unified_muts_file_wihtmotifinfo = unified_muts_file+"withmotifinfo"
print(unified_muts_file_wihtmotifinfo)
unified_muts_file_wihtmotifinfo = get_max_motif_in_grouped_muts(annotated_mutations_grouped_file=unified_muts_file, annotated_mutations_grouped_output_file=unified_muts_file_wihtmotifinfo)
calculated_pvalues_unified_muts_file_wihtmotifinfo = unified_muts_file_wihtmotifinfo+"_statmuts"
sig_calculated_pvalues_unified_muts_file_wihtmotifinfo = unified_muts_file_wihtmotifinfo+"_statmutsonlysig"
calculated_pvalues_unified_muts_file_wihtmotifinfo, sig_calculated_pvalues_unified_muts_file_wihtmotifinfo, n_sig_muts = assess_stat_muts(muts_input_file=unified_muts_file_wihtmotifinfo, simulated_input_file=unified_muts_file_wihtmotifinfo, observed_output_file=calculated_pvalues_unified_muts_file_wihtmotifinfo, observed_onlysig_output_file=sig_calculated_pvalues_unified_muts_file_wihtmotifinfo, score_index_observed_elements=9, score_index_sim_elements=9, mut_sig_threshold=mut_sig_threshold)
merged_muts_output_file = sig_calculated_pvalues_unified_muts_file_wihtmotifinfo+"_mergedmuts{distance_to_merge}bp".format(distance_to_merge=distance_to_merge)
merged_muts_output_file = merge_muts(muts_input_file=sig_calculated_pvalues_unified_muts_file_wihtmotifinfo, merged_muts_output_file=merged_muts_output_file, filter_mut_motifs=False, filter_col_index=15, filter_value=0.05, mut_score_index=9, motifs_col_index =10, ref_alt_col_index=11, mutpos_col_index=12, motifname_col_index=13, motif_col_index=14, distance_to_merge=20)
with open(merged_muts_output_file, 'r') as merged_muts_read_file:
combined_simulated_muts_merged_oufile.write(merged_muts_read_file.read())
print(combined_simulated_muts_output_file)
print(combined_simulated_muts_merged_output_file)
unified_muts_file = observed_input_file + output_extension + "_groupedbymut"
unified_muts_file = unify_muts(observed_input_file, unified_muts_file, filter_mut_motifs=True, filter_cond=filter_cond)
unified_muts_file_wihtmotifinfo = unified_muts_file+"withmotifinfo"
unified_muts_file_wihtmotifinfo = get_max_motif_in_grouped_muts(annotated_mutations_grouped_file=unified_muts_file, annotated_mutations_grouped_output_file=unified_muts_file_wihtmotifinfo)
calculated_pvalues_unified_muts_file_wihtmotifinfo = unified_muts_file_wihtmotifinfo+"_statmuts"
sig_calculated_pvalues_unified_muts_file_wihtmotifinfo = unified_muts_file_wihtmotifinfo+"_statmutsonlysig"
calculated_pvalues_unified_muts_file_wihtmotifinfo, sig_calculated_pvalues_unified_muts_file_wihtmotifinfo, n_sig_muts = assess_stat_muts(muts_input_file=unified_muts_file_wihtmotifinfo, simulated_input_file=combined_simulated_muts_output_file, observed_output_file=calculated_pvalues_unified_muts_file_wihtmotifinfo, observed_onlysig_output_file=sig_calculated_pvalues_unified_muts_file_wihtmotifinfo, score_index_observed_elements=9, score_index_sim_elements=9, mut_sig_threshold=mut_sig_threshold)
merged_muts_output_file = sig_calculated_pvalues_unified_muts_file_wihtmotifinfo+"_mergedmuts{distance_to_merge}bp".format(distance_to_merge=distance_to_merge)
merged_muts_output_file = merge_muts(muts_input_file=sig_calculated_pvalues_unified_muts_file_wihtmotifinfo, merged_muts_output_file=merged_muts_output_file, filter_mut_motifs=False, filter_col_index=15, filter_value=0.05, mut_score_index=9, motifs_col_index =10, ref_alt_col_index=11, mutpos_col_index=12, motifname_col_index=13, motif_col_index=14, distance_to_merge=20)
merged_elements_statspvalues = merged_muts_output_file+"_statspvalues"
merged_elements_statspvaluesonlysig = merged_muts_output_file+"_statspvaluesonlysig"
merged_elements_statspvalues, merged_elements_statspvaluesonlysig, n_sig = assess_stat_elements(observed_input_file=merged_muts_output_file, simulated_input_file=combined_simulated_muts_merged_output_file, merged_elements_statspvalues=merged_elements_statspvalues, merged_elements_statspvaluesonlysig=merged_elements_statspvaluesonlysig, score_index_observed_elements=3, score_index_sim_elements=3)
print('Number of Sig elements: ', n_sig, 'initial #sig muts ', n_sig_muts)
return merged_elements_statspvaluesonlysig
def get_scores_per_window(observed_input_files_objs, observed_input_file, tmp_dir, window_size, ext, simulated_input_file):
simulated_input_file_tmp_overallTFs_local = tmp_dir +'/'+ observed_input_file.split('/')[-1] + '_' + simulated_input_file.split('/')[-1] + ext
if os.path.exists(simulated_input_file_tmp_overallTFs_local):
return simulated_input_file_tmp_overallTFs_local
#"remove chr, X>23,Y>24 and print in string format, check position if number"
#simulated_input_file_fixed_sorted = tmp_dir + '/' + observed_input_file.split('/')[-1] + simulated_input_file.split('/')[-1]
#awk_stmt = r"""awk 'BEGIN{{FS=OFS="\t"}}{{gsub("X","23", $1); gsub("Y","24", $1); gsub("chr","", $1);if($10==".") print $1, $2*1, $3*1, $11, $18; else print $1, $2*1, $3*1, $10+$11, $18}}' {simulated_file} | sort -k1,1n -k2,2n -k3,3n > {simulated_outfile_temp}""".format(simulated_file = simulated_input_file, simulated_outfile_temp = simulated_input_file_fixed_sorted)
#os.system(awk_stmt)
#sim_chrs_dir = simulated_input_file_fixed_sorted+'_sim_chrs/'
#if not os.path.isdir(sim_chrs_dir):
# os.makedirs(sim_chrs_dir)
#os.system("""awk '{{print $0>>"{}"$1".bed"}}' {}""".format(
# sim_chrs_dir, simulated_input_file_fixed_sorted))
simulated_input_file_tmp_overallTFs_local_temp = simulated_input_file_tmp_overallTFs_local + '_temp'
sim_chr = simulated_input_file.split('/')[-1].split('.')[0]
#print(sim_chr)
#print(observed_input_files_objs[sim_chr])
"if observed mutation file for any chromosome doesn't exist, return en ampty file"
try:
obs_chr_obj = BedTool(observed_input_files_objs[sim_chr])
except KeyError:
open(simulated_input_file_tmp_overallTFs_local, 'a').close()
return(simulated_input_file_tmp_overallTFs_local)
sim_chr_obj = BedTool(simulated_input_file)
print("Intersecting ", simulated_input_file)
sim_chr_file_intersected = simulated_input_file+ '_intersected'
obs_chr_obj.map(sim_chr_obj, c=4, o=['mean', 'stdev', 'count']).saveas(sim_chr_file_intersected)
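#Descriptive note: BedTool.map wraps 'bedtools map', so each observed window keeps its own
#columns and gains the mean, stdev and count of column 4 of the simulated file for the
#simulated mutations that overlap it; windows with no overlap get '.' and are dropped by
#the awk filter below.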
#obs_chr_obj.map(sim_chr_obj, c=4, o=['collapse']).saveas(sim_chr_file_intersected)
#window_id_fscroe_file = """awk 'BEGIN{{FS=OFS="\t"}}{{if($5!=".") print $4,$5}}' {sim_intersected} >> {sim_scores_combined}""".format(
# sim_intersected=sim_chr_file_intersected, sim_scores_combined=simulated_input_file_tmp_overallTFs_local_temp)
window_id_fscroe_file = """awk 'BEGIN{{FS=OFS="\t"}}{{if($5!=".") print $4,$5,$6,$7, $8}}' {sim_intersected} >> {sim_scores_combined}""".format(
sim_intersected=sim_chr_file_intersected, sim_scores_combined=simulated_input_file_tmp_overallTFs_local_temp)
os.system(window_id_fscroe_file)
# for chr_file in os.listdir(sim_chrs_dir):
# if chr_file.endswith('.bed'):
# sim_chr_file = sim_chrs_dir+chr_file
# obs_chr_obj = observed_input_files_objs[chr_file.replace('.bed', '')]
# sim_chr_obj = BedTool(sim_chr_file)
#
# print("Intersecting ", sim_chr_file)
# sim_chr_file_intersected = sim_chr_file+'_intersected'
# obs_chr_obj.map(sim_chr_obj, c=4, o=['mean', 'stdev', 'count']).saveas(sim_chr_file_intersected)
# #obs_chr_obj.window(sim_chr_obj, w = window_size).saveas(sim_chr_file_intersected)
#
# #col 4: windowID; col18: tf-binding score; col9:fscore
# window_id_fscroe_file = """awk 'BEGIN{{FS=OFS="\t"}}{{if($7!=0) print $4,$5,$6,$7}}' {sim_intersected} >> {sim_scores_combined}""".format(
# sim_intersected=sim_chr_file_intersected, sim_scores_combined=simulated_input_file_tmp_overallTFs_local_temp)
# os.system(window_id_fscroe_file)
# #os.remove(sim_chr_file_intersected)
# #os.remove(sim_chr_file)
if os.path.isfile(simulated_input_file_tmp_overallTFs_local_temp):
shutil.move(simulated_input_file_tmp_overallTFs_local_temp, simulated_input_file_tmp_overallTFs_local)
#os.remove(simulated_input_file_tmp_overallTFs_local_temp)
#shutil.rmtree(sim_chrs_dir)
#print('cleanup')
cleanup()
return simulated_input_file_tmp_overallTFs_local
# def groupby_per_mut(score_input_files_objs, tmp_dir):
#
# score_group_file =
# d = score_input_files_objs.groupby(g=[1], c=2, o=['mean', 'stdev','count'])
#
#
#
# return
def get_simulated_mean_sd_per_TF_motif_background_window(cohort_full_name, annotated_input_file,
simulated_annotated_input_files,
mutations_cohorts_dir,
cohort_mean_sd_per_tf_overall_output_dict_file,
chr_lengths_file,
background_window_size = 50000,
motif_name_index = 17, f_score_index = 9,
motif_breaking_score_index = 10, chromatin_cat_index=22, tmp_dir = '$SNIC_TMP', n_cores_fscore=10):
if os.path.exists(cohort_mean_sd_per_tf_overall_output_dict_file):
with open(cohort_mean_sd_per_tf_overall_output_dict_file, 'r') as dict_simulated_mean_sd_per_TF_motif_ifile:
dict_type_mean_std_scores = json.loads(dict_simulated_mean_sd_per_TF_motif_ifile.readline())
return dict_type_mean_std_scores
print("Extracting avg and std per TF and overall from the simulation sets... onto: ", cohort_mean_sd_per_tf_overall_output_dict_file)
cohort = cohort_full_name.split('/')[-1]
"replace chr, X, Y, add Line Number to use as window ID and sort by chr,start"
observed_input_file_sorted = tmp_dir+'/'+ annotated_input_file.split('/')[-1] + '_fixed_sorted'
cmd = """awk 'BEGIN{{OFS="\t"}}{{gsub("chr","",$1); gsub("X", 23, $1); gsub("Y", 24, $1); print $1,$2,$3,NR}}' {} | bedtools slop -g /proj/snic2020-16-50/nobackup/pancananalysis/pancan12Feb2020/cancer_datafiles/chr_order_hg19.txt -b {}| sort -k1,1n -k2,2n > {}""".format(
annotated_input_file, background_window_size, observed_input_file_sorted)
os.system(cmd)
obs_chrs_dir = tmp_dir+'/'+cohort + '_chrs/'
if not os.path.isdir(obs_chrs_dir):
os.makedirs(obs_chrs_dir)
observed_input_files_objs = {}
os.system("""awk '{{print $0>>"{}"$1".bed"}}' {}""".format(
obs_chrs_dir, observed_input_file_sorted))
for chr_file in os.listdir(obs_chrs_dir):
if chr_file.endswith('.bed'):
#chr_file_window=obs_chrs_dir+chr_file+'_window'
#chr_file_window_sorted = chr_file_window +'_sorted'
#BedTool(obs_chrs_dir+chr_file).slop(b=background_window_size,genome='hg19').saveas(chr_file_window)
#os.system("""sort -k1,1n -k2,2n {} > {}""".format(
#chr_file_window, chr_file_window_sorted))
observed_input_files_objs[chr_file.replace('.bed', '')] = obs_chrs_dir+chr_file
sim_chrs_dir = tmp_dir + '/'+ cohort + '_sim/'
if not os.path.isdir(sim_chrs_dir):
os.makedirs(sim_chrs_dir)
"combine simulated files with the same chr"
for sim_file in simulated_annotated_input_files:
sim_file_tmp = sim_file +'_tmp'
os.system("""awk 'BEGIN{{FS=OFS="\t"}}{{gsub("X","23", $1); gsub("Y","24", $1); gsub("chr","", $1);if($10==".") print $1, $2*1, $3*1, $11, $18; else print $1, $2*1, $3*1, $10+$11, $18}}' {} > {}""".format(
sim_file, sim_file_tmp))
os.system("""awk '{{print $0>>"{}"$1".bed"}}' {}""".format(
sim_chrs_dir, sim_file_tmp))
os.remove(sim_file_tmp)
sim_input_files =[]
for sim_file in os.listdir(sim_chrs_dir):
sim_file_sorted = sim_chrs_dir +'/' + sim_file +'sorted'
os.system("""sort -k1,1n -k2,2n -k3,3n {} > {}""".format(
sim_chrs_dir+ sim_file, sim_file_sorted))
sim_input_files.append(sim_file_sorted)
#split files by 1 000 000 bps
#os.system("""awk 'BEGIN{{n=1}}{{x=$3;if(x>n*10000000){{++n}}{{print > "{sim_file_sorted}""_split_"n}}}}' {sim_file_sorted}""".format(
# sim_file_sorted=sim_file_sorted))
#sim_input_files = [sim_chrs_dir+'/'+x for x in os.listdir(sim_chrs_dir) if 'sorted_split_' in x]
#sim_input_files.append(sim_file_sorted)
#print(sim_input_files)
#awk_stmt = r"""awk 'BEGIN{{FS=OFS="\t"}}{{gsub("X","23", $1); gsub("Y","24", $1); gsub("chr","", $1);if($10==".") print $1, $2*1, $3*1, $11, $18; else print $1, $2*1, $3*1, $10+$11, $18}}' {simulated_file} | sort -k1,1n -k2,2n -k3,3n > {simulated_outfile_temp}""".format(simulated_file = simulated_input_file, simulated_outfile_temp = simulated_input_file_fixed_sorted)
#awk_stmt = r"""awk 'BEGIN{{FS=OFS="\t"}}{{gsub("X","23", $1); gsub("Y","24", $1); gsub("chr","", $1); print $1, $2*1, $3*1, $10, $11, $18}}' {simulated_file} | sort -k1,1n -k2,2n > {simulated_outfile_temp}""".format(simulated_file = simulated_input_file, simulated_outfile_temp = simulated_input_file_fixed_sorted)
#awk_stmt = r"""awk 'BEGIN{{FS=OFS="\t"}}{{gsub("X","23", $1); gsub("Y","24", $1); gsub("chr","", $1); printf ("%s\t%d\t%d\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%d\t%d\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\n", $1, $2*1, $3*1, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15, $16, $17, $18, $19, $20, $21, $22, $23, $24, $25, $26, $27, $28, $29, $30, $31, $32)}}' {simulated_file} > {simulated_outfile_temp}""".format(simulated_file = simulated_input_file, simulated_outfile_temp = simulated_input_file_fixed)
#os.system(awk_stmt)
ext = '_scoresPerWindow'
obs_scores_files = []
if n_cores_fscore>1:
p = Pool(n_cores_fscore)
obs_scores_files = p.starmap(get_scores_per_window, product(
[observed_input_files_objs],[observed_input_file_sorted], [tmp_dir], [background_window_size],[ext],
sim_input_files))
p.close()
p.join()
else:
for simulated_annotated_input_file in sim_input_files:
obs_scores_files.append(get_scores_per_window(observed_input_files_objs, observed_input_file_sorted, tmp_dir,
background_window_size, ext, simulated_annotated_input_file))
print(obs_scores_files)
print('Combining the scores')
simulated_mean_sd_outfiles = tmp_dir + '/' + cohort + '_allscores'
if not os.path.isfile(simulated_mean_sd_outfiles):
#merge files from the same category, sort by the line number and group by position, TF motif, chromatin cat. and line number
with open(simulated_mean_sd_outfiles, 'w') as sim_fn:
for obs_scores_file in obs_scores_files:
with open(obs_scores_file, 'r') as score_fn:
sim_fn.write(score_fn.read())
"create a dictionery for mean, std scores for all categories"
dict_simulated_mean_sd = {}
with open(simulated_mean_sd_outfiles, 'r') as simulated_mean_sd_ifile:
l = simulated_mean_sd_ifile.readline().strip().split('\t')
while l and len(l)>1:
#fscore = map(float, l[1].split(','))
#fscore = [float(x) for x in l[1].split(',')]
#dict_simulated_mean_sd[l[0]] = {'mean': np.mean(fscore),
# "std": np.std(fscore),
# "nummotifs": len(fscore)}
dict_simulated_mean_sd[l[0]] = {'mean': l[1],
"std": l[2],
"nummotifs": l[3]}
l = simulated_mean_sd_ifile.readline().strip().split('\t')
#save the dictionary per category
dict_type_mean_std_scores = {}
dict_type_mean_std_scores['overallTFs'] = dict_simulated_mean_sd
with open(cohort_mean_sd_per_tf_overall_output_dict_file, 'w') as dict_simulated_mean_sd_per_TF_motif_outfile:
json.dump(dict_type_mean_std_scores, dict_simulated_mean_sd_per_TF_motif_outfile)
shutil.rmtree(sim_chrs_dir)
shutil.rmtree(obs_chrs_dir)
cleanup()
return dict_type_mean_std_scores
def get_simulated_mean_sd_per_TF_motif(simulated_annotated_input_files,
cohort_mean_sd_per_tf_overall_output_dict_file,
obs_chrs_dir, observed_input_files_objs,
motif_name_index = 17, f_score_index = 9,
motif_breaking_score_index = 10, chromatin_cat_index=22):
if os.path.exists(cohort_mean_sd_per_tf_overall_output_dict_file):
with open(cohort_mean_sd_per_tf_overall_output_dict_file, 'r') as dict_simulated_mean_sd_per_TF_motif_ifile:
dict_simulated_mean_sd_per_TF_motif = json.loads(dict_simulated_mean_sd_per_TF_motif_ifile.readline())
return dict_simulated_mean_sd_per_TF_motif
print("Extracting avg and std per TF and overall from the simulation sets... onto: ", cohort_mean_sd_per_tf_overall_output_dict_file)
overall_score_values = []
dict_simulated_score_per_TF_motif = {}
dict_simulated_mean_sd_per_TF_motif = {}
dict_simulated_score_per_chromatin_cat = {}
dict_simulated_mean_sd_per_chromatin_cat = {}
dict_simulated_score_per_TF_motif_per_chromatin_cat = {}
dict_simulated_mean_sd_per_TF_motif_per_chromatin_cat = {}
for simulated_annotated_input_file in simulated_annotated_input_files:
with open(simulated_annotated_input_file, 'r') as simulated_annotated_ifile:
l = simulated_annotated_ifile.readline().strip().split('\t')
while l and len(l)>motif_name_index:
try:
dict_simulated_score_per_TF_motif[l[motif_name_index]].append(float(l[f_score_index])+float(l[motif_breaking_score_index]))
except KeyError:
dict_simulated_score_per_TF_motif[l[motif_name_index]] = [float(l[f_score_index])+float(l[motif_breaking_score_index])]
try:
dict_simulated_score_per_chromatin_cat[l[chromatin_cat_index]].append(float(l[f_score_index])+float(l[motif_breaking_score_index]))
except KeyError:
dict_simulated_score_per_chromatin_cat[l[chromatin_cat_index]] = [float(l[f_score_index])+float(l[motif_breaking_score_index])]
try:
dict_simulated_score_per_TF_motif_per_chromatin_cat[l[motif_name_index]][l[chromatin_cat_index]].append(float(l[f_score_index])+float(l[motif_breaking_score_index]))
except KeyError:
try:
dict_simulated_score_per_TF_motif_per_chromatin_cat[l[motif_name_index]][l[chromatin_cat_index]] = [float(l[f_score_index])+float(l[motif_breaking_score_index])]
except KeyError:
dict_simulated_score_per_TF_motif_per_chromatin_cat[l[motif_name_index]] = {l[chromatin_cat_index] : [float(l[f_score_index])+float(l[motif_breaking_score_index])]}
overall_score_values.append(float(l[f_score_index])+float(l[motif_breaking_score_index]))
l = simulated_annotated_ifile.readline().strip().split('\t')
#get mean and std of scores per TF_motif
for tf in dict_simulated_score_per_TF_motif.keys():
tf_mean = np.mean(dict_simulated_score_per_TF_motif[tf])
tf_std = np.std(dict_simulated_score_per_TF_motif[tf])
num_motifs = len(dict_simulated_score_per_TF_motif[tf])
dict_simulated_mean_sd_per_TF_motif[tf] = {'mean': tf_mean,
"std": tf_std,
"nummotifs": num_motifs}
#get mean and std of scores per chromatin category
for chromatin_cat in dict_simulated_score_per_chromatin_cat.keys():
chromatin_cat_mean = np.mean(dict_simulated_score_per_chromatin_cat[chromatin_cat])
chromatin_cat_std = np.std(dict_simulated_score_per_chromatin_cat[chromatin_cat])
num_motifs = len(dict_simulated_score_per_chromatin_cat[chromatin_cat])
dict_simulated_mean_sd_per_chromatin_cat[chromatin_cat] = {'mean': chromatin_cat_mean,
"std": chromatin_cat_std,
"nummotifs": num_motifs}
#get mean and std of scores per TF_motif per chromatin category
for tf in dict_simulated_score_per_TF_motif_per_chromatin_cat.keys():
for chromatin_cat in dict_simulated_score_per_TF_motif_per_chromatin_cat[tf].keys():
tf_chromatin_cat_mean = np.mean(dict_simulated_score_per_TF_motif_per_chromatin_cat[tf][chromatin_cat])
tf_chromatin_cat_std = np.std(dict_simulated_score_per_TF_motif_per_chromatin_cat[tf][chromatin_cat])
num_motifs = len(dict_simulated_score_per_TF_motif_per_chromatin_cat[tf][chromatin_cat])
try:
dict_simulated_mean_sd_per_TF_motif_per_chromatin_cat[tf][chromatin_cat] = {'mean': tf_chromatin_cat_mean,
"std": tf_chromatin_cat_std,
"nummotifs": num_motifs}
except KeyError:
dict_simulated_mean_sd_per_TF_motif_per_chromatin_cat[tf] = {chromatin_cat: {'mean': tf_chromatin_cat_mean,
"std": tf_chromatin_cat_std,
"nummotifs": num_motifs}}
#get mean and std of scores across the genome regardless of motif or chromatin category
overall_num_motifs = len(overall_score_values)
number_subsets = 1
subset_size = 10000000
if overall_num_motifs > subset_size:
number_subsets = int(overall_num_motifs / subset_size)
subsets = np.array_split(overall_score_values, number_subsets)
# -*- coding: utf-8 -*-
'''
the shape of a SparseTensor is described by a tuple, like this
(array([[ 0, 297],
[ 0, 296],
[ 0, 295],
...,
[161, 2],
[161, 1],
[161, 0]], dtype=int32), array([0.00323625, 0.00485437, 0.00323625, ..., 0.00646204, 0.00161551,
0.00161551], dtype=float32), (162, 300))
element 0: the indices of the nonzero values; each row holds a (row, column) pair.
element 1: the corresponding nonzero values.
element 2: the dense shape of the sparse matrix.
'''
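# Illustrative note: a tuple like the one above can be passed to tf.SparseTensor as
#   tf.SparseTensor(indices=t[0], values=t[1], dense_shape=t[2]),
# which is how the 'indices_i', 'values_i' and 'dense_shape_i' placeholders are used below.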
from __future__ import division
from __future__ import print_function
from baseline.utils import *
from model.models import GCN
from model.hyparameter import parameter
from model.embedding import embedding
from baseline.lstm.lstm import LstmClass
from baseline.bi_lstm.bi_lstm import BilstmClass
from baseline.dela.dela import DelaClass
# from baseline.att_convlstm.model import at_convlstm
import pandas as pd
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
import model.data_next as data_load
import os
import argparse
tf.reset_default_graph()
os.environ["KMP_DUPLICATE_LIB_OK"] = "TRUE"
logs_path = "board"
# os.environ['CUDA_VISIBLE_DEVICES']='1'
#
# from tensorflow.compat.v1 import ConfigProto
# from tensorflow.compat.v1 import InteractiveSession
#
# config = ConfigProto()
# config.gpu_options.allow_growth = True
# session = InteractiveSession(config=config)
class Model(object):
def __init__(self, hp):
'''
:param para:
'''
self.hp = hp # hyperparameter
self.init_gcn() # init gcn model
self.init_placeholder() # init placeholder
self.init_embed() # init embedding
self.model() # init prediction model
def init_gcn(self):
'''
:return:
'''
self.adj = preprocess_adj(self.adjecent())
# define gcn model
if self.hp.model_name == 'gcn_cheby':
self.support = chebyshev_polynomials(self.adj, self.hp.max_degree)
self.num_supports = 1 + self.hp.max_degree
self.model_func = GCN
else:
self.support = [self.adj]
self.num_supports = 1
self.model_func = GCN
def init_placeholder(self):
'''
:return:
'''
self.placeholders = {
'position': tf.placeholder(tf.int32, shape=(1, self.hp.site_num), name='input_position'),
'day': tf.placeholder(tf.int32, shape=(None, self.hp.site_num), name='input_day'),
'hour': tf.placeholder(tf.int32, shape=(None, self.hp.site_num), name='input_hour'),
'minute': tf.placeholder(tf.int32, shape=(None, self.hp.site_num), name='input_minute'),
'indices_i': tf.placeholder(dtype=tf.int64, shape=[None, None], name='input_indices'),
'values_i': tf.placeholder(dtype=tf.float32, shape=[None], name='input_values'),
'dense_shape_i': tf.placeholder(dtype=tf.int64, shape=[None], name='input_dense_shape'),
# None: batch size * time size
'features': tf.placeholder(tf.float32, shape=[None, self.hp.site_num, self.hp.features],
name='input_features'),
'labels': tf.placeholder(tf.float32, shape=[None, self.hp.site_num, self.hp.output_length],
name='labels'),
'dropout': tf.placeholder_with_default(0., shape=(), name='input_dropout'),
'num_features_nonzero': tf.placeholder(tf.int32, name='input_zero') # helper variable for sparse dropout
}
self.supports = [tf.SparseTensor(indices=self.placeholders['indices_i'],
values=self.placeholders['values_i'],
dense_shape=self.placeholders['dense_shape_i']) for _ in range(self.num_supports)]
def adjecent(self):
'''
:return: adjacent matrix
'''
data = pd.read_csv(filepath_or_buffer=self.hp.file_adj)
adj = np.zeros(shape=[self.hp.site_num, self.hp.site_num])
for line in data[['src_FID', 'nbr_FID']].values:
adj[line[0]][line[1]] = 1
return adj
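# Illustrative note (toy numbers): with an edge list such as
#   src_FID  nbr_FID
#      0        1
#      1        2
# adjecent() returns a site_num x site_num 0/1 matrix with adj[0][1] = adj[1][2] = 1;
# edges are directed unless the file also lists the reverse pairs.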
def init_embed(self):
'''
:return:
'''
with tf.variable_scope('position'):
p_emd = embedding(self.placeholders['position'], vocab_size=self.hp.site_num,
num_units=self.hp.emb_size,
scale=False, scope="position_embed")
p_emd = tf.reshape(p_emd, shape=[1, self.hp.site_num, self.hp.emb_size])
p_emd = tf.expand_dims(p_emd, axis=0)
self.p_emd = tf.tile(p_emd, [self.hp.batch_size, self.hp.input_length, 1, 1])
print('p_emd shape is : ', self.p_emd.shape)
with tf.variable_scope('day'):
self.d_emb = embedding(self.placeholders['day'], vocab_size=32, num_units=self.hp.emb_size,
scale=False, scope="day_embed")
self.d_emd = tf.reshape(self.d_emb,
shape=[self.hp.batch_size, self.hp.input_length + self.hp.output_length,
self.hp.site_num, self.hp.emb_size])
print('d_emd shape is : ', self.d_emd.shape)
with tf.variable_scope('hour'):
self.h_emb = embedding(self.placeholders['hour'], vocab_size=24, num_units=self.hp.emb_size,
scale=False, scope="hour_embed")
self.h_emd = tf.reshape(self.h_emb,
shape=[self.hp.batch_size, self.hp.input_length + self.hp.output_length,
self.hp.site_num, self.hp.emb_size])
print('h_emd shape is : ', self.h_emd.shape)
with tf.variable_scope('mimute'):
self.m_emb = embedding(self.placeholders['minute'], vocab_size=12, num_units=self.hp.emb_size,
scale=False, scope="minute_embed")
self.m_emd = tf.reshape(self.m_emb,
shape=[self.hp.batch_size, self.hp.input_length + self.hp.output_length,
self.hp.site_num, self.hp.emb_size])
print('m_emd shape is : ', self.m_emd.shape)
'''
with tf.variable_scope('position_gcn'): # using the gcn to extract position relationship
p_emb = tf.reshape(self.p_emd, shape=[-1, self.para.site_num, self.para.emb_size])
p_gcn = self.model_func(self.placeholders,
input_dim=self.para.emb_size,
para=self.para,
supports=self.supports)
p_emd = p_gcn.predict(p_emb)
self.g_p_emd = tf.reshape(p_emd, shape=[self.para.batch_size,
self.para.input_length,
self.para.site_num,
self.para.gcn_output_size])
print('p_emd shape is : ', self.g_p_emd.shape)
'''
def model(self):
'''
:return:
'''
print('#................................in the encoder step......................................#')
if self.hp.model_name=='lstm':
# features=tf.layers.dense(self.placeholders['features'], units=self.para.emb_size) #[-1, site num, emb_size]
features = tf.reshape(self.placeholders['features'], shape=[self.hp.batch_size,
self.hp.input_length,
self.hp.site_num,
self.hp.features])
# this step encodes the input series data
encoder_init = LstmClass(self.hp.batch_size * self.hp.site_num,
predict_time=self.hp.output_length,
layer_num=self.hp.hidden_layer,
nodes=self.hp.emb_size,
placeholders=self.placeholders)
inputs = tf.transpose(features, perm=[0, 2, 1, 3])
inputs = tf.reshape(inputs, shape=[self.hp.batch_size * self.hp.site_num, self.hp.input_length,
self.hp.features])
h_states= encoder_init.encoding(inputs)
# decoder
print('#................................in the decoder step......................................#')
# this step predicts the pollutant concentration
self.pre=encoder_init.decoding(h_states, self.hp.site_num)
print('pres shape is : ', self.pre.shape)
elif self.hp.model_name=='bilstm':
# features=tf.layers.dense(self.placeholders['features'], units=self.para.emb_size) #[-1, site num, emb_size]
features = tf.reshape(self.placeholders['features'], shape=[self.hp.batch_size,
self.hp.input_length,
self.hp.site_num,
self.hp.features])
# this step encodes the input series data
encoder_init = BilstmClass(self.hp, placeholders=self.placeholders)
inputs = tf.transpose(features, perm=[0, 2, 1, 3])
inputs = tf.reshape(inputs, shape=[self.hp.batch_size * self.hp.site_num,
self.hp.input_length,
self.hp.features])
h_states= encoder_init.encoding(inputs)
# decoder
print('#................................in the decoder step......................................#')
# this step predicts the pollutant concentration
self.pre=encoder_init.decoding(h_states, self.hp.site_num)
print('pres shape is : ', self.pre.shape)
elif self.hp.model_name=='dela':
# features=tf.layers.dense(self.placeholders['features'], units=self.para.emb_size) #[-1, site num, emb_size]
features = tf.reshape(self.placeholders['features'], shape=[self.hp.batch_size,
self.hp.input_length,
self.hp.site_num,
self.hp.features])
in_day = self.d_emd[:, :self.hp.input_length, :, :]
in_hour = self.h_emd[:, :self.hp.input_length, :, :]
in_minute = self.m_emd[:, :self.hp.input_length, :, :]
in_position = self.p_emd[:, :self.hp.input_length, :, :]
embeddings = [in_hour, in_minute, in_position]
# this step encodes the input series data
encoder_init = DelaClass(self.hp, placeholders=self.placeholders)
x = encoder_init.encoding(features)
# decoder
print('#................................in the decoder step......................................#')
# this step predicts the pollutant concentration
self.pre=encoder_init.decoding(x, embeddings)
print('pres shape is : ', self.pre.shape)
# elif self.hp.model_name == 'atconvlstm':
# features = tf.reshape(self.placeholders['features'], shape=[self.hp.batch_size,
# self.hp.input_length,
# self.hp.site_num,
# self.hp.features])
# self.pre=at_convlstm(main_input=features)
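# Descriptive note: the loss below averages the squared error over the batch (axis=0),
# takes the square root, and then averages over sites and prediction horizons, i.e. a
# mean RMSE; the 1e-10 offset presumably keeps the square root away from exactly zero.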
self.loss = tf.reduce_mean(
tf.sqrt(tf.reduce_mean(tf.square(self.pre + 1e-10 - self.placeholders['labels']), axis=0)))
self.train_op = tf.train.AdamOptimizer(self.hp.learning_rate).minimize(self.loss)
def test(self):
'''
:return:
'''
model_file = tf.train.latest_checkpoint('weights/')
self.saver.restore(self.sess, model_file)
def describe(self, label, predict):
'''
:param label:
:param predict:
:return:
'''
plt.figure()
# label: observed values, plotted in blue
plt.plot(label[0:], 'b', label=u'actual value')
# predict: predicted values, plotted in red
plt.plot(predict[0:], 'r', label=u'predicted value')
# use the legend
plt.legend()
# plt.xlabel("time(hours)", fontsize=17)
# plt.ylabel("pm$_{2.5}$ (ug/m$^3$)", fontsize=17)
# plt.title("the prediction of pm$_{2.5}", fontsize=17)
plt.show()
def initialize_session(self):
self.sess = tf.Session()
self.saver = tf.train.Saver(var_list=tf.trainable_variables())
def re_current(self, a, max, min):
return [num * (max - min) + min for num in a]
def run_epoch(self):
'''
:return:
'''
max_rmse = 100
self.sess.run(tf.global_variables_initializer())
iterate = data_load.DataClass(hp=self.hp)
train_next = iterate.next_batch(batch_size=self.hp.batch_size, epoch=self.hp.epoch, is_training=True)
for i in range(int((iterate.length // self.hp.site_num * iterate.divide_ratio - (
iterate.input_length + iterate.output_length)) // iterate.step)
* self.hp.epoch // self.hp.batch_size):
x, day, hour, minute, label = self.sess.run(train_next)
features = np.reshape(x, [-1, self.hp.site_num, self.hp.features])
day = np.reshape(day, [-1, self.hp.site_num])
hour = np.reshape(hour, [-1, self.hp.site_num])
minute = np.reshape(minute, [-1, self.hp.site_num])
feed_dict = construct_feed_dict(features, self.adj, label, day, hour, minute, self.placeholders, site_num=self.hp.site_num)
feed_dict.update({self.placeholders['dropout']: self.hp.dropout})
loss_, _ = self.sess.run((self.loss, self.train_op), feed_dict=feed_dict)
print("after %d steps,the training average loss value is : %.6f" % (i, loss_))
# validate processing
if i % 100 == 0:
rmse_error = self.evaluate()
if max_rmse > rmse_error:
print("the validate average rmse loss value is : %.6f" % (rmse_error))
max_rmse = rmse_error
self.saver.save(self.sess, save_path=self.hp.save_path + 'model.ckpt')
# if os.path.exists('model_pb'): shutil.rmtree('model_pb')
# builder = tf.saved_model.builder.SavedModelBuilder('model_pb')
# builder.add_meta_graph_and_variables(self.sess, ["mytag"])
# builder.save()
def evaluate(self):
'''
:return:
'''
label_list = list()
predict_list = list()
label_list1, label_list2, label_list3 = list(), list(), list()
predict_list1, predict_list2, predict_list3 = list(), list(), list()
# with tf.Session() as sess:
model_file = tf.train.latest_checkpoint(self.hp.save_path)
if not self.hp.is_training:
print('the model weights have been loaded:')
self.saver.restore(self.sess, model_file)
# self.saver.save(self.sess, save_path='gcn/model/' + 'model.ckpt')
iterate_test = data_load.DataClass(hp=self.hp)
test_next = iterate_test.next_batch(batch_size=self.hp.batch_size, epoch=1, is_training=False)
max, min = iterate_test.max_dict['flow'], iterate_test.min_dict['flow']
print(max, min)
# '''
for i in range(int((iterate_test.length // self.hp.site_num
- iterate_test.length // self.hp.site_num * iterate_test.divide_ratio
- (iterate_test.input_length + iterate_test.output_length)) // iterate_test.output_length)
// self.hp.batch_size):
x, day, hour, minute, label = self.sess.run(test_next)
features = np.reshape(x, [-1, self.hp.site_num, self.hp.features])
day = np.reshape(day, [-1, self.hp.site_num])
hour = np.reshape(hour, [-1, self.hp.site_num])
minute = np.reshape(minute, [-1, self.hp.site_num])
import numpy as np
import matplotlib.pyplot as plt
from astropy.io import ascii
# simple function to assign an evolutionary state given Teff and R/logg
# 0 = main sequence
# 1 = subgiant
# 2 = RGB
# 3 = main sequence binary (teff & R only)
def evolstate(teff,rad,logg):
cl=np.zeros(len(teff))
# Teff+Rad, see Berger et al. 2018
if (rad[0] > -99):
sg_pc=ascii.read('tams_parsec.txt')
rgb_pc=ascii.read('rgb_parsec.txt')
mist=ascii.read('MIST_iso_5ae3ba3aad3cd.iso')
um=np.where(mist['phase'] < 3)[0]
mistteff=10**mist['log_Teff'][um]
mistrad=10**mist['log_R'][um]
a=-0.68081514
b=2.3019458
c=1.5722924
d=16.269495
e=-37.498445
um=np.where((mistteff < 5029.) & (mistrad < 1.))[0]
x = (mistteff[um]/4637.69 - 1)
logl_lim = a + b*x + c*x**2 + d*x**3 + e*x**4  # quartic in x using coefficients a-e
mistrad[um]=np.sqrt(1.55*(10**logl_lim) * (mistteff[um]/5777.)**(-4))
bincut=np.interp(rad,mistrad,mistteff)
cl=np.zeros(len(teff))
#cl[:]=1
sgcut=np.interp(teff,sg_pc['col1'],np.log10(sg_pc['col2']))
"""
Module for extracting statistics from data and running the preprocessing function.
"""
import numpy as np
import matplotlib.pyplot as plt
from scipy.optimize import curve_fit
import scipy.stats as stats
from generative_models import *
from basic_functions import *
from distance_functions import *
from summary_stats import *
def extract_stats(data, deltaT, binSize, summStat_metric, ifNorm, maxTimeLag = None):
"""Extract required statistics from data for ABC fitting.
Parameters
-----------
data : nd array
time-series of continous data, e.g., OU process, (numTrials * numTimePoints)
or binned spike counts (numTrials * numBin).
deltaT : float
temporal resolution of data (or binSize of spike counts).
binSize : float
bin-size for computing the autocorrelation.
summStat : string
metric for computing summay statistics ('comp_cc', 'comp_ac_fft', 'comp_psd').
ifNorm : string
if normalize the autocorrelation or PSD.
maxTimeLag : float, default None
maximum time-lag when using 'comp_cc' summary statistics.
Returns
-------
sumStat_nonNorm : 1d array
non normalized autocorrelation or PSD depending on chosen summary statistics.
data_mean : float
mean of data (for one unit of time)
data_var : float
variance of data (for one unit of time)
T : float
duration of each trial in data
numTrials : float
number of trials in data
binSize : float
bin-size used for computing the autocorrelation.
maxTimeLag : float
maximum time-lag used for computing the autocorrelation.
"""
# extract duration and number of trials
numTrials, numTimePoints = np.shape(data)
T = numTimePoints* deltaT
# compute mean and variance for one unit of time (1 s or 1 ms)
bin_var = 1
binsData_var = np.arange(0, T + bin_var, bin_var)
numBinData_var = len(binsData_var)-1
binned_data_var = binData(data, [numTrials,numBinData_var]) * deltaT
data_mean = np.mean(binned_data_var)/bin_var
data_var = comp_cc(binned_data_var, binned_data_var, 1, bin_var, numBinData_var)[0]
# bin data
binsData = np.arange(0, T + binSize, binSize)
numBinData = len(binsData)-1
binned_data = binData(data, [numTrials,numBinData]) * deltaT
sumStat = comp_sumStat(binned_data, summStat_metric, ifNorm, deltaT, binSize, T, numBinData, maxTimeLag)
return sumStat, data_mean, data_var, T, numTrials
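# Illustrative sketch only: extract_stats() expects a trials-by-timepoints array; the
# resolution, bin size and summary-statistic name below are placeholder values, not
# recommended settings.
def _example_extract_stats():
    data = np.random.rand(10, 1000)  # 10 trials, 1000 time points of a continuous signal
    return extract_stats(data, deltaT=0.001, binSize=0.02,
                         summStat_metric='comp_ac_fft', ifNorm=True, maxTimeLag=0.1)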
def compute_spikesDisperssion_twoTau(ac_data, theta, deltaT, binSize, T, numTrials, data_mean, data_var,\
min_disp, max_disp, jump_disp, borderline_jump, numIter):
"""Compute the disperssion parameter of spike counts from the autocorrelation of
a doubly stochastic process with two timescale using grid search method.
Parameters
-----------
ac_data : 1d array
autocorrelation of data.
theta : 1d array
[timescale1, timescale2, coefficient for timescale1].
deltaT : float
temporal resolution for the OU process generation.
binSize : float
bin-size for binning data and computing the autocorrelation.
T : float
duration of trials.
numTrials : float
number of trials.
data_mean : float
mean value of the OU process (average of firing rate).
data_var : float
variance of the OU process (variance of firing rate).
min_disp : float
minimum value of disperssion for grid search.
max_disp : float
maximum value of disperssion for grid search.
jump_disp : float
resolution of grid search.
borderline_jump : int
is used when the intial range of dispersion grid search is initially small,
defines number of "jump_disps" to be searched below or above the initial range.
Returns
-------
disp_estim : float
estimated value of dispersion.
"""
disp_range = np.arange(min_disp, max_disp, jump_disp)
maxTimeLag = 2
min_border = 0
max_border = 0
border_line = 1
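# Descriptive note: the loop below grid-searches the dispersion; if the best value lands on
# either edge of the current grid, the grid is extended past that edge by borderline_jump
# steps and the search is repeated until an interior minimum is found.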
while border_line == 1:
error_all = []
if min_border or max_border:
disp_range = np.arange(min_disp, max_disp, jump_disp)
for disp in disp_range:
print(disp)
error_sum = 0
for i in range(numIter):
data_syn, numBinData = twoTauOU_gammaSpikes(theta, deltaT, binSize, T, numTrials, data_mean,\
data_var, disp)
ac_syn = comp_cc(data_syn, data_syn, maxTimeLag, binSize, numBinData)
error_sum = error_sum + abs(ac_syn[1] - ac_data[1])
error_all.append(error_sum)
error_all = np.array(error_all)
disp_estim = disp_range[np.argmin((error_all))]
if disp_estim == disp_range[0]:
min_border = 1
min_disp, max_disp = min_disp - borderline_jump * jump_disp, min_disp + jump_disp
elif disp_estim == disp_range[-1]:
max_border = 1
min_disp, max_disp = max_disp - jump_disp, max_disp + borderline_jump * jump_disp
else:
border_line = 0
return disp_estim
def compute_spikesDisperssion_oneTau(ac_data, theta, deltaT, binSize, T, numTrials, data_mean, data_var,\
min_disp, max_disp, jump_disp, borderline_jump, numIter = 200):
"""Compute the disperssion parameter of spike counts from the autocorrelation of
a doubly stochastic process with one timescale using grid search method.
Parameters
-----------
ac_data : 1d array
autocorrelation of data.
theta : 1d array
[timescale].
deltaT : float
temporal resolution for the OU process generation.
binSize : float
bin-size for binning data and computing the autocorrelation.
T : float
duration of trials.
numTrials : float
number of trials.
data_mean : float
mean value of the OU process (average of firing rate).
data_var : float
variance of the OU process (variance of firing rate).
min_disp : float
minimum value of dispersion for grid search.
max_disp : float
maximum value of dispersion for grid search.
jump_disp : float
resolution of grid search.
borderline_jump : int
is used when the initial range of the dispersion grid search is too small,
defines number of "jump_disps" to be searched below or above the initial range.
Returns
-------
disp_estim : float
estimated value of dispersion.
"""
disp_range = np.arange(min_disp, max_disp, jump_disp)
import numpy as np
import fractions as f
from scipy.linalg import circulant
import matplotlib.pyplot as plt
from scipy import signal
import random
import matplotlib.pyplot as plt
def factors(N):
lis = []
for i in range(1,N+1):
if N%i == 0:
lis.append(i)
lis = np.array(lis)
return lis
def phi(n):
if n == 0:
return 0
num = 0
for k in range(1, n+1):
if f.gcd(n,k) == 1:
num = num+1
return num
def c(q):
k = []
for i in range(q):
if f.gcd(i,q) == 1:
k.append(i)
k = np.array(k)
c = []
for n in range(q):
p = np.sum(np.cos(2*np.pi*k*n/q))
c.append(p)
return c
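# Illustrative note: phi() is Euler's totient and c(q) returns the Ramanujan sums c_q(n)
# for n = 0..q-1; for example phi(6) == 2 and c(6) evaluates (up to floating-point
# rounding) to [2, 1, -1, -2, -1, 1].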
def rec_q(q):
if (len(c(q))<N):
div = int(N/len(c(q)))
quo = N%len(c(q))
vf = c(q)
vff = div*vf
full = vff + vf[0:quo]
if (len(c(q))==N):
full = c(q)
full = np.array(full)
basis = circulant(full)
G_q = basis[:,0:phi(q)]
return G_q
def projector(q):
r = rec_q(q)
p = np.linalg.pinv(np.matmul(r.T,r))
p = np.matmul(p,r.T)
P_q = np.matmul(r,p)
P_q = P_q/q
return P_q
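# Descriptive note: rec_q(q) stacks circularly shifted copies of the Ramanujan sequence
# c(q) into a basis, and projector(q) forms r (r^T r)^+ r^T / q, the orthogonal projector
# onto the column space of that basis scaled by 1/q; projected_x() then appears to extract
# and normalize the q-periodic component of x.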
def projected_x(q, x):
xq_i = np.matmul(projector(q),x)
norm = np.matmul(xq_i.T, xq_i)
xq_i = xq_i/np.sqrt(norm)
import numpy as np
import PPIPUtils
import time
def writePredictions(fname,predictions,classes):
f = open(fname,'w')
for i in range(0,predictions.shape[0]):
f.write(str(predictions[i])+'\t'+str(classes[i])+'\n')
f.close()
def writeScore(predictions,classes, fOut, predictionsFName=None, thresholds=[0.01,0.03,0.05,0.1,0.25,0.5,1]):
#concatenate results from each fold, and get total scoring metrics
finalPredictions = np.hstack(predictions)
import logging
from datetime import datetime
from pathlib import Path
import matplotlib.pyplot as plt
from src.kiss_data import KissRawData
from src.db import get_manual
import numpy as np
import warnings
from scipy.optimize import curve_fit
from astropy.stats import mad_std
logging.basicConfig(level=logging.INFO)
plt.ion()
scans = get_manual(start=datetime(2019, 5, 1, 19, 14, 24), end=datetime(2019, 5, 1, 19, 52, 53))
title = Path(scans[0]).name + " ".join([Path(scan).name.split("_")[4] for scan in scans[1:]])
signal = []
std = []
elevation = []
for scan in scans:
kd = KissRawData(scan)
kd.read_data(list_data="A_masq I Q F_tone F_tl_Az F_tl_El")
# TODO: Why do we need copy here, seems that numpy strides are making
# funny things here !
F_tone = 1e3 * kd.F_tone.copy().mean(1)[:, np.newaxis] + kd.continuum
signal.append(F_tone.mean(1))
std.append(F_tone.std(1))
elevation.append(kd.F_tl_El.mean())
signal = np.array(signal)
std = np.array(std)
elevation = np.array(elevation)
detectors = kd.list_detector
# rearrange signal to be coherent with the fit ?
signal_new = 2 * signal[:, 0][:, np.newaxis] - signal
air_mass = 1.0 / np.sin(np.radians(elevation))
def T(
airm, const, fact, tau_f
): # signal definition for skydip model: there is -1 before B to take into account the increasing resonance to lower optical load
return const + 270.0 * fact * (1.0 - np.exp(-tau_f * airm))
popts = []
pcovs = []
for _sig, _std in zip(signal_new.T, std.T):
P0 = (4e8, 1e8, 1.0)
popt, pcov = curve_fit(T, air_mass, _sig, sigma=_sig, p0=P0, maxfev=100000)
popts.append(popt)
pcovs.append(pcov)
popts = np.array(popts)
ndet = popts.shape[0]
fig, axes = plt.subplots(np.int(np.sqrt(ndet)
from .vos_imdb import vos_imdb
import sys
import os
from os import path as osp
from PIL import Image
from matplotlib import pyplot as plt
from .config import cfg
import cv2
import numpy as np
import scipy.sparse
from six.moves import cPickle as pickle
import logging
logger = logging.getLogger(__name__)
def addPath(path):
if path not in sys.path:
sys.path.append(path)
davis_api_home = osp.join(cfg.SegTrack_v2.HOME,'python','lib')
dataset_lib = osp.abspath(osp.join(osp.dirname(__file__),'../../lib/'))
vos_util_path = osp.abspath(osp.join(osp.dirname(__file__),'../../lib_vos/vos_utils'))
addPath(davis_api_home)
addPath(dataset_lib)
addPath(vos_util_path)
from davis import cfg as cfg_davis
from davis import io,DAVISLoader,phase
from utils.timer import Timer
import utils.boxes as box_utils
from flow_util.readFlowFile import read as flo_read
#from utils.segms import binary_mask_to_rle
import datasets.dummy_datasets as datasets
if not cfg.COCO_API_HOME in sys.path:
sys.path.append(cfg.COCO_API_HOME)
from pycocotools import mask as COCOmask
from pycocotools.coco import COCO
import pycocotools.mask as mask_util
splits = ['train','val','trainval','test-dev']
def image_saver(filename, array):
return io.imwrite_indexed(filename, array)
class DAVIS_imdb(vos_imdb):
def __init__(self,db_name="DAVIS", split = 'train',cls_mapper = None, load_flow=False, load_inv_db=False):
'''
Args:
cls_mapper (dict): a VOS dataset only provides instance id labels or class labels that
are not consistent with the object detection model. Since our work gives the object
detection model the ability to handle the VOS task, the object label is provided by the
prediction of the object detection model. The prediction is provided by label_mapper.
If set to None, no class is assigned. Otherwise, class_id = cls_mapper[instance_id].
For seq_idx, instance_idx, the class label can be obtained via "label_mapper[seq_idx][instance_idx]".
As some objects may be predicted as background, we choose the class with the highest
probability among non-background classes as its class label.
'''
super().__init__(db_name+str(cfg_davis.YEAR))
self.split = split
if split is not None:
if split not in splits:
raise ValueError('split not recognizable')
if split=='train':
self.phase = phase.TRAIN
self.use_local_id = False
elif split=='val':
self.phase = phase.VAL
self.use_local_id = True
elif split=='trainval':
self.phase = phase.TRAINVAL
self.use_local_id = False
elif split=='test-dev':
self.phase = phase.TESTDEV
self.use_local_id = True
else:
raise ValueError('split not recognizable')
if cfg_davis.PHASE!=self.phase:
print('phase changed from %s to %s'%(cfg_davis.PHASE.value,self.phase.value))
cfg_davis.PHASE = self.phase
print('year:',cfg_davis.YEAR)
print('phase:',cfg_davis.PHASE.value)
self.db = DAVISLoader(year=cfg_davis.YEAR,phase=cfg_davis.PHASE)
self.seq_idx = 0
self.cls_mapper = None
if cls_mapper is not None:
assert(isinstance(cls_mapper, dict))
self.cls_mapper = cls_mapper
# Here we adopt COCO classes.
self.number_of_instance_ids = 0
self.global_instance_id_start_of_seq = np.zeros(self.get_num_sequence(),dtype=np.int32)
self.instance_number_of_seq = np.zeros(self.get_num_sequence(),dtype=np.int32)
self.set_global_instance_id_start()
self.debug_timer = Timer()
self.keypoints = None
self.load_flow = load_flow
# load_inv_db: only affect get_separate_roidb_from_all_sequences
self.load_inv_db = load_inv_db
#self.COCO = datasets.get_coco_dataset()
#category_ids = list(self.COCO.classes.keys())
#categories = list(self.COCO.classes.values())
#self.category_to_id_map = dict(zip(categories, category_ids))
#self.classes = ['__background__']+categories+['__unknown__']
category_ids = list(range(self.number_of_instance_ids))
self.classes = [self.global_id_to_seq_name_plus_id(i) for i in range(self.number_of_instance_ids)]
categories = self.classes
self.category_to_id_map = dict(zip(categories, category_ids))
print(self.category_to_id_map)
self.num_classes = len(self.classes)
self.cfg = cfg_davis
@property
def valid_cached_keys(self):
""" Can load following key-ed values from the cached roidb file
'image'(image path) and 'flipped' values are already filled on _prep_roidb_entry,
so we don't need to overwrite it again.
"""
keys = ['boxes', 'segms', 'gt_classes', 'instance_id', 'global_instance_id', 'seg_areas', 'gt_overlaps','gt_overlaps_id', 'is_crowd', 'box_to_gt_ind_map']
return keys
def get_num_sequence(self):
return len(self.db.sequences)
def set_to_sequence(self,seq_idx):
assert(seq_idx>=0 and seq_idx<self.get_num_sequence())
self.seq_idx = seq_idx
def get_current_seq_name(self):
return self.db.sequences[self.seq_idx].name
def get_current_seq(self):
return self.db.sequences[self.seq_idx]
def get_current_seq_index(self):
return self.seq_idx
def get_current_seq_length(self):
return len(self.get_current_seq().files)
def get_image(self, idx):
seq = self.get_current_seq()
return np.array(Image.open(seq.files[idx]))
def get_image_cv2(self,idx):
seq = self.get_current_seq()
return cv2.imread(seq.files[idx])
def get_flow(self,idx):
if idx==0:
return None
elif idx>0:
flow_file_name = cfg.DAVIS.FLOW_FILENAME_TEMPLATE%(idx-1)
flow_file_path = osp.join(cfg.DAVIS.FLOW_DIR, self.get_current_seq_name(), flow_file_name)
assert(osp.exists(flow_file_path))
return flo_read(flow_file_path)
else:
raise ValueError("idx should be integer >= 0")
def get_inv_flow(self, idx):
if idx==self.get_current_seq_length()-1:
return None
elif idx<self.get_current_seq_length()-1:
flow_file_name = cfg.DAVIS.FLOW_FILENAME_TEMPLATE%(idx)
flow_file_path = osp.join(cfg.DAVIS.FLOW_INV_DIR, self.get_current_seq_name(), flow_file_name)
assert(osp.exists(flow_file_path))
return flo_read(flow_file_path)
else:
raise ValueError("idx should be integer <= self.get_current_seq_length()-1")
def get_gt_with_color(self, idx):
return cfg_davis.palette[self.db.annotations[self.seq_idx][idx]][...,[2,1,0]]
def get_gt(self,idx):
ann = self.db.annotations[self.seq_idx][idx]
vals = np.unique(ann)
out = np.array(ann)
for val in vals:
if val==255:
print('current_seq %d, idx %d has val eq 255.'%(self.seq_idx, idx))
assert(val==vals[-1])
out[ann==val] = len(vals)-1
return out
def get_bboxes(self, idx):
gt = self.get_gt(idx)
vals = np.unique(gt)
boxes = []
for val in vals:
#it is background when val==0
if val !=0:
obj={}
mask = np.array(gt==val,dtype=np.uint8)
#make sure gt==val is converted to value in 0 and 1.
assert(len(set(mask.reshape(-1))-{0,1})==0)
x,y,w,h = cv2.boundingRect(mask)
boxes.append([x,y,w,h])
return boxes
#a ground-truth value of 255 is mapped to the last local index (handled in get_gt).
def local_id_to_global_id(self, idx, seq_idx):
assert(idx>0 and idx<=self.number_of_instance_ids)
return self.global_instance_id_start_of_seq[seq_idx]+idx-1
def global_id_to_local_id(self, idx, seq_idx):
if idx==0:
return 0
start = self.global_instance_id_start_of_seq[seq_idx]
end = self.instance_number_of_seq[seq_idx]+start
if not (idx>=start and idx<end):
# -1 marks wrong global id.
return -1
else:
return idx-start+1
def global_id_to_seq_id_and_local_id(self, idx):
if idx == 0:
return 0,0
seq_idx = np.where((idx>=np.array(self.global_instance_id_start_of_seq))&(idx<np.array(self.global_instance_id_start_of_seq
+np.array(self.instance_number_of_seq))))[0]
#should have only one seq_idx
assert(seq_idx.shape[0]==1)
seq_idx = seq_idx[0]
local_idx = self.global_id_to_local_id(idx, seq_idx)
return seq_idx, local_idx
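# Illustrative note (toy numbers): with two sequences holding 2 and 3 instances,
# global_instance_id_start_of_seq == [1, 3] (global id 0 is background), so
# global_id_to_seq_id_and_local_id(4) returns seq_idx 1 and local id 2.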
def global_id_to_seq_name_plus_id(self, idx):
if idx == 0:
return 'background'
seq_idx, local_idx = self.global_id_to_seq_id_and_local_id(idx)
return self.db.sequences[seq_idx].name+'_%02d'%(local_idx)
def id_mask_to_color(self, id_mask, seq_idx):
assert(id_mask.ndim==2)
return cfg_davis.palette[id_mask][...,[2,1,0]]
def set_global_instance_id_start(self):
#start from 1. 0 is reserved for background.
accumulate = 1
self.global_instance_id_start_of_seq[0] = accumulate
for seq_idx in range(self.get_num_sequence()):
if self.instance_number_of_seq[seq_idx] == 0:
self.set_to_sequence(seq_idx)
#get gt from first frame.
gt = self.get_gt(idx=0)
#len(np.unique(gt))-1 as there is background in vals.
self.instance_number_of_seq[seq_idx] = len(np.unique(gt))-1
if seq_idx<self.get_num_sequence()-1:
accumulate += self.instance_number_of_seq[seq_idx]
self.global_instance_id_start_of_seq[seq_idx+1] = accumulate
print('instance_number_of_seq:',self.instance_number_of_seq)
print('global_instance_id_start_of_seq:',self.global_instance_id_start_of_seq)
self.number_of_instance_ids = accumulate+self.instance_number_of_seq[self.get_num_sequence()-1]
print('Total global instance id number(include background):%d'%(self.number_of_instance_ids))
def set_number_of_instance(self, seq_idx, num_instances):
self.instance_number_of_seq[seq_idx] = num_instances
def visualize_blended_image_label(self,idx,w1=0.5,w2=0.5):
'''
Args:
w1: weight for Image
w2: weight for label
'''
img = self.get_image(idx)
gt = self.get_gt_with_color(idx)
img_cpy = copy(img)
mask = np.array(np.all(gt[:,:,:]==0,axis=2,keepdims=True),dtype=np.uint8)
unmasked_img = np.array(img_cpy*mask,dtype=np.uint8)
mask_img = img-unmasked_img
blend = unmasked_img+np.array(mask_img*w1+gt*w2,dtype=np.uint8)
plt.imshow(blend)
plt.show()
return blend
def get_roidb_at_idx_from_sequence(self,idx):
roidb = {}
seq = self.get_current_seq()
gt = self.get_gt(idx)
roidb['image'] = seq.files[idx]
roidb['height'] = gt.shape[0]
roidb['width'] = gt.shape[1]
roidb['seq_idx'] = self.get_current_seq_index()
roidb['idx'] = idx
if self.load_flow is True:
if idx>0:
flow_file_name = cfg.DAVIS.FLOW_FILENAME_TEMPLATE%(idx-1)
flow_file_path = osp.join(cfg.DAVIS.FLOW_DIR, self.get_current_seq_name(), flow_file_name)
assert(osp.exists(flow_file_path))
roidb['flow'] = flow_file_path
else:
roidb['flow'] = None
return roidb
def get_roidb_at_idx_from_sequence_with_inv_flow(self, idx):
roidb = {}
seq = self.get_current_seq()
gt = self.get_gt(idx)
roidb['image'] = seq.files[idx]
roidb['height'] = gt.shape[0]
roidb['width'] = gt.shape[1]
roidb['seq_idx'] = self.get_current_seq_index()
roidb['idx'] = idx
if self.load_flow is True:
if idx<self.get_current_seq_length()-1:
flow_file_name = cfg.DAVIS.FLOW_FILENAME_TEMPLATE%(idx)
flow_file_path = osp.join(cfg.DAVIS.FLOW_INV_DIR, self.get_current_seq_name(), flow_file_name)
assert(osp.exists(flow_file_path))
roidb['flow'] = flow_file_path
else:
roidb['flow'] = None
return roidb
def prepare_roi_db(self, roidb, db_name, proposal_file = None):
for entry in roidb:
self._prep_roidb_entry(entry)
# Include ground-truth object annotations
if not osp.isdir(cfg.CACHE_DIR):
os.makedirs(cfg.CACHE_DIR)
assert(osp.isdir(cfg.CACHE_DIR))
cache_filepath = os.path.join(cfg.CACHE_DIR, db_name)
if os.path.exists(cache_filepath) and not cfg.DEBUG:
self._add_gt_from_cache(roidb, cache_filepath)
logger.debug(
'_add_gt_from_cache took {:.3f}s'.
format(self.debug_timer.toc(average=False))
)
else:
if self.split in ['train','val','trainval']:
for entry in roidb:
self._add_gt_annotations(entry)
logger.debug(
'_add_gt_annotations took {:.3f}s'.
format(self.debug_timer.toc(average=False))
)
if not cfg.DEBUG:
with open(cache_filepath, 'wb') as fp:
pickle.dump(roidb, fp, pickle.HIGHEST_PROTOCOL)
logger.info('Cache ground truth roidb to %s', cache_filepath)
elif self.split == 'test-dev':
for entry in roidb:
if entry['idx'] == 0:
self._add_gt_annotations(entry)
logger.debug(
'_add_gt_annotations took {:.3f}s'.
format(self.debug_timer.toc(average=False))
)
if not cfg.DEBUG:
with open(cache_filepath, 'wb') as fp:
pickle.dump(roidb, fp, pickle.HIGHEST_PROTOCOL)
logger.info('Cache ground truth roidb to %s', cache_filepath)
if proposal_file is not None:
# Include proposals from a file
self.debug_timer.tic()
#TODO: set proper min_proposal_size and proposal_limit.
self._add_proposals_from_file(roidb, proposal_file = proposal_file, min_proposal_size=2, proposal_limit=-1)
logger.debug(
'_add_proposals_from_file took {:.3f}s'.
format(self.debug_timer.toc(average=False))
)
_add_class_assignments(roidb)
_check_box_valid(roidb)
def _prep_roidb_entry(self, entry):
"""Adds empty metadata fields to an roidb entry."""
# Reference back to the parent dataset
entry['dataset'] = self
im_path = entry['image']
assert os.path.exists(im_path), 'Image \'{}\' not found'.format(im_path)
entry['flipped'] = False
entry['has_visible_keypoints'] = False
# Empty placeholders
entry['boxes'] = np.empty((0, 4), dtype=np.float32)
entry['segms'] = []
entry['gt_classes'] = np.empty((0), dtype=np.int32)
entry['global_instance_id'] = np.empty((0), dtype=np.int32)
entry['instance_id'] = np.empty((0), dtype=np.int32)
entry['seg_areas'] = np.empty((0), dtype=np.float32)
entry['gt_overlaps'] = scipy.sparse.csr_matrix(
np.empty((0, self.num_classes), dtype=np.float32)
)
entry['gt_overlaps_id'] = scipy.sparse.csr_matrix(
np.empty((0, self.number_of_instance_ids), dtype=np.float32)
)
entry['is_crowd'] = np.empty((0), dtype=np.bool)
# 'box_to_gt_ind_map': Shape is (#rois). Maps from each roi to the index
# in the list of rois that satisfy np.where(entry['gt_classes'] > 0)
entry['box_to_gt_ind_map'] = np.empty((0), dtype=np.int32)
def _expand_box(self, x, y, w, h, rate = 0.1):
expand_length = np.sqrt(w*h)*rate
x = x-expand_length/2.
y = y-expand_length/2.
h = h+expand_length
w = w+expand_length
return x,y,w,h
def add_gt_annotations_to_entry(self, entry, gt, bboxs=None):
seq_idx = entry['seq_idx']
idx = entry['idx']
vals = np.unique(gt)
objs = []
for val in vals:
#it is background when val==0
if val !=0:
obj={}
mask = np.array(gt==val,dtype=np.uint8)
#make sure gt==val is converted to value in 0 and 1.
assert(len(set(mask.reshape(-1))-{0,1})==0)
if bboxs is None:
x,y,w,h = cv2.boundingRect(mask)
x,y,w,h = self._expand_box(x, y, w, h, rate = 0.05)
else:
print(bboxs[val])
x,y,w,h = bboxs[val][-1]
#obj['segmentation'] = binary_mask_to_rle(mask)
obj['segmentation'] = mask_util.encode(np.array(mask, order='F', dtype=np.uint8))
obj['area'] = np.sum(mask)
obj['iscrowd'] = 0
obj['bbox'] = x,y,w,h
if self.cls_mapper is not None:
#set category id by cls_mapper.
obj['category_id'] = self.cls_mapper[val]
else:
if not self.use_local_id:
obj['category_id'] = self.global_instance_id_start_of_seq[seq_idx]+val-1
else:
obj['category_id'] = val
obj['instance_id'] = val
assert(self.global_instance_id_start_of_seq[seq_idx]!=0)
# val-1 to remove background.
obj['global_instance_id'] = self.global_instance_id_start_of_seq[seq_idx]+val-1
objs.append(obj)
# Sanitize bboxes -- some are invalid
valid_objs = []
valid_segms = []
width = entry['width']
height = entry['height']
for obj in objs:
# crowd regions are RLE encoded and stored as dicts
assert(isinstance(obj['segmentation'], dict))
if isinstance(obj['segmentation'], list):
# Valid polygons have >= 3 points, so require >= 6 coordinates
obj['segmentation'] = [
p for p in obj['segmentation'] if len(p) >= 6
]
# Convert form (x1, y1, w, h) to (x1, y1, x2, y2)
x1, y1, x2, y2 = box_utils.xywh_to_xyxy(obj['bbox'])
x1, y1, x2, y2 = box_utils.clip_xyxy_to_image(
x1, y1, x2, y2, height, width
)
# Require non-zero seg area and more than 1x1 box size
if obj['area'] > 0 and x2 > x1 and y2 > y1:
obj['clean_bbox'] = [x1, y1, x2, y2]
valid_objs.append(obj)
valid_segms.append(obj['segmentation'])
num_valid_objs = len(valid_objs)
boxes = np.zeros((num_valid_objs, 4), dtype=entry['boxes'].dtype)
gt_classes = np.zeros((num_valid_objs), dtype=entry['gt_classes'].dtype)
instance_id = np.zeros((num_valid_objs), dtype=entry['instance_id'].dtype)
global_instance_id = np.zeros((num_valid_objs), dtype=entry['global_instance_id'].dtype)
gt_overlaps = np.zeros(
(num_valid_objs, self.num_classes),
dtype=entry['gt_overlaps'].dtype
)
gt_overlaps_id = np.zeros(
(num_valid_objs, self.number_of_instance_ids),
dtype=entry['gt_overlaps_id'].dtype
)
seg_areas = np.zeros((num_valid_objs), dtype=entry['seg_areas'].dtype)
is_crowd = np.zeros((num_valid_objs), dtype=entry['is_crowd'].dtype)
box_to_gt_ind_map = np.zeros(
(num_valid_objs), dtype=entry['box_to_gt_ind_map'].dtype
)
im_has_visible_keypoints = False
for ix, obj in enumerate(valid_objs):
if obj['category_id'] is not None:
#cls = self.json_category_id_to_contiguous_id[obj['category_id']]
cls = obj['category_id']
else:
#if no category_id specified, use background instead. index is 'self.num_classes-1'
cls = self.num_classes-1
boxes[ix, :] = obj['clean_bbox']
gt_classes[ix] = cls
instance_id[ix] = obj['instance_id']
global_instance_id[ix] = obj['global_instance_id']
seg_areas[ix] = obj['area']
is_crowd[ix] = obj['iscrowd']
box_to_gt_ind_map[ix] = ix
if obj['iscrowd']:
# Set overlap to -1 for all classes for crowd objects
# so they will be excluded during training
gt_overlaps[ix, :] = -1.0
gt_overlaps_id[ix,:] = -1.0
else:
gt_overlaps[ix, cls] = 1.0
gt_overlaps_id[ix, global_instance_id[ix]] = 1.0
entry['boxes'] = np.append(entry['boxes'], boxes, axis=0)
entry['segms'].extend(valid_segms)
# To match the original implementation:
# entry['boxes'] = np.append(
# entry['boxes'], boxes.astype(np.int).astype(np.float), axis=0)
entry['gt_classes'] = np.append(entry['gt_classes'], gt_classes)
entry['instance_id'] = np.append(entry['instance_id'], instance_id)
entry['global_instance_id'] = np.append(entry['global_instance_id'], global_instance_id)
entry['seg_areas'] = np.append(entry['seg_areas'], seg_areas)
entry['gt_overlaps'] = np.append(
entry['gt_overlaps'].toarray(), gt_overlaps, axis=0
)
entry['gt_overlaps'] = scipy.sparse.csr_matrix(entry['gt_overlaps'])
entry['gt_overlaps_id'] = np.append(
entry['gt_overlaps_id'].toarray(), gt_overlaps_id, axis=0
)
entry['gt_overlaps_id'] = scipy.sparse.csr_matrix(entry['gt_overlaps_id'])
entry['is_crowd'] = np.append(entry['is_crowd'], is_crowd)
entry['box_to_gt_ind_map'] = np.append(
entry['box_to_gt_ind_map'], box_to_gt_ind_map
)
assert(entry['gt_overlaps_id'].shape[0]==entry['gt_overlaps'].shape[0])
def _add_gt_annotations(self, entry):
"""Add ground truth annotation metadata to an roidb entry.
"""
seq_idx = entry['seq_idx']
idx = entry['idx']
self.set_to_sequence(seq_idx)
#get gt image.
gt = self.get_gt(idx)
vals = np.unique(gt)
objs = []
for val in vals:
#it is background when val==0
if val !=0:
obj={}
mask = np.array(gt==val,dtype=np.uint8)
#make sure gt==val is converted to values in {0, 1}.
assert(len(set(mask.reshape(-1))-{0,1})==0)
x,y,w,h = cv2.boundingRect(mask)
x,y,w,h = self._expand_box(x, y, w, h, rate = 0.05)
#obj['segmentation'] = binary_mask_to_rle(mask)
obj['segmentation'] = mask_util.encode(np.array(mask, order='F', dtype=np.uint8))
obj['area'] = np.sum(mask)
obj['iscrowd'] = 0
obj['bbox'] = x,y,w,h
if self.cls_mapper is not None:
#set category id by cls_mapper.
obj['category_id'] = self.cls_mapper[val]
else:
if not self.use_local_id:
obj['category_id'] = self.global_instance_id_start_of_seq[seq_idx]+val-1
else:
obj['category_id'] = val
obj['instance_id'] = val
assert(self.global_instance_id_start_of_seq[seq_idx]!=0)
# val-1 to remove background.
obj['global_instance_id'] = self.global_instance_id_start_of_seq[seq_idx]+val-1
objs.append(obj)
# Sanitize bboxes -- some are invalid
valid_objs = []
valid_segms = []
width = entry['width']
height = entry['height']
for obj in objs:
# crowd regions are RLE encoded and stored as dicts
assert(isinstance(obj['segmentation'], dict))
if isinstance(obj['segmentation'], list):
# Valid polygons have >= 3 points, so require >= 6 coordinates
obj['segmentation'] = [
p for p in obj['segmentation'] if len(p) >= 6
]
# Convert from (x1, y1, w, h) to (x1, y1, x2, y2)
x1, y1, x2, y2 = box_utils.xywh_to_xyxy(obj['bbox'])
x1, y1, x2, y2 = box_utils.clip_xyxy_to_image(
x1, y1, x2, y2, height, width
)
# Require non-zero seg area and more than 1x1 box size
if obj['area'] > 0 and x2 > x1 and y2 > y1:
obj['clean_bbox'] = [x1, y1, x2, y2]
valid_objs.append(obj)
valid_segms.append(obj['segmentation'])
num_valid_objs = len(valid_objs)
boxes = np.zeros((num_valid_objs, 4), dtype=entry['boxes'].dtype)
gt_classes = np.zeros((num_valid_objs), dtype=entry['gt_classes'].dtype)
instance_id = np.zeros((num_valid_objs), dtype=entry['instance_id'].dtype)
global_instance_id = np.zeros((num_valid_objs), dtype=entry['global_instance_id'].dtype)
gt_overlaps = np.zeros(
(num_valid_objs, self.num_classes),
dtype=entry['gt_overlaps'].dtype
)
gt_overlaps_id = np.zeros(
(num_valid_objs, self.number_of_instance_ids),
dtype=entry['gt_overlaps_id'].dtype
)
seg_areas = np.zeros((num_valid_objs), dtype=entry['seg_areas'].dtype)
is_crowd = np.zeros((num_valid_objs), dtype=entry['is_crowd'].dtype)
box_to_gt_ind_map = np.zeros(
(num_valid_objs), dtype=entry['box_to_gt_ind_map'].dtype
)
im_has_visible_keypoints = False
for ix, obj in enumerate(valid_objs):
if obj['category_id'] is not None:
#cls = self.json_category_id_to_contiguous_id[obj['category_id']]
cls = obj['category_id']
else:
#if no category_id specified, use background instead. index is 'self.num_classes-1'
cls = self.num_classes-1
boxes[ix, :] = obj['clean_bbox']
gt_classes[ix] = cls
instance_id[ix] = obj['instance_id']
global_instance_id[ix] = obj['global_instance_id']
seg_areas[ix] = obj['area']
is_crowd[ix] = obj['iscrowd']
box_to_gt_ind_map[ix] = ix
if obj['iscrowd']:
# Set overlap to -1 for all classes for crowd objects
# so they will be excluded during training
gt_overlaps[ix, :] = -1.0
gt_overlaps_id[ix,:] = -1.0
else:
gt_overlaps[ix, cls] = 1.0
gt_overlaps_id[ix, global_instance_id[ix]] = 1.0
entry['boxes'] = np.append(entry['boxes'], boxes, axis=0)
entry['segms'].extend(valid_segms)
# To match the original implementation:
# entry['boxes'] = np.append(
# entry['boxes'], boxes.astype(np.int).astype(np.float), axis=0)
entry['gt_classes'] = np.append(entry['gt_classes'], gt_classes)
entry['instance_id'] = np.append(entry['instance_id'], instance_id)
entry['global_instance_id'] = np.append(entry['global_instance_id'], global_instance_id)
entry['seg_areas'] = np.append(entry['seg_areas'], seg_areas)
entry['gt_overlaps'] = np.append(
entry['gt_overlaps'].toarray(), gt_overlaps, axis=0
)
entry['gt_overlaps'] = scipy.sparse.csr_matrix(entry['gt_overlaps'])
entry['gt_overlaps_id'] = np.append(
entry['gt_overlaps_id'].toarray(), gt_overlaps_id, axis=0
)
entry['gt_overlaps_id'] = scipy.sparse.csr_matrix(entry['gt_overlaps_id'])
entry['is_crowd'] = np.append(entry['is_crowd'], is_crowd)
entry['box_to_gt_ind_map'] = np.append(
entry['box_to_gt_ind_map'], box_to_gt_ind_map
)
assert(entry['gt_overlaps_id'].shape[0]==entry['gt_overlaps'].shape[0])
def _add_gt_from_cache(self, roidb, cache_filepath):
"""Add ground truth annotation metadata from cached file."""
logger.info('Loading cached gt_roidb from %s', cache_filepath)
with open(cache_filepath, 'rb') as fp:
cached_roidb = pickle.load(fp)
assert len(roidb) == len(cached_roidb)
for entry, cached_entry in zip(roidb, cached_roidb):
if self.split == 'test-dev' and entry['idx']!=0:
continue
values = [cached_entry[key] for key in self.valid_cached_keys]
boxes, segms, gt_classes, instance_id, global_instance_id, seg_areas, gt_overlaps, gt_overlaps_id, is_crowd, \
box_to_gt_ind_map = values[:10]
entry['boxes'] = np.append(entry['boxes'], boxes, axis=0)
entry['segms'].extend(segms)
# To match the original implementation:
# entry['boxes'] = np.append(
# entry['boxes'], boxes.astype(np.int).astype(np.float), axis=0)
entry['gt_classes'] = np.append(entry['gt_classes'], gt_classes)
entry['instance_id'] = np.append(entry['instance_id'], instance_id)
entry['global_instance_id'] = np.append(entry['global_instance_id'], global_instance_id)
entry['seg_areas'] = np.append(entry['seg_areas'], seg_areas)
entry['gt_overlaps'] = scipy.sparse.csr_matrix(gt_overlaps)
entry['gt_overlaps_id'] = scipy.sparse.csr_matrix(gt_overlaps_id)
entry['is_crowd'] = np.append(entry['is_crowd'], is_crowd)
entry['box_to_gt_ind_map'] = np.append(
entry['box_to_gt_ind_map'], box_to_gt_ind_map
)
def _add_proposals_from_file(self, roidb, proposal_file, min_proposal_size, top_k):
"""Add proposals from a proposals file to an roidb.
"""
logger.info('Loading proposals from: {}'.format(proposal_file))
with open(proposal_file, 'rb') as f:  # pickle files must be read in binary mode
proposals = pickle.load(f)
#proposals[seq_idx][idx]
box_list = []
for i, entry in enumerate(roidb):
if i % 500 == 0:
logger.info(' {:d}/{:d}'.format(i + 1, len(roidb)))
seq_idx = entry['seq_idx']
idx = entry['idx']
boxes = proposals['boxes'][seq_idx][idx]
# Remove duplicate boxes and very small boxes and then take top k
boxes = box_utils.clip_boxes_to_image(
boxes, entry['height'], entry['width']
)
keep = box_utils.unique_boxes(boxes)
boxes = boxes[keep, :]
keep = box_utils.filter_small_boxes(boxes, min_proposal_size)
boxes = boxes[keep, :]
if top_k > 0:
boxes = boxes[:top_k, :]
box_list.append(boxes)
_merge_proposal_boxes_into_roidb(roidb, box_list)
def get_roidb_from_seq_idx_sequence(self, seq_idx, proposal_file = None):
roidb = []
self.debug_timer.tic()
self.set_to_sequence(seq_idx)
print('preparing davis roidb of %dth(%s) sequence...'%(seq_idx,self.get_current_seq_name()))
seq_len = self.get_current_seq_length()
for idx in range(seq_len):
roidb.append(self.get_roidb_at_idx_from_sequence(idx))
db_name = self.name+'_'+self.split+'_%d_sequence_roidb.pkl'%(seq_idx)
self.prepare_roi_db(roidb, db_name = db_name,proposal_file = proposal_file)
print('Done.')
return roidb
def get_roidb_from_seq_idx_sequence_inv(self, seq_idx, proposal_file = None):
roidb = []
self.debug_timer.tic()
self.set_to_sequence(seq_idx)
print('preparing davis inv roidb of %dth(%s) sequence...'%(seq_idx,self.get_current_seq_name()))
seq_len = self.get_current_seq_length()
for idx in range(seq_len):
roidb.append(self.get_roidb_at_idx_from_sequence_with_inv_flow(seq_len-1-idx))
db_name = self.name+'_'+self.split+'_%d_inv_sequence_roidb.pkl'%(seq_idx)
self.prepare_roi_db(roidb, db_name = db_name,proposal_file = proposal_file)
print('Done.')
return roidb
def get_roidb_from_all_sequences(self, proposal_file = None):
roidb = []
self.debug_timer.tic()
for seq_idx in range(self.get_num_sequence()):
roidb.extend(self.get_roidb_from_seq_idx_sequence(seq_idx, proposal_file = proposal_file))
return roidb
def get_separate_roidb_from_all_sequences(self, proposal_file = None):
roidbs = []
self.debug_timer.tic()
for seq_idx in range(self.get_num_sequence()):
roidbs.append(self.get_roidb_from_seq_idx_sequence(seq_idx, proposal_file = None))
#TODO remove.
#if seq_idx==3:
#return roidbs
if self.load_inv_db is True:
for seq_idx in range(self.get_num_sequence()):
roidbs.append(self.get_roidb_from_seq_idx_sequence_inv(seq_idx, proposal_file = None))
return roidbs
def _create_db_from_label(self):
pass
def _check_box_valid(roidb):
for entry in roidb:
for box in entry['boxes']:
assert(box[2]>box[0])
assert(box[3]>box[1])
def add_proposals(roidb, rois, scales, crowd_thresh):
"""Add proposal boxes (rois) to an roidb that has ground-truth annotations
but no proposals. If the proposals are not at the original image scale,
specify the scale factor that separates them in scales.
"""
box_list = []
for i in range(len(roidb)):
inv_im_scale = 1. / scales[i]
idx = np.where(rois[:, 0] == i)[0]
box_list.append(rois[idx, 1:] * inv_im_scale)
_merge_proposal_boxes_into_roidb(roidb, box_list)
_add_class_assignments(roidb)
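# A minimal sketch (illustrative only; the roi values and scales are made up) of
# how add_proposals splits a flat (batch_idx, x1, y1, x2, y2) roi array per image
# and undoes the preprocessing scale before the boxes are merged into the roidb.
def _example_split_and_rescale_rois():
    rois = np.array([
        [0, 20., 20., 40., 40.],  # proposal for image 0, at image scale
        [0, 10., 10., 30., 30.],  # proposal for image 0, at image scale
        [1, 50., 60., 90., 80.],  # proposal for image 1, at image scale
    ])
    scales = [2.0, 1.5]  # per-image scale factors applied during preprocessing
    box_list = []
    for i in range(len(scales)):
        inv_im_scale = 1. / scales[i]
        idx = np.where(rois[:, 0] == i)[0]
        box_list.append(rois[idx, 1:] * inv_im_scale)
    return box_list  # boxes are back in original-image coordinates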
def _merge_proposal_boxes_into_roidb(roidb, box_list):
"""Add proposal boxes to each roidb entry."""
assert len(box_list) == len(roidb)
for i, entry in enumerate(roidb):
boxes = box_list[i]
num_boxes = boxes.shape[0]
gt_overlaps = np.zeros(
(num_boxes, entry['gt_overlaps'].shape[1]),
dtype=entry['gt_overlaps'].dtype
)
'''
gt_overlaps_id = np.zeros(
(num_boxes, entry['gt_overlaps_id'].shape[1]),
dtype=entry['gt_overlaps_id'].dtype
)'''
box_to_gt_ind_map = -np.ones(
(num_boxes), dtype=entry['box_to_gt_ind_map'].dtype
)
# Note: unlike in other places, here we intentionally include all gt
# rois, even ones marked as crowd. Boxes that overlap with crowds will
# be filtered out later (see: _filter_crowd_proposals).
gt_inds = np.where(entry['gt_classes'] > 0)[0]
if len(gt_inds) > 0:
gt_boxes = entry['boxes'][gt_inds, :]
gt_classes = entry['gt_classes'][gt_inds]
'''
global_instance_id = entry['global_instance_id'][gt_inds]
'''
proposal_to_gt_overlaps = box_utils.bbox_overlaps(
boxes.astype(dtype=np.float32, copy=False),
gt_boxes.astype(dtype=np.float32, copy=False)
)
# Gt box that overlaps each input box the most
# (ties are broken arbitrarily by class order)
argmaxes = proposal_to_gt_overlaps.argmax(axis=1)
# Amount of that overlap
maxes = proposal_to_gt_overlaps.max(axis=1)
# Those boxes with non-zero overlap with gt boxes
I = np.where(maxes > 0)[0]
# Record max overlaps with the class of the appropriate gt box
gt_overlaps[I, gt_classes[argmaxes[I]]] = maxes[I]
'''
gt_overlaps_id[I,global_instance_id[argmaxes[I]]] = maxes[I]
'''
#print('_merge_proposal_boxes_into_roidb',gt_overlaps.shape)
#print('_merge_proposal_boxes_into_roidb',gt_overlaps_id.shape)
box_to_gt_ind_map[I] = gt_inds[argmaxes[I]]
entry['boxes'] = np.append(
entry['boxes'],
boxes.astype(entry['boxes'].dtype, copy=False),
axis=0
)
entry['gt_classes'] = np.append(
entry['gt_classes'],
np.zeros((num_boxes), dtype=entry['gt_classes'].dtype)
)
entry['seg_areas'] = np.append(
entry['seg_areas'],
np.zeros((num_boxes), dtype=entry['seg_areas'].dtype)
)
entry['gt_overlaps'] = np.append(
entry['gt_overlaps'].toarray(), gt_overlaps, axis=0
)
entry['gt_overlaps'] = scipy.sparse.csr_matrix(entry['gt_overlaps'])
'''
entry['gt_overlaps_id'] = np.append(
entry['gt_overlaps_id'].toarray(), gt_overlaps_id, axis=0
)
entry['gt_overlaps_id'] = scipy.sparse.csr_matrix(entry['gt_overlaps_id'])
'''
entry['is_crowd'] = np.append(
entry['is_crowd'],
np.zeros((num_boxes), dtype=entry['is_crowd'].dtype)
)
entry['box_to_gt_ind_map'] = np.append(
entry['box_to_gt_ind_map'],
box_to_gt_ind_map.astype(
entry['box_to_gt_ind_map'].dtype, copy=False
)
)
def _add_class_assignments(roidb):
"""Compute object category assignment for each box associated with each
roidb entry.
"""
for entry in roidb:
gt_overlaps = entry['gt_overlaps'].toarray()
# max overlap with gt over classes (columns)
max_overlaps = gt_overlaps.max(axis=1)
# gt class that had the max overlap
max_classes = gt_overlaps.argmax(axis=1)
entry['max_classes'] = max_classes
entry['max_overlaps'] = max_overlaps
'''
gt_overlaps_id = entry['gt_overlaps_id'].toarray()
'''
# max overlap with gt over classes (columns)
'''
max_overlaps_id = gt_overlaps_id.max(axis=1)
# gt class that had the max overlap
max_global_id = gt_overlaps_id.argmax(axis=1)
entry['max_global_id'] = max_global_id
entry['max_overlaps_id'] = max_overlaps_id
'''
# sanity checks
# if max overlap is 0, the class must be background (class 0)
zero_inds = np.where(max_overlaps == 0)[0]
assert all(max_classes[zero_inds] == 0)
# if max overlap > 0, the class must be a fg class (not class 0)
nonzero_inds =
|
np.where(max_overlaps > 0)
|
numpy.where
|
"""Configuration definitions. A Configuration definition is read from a number of different formats.
"""
__all__ = ['create_configuration_from_file', 'create_configuration_from_MIDfile', 'create_configuration_from_SKAfile',
'create_LOFAR_configuration', 'create_named_configuration', 'limit_rmax']
import numpy
from astropy import units as u
from astropy.coordinates import EarthLocation
from rascil.processing_components.util.coordinate_support import xyz_at_latitude
from rascil.data_models.memory_data_models import Configuration
from rascil.data_models.parameters import rascil_path, rascil_data_path, get_parameter
from rascil.processing_components.util.installation_checks import check_data_directory
import logging
log = logging.getLogger('logger')
def create_configuration_from_file(antfile: str, location: EarthLocation = None,
mount: str = 'azel',
names: str = "%d",
diameter=35.0,
rmax=None, name='') -> Configuration:
""" Define configuration from a text file
:param antfile: Antenna file name
:param location: Earthlocation of array
:param mount: mount type: 'azel', 'xy', 'equatorial'
:param names: Antenna names e.g. "VLA%d"
:param diameter: Effective diameter of station or antenna
:param rmax: Maximum distance from array centre (m)
:param name: Name of array
:return: Configuration
"""
check_data_directory()
antxyz = numpy.genfromtxt(antfile, delimiter=",")
assert antxyz.shape[1] == 3, ("Antenna array has wrong shape %s" % antxyz.shape)
latitude = location.geodetic[1].to(u.rad).value
antxyz = xyz_at_latitude(antxyz, latitude)
antxyz += [location.geocentric[0].to(u.m).value,
location.geocentric[1].to(u.m).value,
location.geocentric[2].to(u.m).value]
nants = antxyz.shape[0]
diameters = diameter * numpy.ones(nants)
anames = [names % ant for ant in range(nants)]
mounts = numpy.repeat(mount, nants)
antxyz, diameters, anames, mounts = limit_rmax(antxyz, diameters, anames, mounts, rmax)
fc = Configuration(location=location, names=anames, mount=mounts, xyz=antxyz,
diameter=diameters, name=name)
return fc
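# A hedged usage sketch of create_configuration_from_file: the layout file name
# and the array location below are placeholders, not data shipped with this module.
def _example_create_configuration_from_file():
    example_location = EarthLocation(lon=116.76 * u.deg, lat=-26.82 * u.deg, height=300.0)
    return create_configuration_from_file(antfile='my_array_layout.csv',
                                           location=example_location,
                                           names='ANT%d', diameter=12.0,
                                           rmax=10e3, name='MY_ARRAY')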
def create_configuration_from_SKAfile(antfile: str,
mount: str = 'azel',
names: str = "%d",
rmax=None, name='', location=None) -> Configuration:
""" Define configuration from a SKA format file
:param antfile: Antenna file name
:param location: Earthlocation of array
:param mount: mount type: 'azel', 'xy', 'equatorial'
:param names: Antenna names e.g. "VLA%d"
:param rmax: Maximum distance from array centre (m)
:param name: Name of array
:return: Configuration
"""
check_data_directory()
antdiamlonglat = numpy.genfromtxt(antfile, usecols=[0, 1, 2], delimiter="\t")
assert antdiamlonglat.shape[1] == 3, ("Antenna array has wrong shape %s" % antdiamlonglat.shape)
antxyz = numpy.zeros([antdiamlonglat.shape[0] - 1, 3])
diameters = numpy.zeros([antdiamlonglat.shape[0] - 1])
for ant in range(antdiamlonglat.shape[0] - 1):
loc = EarthLocation(lon=antdiamlonglat[ant, 1], lat=antdiamlonglat[ant, 2], height=0.0).geocentric
antxyz[ant] = [loc[0].to(u.m).value, loc[1].to(u.m).value, loc[2].to(u.m).value]
diameters[ant] = antdiamlonglat[ant, 0]
nants = antxyz.shape[0]
anames = [names % ant for ant in range(nants)]
mounts = numpy.repeat(mount, nants)
antxyz, diameters, anames, mounts = limit_rmax(antxyz, diameters, anames, mounts, rmax)
fc = Configuration(location=location, names=anames, mount=mounts, xyz=antxyz,
diameter=diameters, name=name)
return fc
def create_configuration_from_MIDfile(antfile: str, location=None,
mount: str = 'azel',
rmax=None, name='') -> Configuration:
""" Define configuration from a SKA MID format file
:param antfile: Antenna file name
:param mount: mount type: 'azel', 'xy'
:param rmax: Maximum distance from array centre (m)
:param name: Name of array
:return: Configuration
"""
check_data_directory()
# X Y Z Diam Station
# 9.36976 35.48262 1052.99987 13.50 M001
antxyz = numpy.genfromtxt(antfile, skip_header=5, usecols=[0, 1, 2], delimiter=" ")
antxyz = xyz_at_latitude(antxyz, location.lat.rad)
antxyz += [location.geocentric[0].to(u.m).value,
location.geocentric[1].to(u.m).value,
location.geocentric[2].to(u.m).value]
nants = antxyz.shape[0]
assert antxyz.shape[1] == 3, "Antenna array has wrong shape %s" % antxyz.shape
anames = numpy.genfromtxt(antfile, dtype='str', skip_header=5, usecols=[4], delimiter=" ")
mounts = numpy.repeat(mount, nants)
diameters = numpy.genfromtxt(antfile, skip_header=5, usecols=[3], delimiter=" ")  # diameters must be numeric, not strings
antxyz, diameters, anames, mounts = limit_rmax(antxyz, diameters, anames, mounts, rmax)
fc = Configuration(location=location, names=anames, mount=mounts, xyz=antxyz,
diameter=diameters, name=name)
return fc
def limit_rmax(antxyz, diameters, names, mounts, rmax):
""" Select antennas with radius from centre < rmax
:param antxyz: Geocentric coordinates
:param diameters: diameters in metres
:param names: Names
:param mounts: Mount types
:param rmax: Maximum radius (m)
:return:
"""
if rmax is not None:
lantxyz = antxyz - numpy.average(antxyz, axis=0)
r = numpy.sqrt(lantxyz[:, 0] ** 2 + lantxyz[:, 1] ** 2 + lantxyz[:, 2] ** 2)
antxyz = antxyz[r < rmax]
log.debug('create_configuration_from_file: Maximum radius %.1f m includes %d antennas/stations' %
(rmax, antxyz.shape[0]))
diameters = diameters[r < rmax]
names = numpy.array(names)[r < rmax]
mounts = numpy.array(mounts)[r<rmax]
else:
log.debug('create_configuration_from_file: %d antennas/stations' % (antxyz.shape[0]))
return antxyz, diameters, names, mounts
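# A tiny numeric sketch of limit_rmax (made-up coordinates): the centre is the
# mean position, so only the middle station (r = 0 m) survives rmax = 50 m.
def _example_limit_rmax():
    antxyz = numpy.array([[0.0, 0.0, 0.0], [100.0, 0.0, 0.0], [200.0, 0.0, 0.0]])
    diameters = numpy.array([13.5, 13.5, 13.5])
    names = ['M000', 'M001', 'M002']
    mounts = ['azel', 'azel', 'azel']
    return limit_rmax(antxyz, diameters, names, mounts, rmax=50.0)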
def create_LOFAR_configuration(antfile: str, location, rmax=1e6) -> Configuration:
""" Define configuration from the LOFAR configuration file
:param antfile:
:param location: EarthLocation
:param rmax: Maximum distance from array centre (m)
:return: Configuration
"""
check_data_directory()
antxyz = numpy.genfromtxt(antfile, skip_header=2, usecols=[1, 2, 3], delimiter=",")
nants = antxyz.shape[0]
assert antxyz.shape[1] == 3, "Antenna array has wrong shape %s" % antxyz.shape
anames = numpy.genfromtxt(antfile, dtype='str', skip_header=2, usecols=[0], delimiter=",")
mounts =
|
numpy.repeat('XY', nants)
|
numpy.repeat
|
#!/usr/bin/python
# encoding: utf-8
"""
@author: Ian
@file: data_preprocessing.py
@time: 2019-05-05 16:55
"""
import numpy as np
from apps.nlp.text_classification.cnn_char_test1.config import config
import csv
class Dataset(object):
def __init__(self, data_source):
self.data_source = data_source
self.index_in_epoch = 0
self.alphabet = config.alphabet
self.alphabet_size = config.alphabet_size
self.num_classes = config.nums_classes
self.l0 = config.l0
self.epochs_completed = 0
self.batch_size = config.batch_size
self.example_nums = config.example_nums
self.doc_image = []
self.label_image = []
def next_batch(self):
# Get the next batch from this Dataset object
start = self.index_in_epoch
self.index_in_epoch += self.batch_size
if self.index_in_epoch > self.example_nums:
# Finished epoch
self.epochs_completed += 1
# Shuffle the data
perm = np.arange(self.example_nums)
|
np.random.shuffle(perm)
|
numpy.random.shuffle
|
import numpy as np
import nibabel as nib
import pickle
from time import time
from src.utils.definitions import *
from src.evaluation_metrics.segmentation_metrics import dice_score, haussdorff_distance
DATA_DIR = [
Controls_LEUVEN_TESTINGSET,
SB_LEUVEN_TESTINGSET,
CORRECTED_ZURICH_DATA_DIR,
]
SAVE_FOLDER = '/data/saved_res_fetal'
# GENERAL EVALUATION OPTIONS
DO_EVAL = True
METRIC_NAMES = ['dice', 'hausdorff']
MAX_HD = (144. / 2.) * 0.8  # 80% of the distance from the centre to the border: 0.8 * 72 = 57.6 mm
METHOD_NAMES = ['cnn']
# MODELS with 10 different seeds and train/valid splits
FULL_DICE_ATLAS_MODEL_LIST = [ # baseline 1
'%s/logs_DGX/baseline1_fold0/model_iter2900.pt7' % REPO_PATH,
'%s/logs_DGX/baseline1_fold1/model_iter2500.pt7' % REPO_PATH,
'%s/logs_DGX/baseline1_fold2/model_iter1400.pt7' % REPO_PATH,
'%s/logs_DGX/baseline1_fold3/model_iter2800.pt7' % REPO_PATH,
'%s/logs_DGX/baseline1_fold4/model_iter2000.pt7' % REPO_PATH,
'%s/logs_DGX/baseline1_fold5/model_iter1900.pt7' % REPO_PATH,
'%s/logs_DGX/baseline1_fold6/model_iter2000.pt7' % REPO_PATH,
'%s/logs_DGX/baseline1_fold7/model_iter2500.pt7' % REPO_PATH,
'%s/logs_DGX/baseline1_fold8/model_iter2600.pt7' % REPO_PATH,
'%s/logs_DGX/baseline1_fold9/model_iter2500.pt7' % REPO_PATH,
]
PARTIAL_MARG_DICE_MODEL_LIST = [ # baseline 3
'%s/logs_DGX/baseline3_fold0/model_iter14200.pt7' % REPO_PATH,
'%s/logs_DGX/baseline3_fold1/model_iter12600.pt7' % REPO_PATH,
'%s/logs_DGX/baseline3_fold2/model_iter11800.pt7' % REPO_PATH,
'%s/logs_DGX/baseline3_fold3/model_iter13500.pt7' % REPO_PATH,
'%s/logs_DGX/baseline3_fold4/model_iter8100.pt7' % REPO_PATH,
'%s/logs_DGX/baseline3_fold5/model_iter17900.pt7' % REPO_PATH,
'%s/logs_DGX/baseline3_fold6/model_iter13100.pt7' % REPO_PATH,
'%s/logs_DGX/baseline3_fold7/model_iter10800.pt7' % REPO_PATH,
'%s/logs_DGX/baseline3_fold8/model_iter10800.pt7' % REPO_PATH,
'%s/logs_DGX/baseline3_fold9/model_iter13500.pt7' % REPO_PATH,
]
PARTIAL_LEAF_DICE_MODEL_LIST = [ # ours
'%s/logs_DGX/leafdice_fold0/model_iter3400.pt7' % REPO_PATH,
'%s/logs_DGX/leafdice_fold1/model_iter2200.pt7' % REPO_PATH,
'%s/logs_DGX/leafdice_fold2/model_iter3900.pt7' % REPO_PATH,
'%s/logs_DGX/leafdice_fold3/model_iter2900.pt7' % REPO_PATH,
'%s/logs_DGX/leafdice_fold4/model_iter1300.pt7' % REPO_PATH,
'%s/logs_DGX/leafdice_fold5/model_iter3100.pt7' % REPO_PATH,
'%s/logs_DGX/leafdice_fold6/model_iter4100.pt7' % REPO_PATH,
'%s/logs_DGX/leafdice_fold7/model_iter1900.pt7' % REPO_PATH,
'%s/logs_DGX/leafdice_fold8/model_iter3000.pt7' % REPO_PATH,
'%s/logs_DGX/leafdice_fold9/model_iter4300.pt7' % REPO_PATH,
]
PARTIAL_DICE_SOFT_TARGET_MODEL_LIST = [ # baseline 2
'%s/logs_DGX/baseline2_fold0/model_iter11100.pt7' % REPO_PATH,
'%s/logs_DGX/baseline2_fold1/model_iter21300.pt7' % REPO_PATH,
'%s/logs_DGX/baseline2_fold2/model_iter16300.pt7' % REPO_PATH,
'%s/logs_DGX/baseline2_fold3/model_iter10900.pt7' % REPO_PATH,
'%s/logs_DGX/baseline2_fold4/model_iter15900.pt7' % REPO_PATH,
'%s/logs_DGX/baseline2_fold5/model_iter19900.pt7' % REPO_PATH,
'%s/logs_DGX/baseline2_fold6/model_iter9900.pt7' % REPO_PATH,
'%s/logs_DGX/baseline2_fold7/model_iter16400.pt7' % REPO_PATH,
'%s/logs_DGX/baseline2_fold8/model_iter13300.pt7' % REPO_PATH,
'%s/logs_DGX/baseline2_fold9/model_iter17000.pt7' % REPO_PATH,
]
MODELS = PARTIAL_LEAF_DICE_MODEL_LIST
MODEL_ID = 'CameraReady_Partial_Leaf_Dice_fold0-9'
PRED_FOLDER_LUCAS = os.path.join(
SAVE_FOLDER,
'fetal_seg_pred_%s' % MODEL_ID
)
def print_results(metrics, method_names=METHOD_NAMES, save_path=None):
print('\nGlobal statistics for the metrics')
for method in method_names:
print('\n\033[93m----------')
print(method.upper())
print('----------\033[0m')
for roi in ALL_ROI:
print('\033[92m%s\033[0m' % roi)
for metric in METRIC_NAMES:
key = '%s_%s' % (metric, roi)
num_data = len(metrics[method][key])
if num_data == 0:
print('No data for %s' % key)
continue
print('%d cases' % num_data)
mean = np.mean(metrics[method][key])
std = np.std(metrics[method][key])
median = np.median(metrics[method][key])
q3 = np.percentile(metrics[method][key], 75)
p95 = np.percentile(metrics[method][key], 95)
q1 = np.percentile(metrics[method][key], 25)
p5 = np.percentile(metrics[method][key], 5)
print(key)
if metric == 'dice':
print('mean=%.1f std=%.1f median=%.1f q1=%.1f p5=%.1f' % (mean, std, median, q1, p5))
else:
print('mean=%.1f std=%.1f median=%.1f q3=%.1f p95=%.1f' % (mean, std, median, q3, p95))
print('-----------')
if save_path is not None:
with open(save_path, 'wb') as f:
pickle.dump(metrics, f, pickle.HIGHEST_PROTOCOL)
def compute_evaluation_metrics(pred_seg_path, gt_seg_path, dataset_path):
def load_np(seg_path):
seg = nib.load(seg_path).get_fdata().astype(np.uint8)
return seg
pred_seg_folder, pred_seg_name = os.path.split(pred_seg_path)
pred_seg = load_np(pred_seg_path)
gt_seg = load_np(gt_seg_path)
if dataset_path == CORRECTED_ZURICH_DATA_DIR:
print('Merge CC with WM')
pred_seg[pred_seg == LABELS['corpus_callosum']] = LABELS['wm']
if dataset_path == SB_LEUVEN_TESTINGSET:
print('Change the CC label from 5 to 8')
gt_seg[gt_seg == 5] = LABELS['corpus_callosum']
# Compute the metrics
dice_values = {}
haus_values = {}
for roi in DATASET_LABELS[dataset_path]:
dice_values[roi] = dice_score(
pred_seg,
gt_seg,
fg_class=LABELS[roi],
)
haus_values[roi] = min(
MAX_HD,
haussdorff_distance(
pred_seg,
gt_seg,
fg_class=LABELS[roi],
percentile=95,
)
)
print('\n\033[92mEvaluation for %s\033[0m' % pred_seg_name)
print('Dice scores:')
print(dice_values)
print('Hausdorff95 distances:')
print(haus_values)
return dice_values, haus_values
def main(dataset_path_list):
if not os.path.exists(PRED_FOLDER_LUCAS):
os.mkdir(PRED_FOLDER_LUCAS)
# Initialize the metric dict
metrics = {
method: {'%s_%s' % (metric, roi): [] for roi in ALL_ROI for metric in METRIC_NAMES}
for method in METHOD_NAMES
}
# Run the batch inference
for dataset in dataset_path_list:
for f_n in os.listdir(dataset):
print('\n--------------')
print('Start inference for case %s' % f_n)
if '.' in f_n:
continue
input_path = os.path.join(dataset, f_n, 'srr.nii.gz')
output_path = os.path.join(PRED_FOLDER_LUCAS, f_n)
if not os.path.exists(output_path):
os.mkdir(output_path)
pred_path = os.path.join(
output_path,
'srr_parcellation_cnn_autoseg.nii.gz',
)
if not os.path.exists(pred_path): # Skip inference if the predicted segmentation already exists
# Set the models to use
cmd_models = '--model'
for model in MODELS:
cmd_models += ' %s' % model
# Inference command line
cmd = 'python %s/infer_seg.py --input %s --output_folder %s --num_classes %d %s' % \
(REPO_PATH, input_path, output_path, NUM_CLASS, cmd_models)
print(cmd)
os.system(cmd)
# Eval
if DO_EVAL:
gt_seg_path = os.path.join(dataset, f_n, 'parcellation.nii.gz')
for method in METHOD_NAMES:
dice, haus = compute_evaluation_metrics(pred_path, gt_seg_path, dataset_path=dataset)
for roi in DATASET_LABELS[dataset]:
metrics[method]['dice_%s' % roi].append(100 * dice[roi])
metrics[method]['hausdorff_%s' % roi].append(haus[roi])
# Save and print the metrics aggregated
if DO_EVAL:
save_metrics_path = os.path.join(PRED_FOLDER_LUCAS, 'metrics.pkl')
print_results(metrics, save_path=save_metrics_path)
else:
print('\nNo evaluation was run.')
def main_ori_vs_after_correction_zurich_data():
# Measure the difference between the original data from Zurich
# and the manually corrected segmentations
cases_ids = os.listdir(CORRECTED_ZURICH_DATA_DIR)
metrics = {'%s_%s' % (metric, roi): []
for roi in DATASET_LABELS[CORRECTED_ZURICH_DATA_DIR]
for metric in METRIC_NAMES}
print('Compare Zurich segmentation before and after manual corrections')
print('%d cases to evaluate\n' % len(cases_ids))
for case_id in cases_ids:
print('\n----------------')
print('Case %s' % case_id)
new_seg = os.path.join(CORRECTED_ZURICH_DATA_DIR, case_id, 'parcellation.nii.gz')
old_seg = os.path.join(ORI_ZURICH_DATA_DIR, case_id, 'parcellation.nii.gz')
dice, haus = compute_evaluation_metrics(
new_seg, old_seg, dataset_path=CORRECTED_ZURICH_DATA_DIR)
for roi in DATASET_LABELS[CORRECTED_ZURICH_DATA_DIR]:
metrics['dice_%s' % roi].append(100 * dice[roi])
metrics['hausdorff_%s' % roi].append(haus[roi])
# Print the global results
print('\nGLOBAL METRICS')
for metric in METRIC_NAMES:
for roi in DATASET_LABELS[CORRECTED_ZURICH_DATA_DIR]:
key = '%s_%s' % (metric, roi)
mean = np.mean(metrics[key])
std = np.std(metrics[key])
median = np.median(metrics[key])
q3 = np.percentile(metrics[key], 75)
q1 = np.percentile(metrics[key], 25)
IQR = q3 - q1
print(key)
print('mean=%f std=%f median=%f IQR=%f' % (mean, std, median, IQR))
def main_results_analysis(pkl_files_list):
"""
Useful for averaging the results of several evaluations.
:param pkl_files_list: list of metrics files computed with one of the main functions above
:return:
"""
print('')
for method in METHOD_NAMES:
for roi in ALL_ROI:
print('\033[92m%s\033[0m' % roi)
for metric in METRIC_NAMES:
key = '%s_%s' % (metric, roi)
mean_list = []
std_list = []
median_list = []
iqr_list = []
q1_list = []
q3_list = []
for pkl_file in pkl_files_list:
with open(pkl_file, 'rb') as f:
metrics = pickle.load(f)
key = '%s_%s' % (metric, roi)
mean_list.append(np.mean(metrics[method][key]))
std_list.append(np.std(metrics[method][key]))
median_list.append(np.median(metrics[method][key]))
q3 =
|
np.percentile(metrics[method][key], 75)
|
numpy.percentile
|
#imports--------------------------------------------------------------------------------------------------------------------
import os
import glob
import sys
#import wget
import time
import subprocess
import shlex
import sys
import warnings
import random
import pickle
from Bio.SeqUtils import seq1
from Bio.PDB.PDBParser import PDBParser
from Bio import AlignIO
from sklearn.base import TransformerMixin
from sklearn.preprocessing import StandardScaler, Normalizer , MinMaxScaler , RobustScaler
from sklearn.decomposition import PCA
from sklearn.decomposition import IncrementalPCA
sys.path.append('./ProFET/ProFET/feat_extract/')
import FeatureGen
import numpy as np
import pandas as pd
import seaborn as sns
from matplotlib import pyplot as plt
import h5py
#class definitions--------------------------------------------------------------------------------------------------------------------
#scaler
class NDSRobust(TransformerMixin):
def __init__(self, **kwargs):
self._scaler = RobustScaler(copy=True, **kwargs)
self._orig_shape = None
def fit(self, X, **kwargs):
X = np.array(X)
# Save the original shape to reshape the flattened X later
# back to its original shape
if len(X.shape) > 1:
self._orig_shape = X.shape[1:]
X = self._flatten(X)
self._scaler.fit(X, **kwargs)
return self
def transform(self, X, **kwargs):
X = np.array(X)
X = self._flatten(X)
X = self._scaler.transform(X, **kwargs)
X = self._reshape(X)
return X
def inverse_transform(self, X, **kwargs):
X = np.array(X)
X = self._flatten(X)
X = self._scaler.inverse_transform(X, **kwargs)
X = self._reshape(X)
return X
def _flatten(self, X):
# Reshape X to <= 2 dimensions
if len(X.shape) > 2:
n_dims = np.prod(self._orig_shape)
X = X.reshape(-1, n_dims)
return X
def _reshape(self, X):
# Reshape X back to its original shape
if len(X.shape) >= 2:
X = X.reshape(-1, *self._orig_shape)
return X
#ndimensional PCA for arrays
class NDSPCA(TransformerMixin):
def __init__(self, **kwargs):
self._scaler = IncrementalPCA(copy = True, **kwargs)
self._orig_shape = None
def fit(self, X, **kwargs):
X = np.array(X)
# Save the original shape to reshape the flattened X later
# back to its original shape
if len(X.shape) > 1:
self._orig_shape = X.shape[1:]
X = self._flatten(X)
self._scaler.fit(X, **kwargs)
self.explained_variance_ratio_ = self._scaler.explained_variance_ratio_
self.components_ =self._scaler.components_
return self
def transform(self, X, **kwargs):
X = np.array(X)
X = self._flatten(X)
X = self._scaler.transform(X, **kwargs)
return X
def inverse_transform(self, X, **kwargs):
X = np.array(X)
X = self._flatten(X)
X = self._scaler.inverse_transform(X, **kwargs)
X = self._reshape(X)
return X
def _flatten(self, X):
# Reshape X to <= 2 dimensions
if len(X.shape) > 2:
n_dims = np.prod(self._orig_shape)
X = X.reshape(-1, n_dims)
return X
def _reshape(self, X):
# Reshape X back to its original shape
if len(X.shape) >= 2:
X = X.reshape(-1, *self._orig_shape)
return X
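#usage sketch (illustrative, random data): scale a stack of 2D arrays with
#NDSRobust, then project it to a low-dimensional space with NDSPCA.
def _example_nd_scaler_and_pca():
    stack = np.random.rand(20, 8, 8)               # 20 samples of shape (8, 8)
    scaler = NDSRobust()
    scaled = scaler.fit(stack).transform(stack)    # scaled, back in shape (20, 8, 8)
    ndpca = NDSPCA(n_components=5)
    ndpca.fit(scaled)
    return ndpca.transform(scaled)                 # shape (20, 5): flattened and projected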
#global parameters--------------------------------------------------------------------------------------------------------------------
verbose = True
#how many components to keep after PCA?
components = 300
#clipping value for FFT components (how many components should be kept?)
maxFFTComponents = 100
#amount of properties stored in the voxels
propAmount = 12
#amino acids supported by protfeat
legalAANames = {b'A', b'R', b'N', b'D', b'C', b'Q', b'E', b'G', b'H', b'I', b'L', b'K', b'M', b'F', b'P', b'S', b'T', b'W', b'Y', b'V', b'B', b'Z', b'X'}
#working on a sample? how big?
sampling = True
sampleSize = 20
#function definitions--------------------------------------------------------------------------------------------------------------------
#fit the components of the output space
#y: array of stacked distmats (on the 1st axis)
def fit_y( y, components = components, FFT = False):
if FFT == True:
#go through a stack of structural distmats. these should be 0 padded to all fit in an array
y = np.stack([ np.fft.rfft2(y[i,:,:]) for i in range(y.shape[0])] )
if verbose:
print(y.shape)
y = np.hstack( [ np.real(y) , np.imag(y)] )
if verbose:
print(y.shape)
ndpca = NDSPCA(n_components=components)
ndpca.fit(y)
if verbose:
print('explained variance')
print(np.sum(ndpca.explained_variance_ratio_))
scaler0 = NDSRobust()
scaler0.fit(y)
return ndpca, scaler0
def transform_y(y, scaler0, ndpca, FFT = False):
if FFT == True:
y = np.stack([np.fft.rfft2(y[i,:,:]) for i in range(y.shape[0])])
if verbose:
print(y.shape)
y = np.hstack( [ np.real(y) , np.imag(y)] )
y = ndpca.transform(y)
scaler0 = NDSRobust()
scaler0.fit(y)
y = scaler0.transform(y)
if verbose:
print(y.shape)
return y, scaler0
def inverse_transform_y(y, scaler0, ndpca, FFT=False):
y = scaler0.inverse_transform(y)
y = ndpca.inverse_transform(y)
if FFT == True:
split = int(y.shape[1]/2)
y = np.stack([ np.fft.irfft2(y[i,:split,:] + 1j*y[i,split:,:]) for i in range(y.shape[0]) ] )
return y
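#usage sketch (illustrative, random data) of the three functions above, with FFT
#disabled. note that transform_y refits its own NDSRobust scaler internally, so
#the scaler passed in as the second argument is not actually used.
def _example_y_roundtrip():
    y = np.random.rand(30, 16, 16)                             #fake stack of distmats
    ndpca, _ = fit_y(y, components=10, FFT=False)
    y_low, scaler0 = transform_y(y, None, ndpca, FFT=False)    #shape (30, 10)
    y_rec = inverse_transform_y(y_low, scaler0, ndpca, FFT=False)
    return y_rec.shape                                         #(30, 16, 16), approximate reconstruction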
#fit the components of the input space
#stacked align voxels (on the 1st axis)
def fit_x(x, components = components, FFT = False):
if FFT == True:
#go through a stack of align voxels. these should be 0 padded to all fit in an array
x = np.stack([ np.fft.rfftn(x[i,:,:,:]) for i in range(x.shape[0])] )
if verbose:
print(x.shape)
x = np.hstack( [ np.real(x) , np.imag(x)] )
if verbose:
print(x.shape)
ndpca = NDSPCA(n_components=components)
ndpca.fit(x)
if verbose:
print('explained variance')
print(np.sum(ndpca.explained_variance_ratio_))
scaler0 = NDSRobust()
scaler0.fit(x)
return ndpca, scaler0
def transform_x(x, scaler0, ndpca, FFT = False):
if FFT == True:
x = np.stack([ np.fft.rfftn(x[i,:,:,:]) for i in range(x.shape[0])] )
if verbose:
print(x.shape)
x = np.hstack( [ np.real(x) , np.imag(x)] )
x = ndpca.transform(x)
scaler0 = NDSRobust()
scaler0.fit(x)
x = scaler0.transform(x)
if verbose:
print(x.shape)
return x, scaler0
def inverse_transform_x(x, scaler0, ndpca, FFT=False):
x = scaler0.inverse_transform(x)
x = ndpca.inverse_transform(x)
if FFT == True:
split = int(x.shape[1]/2)
x = np.stack([ np.fft.irfftn(x[i,:split,:,:] + 1j*x[i,split:,:,:]) for i in range(x.shape[0]) ] )
return x
def alnFileToArray(filename, returnMsa = False):
alnfile = filename
msa = AlignIO.read(alnfile , format = 'fasta')
align_array = np.array([ list(rec.upper()) for rec in msa], np.character)
if returnMsa:
return align_array, msa
return align_array
def alnArrayLineToSequence(align_array, index):
seq = ''
for aa in align_array[index]:
seq += aa.decode('utf-8')
return seq
#generate align list
def generateAlignList(directory = 'alns', returnMsa = False):
aligns = list()
msas = list()
#read through align files to get align arrays list
for file in os.listdir(directory):
if file.endswith('.fasta'):
aligns.append(alnFileToArray(directory+'/'+file, returnMsa)[0])
if returnMsa:
msas.append(alnFileToArray(directory+'/'+file, returnMsa)[1])
if returnMsa:
return aligns, msas
return aligns
#find biggest align shape (for padding) - aligns is a list of arrays
def biggestAlignShape(aligns):
longestProts = 0
mostProts = 0
for aln in aligns:
if aln.shape[0] > mostProts:
mostProts = aln.shape[0]
if aln.shape[1] > longestProts:
longestProts = aln.shape[1]
return mostProts, longestProts
#structs is a dictionary of locations of the files for structures
def parsePDB(structs):
parser = PDBParser()
converter = {'ALA': 'A', 'ASX': 'B', 'CYS': 'C', 'ASP': 'D', 'GLU': 'E', 'PHE': 'F', 'GLY': 'G',
'HIS': 'H', 'ILE': 'I', 'LYS': 'K', 'LEU': 'L', 'MET': 'M', 'ASN': 'N', 'PRO': 'P',
'GLN': 'Q', 'ARG': 'R', 'SER': 'S', 'THR': 'T', 'SEC': 'U', 'VAL': 'V', 'TRP': 'W',
'XAA': 'X', 'TYR': 'Y', 'GLX': 'Z'}
structseqs={}
with open( 'structs.fast' , 'w') as fastout:
for s in structs:
Structure = PDBParser().get_structure(s, structs[s])
for model in Structure:
for chain in model:
res = chain.get_residues()
seq = ''.join([ converter[r.get_resname()] for r in res if r.get_resname() in converter ] )
fastout.write('>' + s + '|'+ chain.id +'\n')
fastout.write(str( seq ) +'\n' )
structseqs[ s + '|'+ chain.id ] = seq
return structseqs
def generateProtFeatDict(sequence):
features = FeatureGen.Get_Protein_Feat(sequence)
return features
#generate complete set of dictionary keys generated by protFET
def protFeatKeys(align_array):
dictKeys = set()
for i in range(align_array.shape[0]):
sequence = alnArrayLineToSequence(align_array, i)
#sequence = str(msa[i].seq)
#temporary fix for ProtFeat not supporting B, Z, X
sequence = sequence.replace('B', 'D')
sequence = sequence.replace('Z', 'E')
sequence = sequence.replace('X', 'A')
sequence = sequence.replace('.', '')
sequence = sequence.replace('-','')
dictKeys = dictKeys.union(set(generateProtFeatDict(sequence).keys()) - dictKeys)
return dictKeys
#generate ProtFET array for given align (maxKeys: all keys of the feature dictionary, over the entire set)
def alignToProtFeat(align_array, dictKeys):
#generate 2d array of ProtFET features for each sequence in align
align_features = np.zeros((align_array.shape[0], len(dictKeys)), dtype=float)
missingFeatures = set()
for i in range(align_array.shape[0]):
sequence = alnArrayLineToSequence(align_array, i)
#temporary fix for ProtFeat not supporting B, Z, X
sequence = sequence.replace('B', 'D')
sequence = sequence.replace('Z', 'E')
sequence = sequence.replace('X', 'A')
sequence = sequence.replace('.', '')
sequence = sequence.replace('-','')
featuresDict = generateProtFeatDict(sequence)
missingFeatures = dictKeys - set(featuresDict.keys())
for newKey in missingFeatures:
featuresDict[newKey] = float(0)
features = np.array(list(featuresDict.values()))
align_features[i,:] = features
return align_features
#generate array of ProtFeat features for all aligns
def protFeatArrays(aligns):
maxKeys = set()
mostProts = biggestAlignShape(aligns)[0]
#build set of all keys used in the set
for i in range(len(aligns)):
maxKeys = maxKeys.union(protFeatKeys(aligns[i]) - maxKeys)
setFeatures = np.zeros((len(aligns), mostProts, len(maxKeys)))
for i in range(len(aligns)):
np.append(setFeatures, alignToProtFeat(aligns[i], maxKeys))
return setFeatures
def generateGapMatrix(align_array):
gap_array = np.array([[1 if (align_array[i][j] == b'.' or align_array[i][j] == b'-') else 0 for j in range(align_array.shape[1])] for i in range(align_array.shape[0])])
return gap_array
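#tiny sketch of generateGapMatrix on a made-up 2x4 alignment: gap characters
#('.' or '-') map to 1 and residues map to 0, giving [[0, 1, 0, 1], [0, 0, 0, 0]].
def _example_gap_matrix():
    aln = np.array([[b'M', b'-', b'K', b'.'],
                    [b'M', b'A', b'K', b'T']])
    return generateGapMatrix(aln)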
def generateAlignVoxel(align_array, propAmount = 12, verbose = False):
align_prop_voxel = np.zeros((align_array.shape[0], align_array.shape[1], propAmount + 1), dtype=float)
if(verbose):
print('final voxel shape: ', align_prop_voxel.shape)
gap_array = generateGapMatrix(align_array)
if(verbose):
print('initial array shape: ', align_array.shape)
for prop in numerical:
align_prop_array = np.zeros(align_array.shape, dtype=float)
align_prop_array = [[properties[prop][bstring] for bstring in seq] for seq in align_array]
align_prop_voxel[:,:,numerical.index(prop)] = align_prop_array
align_prop_voxel[:,:,12] = gap_array
if(verbose):
print('full voxel shape: ', align_prop_voxel.shape)
return align_prop_voxel
#fourier transform of all aligns, input is list of unpadded aligns, output is list of FFT of aligns
def fourierAligns(aligns, verbose = False):
alignsFFT = []
for align in aligns:
if(verbose):
print('pre-FFT: ', align.shape)
temp = np.fft.rfftn(align)
if(verbose):
print('post-FFT: ', temp.shape)
temp = np.dstack([np.real(temp), np.imag(temp)])
if(verbose):
print('post-stack: ', temp.shape)
alignsFFT.append(temp)
return alignsFFT
def fourierAlign(align):
temp = np.fft.rfftn(align)
alignFFT = np.dstack([np.real(temp), np.imag(temp)])
return alignFFT
def clipAlign(align, clippingSize = maxFFTComponents):
final = np.zeros((clippingSize, clippingSize, propAmount + 2)) #rfftn shrinks the last axis to n//2+1 and stacking real/imag parts doubles it, so the depth ends up at propAmount + 2
if (align.shape[0] <= clippingSize and align.shape[1] <= clippingSize):
final[:align.shape[0],:align.shape[1],:align.shape[2]] = align
elif (align.shape[0] <= clippingSize and align.shape[1] > clippingSize):
final[:align.shape[0],:,:align.shape[2]] = align[:,:clippingSize,:]
elif (align.shape[0] > clippingSize and align.shape[1] <= clippingSize):
final[:,:align.shape[1],:align.shape[2]] = align[:clippingSize,:,:]
else:
final[:,:,:align.shape[2]] = align[:clippingSize,:clippingSize,:]
return final
#generate 4D array of stacked 3D voxels for PCA
def generateVoxelArray(aligns, propAmount = 12, clippingSize = maxFFTComponents, verbose = False):
#generate voxel array
alignsList = []
for i in range(len(aligns)):
alignsList.append(generateAlignVoxel(aligns[i], propAmount, verbose))
#apply fourier transform to aligns before padding
alignsList = fourierAligns(alignsList, verbose)
#pad or clip all aligns to be the same size, based on how many components of the FFT we want to keep
for i in range(len(alignsList)):
final = np.zeros((clippingSize, clippingSize, propAmount + 2)) #rfftn shrinks the last axis to n//2+1 and stacking real/imag parts doubles it, so the depth ends up at propAmount + 2
if(alignsList[i].shape[0] <= clippingSize and alignsList[i].shape[1] <= clippingSize):
final[:alignsList[i].shape[0],:alignsList[i].shape[1],:alignsList[i].shape[2]] = alignsList[i]
elif(alignsList[i].shape[0] <= clippingSize and alignsList[i].shape[1] > clippingSize):
final[:alignsList[i].shape[0],:,:alignsList[i].shape[2]] = alignsList[i][:,:clippingSize,:]
elif(alignsList[i].shape[0] > clippingSize and alignsList[i].shape[1] <= clippingSize):
final[:,:alignsList[i].shape[1],:alignsList[i].shape[2]] = alignsList[i][:clippingSize,:,:]
else:
final[:,:,:alignsList[i].shape[2]] = alignsList[i][:clippingSize,:clippingSize,:]
alignsList[i] = final
voxels = np.stack(alignsList, axis=0)
if verbose:
print('voxels shape: ', voxels.shape)
return voxels
#keep only chains with usable data (between 50 and 1500 AAs long, corresponding to a pfam MSA), returns list of pdb_id-chain tuples meeting requirements (pass this list to filterDataFrameBefore to remove all non-usable chains)
def filterChains(structs, availableChainData):
validChainsList = list()
for s in structs:
Structure = PDBParser().get_structure(s, structs[s])
for model in Structure:
for chain in model:
chainLetter = ''.join([c for c in str(chain) if c.isupper()])[1:]
if(len(chain) < 50 or len(chain) > 1500):
continue
elif chainLetter not in set(availableChainData[availableChainData['PDB'] == s]['CHAIN'].tolist()): #checking if the chain has corresponding pfam data
continue
else:
validChainsList.append((s, chainLetter))
return validChainsList
def filterDataFrameBefore(validChainsList, data_df):
keep_indexes = list()
for i in list(data_df.index.values):
if (data_df.loc[i, 'PDB'], data_df.loc[i, 'CHAIN']) in validChainsList:
keep_indexes.append(i)
data_df = data_df[data_df.index.isin(keep_indexes)]
return data_df
#after filtering the distmat data, the dataframe must be adjusted to only include valid chain-pfam couplings and to exclude empty chains
def filterDataFrameAfter(data_df, proteinList, protChainIndexes, verbose = False):
'''multiple pfam files are sometimes used to represent the same chain; for now only the first is used.
in the future, restructuring the data prep code could allow keeping all pfam data'''
proteinChainLetters = list()
proteinRepList = list()
for protein in proteinList:
for chain in protChainIndexes[protein].keys():
proteinRepList.append(protein)
proteinChainLetters.append(''.join([c for c in str(chain) if c.isupper()])[1:])
chainLettersTuples = list(zip(proteinRepList, proteinChainLetters))
keep_indexes = list()
no_dupes = list()
for i in list(data_df.index.values):
if (data_df.loc[i, 'PDB'], data_df.loc[i, 'CHAIN']) in chainLettersTuples:
if (data_df.loc[i, 'PDB'], data_df.loc[i, 'CHAIN']) not in no_dupes:
no_dupes.append((data_df.loc[i, 'PDB'], data_df.loc[i, 'CHAIN']))
keep_indexes.append(i)
data_df = data_df[data_df.index.isin(keep_indexes)]
if verbose:
print(data_df)
return data_df
#builds a dictionary of distmats in the set - structs is a dictionary of all the structures (which are then subdivided into chains)
#also adds the distmats to the corresponding data_df column
def PDBToDistmat(structs, data_df, keepOnlyFirstChain, verbose = False):
distances = {}
for s in structs:
Structure = PDBParser().get_structure(s, structs[s])
if(verbose):
print(Structure)
distances[s] = {}
for model in Structure:
for chain in model:
if(verbose):
print('chain: ', chain)
print(len(chain))
res = [r for r in chain.get_residues()]
distmat = [ [res2['CA'] - res1['CA'] if 'CA' in res1 and 'CA' in res2 and i > j else 0 for i,res1 in enumerate(res)] for j,res2 in enumerate(res)]
distmat =
|
np.array(distmat)
|
numpy.array
|
import random
import numpy as np
import numpy.linalg as LA
from PIL import Image
import torch
from torch.utils.data import Dataset
import torchvision.transforms as transforms
from robo_utils.oxford.partial_master import PartialDatasetMaster
from robo_utils.oxford.partial_augment import PartialDatasetAugment
class DIMDataset(Dataset):
def __init__(self, param, mode, opt=None, data_index=None):
'''
Load past costmap and future trajectory
'''
self.param = param
self.opt = opt
self.input_type = None
self.data_index = data_index
if data_index is not None:
self.dataset_master = PartialDatasetMaster(param, data_index)
else:
self.dataset_master = PartialDatasetMaster(param)
self.partial_datasets = self.dataset_master.partial_datasets
self.train_key_list = self.dataset_master.train_key_list
self.eval_key_list = self.dataset_master.eval_key_list
self.test_key_list = self.dataset_master.test_key_list
if data_index is not None:
self._train_key_list = []
for i in range(len(self.train_key_list)):
key, index = self.train_key_list[i]
if key == self.data_index:
self._train_key_list.append(self.train_key_list[i])
self.train_key_list = self._train_key_list
self.num_trajectory = param.net.num_trajectory
self.normalize_length = param.net.normalize_length
self.normalize_speed = param.net.normalize_speed
image_transforms = [
transforms.Resize((200, 400), Image.BICUBIC),
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
]
self.image_transforms = transforms.Compose(image_transforms)
image_transformsv2 = [
transforms.Resize((200, 400), Image.BICUBIC),
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
]
self.image_transformsv2 = transforms.Compose(image_transformsv2)
self.mode = mode
self.random_index = None
def set_tmp_index(self, index):
self.tmp_index = index
def __getitem__(self, pesudo_index):
if self.mode == 'train':
key, index = random.choice(self.train_key_list)
dataset : PartialDatasetAugment = self.partial_datasets[0]
elif self.mode == 'eval':
key, index = random.choice(self.eval_key_list)
dataset : PartialDatasetAugment = self.partial_datasets[key]
# key, index = self.eval_key_list[self.tmp_index % len(self.eval_key_list)]
else:
key, index = random.choice(self.test_key_list)
dataset : PartialDatasetAugment = self.partial_datasets[key]
nav = dataset.get_nav_map(dataset.ref_timestamp_array[index])
nav = self.image_transformsv2(nav)
image = dataset.get_image(dataset.ref_timestamp_array[index], crop=True)
image = self.image_transforms(image)
image = torch.cat((image, nav), 0)
times, x, y, vx, vy = self.dataset_master.get_trajectory(dataset, index)
times = times.astype(np.float32) / self.opt.max_t
v_0 = np.sqrt(vx[0]**2+vy[0]**2) / self.opt.max_speed
fixed_step = times.shape[0]//(self.opt.points_num+1)
times = times[::fixed_step][1:-1]
x = x[::fixed_step][1:-1]
y = y[::fixed_step][1:-1]
vx = vx[::fixed_step][1:-1]
vy = vy[::fixed_step][1:-1]
x /= self.opt.max_dist
y /= self.opt.max_dist
vx /= self.opt.max_speed
vy /= self.opt.max_speed
xy = torch.FloatTensor([x, -y]).T
vxy = torch.FloatTensor([vx, -vy]).T
v0_array = torch.FloatTensor([v_0]*len(x))
v_0 = torch.FloatTensor([v_0])
t = torch.FloatTensor(times)
return {'img': image, 't': t,
#'x':x,'y':-y, 'vx':vx,'vy':-vy,
'v_0':v_0, 'v0_array':v0_array,
'xy':xy, 'vxy':vxy,
'key': key, 'index': index}
def __len__(self):
return 100000000000
def set_mode(self, mode):
self.mode = mode
class DIVADataset(Dataset):
def __init__(self, param, mode, opt=None):
'''
Load past costmap and future trajectory
'''
self.param = param
self.opt = opt
self.input_type = None
self.data_index = [0,1,2,3,4]
self.dataset_master = PartialDatasetMaster(param)
self.partial_datasets = self.dataset_master.partial_datasets
self.train_key_list = self.dataset_master.train_key_list
self.eval_key_list = self.dataset_master.eval_key_list
self.test_key_list = self.dataset_master.test_key_list
self._train_key_list = []
for i in range(len(self.train_key_list)):
key, index = self.train_key_list[i]
if key in self.data_index:
self._train_key_list.append(self.train_key_list[i])
self.train_key_list = self._train_key_list
self.num_trajectory = param.net.num_trajectory
self.normalize_length = param.net.normalize_length
self.normalize_speed = param.net.normalize_speed
if mode == 'train':
image_transforms = [
transforms.Resize((200, 400), Image.BICUBIC),
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
]
else:
image_transforms = [
transforms.Resize((200, 400), Image.BICUBIC),
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
]
image_transformsv2 = [
transforms.Resize((200, 400), Image.BICUBIC),
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
]
self.image_transforms = transforms.Compose(image_transforms)
self.image_transformsv2 = transforms.Compose(image_transformsv2)
self.mode = mode
self.random_index = None
def set_tmp_index(self, index):
self.tmp_index = index
def __getitem__(self, pesudo_index):
while True:
if self.mode == 'train':
key, index = random.choice(self.train_key_list)
dataset : PartialDatasetAugment = self.partial_datasets[key]####
elif self.mode == 'eval':
key, index = random.choice(self.eval_key_list)
dataset : PartialDatasetAugment = self.partial_datasets[key]
# key, index = self.eval_key_list[self.tmp_index % len(self.eval_key_list)]
else:
key, index = random.choice(self.test_key_list)
dataset : PartialDatasetAugment = self.partial_datasets[key]
try:
# if True:
nav = dataset.get_nav_map(dataset.ref_timestamp_array[index])
nav = self.image_transformsv2(nav)
image = dataset.get_image(dataset.ref_timestamp_array[index], crop=True)
if image is None: continue
image = self.image_transforms(image)
images = torch.cat((image, nav), 0)
except:
continue
times, x, y, vx, vy = self.dataset_master.get_trajectory(dataset, index)
times = times.astype(np.float32) / self.opt.max_t
v_0 = np.sqrt(vx[0]**2+vy[0]**2) / self.opt.max_speed
if times.shape[0] < 47:
print(times.shape[0])
continue
fixed_step = times.shape[0]//self.opt.points_num
try:
times = times[::fixed_step]
except:
print('Error', times.shape[0], self.opt.points_num, fixed_step)
x = x[::fixed_step]
y = y[::fixed_step]
vx = vx[::fixed_step]
vy = vy[::fixed_step]
break
x /= self.opt.max_dist
y /= self.opt.max_dist
vx /= self.opt.max_speed
vy /= self.opt.max_speed
xy = torch.FloatTensor([x, -y]).T
vxy = torch.FloatTensor([vx, -vy]).T
v0_array = torch.FloatTensor([v_0]*len(x))
v_0 = torch.FloatTensor([v_0])
domian = [0]*len(self.data_index)
if self.mode == 'train': domian[key%len(self.data_index)] = 1.
domian = torch.FloatTensor(domian)
t = torch.FloatTensor(times)
return {'img': images, 't': t, 'domian':domian,
'v_0':v_0, 'v0_array':v0_array,
'xy':xy, 'vxy':vxy,
'key': key, 'index': index}
def __len__(self):
return 100000000000
def set_mode(self, mode):
self.mode = mode
class GANDataset(Dataset):
def __init__(self, param, mode, opt=None):
'''
Load past costmap and future trajectory
'''
self.param = param
self.opt = opt
self.input_type = None
self.data_index = [0,1,2,3,4]
self.dataset_master = PartialDatasetMaster(param)
self.partial_datasets = self.dataset_master.partial_datasets
self.train_key_list = self.dataset_master.train_key_list
self.eval_key_list = self.dataset_master.eval_key_list
self.test_key_list = self.dataset_master.test_key_list
self._train_key_list = []
for i in range(len(self.train_key_list)):
key, index = self.train_key_list[i]
if key in self.data_index:
self._train_key_list.append(self.train_key_list[i])
self.train_key_list = self._train_key_list
self.num_trajectory = param.net.num_trajectory
self.normalize_length = param.net.normalize_length
self.normalize_speed = param.net.normalize_speed
self.mode = mode
self.random_index = None
def PJcurvature(self, x,y):
"""
input : the coordinates of the three points
output : the curvature and the normal direction
refer to https://github.com/Pjer-zhang/PJCurvature for details
"""
t_a = LA.norm([x[1]-x[0],y[1]-y[0]])
t_b = LA.norm([x[2]-x[1],y[2]-y[1]])
M = np.array([
[1, -t_a, t_a**2],
[1, 0, 0 ],
[1, t_b, t_b**2]
])
a = np.matmul(LA.inv(M),x)
b = np.matmul(LA.inv(M),y)
kappa = 2*(a[2]*b[1]-b[2]*a[1])/(a[1]**2.+b[1]**2.)**(1.5)
return kappa#, [b[1],-a[1]]/np.sqrt(a[1]**2.+b[1]**2.)
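# Quick numerical check (illustrative only): three nearby points on a circle of
# radius 10 give |kappa| ~= 1/10, e.g.
#   x = [10 * np.cos(a) for a in (-0.1, 0.0, 0.1)]
#   y = [10 * np.sin(a) for a in (-0.1, 0.0, 0.1)]
#   self.PJcurvature(x, y)  # ~= -0.1; the sign follows the traversal direction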
def set_tmp_index(self, index):
self.tmp_index = index
def __getitem__(self, pesudo_index):
while True:
if self.mode == 'train':
key, index = random.choice(self.train_key_list)
dataset : PartialDatasetAugment = self.partial_datasets[key]####
elif self.mode == 'eval':
key, index = random.choice(self.eval_key_list)
dataset : PartialDatasetAugment = self.partial_datasets[key]
else:
key, index = random.choice(self.test_key_list)
dataset : PartialDatasetAugment = self.partial_datasets[key]
times, x, y, vx, vy = self.dataset_master.get_trajectory(dataset, index)
times = times.astype(np.float32) / self.opt.max_t
v_0 =
|
np.sqrt(vx[0]**2+vy[0]**2)
|
numpy.sqrt
|
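A note on the sampling above: the loop draws a (dataset, index) pair, subsamples the trajectory to roughly opt.points_num points with a fixed stride, and normalizes time, position and velocity by max_t, max_dist and max_speed. Below is a minimal, self-contained sketch of the same stride-based subsampling; the parameter names (points_num, max_t, max_dist) simply mirror the options used above and are otherwise illustrative.

import numpy as np

def subsample_trajectory(times, x, y, points_num, max_t, max_dist):
    """Keep roughly points_num samples using a fixed stride, then normalize."""
    step = max(times.shape[0] // points_num, 1)   # guard against a zero stride
    times, x, y = times[::step], x[::step], y[::step]
    return times / max_t, x / max_dist, y / max_dist

t = np.linspace(0.0, 3.0, 60)
tn, xn, yn = subsample_trajectory(t, t * 2.0, t * 0.5, points_num=10, max_t=3.0, max_dist=6.0)
print(tn.shape)  # about 10 points remain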
from typing import List, Any
import numpy as np
class SubClass(np.ndarray): ...
i8: np.int64
A: np.ndarray
B: SubClass
C: List[int]
def func(i: int, j: int, **kwargs: Any) -> SubClass: ...
reveal_type(np.asarray(A)) # E: ndarray
reveal_type(np.asarray(B)) # E: ndarray
reveal_type(np.asarray(C)) # E: ndarray
reveal_type(np.asanyarray(A)) # E: ndarray
reveal_type(np.asanyarray(B)) # E: SubClass
reveal_type(np.asanyarray(B, dtype=int)) # E: ndarray
reveal_type(np.asanyarray(C)) # E: ndarray
reveal_type(np.ascontiguousarray(A)) # E: ndarray
reveal_type(np.ascontiguousarray(B)) # E: ndarray
reveal_type(np.ascontiguousarray(C)) # E: ndarray
reveal_type(np.asfortranarray(A)) # E: ndarray
reveal_type(np.asfortranarray(B)) # E: ndarray
reveal_type(np.asfortranarray(C)) # E: ndarray
reveal_type(np.require(A)) # E: ndarray
reveal_type(np.require(B)) # E: SubClass
reveal_type(np.require(B, requirements=None)) # E: SubClass
reveal_type(np.require(B, dtype=int)) # E: ndarray
reveal_type(np.require(B, requirements="E")) # E: ndarray
reveal_type(np.require(B, requirements=["ENSUREARRAY"])) # E: ndarray
reveal_type(np.require(B, requirements={"F", "E"})) # E: ndarray
reveal_type(np.require(B, requirements=["C", "OWNDATA"])) # E: SubClass
reveal_type(np.require(B, requirements="W")) # E: SubClass
reveal_type(np.require(B, requirements="A")) # E: SubClass
reveal_type(np.require(C)) # E: ndarray
reveal_type(np.linspace(0, 10)) # E: numpy.ndarray
reveal_type(np.linspace(0, 10, retstep=True)) # E: Tuple[numpy.ndarray, Any]
reveal_type(np.logspace(0, 10)) # E: numpy.ndarray
reveal_type(np.geomspace(1, 10)) # E: numpy.ndarray
reveal_type(np.zeros_like(A)) # E: numpy.ndarray
reveal_type(np.zeros_like(C)) # E: numpy.ndarray
reveal_type(np.zeros_like(B)) # E: SubClass
reveal_type(np.zeros_like(B, dtype=np.int64)) # E: numpy.ndarray
reveal_type(np.ones_like(A)) # E: numpy.ndarray
reveal_type(np.ones_like(C)) # E: numpy.ndarray
reveal_type(np.ones_like(B)) # E: SubClass
reveal_type(np.ones_like(B, dtype=np.int64)) # E: numpy.ndarray
reveal_type(np.empty_like(A)) # E: numpy.ndarray
reveal_type(np.empty_like(C)) # E: numpy.ndarray
reveal_type(np.empty_like(B)) # E: SubClass
reveal_type(np.empty_like(B, dtype=np.int64)) # E: numpy.ndarray
reveal_type(np.full_like(A, i8)) # E: numpy.ndarray
reveal_type(np.full_like(C, i8)) # E: numpy.ndarray
reveal_type(np.full_like(B, i8)) # E: SubClass
reveal_type(np.full_like(B, i8, dtype=np.int64)) # E: numpy.ndarray
reveal_type(np.ones(1)) # E: numpy.ndarray
reveal_type(np.ones([1, 1, 1])) # E: numpy.ndarray
reveal_type(np.full(1, i8)) # E: numpy.ndarray
reveal_type(np.full([1, 1, 1], i8)) # E: numpy.ndarray
reveal_type(np.indices([1, 2, 3])) # E: numpy.ndarray
reveal_type(np.indices([1, 2, 3], sparse=True)) # E: tuple[numpy.ndarray]
reveal_type(np.fromfunction(func, (3, 5))) # E: SubClass
reveal_type(
|
np.identity(10)
|
numpy.identity
|
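The typing expectations in the snippet above mirror runtime behaviour: np.asarray always returns a base ndarray, while np.asanyarray passes ndarray subclasses through untouched. A small runtime illustration of that difference, using a trivial subclass of ndarray (nothing here is part of the original test file):

import numpy as np

class SubClass(np.ndarray):
    """Trivial ndarray subclass used only to observe type propagation."""
    pass

b = np.arange(4).view(SubClass)
print(type(np.asarray(b)))     # <class 'numpy.ndarray'> -- subclass dropped
print(type(np.asanyarray(b)))  # SubClass -- subclass preserved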
import time
import interface as bbox
import numpy as np
# initial: Level score= 2308.362061 14s
# Optimized: Level score= 2308.362061 6.542621850967407s
# test level
# baseline: 2221.729980
# best: 2246.279541
# best_coefs_score=2560.100830078125_sigma=0.004999999888241291.txt: 2158.130615
# best_coefs_score=2964.60009765625_sigma=0.0010000000474974513.txt: 2259.347900
# star3 - subfit_best_coefs_score=2621.0400390625_sigma=0.009999999776482582.txt: 2621.040039
# star 4-subfit_best_coefs_score=2738.301513671875_sigma=0.009999999776482582.txt: 2738.301514
# star 5-best_coefs_score=2966.489501953125_sigma=0.009999999776482582_level=train_level: 2422.259033
# star 6-best_coefs_score=2964.60009765625_sigma=0.10000000149011612_level=train_level: 2259.347900
# star 7-best_coefs_score=2994.271240234375_sigma=0.009999999776482582_level=train_level:
# star 8-best_coefs_score=2992.164794921875_sigma=0.0010000000474974513_level=train_level:
# star 9-best_coefs_score=3017.848388671875_sigma=0.0010000000474974513_level=train_level: 2389.348633
# star 10-best_coefs_score=2972.124267578125_sigma=9.999999747378752e-05_level=train_level.txt: 2257.179688
# star 13-best_coefs_score=2980.401123046875_sigma=0.0010000000474974513_level=train_level.txt:
def get_action_by_state(state, coefs):
return np.argmax(np.dot(coefs, state))
n_features = 36
n_actions = 4
max_time = -1
def prepare_bbox():
global n_features, n_actions, max_time
if bbox.is_level_loaded():
bbox.reset_level()
else:
bbox.load_level("../levels/test_level.data", verbose=1)
n_features = bbox.get_num_of_features()
n_actions = bbox.get_num_of_actions()
max_time = bbox.get_max_time()
def load_regression_coefs(filename):
coefs = np.loadtxt(filename)
return coefs
def run_bbox():
start_time = time.time()
has_next = 1
prepare_bbox()
coefs = load_regression_coefs("star 13-best_coefs_score=2980.401123046875_sigma=0.0010000000474974513_level=train_level.txt")
state =
|
np.ones(n_features + 1)
|
numpy.ones
|
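The policy in the snippet above is a single linear map: coefs holds one row of n_features + 1 weights per action, the state vector is padded with a constant 1 as a bias term, and the action with the largest score is chosen. A self-contained sketch of that scoring step; random weights stand in for the coefficients loaded from the text file, which is an assumption for illustration only.

import numpy as np

n_features, n_actions = 36, 4
coefs = np.random.randn(n_actions, n_features + 1)  # one weight row per action, last column acts as a bias
state = np.ones(n_features + 1)                      # last entry stays 1.0 as the bias input
state[:n_features] = np.random.randn(n_features)     # features reported by the environment
action = int(np.argmax(np.dot(coefs, state)))        # same rule as get_action_by_state above
print(action)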
import pytest
import itertools
import math
import numpy as np
from numpy.testing import assert_allclose
import quimb as qu
@pytest.fixture
def p1():
return qu.rand_rho(3)
@pytest.fixture
def p2():
return qu.rand_rho(3)
@pytest.fixture
def k1():
return qu.rand_ket(3)
@pytest.fixture
def k2():
return qu.rand_ket(3)
@pytest.fixture
def orthog_ks():
p = qu.rand_rho(3)
v = qu.eigvecsh(p)
return (v[:, [0]], v[:, [1]], v[:, [2]])
# --------------------------------------------------------------------------- #
# TESTS #
# --------------------------------------------------------------------------- #
class TestFidelity:
def test_both_pure(self, k1, k2):
f = qu.fidelity(k1, k1)
assert_allclose(f, 1.0)
f = qu.fidelity(k1, k2)
assert f > 0 and f < 1
def test_both_mixed(self, p1, p2):
f = qu.fidelity(qu.eye(3) / 3, qu.eye(3) / 3)
assert_allclose(f, 1.0)
f = qu.fidelity(p1, p1)
assert_allclose(f, 1.0)
f = qu.fidelity(p1, p2)
assert f > 0 and f < 1
def test_orthog_pure(self, orthog_ks):
k1, k2, k3 = orthog_ks
for s1, s2, in ([k1, k2],
[k2, k3],
[k3, k1],
[k1 @ k1.H, k2],
[k1, k2 @ k2.H],
[k3 @ k3.H, k2],
[k3, k2 @ k2.H],
[k1 @ k1.H, k3],
[k1, k3 @ k3.H],
[k1 @ k1.H, k2 @ k2.H],
[k2 @ k2.H, k3 @ k3.H],
[k1 @ k1.H, k3 @ k3.H]):
f = qu.fidelity(s1, s2)
assert_allclose(f, 0.0, atol=1e-6)
class TestPurify:
def test_d2(self):
rho = qu.eye(2) / 2
psi = qu.purify(rho)
assert qu.expec(psi, qu.bell_state('phi+')) > 1 - 1e-14
def test_pure(self):
rho = qu.up(qtype='dop')
psi = qu.purify(rho)
assert abs(qu.concurrence(psi)) < 1e-14
class TestDephase:
@pytest.mark.parametrize("rand_rank", [None, 0.3, 2])
def test_basic(self, rand_rank):
rho = qu.rand_rho(9)
ln = qu.logneg(rho, [3, 3])
for p in (0.2, 0.5, 0.8, 1.0):
rho_d = qu.dephase(rho, p, rand_rank=rand_rank)
assert qu.logneg(rho_d, [3, 3]) <= ln
assert rho_d.tr() == pytest.approx(1.0)
class TestEntropy:
def test_entropy_pure(self):
a = qu.bell_state(1, qtype='dop')
assert_allclose(0.0, qu.entropy(a), atol=1e-12)
def test_entropy_mixed(self):
a = 0.5 * (qu.bell_state(1, qtype='dop') +
qu.bell_state(2, qtype='dop'))
assert_allclose(1.0, qu.entropy(a), atol=1e-12)
@pytest.mark.parametrize("evals, e", [([0, 1, 0, 0], 0),
([0, 0.5, 0, 0.5], 1),
([0.25, 0.25, 0.25, 0.25], 2)])
def test_list(self, evals, e):
assert_allclose(qu.entropy(evals), e)
@pytest.mark.parametrize("evals, e", [([0, 1, 0, 0], 0),
([0, 0.5, 0, 0.5], 1),
([0.25, 0.25, 0.25, 0.25], 2)])
def test_1darray(self, evals, e):
assert_allclose(qu.entropy(
|
np.asarray(evals)
|
numpy.asarray
|
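The parametrized entropy tests above follow the usual base-2 convention: a pure spectrum gives 0 bits, two equal eigenvalues of 1/2 give 1 bit, and four equal eigenvalues of 1/4 give 2 bits. A plain-numpy check of those expected values; note this reimplements the formula for illustration and is not quimb's own entropy function.

import numpy as np

def shannon_entropy(evals):
    p = np.asarray(evals, dtype=float)
    p = p[p > 0]                       # treat 0 * log(0) as 0
    return float(-np.sum(p * np.log2(p)))

print(shannon_entropy([0, 1, 0, 0]))              # 0.0
print(shannon_entropy([0, 0.5, 0, 0.5]))          # 1.0
print(shannon_entropy([0.25, 0.25, 0.25, 0.25]))  # 2.0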
import argparse
import os.path as osp
import mmcv
import os
import cv2
import math
import copy
import torch
import numpy as np
from PIL import Image, ImageDraw
from mmdet.apis import init_detector, inference_detector
from mmdet.core import multiclass_nms
from draw_box_in_img import draw_boxes_with_label_and_scores
ODAI_LABEL_MAP = {
'back-ground': 0,
'plane': 1,
'baseball-diamond': 2,
'bridge': 3,
'ground-track-field': 4,
'small-vehicle': 5,
'large-vehicle': 6,
'ship': 7,
'tennis-court': 8,
'basketball-court': 9,
'storage-tank': 10,
'soccer-ball-field': 11,
'roundabout': 12,
'harbor': 13,
'swimming-pool': 14,
'helicopter': 15,
}
def get_label_name_map():
reverse_dict = {}
for name, label in ODAI_LABEL_MAP.items():
reverse_dict[label] = name
return reverse_dict
def osp(savepath):
if not os.path.exists(savepath):
os.makedirs(savepath)
def parse_args():
parser = argparse.ArgumentParser(description='MMDet test detector')
parser.add_argument('--config', help='test config file path')
parser.add_argument('--checkpoint', help='checkpoint file')
parser.add_argument('--out', help='output result file')
parser.add_argument('--show', action='store_true', help='show results')
parser.add_argument('--cropsize', help='patch image size', default=512, type=int)
parser.add_argument('--stride', help='patch image stride', default=256, type=int)
parser.add_argument('--testImgpath', help='test image path', default='work_dirs/retinanet_hrnet_fpn_inference', type=str)
parser.add_argument('--saveTxtpath', help='test image path', default='work_dirs/retinanet_hrnet_fpn_inference', type=str)
parser.add_argument('--saveImgpath', help='test image path', default='work_dirs/retinanet_hrnet_fpn_inference', type=str)
parser.add_argument('--patchImgPath', help='test image path', default='work_dirs/retinanet_hrnet_fpn_inference', type=str)
args = parser.parse_args()
return args
def single_gpu_test(args, cfg, model):
testImgList = os.listdir(args.testImgpath)
for imgfile in testImgList:
imgfile = imgfile.strip()
img = Image.open(os.path.join(args.testImgpath, imgfile))
image = img.convert('RGB')
img = np.array(image)
width, height, channel = img.shape
rows = int(math.ceil(1.0 * (width - args.cropsize) / args.stride)) + 1
cols = int(math.ceil(1.0 * (height - args.cropsize) / args.stride)) + 1
multi_bboxes = list()
multi_scores = list()
for row in range(rows):
if width > args.cropsize:
y_start = min(row * args.stride, width - args.cropsize)
y_end = y_start + args.cropsize
else:
y_start = 0
y_end = width
for col in range(cols):
if height > args.cropsize:
x_start = min(col * args.stride, height - args.cropsize)
x_end = x_start + args.cropsize
else:
x_start = 0
x_end = height
subimg = copy.deepcopy(img[y_start:y_end, x_start:x_end, :])
w, h, c = np.shape(subimg)
outimg = np.zeros((args.cropsize, args.cropsize, 3))
outimg[0:w, 0:h, :] = subimg
result = inference_detector(model, outimg) #15
bboxes = np.vstack(result)
labels = [ #0-15
np.full(bbox.shape[0], i+1, dtype=np.int32)
for i, bbox in enumerate(result)
]
labels = np.concatenate(labels)
if len(bboxes) > 0:
# image = draw_boxes_with_label_and_scores(outimg, bboxes[:, :4], bboxes[:, 4], labels - 1, 0)
# image.save(os.path.join(args.patchImgPath, imgfile[:-4]+'_'+str(y_start)+'_'+str(x_start)+'.png'))
bboxes[:, :2] += [x_start, y_start]
multi_bboxes.append(bboxes[:, :5])
scores = np.zeros((bboxes.shape[0], len(ODAI_LABEL_MAP.keys()))) #0-15
for i, j in zip(range(bboxes.shape[0]), labels):
scores[i, j] = bboxes[i, 5]
multi_scores.append(scores)
crop_num = len(multi_bboxes)
if crop_num > 0:
multi_bboxes = np.vstack(multi_bboxes)
multi_scores = np.vstack(multi_scores)
multi_bboxes = torch.Tensor(multi_bboxes)
multi_scores = torch.Tensor(multi_scores)
score_thr = 0.1
nms=dict(type='nms', iou_thr=0.5)
max_per_img = 2000
det_bboxes, det_labels = multiclass_nms(multi_bboxes, multi_scores,
score_thr, nms,
max_per_img)
if det_bboxes.shape[0] > 0:
det_bboxes =
|
np.array(det_bboxes)
|
numpy.array
|
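The patch loop in single_gpu_test above tiles the image with a fixed crop size and stride, clamping the last window so it never runs past the border. A standalone sketch of just that tiling arithmetic (pure math, no detector involved; the example image size is made up):

import math

def tile_offsets(length, cropsize, stride):
    """Window start offsets along one axis, clamped to stay inside the image."""
    if length <= cropsize:
        return [0]
    n = int(math.ceil(1.0 * (length - cropsize) / stride)) + 1
    return [min(i * stride, length - cropsize) for i in range(n)]

print(tile_offsets(1100, 512, 256))  # [0, 256, 512, 588]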
# -*- coding: utf-8 -*-
# @Time : 2020/9/28 13:07
# @Author : CaiXin
# @File : kitti_trajectory_utils.py
'''Convert KITTI dataset poses into the format required by the evaluation toolbox.'''
import argparse
import math
import os
import numpy as np
def quat2dcm(quaternion):
"""Returns direct cosine matrix from quaternion (Hamiltonian, [x y z w])
"""
q = np.array(quaternion[:4], dtype=np.float64, copy=True)
nq = np.dot(q, q)
if nq < np.finfo(float).eps * 4.0:
return np.identity(3)  # degenerate quaternion: fall back to a 3x3 identity DCM
q *= math.sqrt(2.0 / nq)
q = np.outer(q, q)
return np.array((
(1.0 - q[1, 1] - q[2, 2], q[0, 1] - q[2, 3], q[0, 2] + q[1, 3]),
(q[0, 1] + q[2, 3], 1.0 - q[0, 0] - q[2, 2], q[1, 2] - q[0, 3]),
(q[0, 2] - q[1, 3], q[1, 2] + q[0, 3], 1.0 - q[0, 0] - q[1, 1])),
dtype=np.float64)
def dcm2quat(matrix_3x3):
"""Return quaternion (Hamiltonian, [x y z w]) from rotation matrix.
This algorithm comes from "Quaternion Calculus and Fast Animation",
Ken Shoemake, 1987 SIGGRAPH course notes
(from Eigen)
"""
q = np.empty((4,), dtype=np.float64)
M = np.array(matrix_3x3, dtype=np.float64, copy=False)[:3, :3]
t = np.trace(M)
if t > 0.0:
t = math.sqrt(t + 1.0)
q[3] = 0.5 * t
t = 0.5 / t
q[0] = (M[2, 1] - M[1, 2]) * t
q[1] = (M[0, 2] - M[2, 0]) * t
q[2] = (M[1, 0] - M[0, 1]) * t
else:
i, j, k = 0, 1, 2
if M[1, 1] > M[0, 0]:
i, j, k = 1, 2, 0
if M[2, 2] > M[i, i]:
i, j, k = 2, 0, 1
t = math.sqrt(M[i, i] - M[j, j] - M[k, k] + 1.0)
q[i] = 0.5 * t
t = 0.5 / t
q[3] = (M[k, j] - M[j, k]) * t
q[j] = (M[i, j] + M[j, i]) * t
q[k] = (M[k, i] + M[i, k]) * t
return q
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='''Analyze trajectories''')
parser.add_argument('--data-root', type=str, default=' ', help='Kitti odometry dataset folder')
parser.add_argument('--est-root', type=str, default=' ', help='Folder that estimated pose file exists')
parser.add_argument('--sequence-idx', type=str, default='09', help='Specify the sequence to be converted')
parser.add_argument('--gt', type=str, default='True', help='if false, use estimated poses')
parser.add_argument('--filename', type=str, default='est_{0}.txt', help='filename of estimated poses, if with "--gt True", ignore this')
parser.add_argument('--out-dir', type=str, default='../../kitti/', help='to save the trajectory of the modified format')
args = parser.parse_args()
assert os.path.exists(args.data_root)
# read raw timestamps and poses
data_root = args.data_root
sequence_name = args.sequence_idx
is_groundtruth = args.gt
timestamp_file = os.path.join(data_root, 'sequences', sequence_name, 'times.txt')
if is_groundtruth == 'True':
poses_file = os.path.join(data_root, 'poses', '{0}.txt'.format(sequence_name))
else:
poses_file = os.path.join(args.est_root, args.filename)
timestamps = np.genfromtxt(timestamp_file).astype(np.float64)
poses = np.genfromtxt(poses_file).astype(np.float64)
transform_matrices = poses.reshape(-1, 3, 4)  # each transform is stored as a 3x4 matrix
# result cache
N = len(timestamps) - 1  # minus 1 because VOLO outputs one fewer pose than the ground truth has
positions =
|
np.zeros([N, 3])
|
numpy.zeros
|
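quat2dcm and dcm2quat above are inverses of each other (up to quaternion sign), so a quick round trip is a useful sanity check. A minimal sketch, assuming the two functions defined in the snippet above are importable or already in scope:

import numpy as np

q = np.array([0.0, 0.0, 0.0, 1.0])   # identity rotation, Hamiltonian [x y z w]
R = quat2dcm(q)                       # -> 3x3 identity matrix
q_back = dcm2quat(R)
print(np.allclose(R, np.eye(3)), np.allclose(q_back, q))  # True True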
# -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function, division
import tempfile
import shutil
import atexit
import numpy as np
from nose.tools import eq_ as eq, assert_is_none, assert_is_instance, \
assert_raises
from numpy.testing import assert_array_equal
from zarr.creation import array, empty, zeros, ones, full, open_array, \
empty_like, zeros_like, ones_like, full_like, open_like, create
from zarr.sync import ThreadSynchronizer
from zarr.core import Array
from zarr.storage import DirectoryStore
from zarr.hierarchy import open_group
from zarr.errors import PermissionError
from zarr.codecs import Zlib
# something bcolz-like
class MockBcolzArray(object):
def __init__(self, data, chunklen):
self.data = data
self.chunklen = chunklen
def __getattr__(self, item):
return getattr(self.data, item)
def __getitem__(self, item):
return self.data[item]
# something h5py-like
class MockH5pyDataset(object):
def __init__(self, data, chunks):
self.data = data
self.chunks = chunks
def __getattr__(self, item):
return getattr(self.data, item)
def __getitem__(self, item):
return self.data[item]
def test_array():
# with numpy array
a = np.arange(100)
z = array(a, chunks=10)
eq(a.shape, z.shape)
eq(a.dtype, z.dtype)
assert_array_equal(a, z[:])
# with array-like
a = list(range(100))
z = array(a, chunks=10)
eq((100,), z.shape)
eq(np.asarray(a).dtype, z.dtype)
assert_array_equal(np.asarray(a), z[:])
# with another zarr array
z2 = array(z)
eq(z.shape, z2.shape)
eq(z.chunks, z2.chunks)
eq(z.dtype, z2.dtype)
assert_array_equal(z[:], z2[:])
# with chunky array-likes
b = np.arange(1000).reshape(100, 10)
c = MockBcolzArray(b, 10)
z3 = array(c)
eq(c.shape, z3.shape)
eq((10, 10), z3.chunks)
b =
|
np.arange(1000)
|
numpy.arange
|
"""Plot Offline RL for the actual paper.
USE THIS FOR OFFICIAL RESULTS! We should be able to run this with a simple
bash script to generate all the possible results we would need to share.
"""
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
plt.style.use('seaborn')
import seaborn as sns # Daniel: not sure if needed
# https://stackoverflow.com/questions/43080259/
# no-outlines-on-bins-of-matplotlib-histograms-or-seaborn-distplots/43080772
plt.rcParams["patch.force_edgecolor"] = True
import pandas as pd
from copy import deepcopy
import os
import os.path as osp
import numpy as np
import json
from spinup.user_config import DEFAULT_DATA_DIR as DDD
from spinup.teaching.plot_offline_rl import (
parse_to_get_data_type, smooth, sanity_checks, ragged_array, adjust_fig_suffix,
get_dirs_S_sorted, COLORS, COLORS_LINE, ENV_NAMES,
)
np.set_printoptions(linewidth=180, suppress=True)
# Matplotlib stuff
titlesize = 33
xsize = 31
ysize = 31
ticksize = 29
legendsize = 21 # adjust as needed
er_alpha = 0.25
lw = 2
NAME_TO_LABEL = {'ant': 'Ant-v3',
'halfcheetah': 'HalfCheetah-v3',
'hopper': 'Hopper-v3',
'walker2d': 'Walker2d-v3',}
# ------------------------------------------------------- #
# Methods to polish up labels, etc., for the actual paper #
# ------------------------------------------------------- #
def get_curriculum_label(args, tail):
"""Returns curriculum labels in a human-readable manner.
E.g., tail could be:
td3_offline_curriculum_ep_2500_logged_p_50000000_n_1000000
A lot depends on what notation we decide to use.
"""
curr_label = ''
if 'online_stud_total_25000_' in tail:
if 'ep_250_logged_scale_1.00t' in tail:
#curr_label = 'C_scale(t; c=1.00); 2.5%'
curr_label = '2.5%'
elif 'ep_250_logged_p_1000000_n_1000000_' in tail:
#curr_label = 'C_add(t; f=1M); 2.5%'
curr_label = '2.5%'
else:
raise ValueError(tail)
elif 'online_stud_total_50000_' in tail:
if 'ep_250_logged_scale_1.00t' in tail:
#curr_label = 'C_scale(t; c=1.00); 5.0%'
curr_label = '5.0%'
elif 'ep_250_logged_p_1000000_n_1000000_' in tail:
#curr_label = 'C_add(t; f=1M); 5.0%'
curr_label = '5.0%'
else:
raise ValueError(tail)
elif 'online_stud_total_100000_' in tail:
if 'ep_250_logged_scale_1.00t' in tail:
#curr_label = 'C_scale(t; c=1.00); 10.0%'
curr_label = '10.0%'
elif 'ep_250_logged_p_1000000_n_1000000_' in tail:
#curr_label = 'C_add(t; f=1M); 10.0%'
curr_label = '10.0%'
else:
raise ValueError(tail)
# Now the offline cases:
elif 'ep_250_logged_p_1000000_n_1000000' in tail:
curr_label = 'C_add(t; f=1M)'
elif 'ep_250_logged_p_1000000_n_500000' in tail:
curr_label = 'C_add(t; f=500K)'
elif 'ep_250_logged_p_1000000_n_200000' in tail:
curr_label = 'C_add(t; f=200K)'
elif 'ep_250_logged_p_1000000_n_100000' in tail:
curr_label = 'C_add(t; f=100K)'
elif 'ep_250_logged_p_1000000_n_50000' in tail:
curr_label = 'C_add(t; f=50K)'
elif 'ep_250_logged_p_800000_n_0' in tail:
curr_label = 'C_add(t; p=800K, f=0)'
elif 'ep_250_logged_scale_0.50t' in tail:
curr_label = 'C_scale(t; c=0.50)'
elif 'ep_250_logged_scale_0.75t' in tail:
curr_label = 'C_scale(t; c=0.75)'
elif 'ep_250_logged_scale_1.00t' in tail:
curr_label = 'C_scale(t; c=1.00)'
elif 'ep_250_logged_scale_1.10t' in tail:
curr_label = 'C_scale(t; c=1.10)'
elif 'ep_250_logged_scale_1.25t' in tail:
curr_label = 'C_scale(t; c=1.25)'
elif 'ep_2500_logged_p_50000000_n_1000000' in tail:
curr_label = 'C_add(t; f=1M)'
elif 'ep_2500_logged_scale_1.00t' in tail:
curr_label = 'C_scale(t; c=1.00)'
else:
raise ValueError(tail)
return curr_label
def polish_figname(teacher_seed_dir, fig_suffix, args):
"""Special case for ease of use in Overleaf, need to remove these symbols."""
fig_suffix = adjust_fig_suffix(fig_suffix, args)
fig_suffix = fig_suffix.replace('[','')
fig_suffix = fig_suffix.replace(']','')
fig_suffix = fig_suffix.replace('\'','')
figname = osp.join(teacher_seed_dir, fig_suffix)
return figname
def get_teacher_stats(teacher_seed_dir):
# Derive original online DeepRL teacher results from 'progress.txt'.
# Let's also do the two stats that we compute for the student as well.
prog_file = osp.join(teacher_seed_dir, 'progress.txt')
assert os.path.exists(prog_file), prog_file
teacher_data = pd.read_table(prog_file)
teacher_base = os.path.basename(teacher_seed_dir)
t_perf_np = teacher_data['AverageTestEpRet'].to_numpy()
t_stat_1 = np.mean(t_perf_np[-10:])
t_stat_2 = np.mean(t_perf_np[3:])
t_label = f'{teacher_base}, M1: {t_stat_1:0.1f}, M2: {t_stat_2:0.1f}' # To report in table.
return (teacher_data, teacher_base, t_perf_np, t_label)
def get_student_stats(s_sub_dir, teacher_seed_dir):
# Similarly, load the student file statistics.
print(f'\t{s_sub_dir}')
prog_file = osp.join(s_sub_dir, 'progress.txt')
config_file = osp.join(s_sub_dir, 'config.json')
assert os.path.exists(prog_file), prog_file
assert os.path.exists(config_file), config_file
with open(config_file, 'rb') as fh:
config_data = json.load(fh)
student_data = pd.read_table(prog_file)
sanity_checks(config=config_data,
progress=student_data,
teacher_path=teacher_seed_dir)
return (config_data, student_data)
# ------------------------------------------------------- #
# Plotting methods! #
# ------------------------------------------------------- #
def report_table_plot(args, teacher_seed_dir, student_dirs):
"""This is for reporting a table. See spinup.teaching.plot_offline_rl for details
and documentation. The input is the same as the plot method.
NOTE(daniel) As I've reiterated to myself many times, we need to ensure that for
any runs that used overlap, we actually run them with consistent test-time episode
statistics. The easiest way is to re-run with 10 fixed test episodes for reporting
here. Then we do any episodes for overlap. See `spinup.teaching.offline_rl`.
M1 = FINAL, so it should hopefully get the last 100 episodes.
M2 = ALL, so it should get all (well, except the first few due to random policy).
Update: let's just merge the plotting code here, it does the same thing, right?
And for this we may want to see what happens with no subplots. Just overlay? With
the long-horizon runs, this will make it easier, right?
(June 08) What about putting all the 'add' curricula to the left, 'scale' to the right?
(June 13) Minor edits to make it 'wider' (reduces vertical space :D), etc. Getting close!
"""
window = args.window
nrows, ncols = 1, 2
fig, ax = plt.subplots(nrows, ncols, sharey=True, squeeze=False, figsize=(10*ncols, 6*nrows))
env_plot = NAME_TO_LABEL[args.name]
# Load teacher statistics.
(teacher_data, teacher_base, _, t_label) = get_teacher_stats(teacher_seed_dir)
# Plot teacher performance, SMOOTHED for readability. Note: no more teacher labels.
ret_train = smooth(teacher_data['AverageEpRet'], window)
ret_test = smooth(teacher_data['AverageTestEpRet'], window)
# Prior code (for debugging only)
#label_train = f'{teacher_base} (Train)'
#label_test = f'{teacher_base} (Test)'
#ax[0,0].plot(ret_train, ls='--', lw=lw, color='black', label=label_train)
#ax[0,0].plot(ret_test, ls='-', lw=lw, color='black', label=label_test)
# Newer code (for actual plots)
label_teach = f'Teacher'
ax[0,0].plot(ret_test, ls='--', lw=lw, color='black', label=label_teach)
#ax[0,0].axhline(y=np.max(ret_test), color='black', lw=0.5, linestyle='--')
ax[0,1].plot(ret_test, ls='--', lw=lw, color='black', label=label_teach)
#ax[0,1].axhline(y=np.max(ret_test), color='black', lw=0.5, linestyle='--')
# Student should be: <student_exp>/<teacher_base_with_seed>_<seed>/
# We MUST have `teacher_base` in the directory after <student_exp>.
s_labels, s_labels_std, s_M1s, s_M2s, s_M1e, s_M2e = [], [], [], [], [], []
sidx = 0
for sd in student_dirs:
student_subdirs = sorted([osp.join(sd,x) for x in os.listdir(sd) if teacher_base in x])
if len(student_subdirs) == 0:
continue
s_stats_1, s_stats_2 = [], []
student_stats = []
terminated_early = 0 # number of seeds that terminated early
# Now `student_subdirs`, for THIS PARTICULAR student, go through its RANDOM SEEDS.
# Combine all student runs together with random seeds. Note: smoothing applied BEFORE
# we append to `student_stats`, and before we take the mean / std for the plot. But,
# compute desired statistics BEFORE smoothing (the last 10 and the average over all
# evaluations) because we want to report those numbers in tables [see docs above].
for s_sub_dir in student_subdirs:
_, student_data = get_student_stats(s_sub_dir, teacher_seed_dir)
# DO NOT DO SMOOTHING. Compute statistics M1 (stat_1) and M2 (stat_2).
perf = student_data['AverageTestEpRet'].to_numpy()
assert (len(perf) <= 250) or (len(perf) in [2500]), len(perf)
if len(perf) < 250:
print(f'Note: for {sd}, len(perf): {len(perf)}')
terminated_early += 1
stat_1 = np.mean(perf[-10:])
stat_2 = np.mean(perf[3:])
s_stats_1.append(stat_1)
s_stats_2.append(stat_2)
# Now smooth and add to student_stats for plotting later.
student_result = smooth(student_data['AverageTestEpRet'], window)
student_stats.append(student_result)
# Extract label which is the <student_exp> not the <teacher_base_with_seed> portion.
_, tail = os.path.split(sd)
for name in ENV_NAMES:
if name in tail:
tail = tail.replace(f'{name}_', '')
nb_seeds = len(s_stats_1)
s_label = f'{tail} (x{nb_seeds}, e{terminated_early})' # newline due to space
s_label = s_label.replace('_offline_curriculum_', '_curr_') # use for no newline
s_label_std = str(s_label)
# Shape is (num_seeds, num_recordings=250), usually 250 due to (1M steps = 250 epochs).
# TODO(daniel) Recording of s_stat_{1,2} might not be intended if we have ragged array.
s_M1 = np.mean(s_stats_1) # last 10
s_M2 = np.mean(s_stats_2) # all (except first 3)
s_M1_err = np.std(s_stats_1) / np.sqrt(nb_seeds) # last 10
s_M2_err = np.std(s_stats_2) / np.sqrt(nb_seeds) # all (except first 3)
s_label += f'\n\t{s_M1:0.1f} & {s_M2:0.1f}'
s_label_std += f'\n\t{s_M1:0.1f} $\pm$ {s_M1_err:0.1f} & {s_M2:0.1f} $\pm$ {s_M2_err:0.1f}'
s_labels.append(s_label)
s_labels_std.append(s_label_std)
s_M1s.append(s_M1)
s_M2s.append(s_M2)
s_M1e.append(s_M1_err)
s_M2e.append(s_M2_err)
# Actually plot (with error regions if applicable). Standard error of the mean.
# Also need to update the student label.
curr_label = get_curriculum_label(args, tail)
_row = 0 if 'add' in curr_label else 1
label_curve = f'{curr_label}' # It's understood this means 'Student'.
student_ret, student_std, _ = ragged_array(student_stats)
x_vals = np.arange(len(student_ret))
ax[0,_row].plot(x_vals, student_ret, lw=lw, color=COLORS[sidx], label=label_curve)
if nb_seeds > 1:
ax[0,_row].fill_between(x_vals,
student_ret - (student_std / np.sqrt(nb_seeds)),
student_ret + (student_std / np.sqrt(nb_seeds)),
color=COLORS[sidx],
alpha=0.5)
sidx += 1
# Print table! Hopefully later in LaTeX code. Do w/ and w/out st-dev (actually, err).
M1_max = max(s_M1s)
M2_max = max(s_M2s)
M1_ste = s_M1e[ s_M1s.index(M1_max) ] # Index of the M1_max, get corresponding std-err.
M2_ste = s_M2e[ s_M2s.index(M2_max) ] # Index of the M2_max, get corresponding std-err.
M1_thresh = M1_max - M1_ste
M2_thresh = M2_max - M2_ste
print('='*100)
print(t_label)
print('\nFor students, thresholds for M1 and M2 (with std err subtracted):')
print(f'\t{M1_max:0.1f} - {M1_ste:0.1f} = {M1_thresh:0.1f}')
print(f'\t{M2_max:0.1f} - {M2_ste:0.1f} = {M2_thresh:0.1f}\n')
# Iterate, check overlapping standard errors.
for (s_label, s_label_std, s_M1, s_M2, M1err, M2err) in \
zip(s_labels, s_labels_std, s_M1s, s_M2s, s_M1e, s_M2e):
print(s_label)
print(s_label_std)
if s_M1 == max(s_M1s):
print(f'\tNOTE: max M1.')
elif (s_M1+M1err >= M1_thresh):
print(f'\tNOTE: bold M1. {s_M1:0.1f} + {M1err:0.1f} = {(s_M1+M1err):0.1f}')
if s_M2 == max(s_M2s):
print(f'\tNOTE: max M2.')
elif (s_M2+M2err >= M2_thresh):
print(f'\tNOTE: bold M2. {s_M2:0.1f} + {M2err:0.1f} = {(s_M2+M2err):0.1f}')
print()
print('='*100)
# Now let's get back to plotting.
ax[0,0].set_title(f'Performance ({env_plot})', size=titlesize)
ax[0,1].set_title(f'Performance ({env_plot})', size=titlesize)
ax[0,0].set_xlabel('Train Epochs', size=xsize)
ax[0,1].set_xlabel('Train Epochs', size=xsize)
ax[0,0].set_ylabel('Test Return', size=ysize)
ax[0,1].set_ylabel('Test Return', size=ysize)
for r in range(nrows):
for c in range(ncols):
leg = ax[r,c].legend(loc="best", ncol=1, prop={'size':legendsize})
for legobj in leg.legendHandles:
legobj.set_linewidth(5.0)
ax[r,c].tick_params(axis='x', labelsize=ticksize)
ax[r,c].tick_params(axis='y', labelsize=ticksize)
plt.tight_layout()
fig_suffix = f'plot_paper_{env_plot}.png'
figname = polish_figname(teacher_seed_dir, fig_suffix, args)
plt.savefig(figname)
print(f'\nSAVED FIGURE: {figname}')
def report_table_plot_2500(args, teacher_seed_dir, student_dirs):
"""Reporting a table and making plots for the 2500 epoch case."""
window = args.window
nrows, ncols = 1, 1
fig, ax = plt.subplots(nrows, ncols, sharey=True, squeeze=False, figsize=(17*ncols, 5*nrows))
env_plot = NAME_TO_LABEL[args.name]
# Load teacher statistics.
(teacher_data, teacher_base, _, t_label) = get_teacher_stats(teacher_seed_dir)
# Plot teacher performance, SMOOTHED for readability. Note: no more teacher labels.
ret_train = smooth(teacher_data['AverageEpRet'], window)
ret_test = smooth(teacher_data['AverageTestEpRet'], window)
# Prior code (for debugging only)
#label_train = f'{teacher_base} (Train)'
#label_test = f'{teacher_base} (Test)'
#ax[0,0].plot(ret_train, ls='--', lw=lw, color='black', label=label_train)
#ax[0,0].plot(ret_test, ls='-', lw=lw, color='black', label=label_test)
# Newer code (for actual plots)
label_teach = f'Teacher'
ax[0,0].plot(ret_test, ls='-', lw=lw, color='black', label=label_teach)
ax[0,0].axhline(y=np.max(ret_test), color='black', lw=1.0, linestyle='--')
# Student should be: <student_exp>/<teacher_base_with_seed>_<seed>/
# We MUST have `teacher_base` in the directory after <student_exp>.
sidx = 0
s_labels, s_labels_std = [], []
for sd in student_dirs:
student_subdirs = sorted([osp.join(sd,x) for x in os.listdir(sd) if teacher_base in x])
if len(student_subdirs) == 0:
continue
s_stats_1, s_stats_2 = [], []
student_stats = []
terminated_early = 0 # number of seeds that terminated early
# Now `student_subdirs`, for THIS PARTICULAR student, go through its RANDOM SEEDS.
# Combine all student runs together with random seeds. Note: smoothing applied BEFORE
# we append to `student_stats`, and before we take the mean / std for the plot. But,
# compute desired statistics BEFORE smoothing (the last 10 and the average over all
# evaluations) because we want to report those numbers in tables [see docs above].
for s_sub_dir in student_subdirs:
_, student_data = get_student_stats(s_sub_dir, teacher_seed_dir)
# DO NOT DO SMOOTHING. Compute statistics M1 (stat_1) and M2 (stat_2).
perf = student_data['AverageTestEpRet'].to_numpy()
assert (len(perf) <= 250) or (len(perf) in [2500]), len(perf)
if len(perf) < 250:
print(f'Note: for {sd}, len(perf): {len(perf)}')
terminated_early += 1
stat_1 = np.mean(perf[-10:])
stat_2 = np.mean(perf[3:])
s_stats_1.append(stat_1)
s_stats_2.append(stat_2)
# Now smooth and add to student_stats for plotting later.
student_result = smooth(student_data['AverageTestEpRet'], window)
student_stats.append(student_result)
# Extract label which is the <student_exp> not the <teacher_base_with_seed> portion.
_, tail = os.path.split(sd)
for name in ENV_NAMES:
if name in tail:
tail = tail.replace(f'{name}_', '')
nb_seeds = len(s_stats_1)
s_label = f'{tail} (x{nb_seeds}, e{terminated_early})' # newline due to space
s_label = s_label.replace('_offline_curriculum_', '_curr_') # use for no newline
s_label_std = str(s_label)
# Shape is (num_seeds, num_recordings=250), usually 250 due to (1M steps = 250 epochs).
# TODO(daniel) Recording of s_stat_{1,2} might not be intended if we have ragged array.
s_M1 = np.mean(s_stats_1) # last 10
s_M2 = np.mean(s_stats_2) # all (except first 3)
s_M1_err = np.std(s_stats_1) / np.sqrt(nb_seeds) # last 10
s_M2_err = np.std(s_stats_2) / np.sqrt(nb_seeds) # all (except first 3)
s_label += f'\n\t{s_M1:0.1f} & {s_M2:0.1f}'
s_label_std += f'\n\t{s_M1:0.1f} $\pm$ {s_M1_err:0.1f} & {s_M2:0.1f} $\pm$ {s_M2_err:0.1f}'
s_labels.append(s_label)
s_labels_std.append(s_label_std)
# Actually plot (with error regions if applicable). Standard error of the mean.
# Also need to update the student label.
curr_label = get_curriculum_label(args, tail)
label_curve = f'Student: {curr_label}'
student_ret, student_std, _ = ragged_array(student_stats)
x_vals = np.arange(len(student_ret))
if np.max(x_vals) > 250:
ax[0,0].axvline(x=250, color='black', lw=1.0, linestyle='--')
ax[0,0].plot(x_vals, student_ret, lw=lw, color=COLORS[sidx], label=label_curve)
if nb_seeds > 1:
ax[0,0].fill_between(x_vals,
student_ret - (student_std /
|
np.sqrt(nb_seeds)
|
numpy.sqrt
|
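The fill_between calls above draw a standard-error band: the per-seed curves are averaged, and the shaded region is mean plus/minus std divided by the square root of the number of seeds. A compact sketch of that computation with synthetic curves (matplotlib and the COLORS list from the script are not needed for the arithmetic itself):

import numpy as np

rng = np.random.default_rng(0)
curves = rng.normal(loc=1000.0, scale=50.0, size=(5, 250))  # 5 seeds x 250 epochs

mean_curve = curves.mean(axis=0)
std_err = curves.std(axis=0) / np.sqrt(curves.shape[0])     # standard error of the mean
lower, upper = mean_curve - std_err, mean_curve + std_err
print(mean_curve.shape, float(std_err.mean()))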
"""
Sample implementation of Artificial Bee Colony Algorithm.
Reference : https://link.springer.com/content/pdf/10.1007/s10898-007-9149-x.pdf
"""
import numpy as np
from utils import getInitialPoint, setup_logger
from utils.common import ResultManager
np.random.seed(0)
logger = setup_logger(__name__)
def minimize(dimension, objective, max_iter, max_visit=10, num_population=100, *args, **kwargs):
# step1 : initialization
x = getInitialPoint((num_population, dimension), objective)
all_candidates = np.arange(num_population)
v = np.array([objective(t) for t in x])
cnt = np.zeros(num_population)
def update(i):
x_i = x[i].copy()
j = np.random.randint(0, dimension)       # randint's upper bound is exclusive, so use dimension, not dimension-1
k = np.random.randint(0, num_population)  # likewise for the neighbour index
phi = np.random.normal()
x_i[j] -= phi*(x_i[j] - x[k][j])
v_new = objective(x_i)
if v_new <= v[i]:
x[i] = x_i
v[i] = v_new
cnt[i] += 1
def random_update():
candidate = np.where(cnt == max_visit)[0]
for i in candidate:
x_i = getInitialPoint((dimension, ), objective)
v_new = objective(x_i)
if v_new <= v[i]:
x[i] = x_i
v[i] = v_new
cnt[i] = 1
result = ResultManager(objective, __name__, logger, *args, **kwargs)
m =
|
np.min(v)
|
numpy.min
|
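The core of the employed-bee phase above is a one-coordinate perturbation towards (or away from) a random neighbour, x_i[j] <- x_i[j] - phi * (x_i[j] - x_k[j]), accepted only if the objective does not get worse. A standalone sketch of a single such update, assuming a simple quadratic objective in place of the problem-specific one:

import numpy as np

def abc_neighbor_update(x, v, i, objective, rng):
    """One candidate move for food source i; greedy acceptance as in the snippet above."""
    n, dim = x.shape
    j = rng.integers(dim)        # coordinate to perturb
    k = rng.integers(n)          # random neighbour source
    cand = x[i].copy()
    cand[j] -= rng.normal() * (cand[j] - x[k][j])
    cand_val = objective(cand)
    if cand_val <= v[i]:
        x[i], v[i] = cand, cand_val

rng = np.random.default_rng(0)
x = rng.normal(size=(10, 3))
obj = lambda p: float(np.sum(p ** 2))
v = np.array([obj(p) for p in x])
abc_neighbor_update(x, v, 0, obj, rng)
print(v[0])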
'''
Usage: python md_readPCD.py numEpiInicial numEpiFinal 3D/2D noise(1)/noiseless(0)
e.g.: python readAllPCD_xy.py 10 10 3D 0
Reads only the 10th episode.
This script reads the PCD files generated by BlenSor, post-processes them and, at the end, produces a quantized
matrix of obstacles for each 'episode' and for each receiver present in that episode.
The point cloud data is quantized using the Quantization Parameters (QP), with the area delimited by Parameter_max
and Parameter_min. It is advised to keep these parameters consistent with the delimitation parameters in
md_generateMatrixChannels.py.
Inside the quantized matrix, each obstacle point is marked with 1, the Tx with -1 and the Rx with -2.
This process fits the data of all PCD files into matrices of the same shape,
because the number of points returned by each scan is potentially different,
and it also reduces the number of points through quantization.
'''
import sys
import os
import csv
import argparse
import sqlite3
import shutil
import numpy as np
#import sparse
import scipy
import scipy.spatial.distance as dist
import pypcd
from datetime import datetime
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
from scipy import ndimage
def main():
startTime = datetime.now()
print('Check Quantization parameters and Tx position before run!')
fileToRead = 'matrixChannels.csv'
if len(sys.argv) == 5:
starting_episode = sys.argv[1]
last_episode = sys.argv[2]
type_data = sys.argv[3]
scan_type = ''
if int(sys.argv[4]) == 1:
scan_type = '_noisy'
else:
print('Usage: python ' + sys.argv[0] + ' numEpiInicial numEpiFinal 3D/2D noise(1)/noiseless(0)')
exit(1)
outputFolder = './obstacles_new_'+scan_type+type_data+'/'
if not os.path.exists(outputFolder):
os.makedirs(outputFolder)
# Configuration of parameters
dictvehicle = {1.59 : 5, 3.2 : 9.5, 4.3 : 13} #CarSize/BusSize/TruckSize
# Quantization parameters
QP = {'Xp':1.15,'Yp':1.25,'Zp':1,'Xmax': 767,'Ymax': 679, 'Zmax': 10, 'Xmin': 744,'Ymin': 429, 'Zmin': 0 } #X Y Z
Tx = [746, 560, 4]
max_dist_LIDAR = 100 # in meters
dx = np.arange(QP['Xmin'],QP['Xmax'],QP['Xp'])
dy = np.arange(QP['Ymin'],QP['Ymax'],QP['Yp'])
if type_data == '3D':
dz = np.arange(QP['Zmin'],QP['Zmax'],QP['Zp'])
zeros_array = np.zeros((10, np.size(dx), np.size(dy), np.size(dz)), np.int8)
else:
zeros_array = np.zeros((10,
|
np.size(dx)
|
numpy.size
|
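The quantization described in the module docstring maps every scanned point to a voxel index on the grid defined by the QP dictionary (min, max and step per axis) and marks that cell. A minimal sketch of that mapping, assuming points already lie inside the bounds; the helper name and the toy grid shape are illustrative and not the exact indexing used later in the script.

import numpy as np

QP = {'Xp': 1.15, 'Yp': 1.25, 'Zp': 1,
      'Xmax': 767, 'Ymax': 679, 'Zmax': 10,
      'Xmin': 744, 'Ymin': 429, 'Zmin': 0}

def quantize(points, QP):
    """Convert Nx3 metric points into integer voxel indices of the QP grid."""
    mins = np.array([QP['Xmin'], QP['Ymin'], QP['Zmin']], dtype=float)
    steps = np.array([QP['Xp'], QP['Yp'], QP['Zp']], dtype=float)
    return np.floor((points - mins) / steps).astype(int)

pts = np.array([[746.0, 560.0, 4.0]])          # e.g. the Tx position used above
grid = np.zeros((20, 200, 10), dtype=np.int8)  # toy grid; the real shape comes from np.arange over QP
idx = quantize(pts, QP)[0]
grid[idx[0], idx[1], idx[2]] = -1              # Tx marked with -1; obstacle points would get 1
print(idx)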
import argparse
import numpy as np
import os
from scipy.special import gammaln, digamma
from scipy.special import logsumexp  # logsumexp moved out of scipy.misc in modern SciPy
def calc_N_d_K__vb_coord_ascent__many_tries(
word_id_d_Ud=None,
word_ct_d_Ud=None,
topics_KV=None,
alpha_K=None,
init_pi_d_K=None,
init_name=None,
init_name_list=None,
coldstart_initname='prior_mean',
prng=np.random,
verbose=False,
do_trace_elbo=True,
**lstep_kwargs):
""" Estimate token-assignment counts for VB approximate posterior.
Returns
-------
N_d_K : 1D array, size K
N_d_K[k] : count of usage of topic k in document d
"""
K = alpha_K.size
if init_name is not None:
init_name_list = init_name.split("+")
if init_name_list is None:
init_name_list = [coldstart_initname]
assert isinstance(init_name_list, list)
# Precompute likelihoods
# lik_d_UdK : 2D array, Ud x K
lik_d_UdK = topics_KV[:, word_id_d_Ud].T.copy()
log_lik_d_UdK = np.log(1e-100 + lik_d_UdK)
best_ELBO = -np.inf
best_N_d_K = None
best_info = None
for init_name in init_name_list:
if init_name.count("_x") > 0:
n_reps = int(init_name.split("_x")[1])
else:
n_reps = 1
for rep in range(n_reps):
init_P_d_K = make_initial_P_d_K(
init_name,
prng=prng,
alpha_K=alpha_K,
init_P_d_K_list=[init_pi_d_K])
if verbose:
pprint__N_d_K(init_P_d_K, "init")
cur_N_d_K, cur_info = calc_N_d_K__vb_coord_ascent(
word_ct_d_Ud=word_ct_d_Ud,
lik_d_UdK=lik_d_UdK,
log_lik_d_UdK=log_lik_d_UdK,
alpha_K=alpha_K,
init_P_d_K=init_P_d_K,
verbose=verbose,
do_trace_elbo=do_trace_elbo,
**lstep_kwargs)
cur_ELBO = calc_elbo_for_single_doc__simplified_from_N_d_K(
word_ct_d_Ud=word_ct_d_Ud,
log_lik_d_UdK=log_lik_d_UdK,
alpha_K=alpha_K,
N_d_K=cur_N_d_K)
if verbose:
pprint__N_d_K(cur_N_d_K, "final", cur_ELBO)
if cur_ELBO > best_ELBO + 1e-6:
best_ELBO = cur_ELBO
best_N_d_K = cur_N_d_K
best_info = cur_info
if verbose:
print ("best: %s" % init_name)
elif cur_ELBO > best_ELBO - 1e-6:
if verbose:
print ("tied: %s" % init_name)
if verbose:
print ("")
best_info['ELBO'] = best_ELBO
return best_N_d_K, best_info
def calc_N_d_K__vb_coord_ascent(
word_id_d_Ud=None,
word_ct_d_Ud=None,
lik_d_UdK=None,
log_lik_d_UdK=None,
topics_KV=None,
alpha_K=None,
init_theta_d_K=None,
init_N_d_K=None,
init_P_d_K=None,
lstep_converge_thr=0.0001,
lstep_max_iters=100,
do_trace_elbo=False,
verbose=False,
**unused_kwargs):
""" Estimate token-assignment counts for VB approximate posterior.
Uses one run of coordinate descent.
Returns
-------
N_d_K : 1D array, size K
info_dict : dict
"""
if lik_d_UdK is None:
lik_d_UdK = topics_KV[:, word_id_d_Ud].T.copy()
if log_lik_d_UdK is None and do_trace_elbo:
log_lik_d_UdK = np.log(1e-100 + lik_d_UdK)
P_d_K = np.zeros_like(alpha_K)
sumresp_U = np.zeros_like(word_ct_d_Ud)
if init_P_d_K is not None:
P_d_K[:] = init_P_d_K
N_d_K = np.zeros_like(alpha_K)
np.dot(lik_d_UdK, P_d_K, out=sumresp_U)
np.dot(word_ct_d_Ud / sumresp_U, lik_d_UdK, out=N_d_K)
N_d_K *= P_d_K
elif init_theta_d_K is not None:
N_d_K =
|
np.maximum(init_theta_d_K - alpha_K, 1e-10)
|
numpy.maximum
|
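The heart of calc_N_d_K__vb_coord_ascent is the pair of dot products in the initialization branch: responsibilities are renormalized through sumresp_U and folded back into per-topic counts N_d_K weighted by the current proportions P_d_K. A toy sketch of that single update with made-up dimensions (Ud unique tokens, K topics); the random inputs are illustrative only.

import numpy as np

rng = np.random.default_rng(0)
Ud, K = 6, 3
lik_d_UdK = rng.random((Ud, K)) + 1e-3                 # per-token topic likelihoods
word_ct_d_Ud = rng.integers(1, 5, size=Ud).astype(float)
P_d_K = np.full(K, 1.0 / K)                            # current topic proportions

sumresp_U = lik_d_UdK @ P_d_K                          # np.dot(lik_d_UdK, P_d_K, out=sumresp_U)
N_d_K = (word_ct_d_Ud / sumresp_U) @ lik_d_UdK         # np.dot(word_ct / sumresp, lik, out=N_d_K)
N_d_K *= P_d_K                                         # same three lines as in the snippet above
print(N_d_K, N_d_K.sum(), word_ct_d_Ud.sum())          # the counts sum to the total token count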
# Copyright 2021 AstroLab Software
# Author: <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pyspark.sql.functions import pandas_udf, PandasUDFType
from pyspark.sql.types import DoubleType, StringType
import pandas as pd
import numpy as np
import os
from fink_science.conversion import mag2fluxcal_snana
from fink_science.utilities import load_scikit_model, load_pcs
from fink_science.kilonova.lib_kn import extract_all_filters_fink
from fink_science.kilonova.lib_kn import get_features_name
from fink_science import __file__
from fink_science.tester import spark_unit_tests
@pandas_udf(DoubleType(), PandasUDFType.SCALAR)
def knscore(jd, fid, magpsf, sigmapsf, model_path=None, pcs_path=None, npcs=None) -> pd.Series:
""" Return the probability of an alert to be a Kilonova using a Random
Forest Classifier.
Parameters
----------
jd: Spark DataFrame Column
JD times (float)
fid: Spark DataFrame Column
Filter IDs (int)
magpsf, sigmapsf: Spark DataFrame Columns
Magnitude from PSF-fit photometry, and 1-sigma error
model_path: Spark DataFrame Column, optional
Path to the trained model. Default is None, in which case the default
model `data/models/KN_model_2PC.pkl` is loaded.
pcs_path: Spark DataFrame Column, optional
Path to the Principal Component file. Default is None, in which case
the `data/models/components.csv` is loaded.
npcs: Spark DataFrame Column, optional
Integer representing the number of Principal Component to use. It
should be consistent to the training model used. Default is None (i.e.
default npcs for the default `model_path`, that is 1).
Returns
----------
probabilities: 1D np.array of float
Probability between 0 (non-KNe) and 1 (KNe).
Examples
----------
>>> from fink_science.utilities import concat_col
>>> from pyspark.sql import functions as F
>>> df = spark.read.load(ztf_alert_sample)
# Required alert columns
>>> what = ['jd', 'fid', 'magpsf', 'sigmapsf']
# Use for creating temp name
>>> prefix = 'c'
>>> what_prefix = [prefix + i for i in what]
# Append temp columns with historical + current measurements
>>> for colname in what:
... df = concat_col(df, colname, prefix=prefix)
# Perform the fit + classification (default model)
>>> args = [F.col(i) for i in what_prefix]
>>> df = df.withColumn('pKNe', knscore(*args))
# Note that we can also specify a model
>>> extra_args = [F.lit(model_path), F.lit(comp_path), F.lit(2)]
>>> args = [F.col(i) for i in what_prefix] + extra_args
>>> df = df.withColumn('pKNe', knscore(*args))
# Drop temp columns
>>> df = df.drop(*what_prefix)
>>> df.agg({"pKNe": "min"}).collect()[0][0]
0.0
>>> df.agg({"pKNe": "max"}).collect()[0][0] < 1.0
True
"""
epoch_lim = [-50, 50]
time_bin = 0.25
flux_lim = 0
# Flag empty alerts
mask = magpsf.apply(lambda x: np.sum(np.array(x) ==
|
np.array(x)
|
numpy.array
|
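The mask at the end of the prompt relies on a NaN quirk: np.array(x) == np.array(x) is False exactly where an entry is NaN, so summing the comparison counts the valid (non-NaN) measurements in each alert history. A two-line illustration of the idiom with made-up magnitudes:

import numpy as np

hist = np.array([18.2, np.nan, 18.5, np.nan, 19.0])
print(np.sum(hist == hist))   # 3 -> only the non-NaN magnitudes are counted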
#!/usr/bin/env python
"""
Test module for BDM2 Elements
"""
from __future__ import print_function
from builtins import object
import proteus.test_utils.TestTools
import os
import sys
import inspect
proteus.test_utils.TestTools.addSubFolders( inspect.currentframe() )
from proteus.iproteus import *
from proteus import Comm
comm = Comm.get()
Profiling.logLevel=7
Profiling.verbose=True
import numpy as np
import pytest
import bdm_tests_template as bt_temp
@pytest.mark.PostProcessingTools
class TestBDM2Reference1(object):
@classmethod
def setup_class(cls):
pass
@classmethod
def teardown_class(cls):
pass
def setup_method(self,method):
"""Initialize the test problem. """
from importlib import reload
reload(bt_temp)
self.transport_obj = bt_temp.ns.modelList[0].levelModelList[0]
self.bdm2_obj = self.transport_obj.velocityPostProcessor.vpp_algorithms[0]
self._setRelativePath()
def teardown_method(self,method):
"""Tear down the test problem. """
filenames = ['poisson_bdm1_test.h5', 'poisson_bdm1_test.xmf','reference_triangle.ele',
'reference_triangle.node', 'reference_triangle.poly','proteus.log']
for file in filenames:
if os.path.exists(file):
try:
os.remove(file)
except OSError as e:
print ("Error: %s - %s." %(e.filename, e.strerror ))
else:
pass
def _setRelativePath(self):
self.scriptdir = os.path.dirname(__file__)
def test_BDM2_reference_triangle(self):
'''
Test the construction of a BDM2 projection matrix and rhs
on the reference triangle
'''
# ******************* TEST PROJECTION MATRIX CONSTRUCTION ************
# need to override factored BDM projection matrix
self.bdm2_obj.BDMprojectionMat_element \
= np.zeros_like(self.bdm2_obj.BDMprojectionMat_element)
self.bdm2_obj.buildLocalBDM2projectionMatrices \
(self.bdm2_obj.degree,
self.bdm2_obj.vt.ebq[('w*dS_u',0)],
self.bdm2_obj.vt.ebq['n'],
self.bdm2_obj.vt.ebq[('v',0)],
self.bdm2_obj.q[('w',0)],
self.bdm2_obj.weightedInteriorTestGradients,
self.bdm2_obj.weightedInteriorDivFreeElement,
self.bdm2_obj.piola_trial_function,
self.bdm2_obj.edgeFlags,
self.bdm2_obj.BDMprojectionMat_element)
# The following .savetxt command generates the comparison output. Be sure
# this is actually generating what you want before you uncomment! The
# currently stored file should be correct.
# np.savetxt('bdm2_ref_proj_mat.txt', bdm2_obj.BDMprojectionMat_element[0])
rel_path = "comparison_files/bdm2_ref_proj_mat.txt"
comparison_matrix = np.loadtxt(os.path.join(self.scriptdir,rel_path), dtype = float)
np.testing.assert_almost_equal(comparison_matrix,self.bdm2_obj.BDMprojectionMat_element[0],decimal=6)
# ******************** TEST RHS CONSTRUCTION *************************
# construct a RHS vector from a velocity field of all 1's
self.bdm2_obj.ebq[('velocity',0)] = np.ones_like(self.bdm2_obj.ebq[('velocity',0)])
self.bdm2_obj.q[('velocity',0)] = np.ones_like(self.bdm2_obj.q[('velocity',0)])
self.bdm2_obj.buildBDM2rhs(self.bdm2_obj.BDMprojectionMat_element,
self.bdm2_obj.BDMprojectionMatPivots_element,
self.bdm2_obj.vt.ebq[('w*dS_u',0)],
self.bdm2_obj.vt.ebq['n'],
self.bdm2_obj.weightedInteriorTestGradients,
self.bdm2_obj.weightedInteriorDivFreeElement,
self.bdm2_obj.ebq[('velocity',0)],
self.bdm2_obj.q[('velocity',0)],
self.bdm2_obj.q[('velocity_dofs',0)],
self.bdm2_obj.edgeFlags)
test_rhs = self.bdm2_obj.q[('velocity_dofs',0)]
comparison_rhs = np.array([ 3.33333333e-01, 3.33333333e-01, 1.33333333e+00,
-1.66666667e-01, -1.66666667e-01, -6.66666667e-01,
-1.66666667e-01, -1.66666667e-01, -6.66666667e-01,
-1.00000000e+00, 5.00000000e-01, 4.33680869e-19])
|
np.testing.assert_almost_equal(comparison_rhs,test_rhs[0],decimal=6)
|
numpy.testing.assert_almost_equal
|
"""Classes for handling unit cell transformation."""
from itertools import product
import numpy as np
import numpy.linalg as la
from . import spacegroups
class UnitCell:
"""Class for storing and performing calculations on unit cell parameters.
The constructor expects alpha, beta, and gamma to be in degrees.
"""
def __init__(self, a=1.0, b=1.0, c=1.0,
alpha=90.0, beta=90.0, gamma=90.0,
space_group="P1"):
self.a = a
self.b = b
self.c = c
self.alpha = alpha
self.beta = beta
self.gamma = gamma
self.set_space_group(space_group)
self._sin_alpha = np.sin(np.deg2rad(self.alpha))
self._sin_beta = np.sin(np.deg2rad(self.beta))
self._sin_gamma = np.sin(np.deg2rad(self.gamma))
self._cos_alpha = np.cos(np.deg2rad(self.alpha))
self._cos_beta = np.cos(np.deg2rad(self.beta))
self._cos_gamma = np.cos(np.deg2rad(self.gamma))
self.orth_to_frac = self.calc_fractionalization_matrix()
self.frac_to_orth = self.calc_orthogonalization_matrix()
## check our math!
#assert np.allclose(self.orth_to_frac, la.inv(self.frac_to_orth))
def __str__(self):
return "UnitCell(a=%f, b=%f, c=%f, alpha=%f, beta=%f, gamma=%f)" % (
self.a, self.b, self.c, self.alpha, self.beta, self.gamma)
def copy(self):
return UnitCell(self.a, self.b, self.c, self.alpha, self.beta,
self.gamma, self.space_group.number)
@property
def abc(self):
return np.asarray([self.a, self.b, self.c], float)
def calc_v(self):
"""Calculates the volume of the rhombohedral created by the
unit vectors a1/|a1|, a2/|a2|, a3/|a3|.
"""
return np.sqrt(1 -
(self._cos_alpha * self._cos_alpha) -
(self._cos_beta * self._cos_beta) -
(self._cos_gamma * self._cos_gamma) +
(2 * self._cos_alpha * self._cos_beta * self._cos_gamma)
)
def calc_volume(self):
"""Calculates the volume of the unit cell.
"""
return self.a * self.b * self.c * self.calc_v()
def calc_reciprocal_unit_cell(self):
"""Corresponding reciprocal unit cell.
"""
V = self.calc_volume()
ra = (self.b * self.c * self._sin_alpha) / V
rb = (self.a * self.c * self._sin_beta) / V
rc = (self.a * self.b * self._sin_gamma) / V
ralpha = np.arccos(
(self._cos_beta * self._cos_gamma - self._cos_alpha) / (self._sin_beta * self._sin_gamma))
rbeta = np.arccos(
(self._cos_alpha * self._cos_gamma - self._cos_beta) / (self._sin_alpha * self._sin_gamma))
rgamma = np.arccos(
(self._cos_alpha * self._cos_beta - self._cos_gamma) / (self._sin_alpha * self._sin_beta))
return UnitCell(ra, rb, rc, ralpha, rbeta, rgamma)
def calc_orthogonalization_matrix(self):
"""Cartesian to fractional coordinates.
"""
v = self.calc_v()
f11 = self.a
f12 = self.b * self._cos_gamma
f13 = self.c * self._cos_beta
f22 = self.b * self._sin_gamma
f23 = (self.c * (self._cos_alpha - self._cos_beta * self._cos_gamma)) / (self._sin_gamma)
f33 = (self.c * v) / self._sin_gamma
frac_to_orth = np.array([ [f11, f12, f13],
[0.0, f22, f23],
[0.0, 0.0, f33] ], float)
return frac_to_orth
def calc_fractionalization_matrix(self):
"""Fractional to Cartesian coordinates.
"""
v = self.calc_v()
o11 = 1.0 / self.a
o12 = - self._cos_gamma / (self.a * self._sin_gamma)
o13 = (self._cos_gamma * self._cos_alpha - self._cos_beta) / (self.a * v * self._sin_gamma)
o22 = 1.0 / (self.b * self._sin_gamma)
o23 = (self._cos_gamma * self._cos_beta - self._cos_alpha) / (self.b * v * self._sin_gamma)
o33 = self._sin_gamma / (self.c * v)
orth_to_frac = np.array([[o11, o12, o13],
[0.0, o22, o23],
[0.0, 0.0, o33] ], float)
return orth_to_frac
def calc_orth_to_frac(self, v):
"""Calculates and returns the fractional coordinate vector of
orthogonal vector v.
"""
return np.dot(self.orth_to_frac, v)
def calc_frac_to_orth(self, v):
"""Calculates and returns the orthogonal coordinate vector of
fractional vector v.
"""
return np.dot(self.frac_to_orth, v)
def calc_orth_symop(self, symop):
"""Calculates the orthogonal space symmetry operation (return SymOp)
given a fractional space symmetry operation (argument SymOp).
"""
RF = np.dot(symop.R, self.orth_to_frac)
ORF = np.dot(self.frac_to_orth, RF)
Ot = np.dot(self.frac_to_orth, symop.t)
return spacegroups.SymOp(ORF, Ot)
def calc_orth_symop2(self, symop):
"""Calculates the orthogonal space symmetry operation (return SymOp)
given a fractional space symmetry operation (argument SymOp).
"""
RF = np.dot(symop.R, self.orth_to_frac)
ORF = np.dot(self.frac_to_orth, RF)
Rt = np.dot(symop.R, symop.t)
ORt =
|
np.dot(self.frac_to_orth, Rt)
|
numpy.dot
|
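For an orthorhombic cell (all angles 90 degrees) the orthogonalization and fractionalization matrices above reduce to diag(a, b, c) and diag(1/a, 1/b, 1/c), which makes the round trip easy to check with plain numpy, without instantiating UnitCell (whose constructor also needs the spacegroups module). A sketch of that special case; the cell lengths are arbitrary example values.

import numpy as np

a, b, c = 2.0, 3.0, 4.0
frac_to_orth = np.diag([a, b, c])               # fractional -> Cartesian
orth_to_frac = np.diag([1 / a, 1 / b, 1 / c])   # Cartesian -> fractional

frac = np.array([0.5, 0.5, 0.5])
orth = frac_to_orth @ frac                      # [1.0, 1.5, 2.0]
print(orth, np.allclose(orth_to_frac @ orth, frac))  # the round trip recovers the input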
from unittest import TestCase
from tempfile import TemporaryDirectory
from pathlib import Path
from giant.camera_models import PinholeModel, OwenModel, BrownModel, OpenCVModel, save, load
import numpy as np
import giant.rotations as at
import lxml.etree as etree
class TestPinholeModel(TestCase):
def setUp(self):
self.Class = PinholeModel
def test___init__(self):
model = self.Class(intrinsic_matrix=np.array([[1, 2, 3], [4, 5, 6]]), focal_length=10.5, field_of_view=20.5,
use_a_priori=True, misalignment=[np.zeros(3), np.ones(3)],
estimation_parameters='basic intrinsic',
a1=1, a2=2, a3=3)
np.testing.assert_array_equal(model.intrinsic_matrix, [[1, 2, 3], [4, 5, 6]])
self.assertEqual(model.focal_length, 10.5)
self.assertEqual(model.field_of_view, 20.5)
self.assertTrue(model.use_a_priori)
self.assertEqual(model.estimation_parameters, ['basic intrinsic'])
self.assertEqual(model.a1, 1)
self.assertEqual(model.a2, 2)
self.assertEqual(model.a3, 3)
model = self.Class(kx=1, ky=2, px=4, py=5, focal_length=10.5, field_of_view=20.5,
use_a_priori=True, misalignment=[np.zeros(3), np.ones(3)],
estimation_parameters=['focal_length', 'px'])
np.testing.assert_array_equal(model.intrinsic_matrix, [[1, 0, 4], [0, 2, 5]])
self.assertEqual(model.focal_length, 10.5)
self.assertEqual(model.field_of_view, 20.5)
self.assertTrue(model.use_a_priori)
self.assertEqual(model.estimation_parameters, ['focal_length', 'px'])
def test_estimation_parameters(self):
model = self.Class()
model.estimation_parameters = 'kx'
self.assertEqual(model.estimation_parameters, ['kx'])
model.estimate_multiple_misalignments = False
model.estimation_parameters = ['px', 'py', 'Multiple misalignments']
self.assertEqual(model.estimation_parameters, ['px', 'py', 'multiple misalignments'])
self.assertTrue(model.estimate_multiple_misalignments)
def test_kx(self):
model = self.Class(intrinsic_matrix=np.array([[1, 0, 0], [0, 0, 0]]))
self.assertEqual(model.kx, 1)
model.kx = 100
self.assertEqual(model.kx, 100)
self.assertEqual(model.intrinsic_matrix[0, 0], 100)
def test_ky(self):
model = self.Class(intrinsic_matrix=np.array([[0, 0, 0], [0, 3, 0]]))
self.assertEqual(model.ky, 3)
model.ky = 100
self.assertEqual(model.ky, 100)
self.assertEqual(model.intrinsic_matrix[1, 1], 100)
def test_px(self):
model = self.Class(intrinsic_matrix=np.array([[0, 0, 20], [0, 3, 0]]))
self.assertEqual(model.px, 20)
model.px = 100
self.assertEqual(model.px, 100)
self.assertEqual(model.intrinsic_matrix[0, 2], 100)
def test_py(self):
model = self.Class(intrinsic_matrix=np.array([[0, 0, 0], [0, 0, 10]]))
self.assertEqual(model.py, 10)
model.py = 100
self.assertEqual(model.py, 100)
self.assertEqual(model.intrinsic_matrix[1, 2], 100)
def test_a1(self):
model = self.Class(temperature_coefficients=np.array([10, 0, 0]))
self.assertEqual(model.a1, 10)
model.a1 = 100
self.assertEqual(model.a1, 100)
self.assertEqual(model.temperature_coefficients[0], 100)
def test_a2(self):
model = self.Class(temperature_coefficients=np.array([0, 10, 0]))
self.assertEqual(model.a2, 10)
model.a2 = 100
self.assertEqual(model.a2, 100)
self.assertEqual(model.temperature_coefficients[1], 100)
def test_a3(self):
model = self.Class(temperature_coefficients=np.array([0, 0, 10]))
self.assertEqual(model.a3, 10)
model.a3 = 100
self.assertEqual(model.a3, 100)
self.assertEqual(model.temperature_coefficients[2], 100)
def test_intrinsic_matrix_inv(self):
model = self.Class(kx=5, ky=10, px=100, py=-5)
np.testing.assert_array_almost_equal(model.intrinsic_matrix @ np.vstack([model.intrinsic_matrix_inv,
[0, 0, 1]]),
[[1, 0, 0], [0, 1, 0]])
np.testing.assert_array_almost_equal(model.intrinsic_matrix_inv @ np.vstack([model.intrinsic_matrix,
[0, 0, 1]]),
[[1, 0, 0], [0, 1, 0]])
def test_get_temperature_scale(self):
model = self.Class(temperature_coefficients=[1, 2, 3.])
self.assertEqual(model.get_temperature_scale(1), 7)
np.testing.assert_array_equal(model.get_temperature_scale([1, 2]), [7, 35])
np.testing.assert_array_equal(model.get_temperature_scale([-1, 2.]), [-1, 35])
def test_apply_distortion(self):
inputs = [[0, 0], [1, 0], [-1, 0], [1.5, 0], [-1.5, 0], [[1.5], [0]], [[1.5, -1], [0, 0]],
[0, 1], [0, -1], [0, 1.5], [0, -1.5], [[0], [1.5]], [[0, 0], [1.5, -1]], [1, 1]]
model = self.Class()
for inp in inputs:
gnom_dist = model.apply_distortion(np.array(inp))
np.testing.assert_array_almost_equal(gnom_dist, inp)
def test_get_projections(self):
points = [[0, 0, 1], [-0.1, 0.2, 2.2], [[-0.1], [0.2], [2.2]], [[-0.1, 0], [0.2, 0], [2.2, 1]]]
model = self.Class(focal_length=8.7, kx=500, ky=500.5, px=1500, py=1500.5, a1=1, a2=2, a3=3)
with self.subTest(misalignment=None):
for point in points:
gnom, _, pix = model.get_projections(point)
gnom_true = model.focal_length * np.array(point[:2]) / point[2]
pix_true = (np.matmul(model.intrinsic_matrix[:, :2], gnom_true).T + model.intrinsic_matrix[:, 2]).T
np.testing.assert_array_equal(gnom, gnom_true)
np.testing.assert_array_equal(pix, pix_true)
with self.subTest(temperature=1):
for point in points:
gnom, _, pix = model.get_projections(point, temperature=1)
gnom_true = model.focal_length * np.array(point[:2]) / point[2]
gnom_true *= model.get_temperature_scale(1)
pix_true = (np.matmul(model.intrinsic_matrix[:, :2], gnom_true).T + model.intrinsic_matrix[:, 2]).T
np.testing.assert_array_equal(gnom, gnom_true)
np.testing.assert_array_equal(pix, pix_true)
with self.subTest(temperature=-10.5):
for point in points:
gnom, _, pix = model.get_projections(point, temperature=-10.5)
gnom_true = model.focal_length * np.array(point[:2]) / point[2]
gnom_true *= model.get_temperature_scale(-10.5)
pix_true = (np.matmul(model.intrinsic_matrix[:, :2], gnom_true).T + model.intrinsic_matrix[:, 2]).T
np.testing.assert_array_equal(gnom, gnom_true)
np.testing.assert_array_equal(pix, pix_true)
model = self.Class(focal_length=8.7, kx=500, ky=500.5, px=1500, py=1500.5,
misalignment=[0, 0, np.pi])
with self.subTest(misalignment=[0, 0, np.pi]):
for point in points:
gnom, _, pix = model.get_projections(point)
gnom_true = -model.focal_length * np.array(point[:2]) / point[2]
pix_true = (np.matmul(model.intrinsic_matrix[:, :2], gnom_true).T + model.intrinsic_matrix[:, 2]).T
np.testing.assert_array_almost_equal(gnom, gnom_true)
np.testing.assert_array_almost_equal(pix, pix_true)
model = self.Class(focal_length=8.7, kx=500, ky=500.5, px=1500, py=1500.5,
misalignment=[np.pi, 0, 0])
with self.subTest(misalignment=[np.pi, 0, 0]):
for point in points:
gnom, _, pix = model.get_projections(point)
gnom_true = model.focal_length * np.array(point[:2]) / point[2]
gnom_true[0] *= -1
pix_true = (np.matmul(model.intrinsic_matrix[:, :2], gnom_true).T + model.intrinsic_matrix[:, 2]).T
np.testing.assert_array_almost_equal(gnom, gnom_true)
np.testing.assert_array_almost_equal(pix, pix_true)
model = self.Class(focal_length=8.7, kx=500, ky=500.5, px=1500, py=1500.5,
misalignment=[0, np.pi, 0])
with self.subTest(misalignment=[0, np.pi, 0]):
for point in points:
gnom, _, pix = model.get_projections(point)
gnom_true = model.focal_length * np.array(point[:2]) / point[2]
gnom_true[1] *= -1
pix_true = (np.matmul(model.intrinsic_matrix[:, :2], gnom_true).T + model.intrinsic_matrix[:, 2]).T
np.testing.assert_array_almost_equal(gnom, gnom_true)
np.testing.assert_array_almost_equal(pix, pix_true)
model = self.Class(focal_length=8.7, kx=500, ky=500.5, px=1500, py=1500.5,
misalignment=[1, 0.2, 0.3])
with self.subTest(misalignment=[1, 0.2, 0.3]):
rot_mat = at.rotvec_to_rotmat([1, 0.2, 0.3]).squeeze()
for point in points:
point_new = rot_mat @ point
gnom, _, pix = model.get_projections(point)
gnom_true = model.focal_length * np.array(point_new[:2]) / np.array(point_new[2])
pix_true = (np.matmul(model.intrinsic_matrix[:, :2], gnom_true).T + model.intrinsic_matrix[:, 2]).T
np.testing.assert_array_almost_equal(gnom, gnom_true)
np.testing.assert_array_almost_equal(pix, pix_true)
model = self.Class(focal_length=8.7, kx=500, ky=500.5, px=1500, py=1500.5,
misalignment=[[1, 0.2, 0.3], [0, 0, np.pi]])
model.estimate_multiple_misalignments = True
with self.subTest(misalignment=[[1, 0.2, 0.3], [0, 0, np.pi]]):
for point in points:
rot_mat = at.rotvec_to_rotmat([1, 0.2, 0.3]).squeeze()
point_new = rot_mat @ point
gnom, _, pix = model.get_projections(point, image=0)
gnom_true = model.focal_length * np.array(point_new[:2]) / np.array(point_new[2])
pix_true = (np.matmul(model.intrinsic_matrix[:, :2], gnom_true).T + model.intrinsic_matrix[:, 2]).T
np.testing.assert_array_almost_equal(gnom, gnom_true)
np.testing.assert_array_almost_equal(pix, pix_true)
gnom, _, pix = model.get_projections(point, image=1)
gnom_true = -model.focal_length * np.array(point[:2]) / point[2]
pix_true = (np.matmul(model.intrinsic_matrix[:, :2], gnom_true).T + model.intrinsic_matrix[:, 2]).T
np.testing.assert_array_almost_equal(gnom, gnom_true)
np.testing.assert_array_almost_equal(pix, pix_true)
def test_project_onto_image(self):
points = [[0, 0, 1], [-0.1, 0.2, 2.2], [[-0.1], [0.2], [2.2]], [[-0.1, 0], [0.2, 0], [2.2, 1]]]
model = self.Class(focal_length=8.7, kx=500, ky=500.5, px=1500, py=1500.5, a1=-1e-3, a2=1e-6, a3=-7e-8)
with self.subTest(misalignment=None):
for point in points:
pix = model.project_onto_image(point)
gnom_true = model.focal_length * np.array(point[:2]) / point[2]
pix_true = (np.matmul(model.intrinsic_matrix[:, :2], gnom_true).T + model.intrinsic_matrix[:, 2]).T
np.testing.assert_array_equal(pix, pix_true)
with self.subTest(temperature=1):
for point in points:
pix = model.project_onto_image(point, temperature=1)
gnom_true = model.focal_length * np.array(point[:2]) / point[2]
gnom_true *= model.get_temperature_scale(1)
pix_true = (np.matmul(model.intrinsic_matrix[:, :2], gnom_true).T + model.intrinsic_matrix[:, 2]).T
np.testing.assert_array_equal(pix, pix_true)
with self.subTest(temperature=-10.5):
for point in points:
pix = model.project_onto_image(point, temperature=-10.5)
gnom_true = model.focal_length * np.array(point[:2]) / point[2]
gnom_true *= model.get_temperature_scale(-10.5)
pix_true = (np.matmul(model.intrinsic_matrix[:, :2], gnom_true).T + model.intrinsic_matrix[:, 2]).T
np.testing.assert_array_equal(pix, pix_true)
model = self.Class(focal_length=8.7, kx=500, ky=500.5, px=1500, py=1500.5,
misalignment=[0, 0, np.pi])
with self.subTest(misalignment=[0, 0, np.pi]):
for point in points:
pix = model.project_onto_image(point)
gnom_true = -model.focal_length * np.array(point[:2]) / point[2]
pix_true = (np.matmul(model.intrinsic_matrix[:, :2], gnom_true).T + model.intrinsic_matrix[:, 2]).T
np.testing.assert_array_almost_equal(pix, pix_true)
model = self.Class(focal_length=8.7, kx=500, ky=500.5, px=1500, py=1500.5,
misalignment=[np.pi, 0, 0])
with self.subTest(misalignment=[np.pi, 0, 0]):
for point in points:
pix = model.project_onto_image(point)
gnom_true = model.focal_length * np.array(point[:2]) / point[2]
gnom_true[0] *= -1
pix_true = (np.matmul(model.intrinsic_matrix[:, :2], gnom_true).T + model.intrinsic_matrix[:, 2]).T
np.testing.assert_array_almost_equal(pix, pix_true)
model = self.Class(focal_length=8.7, kx=500, ky=500.5, px=1500, py=1500.5,
misalignment=[0, np.pi, 0])
with self.subTest(misalignment=[0, np.pi, 0]):
for point in points:
pix = model.project_onto_image(point)
gnom_true = model.focal_length * np.array(point[:2]) / point[2]
gnom_true[1] *= -1
pix_true = (np.matmul(model.intrinsic_matrix[:, :2], gnom_true).T + model.intrinsic_matrix[:, 2]).T
np.testing.assert_array_almost_equal(pix, pix_true)
model = self.Class(focal_length=8.7, kx=500, ky=500.5, px=1500, py=1500.5,
misalignment=[1, 0.2, 0.3])
with self.subTest(misalignment=[1, 0.2, 0.3]):
rot_mat = at.rotvec_to_rotmat([1, 0.2, 0.3]).squeeze()
for point in points:
point_new = rot_mat @ point
pix = model.project_onto_image(point)
gnom_true = model.focal_length * np.array(point_new[:2]) / np.array(point_new[2])
pix_true = (np.matmul(model.intrinsic_matrix[:, :2], gnom_true).T + model.intrinsic_matrix[:, 2]).T
np.testing.assert_array_almost_equal(pix, pix_true)
model = self.Class(focal_length=8.7, kx=500, ky=500.5, px=1500, py=1500.5,
misalignment=[[1, 0.2, 0.3], [0, 0, np.pi]])
model.estimate_multiple_misalignments = True
with self.subTest(misalignment=[[1, 0.2, 0.3], [0, 0, np.pi]]):
for point in points:
rot_mat = at.rotvec_to_rotmat([1, 0.2, 0.3]).squeeze()
point_new = rot_mat @ point
pix = model.project_onto_image(point, image=0)
gnom_true = model.focal_length * np.array(point_new[:2]) / np.array(point_new[2])
pix_true = (np.matmul(model.intrinsic_matrix[:, :2], gnom_true).T + model.intrinsic_matrix[:, 2]).T
np.testing.assert_array_almost_equal(pix, pix_true)
pix = model.project_onto_image(point, image=1)
gnom_true = -model.focal_length * np.array(point[:2]) / point[2]
pix_true = (np.matmul(model.intrinsic_matrix[:, :2], gnom_true).T + model.intrinsic_matrix[:, 2]).T
np.testing.assert_array_almost_equal(pix, pix_true)
def test_compute_pixel_jacobian(self):
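# compares the analytic Jacobian of pixel location with respect to the camera-frame vector against a
# central-difference approximation formed by perturbing each component of the input vector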
def num_deriv(uvec, cmodel, delta=1e-8, image=0, temperature=0) -> np.ndarray:
uvec = np.array(uvec).reshape(3, -1)
pix_true = cmodel.project_onto_image(uvec, image=image, temperature=temperature)
uvec_pert = uvec + [[delta], [0], [0]]
pix_pert_x_f = cmodel.project_onto_image(uvec_pert, image=image, temperature=temperature)
uvec_pert = uvec + [[0], [delta], [0]]
pix_pert_y_f = cmodel.project_onto_image(uvec_pert, image=image, temperature=temperature)
uvec_pert = uvec + [[0], [0], [delta]]
pix_pert_z_f = cmodel.project_onto_image(uvec_pert, image=image, temperature=temperature)
uvec_pert = uvec - [[delta], [0], [0]]
pix_pert_x_b = cmodel.project_onto_image(uvec_pert, image=image, temperature=temperature)
uvec_pert = uvec - [[0], [delta], [0]]
pix_pert_y_b = cmodel.project_onto_image(uvec_pert, image=image, temperature=temperature)
uvec_pert = uvec - [[0], [0], [delta]]
pix_pert_z_b = cmodel.project_onto_image(uvec_pert, image=image, temperature=temperature)
return np.array([(pix_pert_x_f-pix_pert_x_b)/(2*delta),
(pix_pert_y_f-pix_pert_y_b)/(2*delta),
(pix_pert_z_f-pix_pert_z_b)/(2*delta)]).swapaxes(0, -1)
model_param = {"focal_length": 100.75, "kx": 30, "ky": 40,
"px": 4005.23, "py": 4005.23,
"misalignment": [[1e-8, 1e-9, 1e-10], [-1e-8, 2e-9, -1e-11], [2e-10, -5e-12, 1e-9]],
"a1": 1e-6, "a2": 2e-7, "a3": 3e-8}
inputs = [np.array([[0.5, 0, 1]]).T,
np.array([[0, 0.5, 1], [0.5, 0.5, 1], [-0.5, 0, 1]]).T,
np.array([[0.1, -0.5, 1], [-0.5, -0.5, 1], [5, 10, 1000.23], [1, 2, 1200.23]]).T]
temperatures = [0, 1, -1, 10.5, -10.5]
model = self.Class(**model_param)
model.estimate_multiple_misalignments = True
for temp in temperatures:
for input in inputs:
for image in range(3):
with self.subTest(image=image, temp=temp, input=input):
jac_ana = model.compute_pixel_jacobian(input, image=image, temperature=temp)
jac_num = num_deriv(input, model, image=image, temperature=temp, delta=1e-2)
np.testing.assert_allclose(jac_ana, jac_num, rtol=1e-3, atol=1e-10)
def test__compute_dcamera_point_dgnomic(self):
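# g2u below lifts a gnomic location to a unit vector by appending the focal length and normalizing;
# its central-difference derivative is the reference for _compute_dcamera_point_dgnomic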
def num_deriv(gnomic_locations, cmodel, delta=1e-6) -> np.ndarray:
def g2u(g):
v = np.vstack([g, cmodel.focal_length*np.ones(g.shape[-1])])
return v/np.linalg.norm(v, axis=0, keepdims=True)
gnomic_locations = np.asarray(gnomic_locations).reshape(2, -1)
gnom_pert = gnomic_locations + [[delta], [0]]
cam_loc_pert_x_f = g2u(gnom_pert)
gnom_pert = gnomic_locations + [[0], [delta]]
cam_loc_pert_y_f = g2u(gnom_pert)
gnom_pert = gnomic_locations - [[delta], [0]]
cam_loc_pert_x_b = g2u(gnom_pert)
gnom_pert = gnomic_locations - [[0], [delta]]
cam_loc_pert_y_b = g2u(gnom_pert)
return np.array([(cam_loc_pert_x_f -cam_loc_pert_x_b)/(2*delta),
(cam_loc_pert_y_f -cam_loc_pert_y_b)/(2*delta)]).swapaxes(0, -1)
model_param = {"focal_length": 100.75, "kx": 300, "ky": 400,
"px": 1005.23, "py": 1005.23,
"misalignment": [[1e-8, 1e-9, 1e-10], [-1e-8, 2e-9, -1e-11], [2e-10, -5e-12, 1e-9]],
"a1": 1e-6, "a2": 2e-7, "a3": 3e-8}
inputs = [np.array([[0, 0]]).T,
np.array([[0, 2000], [2000, 0], [2000, 2000]]).T,
np.array([[1000, 1000], [1000, 2000], [2000, 1000], [0, 1000], [1000, 0]]).T]
model = self.Class(**model_param)
model.estimate_multiple_misalignments = True
for input in inputs:
with self.subTest(input=input):
jac_ana = []
for gnom in input.T:
jac_ana.append(
model._compute_dcamera_point_dgnomic(gnom, np.sqrt(np.sum(gnom*gnom) + model.focal_length**2)))
jac_ana = np.array(jac_ana)
jac_num = num_deriv(input, model)
np.testing.assert_almost_equal(jac_ana, jac_num)
def test__compute_dgnomic_ddist_gnomic(self):
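# dg2g below inverts the distortion model by fixed-point iteration, giving a reference mapping from
# distorted to undistorted gnomic coordinates whose numeric derivative is compared to the analytic one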
def num_deriv(dist_gnomic_locations, cmodel, delta=1e-6) -> np.ndarray:
def dg2g(dg):
gnomic_guess = dg.copy()
# perform the fixed-point iteration to invert the distortion model
for _ in np.arange(20):
# get the distorted location assuming the current guess is correct
gnomic_guess_distorted = cmodel.apply_distortion(gnomic_guess)
# subtract off the residual distortion from the gnomic guess
gnomic_guess += dg - gnomic_guess_distorted
# check for convergence
if np.all(np.linalg.norm(gnomic_guess_distorted - dg, axis=0) <= 1e-15):
break
return gnomic_guess
dist_gnomic_locations = np.asarray(dist_gnomic_locations).reshape(2, -1)
dist_gnom_pert = dist_gnomic_locations + [[delta], [0]]
gnom_loc_pert_x_f = dg2g(dist_gnom_pert)
dist_gnom_pert = dist_gnomic_locations + [[0], [delta]]
gnom_loc_pert_y_f = dg2g(dist_gnom_pert)
dist_gnom_pert = dist_gnomic_locations - [[delta], [0]]
gnom_loc_pert_x_b = dg2g(dist_gnom_pert)
dist_gnom_pert = dist_gnomic_locations - [[0], [delta]]
gnom_loc_pert_y_b = dg2g(dist_gnom_pert)
return np.array([(gnom_loc_pert_x_f - gnom_loc_pert_x_b)/(2*delta),
(gnom_loc_pert_y_f - gnom_loc_pert_y_b)/(2*delta)]).swapaxes(0, -1)
model_param = {"focal_length": 100.75, "kx": 300, "ky": 400,
"px": 1005.23, "py": 1005.23,
"misalignment": [[1e-8, 1e-9, 1e-10], [-1e-8, 2e-9, -1e-11], [2e-10, -5e-12, 1e-9]],
"a1": 1e-6, "a2": 2e-7, "a3": 3e-8}
inputs = [np.array([[0, 0]]).T,
np.array([[0, 0.1], [0.1, 0], [0.1, 0.1]]).T,
np.array([[-0.1, 0], [0, -0.1], [-0.1, -0.1], [0.1, -0.1], [-0.1, 0.1]]).T]
model = self.Class(**model_param)
model.estimate_multiple_misalignments = True
for input in inputs:
with self.subTest(input=input):
jac_ana = []
for dist_gnom in input.T:
jac_ana.append(model._compute_dgnomic_ddist_gnomic(dist_gnom))
jac_ana = np.array(jac_ana)
jac_num = num_deriv(input, model)
np.testing.assert_almost_equal(jac_ana, jac_num)
def test_compute_unit_vector_jacobian(self):
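# checks the analytic Jacobian of pixels_to_unit against central differences taken with respect to the
# pixel coordinates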
def num_deriv(pixels, cmodel, delta=1e-6, image=0, temperature=0) -> np.ndarray:
pixels = np.array(pixels).reshape(2, -1)
pix_pert = pixels + [[delta], [0]]
uvec_pert_x_f = cmodel.pixels_to_unit(pix_pert, image=image, temperature=temperature)
pix_pert = pixels + [[0], [delta]]
uvec_pert_y_f = cmodel.pixels_to_unit(pix_pert, image=image, temperature=temperature)
pix_pert = pixels - [[delta], [0]]
uvec_pert_x_b = cmodel.pixels_to_unit(pix_pert, image=image, temperature=temperature)
pix_pert = pixels - [[0], [delta]]
uvec_pert_y_b = cmodel.pixels_to_unit(pix_pert, image=image, temperature=temperature)
return np.array([(uvec_pert_x_f-uvec_pert_x_b)/(2*delta),
(uvec_pert_y_f-uvec_pert_y_b)/(2*delta)]).swapaxes(0, -1)
model_param = {"focal_length": 100.75, "kx": 300, "ky": 400,
"px": 1005.23, "py": 1005.23,
"misalignment": [[1e-8, 1e-9, 1e-10], [-1e-8, 2e-9, -1e-11], [2e-10, -5e-12, 1e-9]],
"a1": 1e-6, "a2": 2e-7, "a3": 3e-8}
inputs = [np.array([[0, 0]]).T,
np.array([[0, 2000], [2000, 0], [2000, 2000]]).T,
np.array([[1000, 1000], [1000, 2000], [2000, 1000], [0, 1000], [1000, 0]]).T]
temperatures = [0, 1, -1, 10.5, -10.5]
model = self.Class(**model_param)
model.estimate_multiple_misalignments = True
for temp in temperatures:
for input in inputs:
for image in range(3):
with self.subTest(image=image, temp=temp, input=input):
jac_ana = model.compute_unit_vector_jacobian(input, image=image, temperature=temp)
jac_num = num_deriv(input, model, image=image, temperature=temp, delta=1e-2)
np.testing.assert_allclose(jac_ana, jac_num, rtol=1e-3, atol=1e-10)
def test__compute_dcamera_point_dmisalignment(self):
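# perturbs the misalignment rotation vector one axis at a time and central-differences the rotated
# point to validate the analytic derivative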
def num_deriv(loc, dtheta, delta=1e-10) -> np.ndarray:
mis_pert = at.rotvec_to_rotmat(np.array(dtheta) + [delta, 0, 0]).squeeze()
point_pert_x_f = mis_pert @ loc
mis_pert = at.rotvec_to_rotmat(np.array(dtheta) + [0, delta, 0]).squeeze()
point_pert_y_f = mis_pert @ loc
mis_pert = at.rotvec_to_rotmat(np.array(dtheta) + [0, 0, delta]).squeeze()
point_pert_z_f = mis_pert @ loc
mis_pert = at.rotvec_to_rotmat(np.array(dtheta) - [delta, 0, 0]).squeeze()
point_pert_x_b = mis_pert @ loc
mis_pert = at.rotvec_to_rotmat(np.array(dtheta) - [0, delta, 0]).squeeze()
point_pert_y_b = mis_pert @ loc
mis_pert = at.rotvec_to_rotmat(np.array(dtheta) - [0, 0, delta]).squeeze()
point_pert_z_b = mis_pert @ loc
return np.array([(point_pert_x_f - point_pert_x_b) / (2 * delta),
(point_pert_y_f - point_pert_y_b) / (2 * delta),
(point_pert_z_f - point_pert_z_b) / (2 * delta)]).T
inputs = [[1, 0, 0], [0, 1, 0], [0, 0, 1], [np.sqrt(3), np.sqrt(3), np.sqrt(3)],
[-1, 0, 0], [0, -1, 0], [0, 0, -1], [-np.sqrt(3), -np.sqrt(3), -np.sqrt(3)],
[1, 0, 100], [0, 0.5, 1]]
misalignment = [[1e-8, 0, 0], [0, 1e-8, 0], [0, 0, 1e-8], [1e-9, 1e-9, 1e-9],
[-1e-8, 0, 0], [0, -1e-8, 0], [0, 0, -1e-8], [-1e-9, -1e-9, -1e-9],
[1e-9, 2.3e-9, -0.5e-9]]
for mis in misalignment:
with self.subTest(misalignment=mis):
for inp in inputs:
num = num_deriv(inp, mis)
# noinspection PyTypeChecker
ana = self.Class._compute_dcamera_point_dmisalignment(inp)
np.testing.assert_allclose(num, ana, atol=1e-10, rtol=1e-4)
def test__compute_dpixel_ddistorted_gnomic(self):
def num_deriv(loc, cmodel, delta=1e-8, temperature=0) -> np.ndarray:
loc_pert = np.array(loc) + [delta, 0]
loc_pert *= cmodel.get_temperature_scale(temperature)
pix_pert_x_f = cmodel.intrinsic_matrix[:, :2] @ loc_pert + cmodel.intrinsic_matrix[:, 2]
loc_pert = np.array(loc) + [0, delta]
loc_pert *= cmodel.get_temperature_scale(temperature)
pix_pert_y_f = cmodel.intrinsic_matrix[:, :2] @ loc_pert + cmodel.intrinsic_matrix[:, 2]
loc_pert = np.array(loc) - [delta, 0]
loc_pert *= cmodel.get_temperature_scale(temperature)
pix_pert_x_b = cmodel.intrinsic_matrix[:, :2] @ loc_pert + cmodel.intrinsic_matrix[:, 2]
loc_pert = np.array(loc) - [0, delta]
loc_pert *= cmodel.get_temperature_scale(temperature)
pix_pert_y_b = cmodel.intrinsic_matrix[:, :2] @ loc_pert + cmodel.intrinsic_matrix[:, 2]
return np.array(
[(pix_pert_x_f - pix_pert_x_b) / (2 * delta), (pix_pert_y_f - pix_pert_y_b) / (2 * delta)]).T
intrins_coefs = [{"kx": 1.5, "ky": 0, "px": 0, "py": 0, 'a1': 0, 'a2': 0, 'a3': 0},
{"kx": 0, "ky": 1.5, "px": 0, "py": 0, 'a1': 0, 'a2': 0, 'a3': 0},
{"kx": 0, "ky": 0, "px": 1.5, "py": 0, 'a1': 0, 'a2': 0, 'a3': 0},
{"kx": 0, "ky": 0, "px": 0, "py": 1.5, 'a1': 0, 'a2': 0, 'a3': 0},
{"kx": 0, "ky": 0, "px": 0, "py": 0, 'a1': 1.5, 'a2': 0, 'a3': 0},
{"kx": 0, "ky": 0, "px": 0, "py": 0, 'a1': 0, 'a2': 1.5, 'a3': 0},
{"kx": 0, "ky": 0, "px": 0, "py": 0, 'a1': 0, 'a2': 0, 'a3': 1.5},
{"kx": 1.5, "ky": 1.5, "px": 1.5, "py": 1.5, 'a1': 1.5, 'a2': 1.5, 'a3': 1.5}]
inputs = [[1e-6, 1e-6], [1, 0], [-1, 0], [1.5, 0], [-1.5, 0], [0, 1], [0, -1], [0, 1.5], [0, -1.5], [1, 1]]
temperatures = [0, 1, -1, 10.5, -10.5]
for intrins_coef in intrins_coefs:
model = self.Class(**intrins_coef)
for temp in temperatures:
with self.subTest(temp=temp, **intrins_coef):
for inp in inputs:
num = num_deriv(inp, model, temperature=temp)
ana = model._compute_dpixel_ddistorted_gnomic(temperature=temp)
np.testing.assert_allclose(num, ana, atol=1e-10)
def test__compute_dgnomic_dcamera_point(self):
def num_deriv(loc, cmodel, delta=1e-8) -> np.ndarray:
loc_pert = np.array(loc) + [delta, 0, 0]
gnom_pert_x_f = cmodel.get_projections(loc_pert)[0]
loc_pert = np.array(loc) + [0, delta, 0]
gnom_pert_y_f = cmodel.get_projections(loc_pert)[0]
loc_pert = np.array(loc) + [0, 0, delta]
gnom_pert_z_f = cmodel.get_projections(loc_pert)[0]
loc_pert = np.array(loc) - [delta, 0, 0]
gnom_pert_x_b = cmodel.get_projections(loc_pert)[0]
loc_pert = np.array(loc) - [0, delta, 0]
gnom_pert_y_b = cmodel.get_projections(loc_pert)[0]
loc_pert = np.array(loc) - [0, 0, delta]
gnom_pert_z_b = cmodel.get_projections(loc_pert)[0]
return np.array([(gnom_pert_x_f - gnom_pert_x_b) / (2 * delta),
(gnom_pert_y_f - gnom_pert_y_b) / (2 * delta),
(gnom_pert_z_f - gnom_pert_z_b) / (2 * delta)]).T
intrins_coefs = [{"focal_length": 1},
{"focal_length": 2.5},
{"focal_length": 1000},
{"focal_length": 0.1}]
inputs = [[0, 0, 1], [0.5, 0, 1], [0, 0.5, 1], [0.5, 0.5, 1], [-0.5, 0, 1], [0, -0.5, 1], [-0.5, -0.5, 1],
[5, 10, 1000.23], [0.5, 1e-14, 1]]
for intrins_coef in intrins_coefs:
model = self.Class(**intrins_coef)
with self.subTest(**intrins_coef):
for inp in inputs:
num = num_deriv(inp, model)
ana = model._compute_dgnomic_dcamera_point(np.array(inp))
np.testing.assert_allclose(num, ana, atol=1e-9, rtol=1e-5)
def test__compute_dgnomic_dfocal_length(self):
def num_deriv(loc, cmodel, delta=1e-8) -> np.ndarray:
model_pert = cmodel.copy()
model_pert.focal_length += delta
gnom_pert_f = model_pert.get_projections(loc)[0]
model_pert = cmodel.copy()
model_pert.focal_length -= delta
gnom_pert_b = model_pert.get_projections(loc)[0]
# noinspection PyTypeChecker
return np.asarray((gnom_pert_f - gnom_pert_b) / (2 * delta))
intrins_coefs = [{"focal_length": 1},
{"focal_length": 2.5},
{"focal_length": 1000},
{"focal_length": 0.1}]
inputs = [[0, 0, 1], [0.5, 0, 1], [0, 0.5, 1], [0.5, 0.5, 1], [-0.5, 0, 1], [0, -0.5, 1], [-0.5, -0.5, 1]]
for intrins_coef in intrins_coefs:
model = self.Class(**intrins_coef)
with self.subTest(**intrins_coef):
for inp in inputs:
num = num_deriv(inp, model)
ana = model._compute_dgnomic_dfocal_length(np.array(inp))
np.testing.assert_allclose(num, ana, atol=1e-10, rtol=1e-5)
def test__compute_dpixel_dintrinsic(self):
def num_deriv(loc, cmodel, delta=1e-6) -> np.ndarray:
model_pert = cmodel.copy()
model_pert.kx += delta
pix_pert_kx_f = model_pert.intrinsic_matrix[:, :2] @ loc + model_pert.intrinsic_matrix[:, 2]
model_pert = cmodel.copy()
model_pert.ky += delta
pix_pert_ky_f = model_pert.intrinsic_matrix[:, :2] @ loc + model_pert.intrinsic_matrix[:, 2]
model_pert = cmodel.copy()
model_pert.px += delta
pix_pert_px_f = model_pert.intrinsic_matrix[:, :2] @ loc + model_pert.intrinsic_matrix[:, 2]
model_pert = cmodel.copy()
model_pert.py += delta
pix_pert_py_f = model_pert.intrinsic_matrix[:, :2] @ loc + model_pert.intrinsic_matrix[:, 2]
model_pert = cmodel.copy()
model_pert.kx -= delta
pix_pert_kx_b = model_pert.intrinsic_matrix[:, :2] @ loc + model_pert.intrinsic_matrix[:, 2]
model_pert = cmodel.copy()
model_pert.ky -= delta
pix_pert_ky_b = model_pert.intrinsic_matrix[:, :2] @ loc + model_pert.intrinsic_matrix[:, 2]
model_pert = cmodel.copy()
model_pert.px -= delta
pix_pert_px_b = model_pert.intrinsic_matrix[:, :2] @ loc + model_pert.intrinsic_matrix[:, 2]
model_pert = cmodel.copy()
model_pert.py -= delta
pix_pert_py_b = model_pert.intrinsic_matrix[:, :2] @ loc + model_pert.intrinsic_matrix[:, 2]
return np.array([(pix_pert_kx_f - pix_pert_kx_b) / (2 * delta),
(pix_pert_ky_f - pix_pert_ky_b) / (2 * delta),
(pix_pert_px_f - pix_pert_px_b) / (2 * delta),
(pix_pert_py_f - pix_pert_py_b) / (2 * delta)]).T
intrins_coefs = [{"kx": 1.5, "ky": 0, "px": 0, "py": 0},
{"kx": 0, "ky": 1.5, "px": 0, "py": 0},
{"kx": 0, "ky": 0, "px": 1.5, "py": 0},
{"kx": 0, "ky": 0, "px": 0, "py": 1.5},
{"kx": 1.5, "ky": 1.5, "px": 1.5, "py": 1.5}]
inputs = [[1e-6, 1e-6], [1, 0], [-1, 0], [1.5, 0], [-1.5, 0], [0, 1], [0, -1], [0, 1.5], [0, -1.5], [1, 1]]
for intrins_coef in intrins_coefs:
model = self.Class(**intrins_coef)
with self.subTest(**intrins_coef):
for inp in inputs:
num = num_deriv(inp, model)
ana = model._compute_dpixel_dintrinsic(np.array(inp))
np.testing.assert_allclose(num, ana, atol=1e-10)
def test__compute_dpixel_dtemperature_coeffs(self):
def num_deriv(loc, cmodel, delta=1e-6, temperature=0) -> np.ndarray:
loc = np.array(loc)
model_pert = cmodel.copy()
model_pert.a1 += delta
loc_copy = loc * model_pert.get_temperature_scale(temperature)
pix_pert_a1_f = model_pert.intrinsic_matrix[:, :2] @ loc_copy + model_pert.intrinsic_matrix[:, 2]
model_pert = cmodel.copy()
model_pert.a2 += delta
loc_copy = loc * model_pert.get_temperature_scale(temperature)
pix_pert_a2_f = model_pert.intrinsic_matrix[:, :2] @ loc_copy + model_pert.intrinsic_matrix[:, 2]
model_pert = cmodel.copy()
model_pert.a3 += delta
loc_copy = loc * model_pert.get_temperature_scale(temperature)
pix_pert_a3_f = model_pert.intrinsic_matrix[:, :2] @ loc_copy + model_pert.intrinsic_matrix[:, 2]
model_pert = cmodel.copy()
model_pert.a1 -= delta
loc_copy = loc * model_pert.get_temperature_scale(temperature)
pix_pert_a1_b = model_pert.intrinsic_matrix[:, :2] @ loc_copy + model_pert.intrinsic_matrix[:, 2]
model_pert = cmodel.copy()
model_pert.a2 -= delta
loc_copy = loc * model_pert.get_temperature_scale(temperature)
pix_pert_a2_b = model_pert.intrinsic_matrix[:, :2] @ loc_copy + model_pert.intrinsic_matrix[:, 2]
model_pert = cmodel.copy()
model_pert.a3 -= delta
loc_copy = loc * model_pert.get_temperature_scale(temperature)
pix_pert_a3_b = model_pert.intrinsic_matrix[:, :2] @ loc_copy + model_pert.intrinsic_matrix[:, 2]
return np.array([(pix_pert_a1_f - pix_pert_a1_b) / (2 * delta),
(pix_pert_a2_f - pix_pert_a2_b) / (2 * delta),
(pix_pert_a3_f - pix_pert_a3_b) / (2 * delta)]).T
intrins_coefs = [{"kx": 1.5, "ky": 0, "px": 0, "py": 0, 'a1': 0, 'a2': 0, 'a3': 0},
{"kx": 0, "ky": 1.5, "px": 0, "py": 0, 'a1': 0, 'a2': 0, 'a3': 0},
{"kx": 0, "ky": 0, "px": 1.5, "py": 0, 'a1': 0, 'a2': 0, 'a3': 0},
{"kx": 0, "ky": 0, "px": 0, "py": 1.5, 'a1': 0, 'a2': 0, 'a3': 0},
{"kx": 0, "ky": 0, "px": 0, "py": 0, 'a1': 1.5, 'a2': 0, 'a3': 0},
{"kx": 0, "ky": 0, "px": 0, "py": 0, 'a1': 0, 'a2': 1.5, 'a3': 0},
{"kx": 0, "ky": 0, "px": 0, "py": 0, 'a1': 0, 'a2': 0, 'a3': 1.5},
{"kx": 1.5, "ky": 1.5, "px": 1.5, "py": 1.5, 'a1': 1.5, 'a2': 1.5, 'a3': 1.5}]
inputs = [[1e-6, 1e-6], [1, 0], [-1, 0], [1.5, 0], [-1.5, 0], [0, 1], [0, -1], [0, 1.5], [0, -1.5], [1, 1]]
temperatures = [0, 1, -1, -10.5, 10.5]
for intrins_coef in intrins_coefs:
model = self.Class(**intrins_coef)
for temp in temperatures:
with self.subTest(temp=temp, **intrins_coef):
for inp in inputs:
num = num_deriv(inp, model, temperature=temp)
ana = model._compute_dpixel_dtemperature_coeffs(inp, temperature=temp)
np.testing.assert_allclose(num, ana, atol=1e-10)
def test_get_jacobian_row(self):
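# the numeric row is built by perturbing every estimable parameter (focal length, intrinsic terms,
# temperature coefficients, and the misalignment vector of the requested image) and central-differencing
# the projected pixel; zero columns stand in for the misalignments of earlier images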
def num_deriv(loc, cmodel, delta=1e-8, image=0, temperature=0) -> np.ndarray:
model_pert = cmodel.copy()
model_pert.focal_length += delta
pix_pert_f_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.focal_length -= delta
pix_pert_f_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.kx += delta
pix_pert_kx_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.ky += delta
pix_pert_ky_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.px += delta
pix_pert_px_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.py += delta
pix_pert_py_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.kx -= delta
pix_pert_kx_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.ky -= delta
pix_pert_ky_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.px -= delta
pix_pert_px_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.py -= delta
pix_pert_py_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.a1 += delta
pix_pert_a1_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.a2 += delta
pix_pert_a2_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.a3 += delta
pix_pert_a3_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.a1 -= delta
pix_pert_a1_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.a2 -= delta
pix_pert_a2_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.a3 -= delta
pix_pert_a3_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
delta_misalignment = 1e-6
model_pert = cmodel.copy()
model_pert.misalignment[image][0] += delta_misalignment
pix_pert_mx_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.misalignment[image][1] += delta_misalignment
pix_pert_my_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.misalignment[image][2] += delta_misalignment
pix_pert_mz_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.misalignment[image][0] -= delta_misalignment
pix_pert_mx_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.misalignment[image][1] -= delta_misalignment
pix_pert_my_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.misalignment[image][2] -= delta_misalignment
pix_pert_mz_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
return np.vstack([(pix_pert_f_f - pix_pert_f_b) / (delta * 2),
(pix_pert_kx_f - pix_pert_kx_b) / (delta * 2),
(pix_pert_ky_f - pix_pert_ky_b) / (delta * 2),
(pix_pert_px_f - pix_pert_px_b) / (delta * 2),
(pix_pert_py_f - pix_pert_py_b) / (delta * 2),
(pix_pert_a1_f - pix_pert_a1_b) / (delta * 2),
(pix_pert_a2_f - pix_pert_a2_b) / (delta * 2),
(pix_pert_a3_f - pix_pert_a3_b) / (delta * 2),
np.zeros((image * 3, 2)),
(pix_pert_mx_f - pix_pert_mx_b) / (delta_misalignment * 2),
(pix_pert_my_f - pix_pert_my_b) / (delta_misalignment * 2),
(pix_pert_mz_f - pix_pert_mz_b) / (delta_misalignment * 2)]).T
# TODO: investigate why this fails with slightly larger misalignments and temperature coefficients
model_param = {"focal_length": 100.75, "kx": 30, "ky": 40, "px": 4005.23, "py": 4005.23,
"a1": 1e-4, "a2": 2e-7, "a3": 3e-8,
"misalignment": [[2e-15, -1.2e-14, 5e-16], [-1e-14, 2e-14, -1e-15]]}
inputs = [[0.5, 0, 1], [0, 0.5, 1], [0.5, 0.5, 1], [-0.5, 0, 1], [0, -0.5, 1], [-0.5, -0.5, 1],
[5, 10, 1000.23], [[10], [-22], [1200.23]]]
temperatures = [0, -1, 1, -10.5, 10.5]
model = self.Class(**model_param)
model.estimate_multiple_misalignments = True
for inp in inputs:
for temp in temperatures:
with self.subTest(temp=temp, inp=inp):
num = num_deriv(inp, model, delta=1, temperature=temp)
ana = model._get_jacobian_row(np.array(inp), 0, 1, temperature=temp)
np.testing.assert_allclose(ana, num, rtol=1e-2, atol=1e-10)
num = num_deriv(inp, model, delta=1, image=1, temperature=temp)
ana = model._get_jacobian_row(np.array(inp), 1, 2, temperature=temp)
np.testing.assert_allclose(ana, num, atol=1e-10, rtol=1e-2)
def test_compute_jacobian(self):
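# same per-parameter perturbation as test_get_jacobian_row, stacked over every point in every image;
# when use_a_priori is True an identity block is expected to be appended to the bottom of the Jacobian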
def num_deriv(loc, cmodel, delta=1e-8, image=0, nimages=1, temperature=0) -> np.ndarray:
model_pert = cmodel.copy()
model_pert.focal_length += delta
pix_pert_f_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.focal_length -= delta
pix_pert_f_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.kx += delta
pix_pert_kx_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.ky += delta
pix_pert_ky_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.px += delta
pix_pert_px_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.py += delta
pix_pert_py_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.kx -= delta
pix_pert_kx_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.ky -= delta
pix_pert_ky_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.px -= delta
pix_pert_px_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.py -= delta
pix_pert_py_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.a1 += delta
pix_pert_a1_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.a2 += delta
pix_pert_a2_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.a3 += delta
pix_pert_a3_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.a1 -= delta
pix_pert_a1_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.a2 -= delta
pix_pert_a2_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.a3 -= delta
pix_pert_a3_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
delta_misalignment = 1e-6
model_pert = cmodel.copy()
model_pert.misalignment[image][0] += delta_misalignment
pix_pert_mx_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.misalignment[image][1] += delta_misalignment
pix_pert_my_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.misalignment[image][2] += delta_misalignment
pix_pert_mz_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.misalignment[image][0] -= delta_misalignment
pix_pert_mx_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.misalignment[image][1] -= delta_misalignment
pix_pert_my_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.misalignment[image][2] -= delta_misalignment
pix_pert_mz_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
return np.vstack([(pix_pert_f_f - pix_pert_f_b) / (delta * 2),
(pix_pert_kx_f - pix_pert_kx_b) / (delta * 2),
(pix_pert_ky_f - pix_pert_ky_b) / (delta * 2),
(pix_pert_px_f - pix_pert_px_b) / (delta * 2),
(pix_pert_py_f - pix_pert_py_b) / (delta * 2),
(pix_pert_a1_f - pix_pert_a1_b) / (delta * 2),
(pix_pert_a2_f - pix_pert_a2_b) / (delta * 2),
(pix_pert_a3_f - pix_pert_a3_b) / (delta * 2),
np.zeros((image * 3, 2)),
(pix_pert_mx_f - pix_pert_mx_b) / (delta_misalignment * 2),
(pix_pert_my_f - pix_pert_my_b) / (delta_misalignment * 2),
(pix_pert_mz_f - pix_pert_mz_b) / (delta_misalignment * 2),
np.zeros(((nimages - image - 1) * 3, 2))]).T
model_param = {"focal_length": 100.75, "kx": 30, "ky": 40, "px": 4005.23, "py": 4005.23,
"a1": 1e-5, "a2": 1e-6, "a3": 1e-7,
"misalignment": [[0, 0, 1e-15], [0, 2e-15, 0], [3e-15, 0, 0]]}
inputs = [np.array([[0.5, 0, 1]]).T,
np.array([[0, 0.5, 1], [0.5, 0.5, 1], [-0.5, 0, 1000]]).T,
np.array([[0.1, -0.5, 1], [-0.5, -0.5, 1], [5, 10, 1000.23], [1, 2, 1200.23]]).T]
temperatures = [0, 1, -1, 10.5, -10.5]
model = self.Class(**model_param, estimation_parameters=['intrinsic',
'temperature dependence',
'multiple misalignments'])
for temp in temperatures:
with self.subTest(temp=temp):
model.use_a_priori = False
jac_ana = model.compute_jacobian(inputs, temperature=temp)
jac_num = []
numim = len(inputs)
for ind, inp in enumerate(inputs):
for vec in inp.T:
jac_num.append(num_deriv(vec.T, model, delta=1, image=ind, nimages=numim, temperature=temp))
np.testing.assert_allclose(jac_ana, np.vstack(jac_num), rtol=1e-2, atol=1e-10)
model.use_a_priori = True
jac_ana = model.compute_jacobian(inputs, temperature=temp)
jac_num = []
numim = len(inputs)
for ind, inp in enumerate(inputs):
for vec in inp.T:
jac_num.append(num_deriv(vec.T, model, delta=1, image=ind, nimages=numim, temperature=temp))
jac_num = np.vstack(jac_num)
jac_num = np.pad(jac_num, [(0, jac_num.shape[1]), (0, 0)], 'constant', constant_values=0)
jac_num[-jac_num.shape[1]:] = np.eye(jac_num.shape[1])
np.testing.assert_allclose(jac_ana, jac_num, rtol=1e-2, atol=1e-10)
def test_remove_jacobian_columns(self):
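# a 1x30 Jacobian of distinct values makes it easy to verify that each estimation-parameter key keeps
# exactly the columns listed in element_dict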
jac = np.arange(30).reshape(1, -1)
model = self.Class()
for est_param, vals in model.element_dict.items():
model.estimation_parameters = [est_param]
expected = jac[0, vals]
np.testing.assert_array_equal(model._remove_jacobian_columns(jac), [expected])
def test_apply_update(self):
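# the update vector is arange(14): the first eight entries map onto the scalar parameters through
# element_dict and the remaining six, taken three at a time, are treated as misalignment rotation
# vectors (compared through at.Rotation(...).q)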
model_param = {"focal_length": 0, "kx": 0, "ky": 0,
"px": 0, "py": 0, "a1": 0, "a2": 0, "a3": 0,
"misalignment": [[0, 0, 0], [0, 0, 0]]}
model = self.Class(**model_param, estimation_parameters=['intrinsic',
'temperature dependence',
'multiple misalignments'])
update_vec = np.arange(14)
model.apply_update(update_vec)
keys = list(model_param.keys())
keys.remove('misalignment')
for key in keys:
self.assertEqual(getattr(model, key), update_vec[model.element_dict[key][0]])
for ind, vec in enumerate(update_vec[8:].reshape(-1, 3)):
np.testing.assert_array_almost_equal(at.Rotation(vec).q, at.Rotation(model.misalignment[ind]).q)
def test_pixels_to_gnomic(self):
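# round trip: distort the gnomic points, apply the temperature scale and intrinsic matrix to build
# pixels, then check that pixels_to_gnomic recovers the original gnomic coordinates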
gnomic = [[1, 0], [0, 1], [-1, 0], [0, -1],
[0.5, 0], [0, 0.5], [-0.5, 0], [0, -0.5],
[0.5, 0.5], [-0.5, -0.5], [0.5, -0.5], [-0.5, 0.5],
[[1, 0, 0.5], [0, 1.5, -0.5]]]
model = self.Class(kx=2000, ky=-3000.2, px=1025, py=937.567,
a1=1e-3, a2=2e-6, a3=-5.5e-8)
temperatures = [0, 1, -1, 10.5, -10.5]
for gnoms in gnomic:
for temp in temperatures:
with self.subTest(gnoms=gnoms, temp=temp):
dis_gnoms = np.asarray(model.apply_distortion(gnoms)).astype(float)
dis_gnoms *= model.get_temperature_scale(temp)
pixels = ((model.intrinsic_matrix[:, :2] @ dis_gnoms).T + model.intrinsic_matrix[:, 2]).T
gnoms_solved = model.pixels_to_gnomic(pixels, temperature=temp)
np.testing.assert_allclose(gnoms_solved, gnoms)
def test_undistort_pixels(self):
intrins_param = {"kx": 3000, "ky": 4000, "px": 4005.23, 'py': 2000.33, 'a1': 1e-6, 'a2': 1e-5, 'a3': 2e-5}
pinhole = [[0, 0], [1, 0], [-1, 0], [1.5, 0], [-1.5, 0], [[1.5], [0]], [[1.5, -1], [0, 0]],
[0, 1], [0, -1], [0, 1.5], [0, -1.5], [[0], [1.5]], [[0, 0], [1.5, -1]], [1, 1]]
model = self.Class(**intrins_param)
temperatures = [0, 1, -1, 10.5, -10.5]
for gnom in pinhole:
gnom = np.asarray(gnom).astype(float)
for temp in temperatures:
with self.subTest(gnom=gnom, temp=temp):
mm_dist = model.apply_distortion(np.array(gnom))
temp_scale = model.get_temperature_scale(temp)
mm_dist *= temp_scale
pix_dist = ((model.intrinsic_matrix[:, :2] @ mm_dist).T + model.intrinsic_matrix[:, 2]).T
pix_undist = model.undistort_pixels(pix_dist, temperature=temp)
gnom *= temp_scale
pix_pinhole = ((model.intrinsic_matrix[:, :2] @ gnom).T + model.intrinsic_matrix[:, 2]).T
np.testing.assert_allclose(pix_undist, pix_pinhole, atol=1e-13)
def test_pixels_to_unit(self):
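# round trip: project camera-frame directions onto the image and back, comparing against the
# normalized input vectors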
intrins_param = {"focal_length": 32.7, "kx": 3000, "ky": 4000,
"px": 4005.23, 'py': 2000.33, 'a1': 1e-5, 'a2': -1e-10, 'a3': 2e-4,
'misalignment': [[1e-10, 2e-13, -3e-12], [4e-8, -5.3e-9, 9e-15]]}
camera_vecs = [[0, 0, 1], [0.01, 0, 1], [-0.01, 0, 1], [0, 0.01, 1], [0, -0.01, 1], [0.01, 0.01, 1],
[-0.01, -0.01, 1], [[0.01, -0.01], [-0.01, 0.01], [1, 1]]]
temperatures = [0, 1, -1, 10.5, -10.5]
model = self.Class(**intrins_param)
# TODO: consider adjusting so this isn't needed
model.estimate_multiple_misalignments = True
for vec in camera_vecs:
for image in [0, 1]:
for temp in temperatures:
with self.subTest(vec=vec, image=image, temp=temp):
pixel_loc = model.project_onto_image(vec, image=image, temperature=temp)
unit_vec = model.pixels_to_unit(pixel_loc, image=image, temperature=temp)
unit_true = np.array(vec).astype(np.float64)
unit_true /= np.linalg.norm(unit_true, axis=0, keepdims=True)
np.testing.assert_allclose(unit_vec, unit_true, atol=1e-13)
def test_overwrite(self):
model1 = self.Class(field_of_view=10, intrinsic_matrix=np.array([[1, 0, 3], [0, 5, 6]]), focal_length=60,
misalignment=[[1, 2, 3], [4, 5, 6]], use_a_priori=False,
estimation_parameters=['multiple misalignments'])
model2 = self.Class(field_of_view=20, intrinsic_matrix=np.array([[11, 0, 13], [0, 15, 16]]), focal_length=160,
misalignment=[[11, 12, 13], [14, 15, 16]], use_a_priori=True,
estimation_parameters=['single misalignment'])
modeltest = model1.copy()
modeltest.overwrite(model2)
self.assertEqual(model2.field_of_view, modeltest.field_of_view)
self.assertEqual(model2.use_a_priori, modeltest.use_a_priori)
self.assertEqual(model2.estimate_multiple_misalignments, modeltest.estimate_multiple_misalignments)
np.testing.assert_array_equal(model2.intrinsic_matrix, modeltest.intrinsic_matrix)
np.testing.assert_array_equal(model2.misalignment, modeltest.misalignment)
np.testing.assert_array_equal(model2.estimation_parameters, modeltest.estimation_parameters)
modeltest = model2.copy()
modeltest.overwrite(model1)
self.assertEqual(model1.field_of_view, modeltest.field_of_view)
self.assertEqual(model1.use_a_priori, modeltest.use_a_priori)
self.assertEqual(model1.estimate_multiple_misalignments, modeltest.estimate_multiple_misalignments)
np.testing.assert_array_equal(model1.intrinsic_matrix, modeltest.intrinsic_matrix)
np.testing.assert_array_equal(model1.misalignment, modeltest.misalignment)
np.testing.assert_array_equal(model1.estimation_parameters, modeltest.estimation_parameters)
def test_distort_pixels(self):
model = self.Class(kx=1000, ky=-950.5, px=4500, py=139.32, a1=1e-3, a2=1e-4, a3=1e-5)
pixels = [[0, 1], [1, 0], [-1, 0], [0, -1], [9000., 200.2],
[[4500, 100, 10.98], [0, 139.23, 200.3]]]
temperatures = [0, 1, -1, 10.5, -10.5]
for pix in pixels:
for temp in temperatures:
with self.subTest(pix=pix, temp=temp):
undist_pix = model.undistort_pixels(pix, temperature=temp)
dist_pix = model.distort_pixels(undist_pix, temperature=temp)
np.testing.assert_allclose(dist_pix, pix)
def test_distortion_map(self):
model = self.Class(kx=100, ky=-985.234, px=1000, py=1095)
rows, cols, dist = model.distortion_map((2000, 250), step=10)
# noinspection PyTypeChecker
np.testing.assert_allclose(dist, 0, atol=1e-10)
rl, cl = np.arange(0, 2000, 10), np.arange(0, 250, 10)
rs, cs = np.meshgrid(rl, cl, indexing='ij')
np.testing.assert_array_equal(rows, rs)
np.testing.assert_array_equal(cols, cs)
def test_undistort_image(self):
# not sure how best to do this test...
pass
def test_copy(self):
model = self.Class()
model_copy = model.copy()
model.kx = 1000
model.ky = 999
model.px = 100
model.py = -20
model.a1 = 5
model.a2 = 6
model.a3 = 7
model._focal_length = 11231
model.field_of_view = 1231231
model.use_a_priori = True
model.estimation_parameters = ['a1', 'kx', 'ky']
model.estimate_multiple_misalignments = True
model.misalignment = [1231241, 123124, .12]
self.assertNotEqual(model.kx, model_copy.kx)
self.assertNotEqual(model.ky, model_copy.ky)
self.assertNotEqual(model.px, model_copy.px)
self.assertNotEqual(model.py, model_copy.py)
self.assertNotEqual(model.a1, model_copy.a1)
self.assertNotEqual(model.a2, model_copy.a2)
self.assertNotEqual(model.a3, model_copy.a3)
self.assertNotEqual(model.focal_length, model_copy.focal_length)
self.assertNotEqual(model.field_of_view, model_copy.field_of_view)
self.assertNotEqual(model.use_a_priori, model_copy.use_a_priori)
self.assertNotEqual(model.estimate_multiple_misalignments, model_copy.estimate_multiple_misalignments)
self.assertNotEqual(model.estimation_parameters, model_copy.estimation_parameters)
self.assertTrue((model.misalignment != model_copy.misalignment).all())
def test_to_from_elem(self):
element = etree.Element(self.Class.__name__)
model = self.Class(focal_length=20, field_of_view=5, use_a_priori=True,
misalignment=[1, 2, 3], kx=2, ky=200, px=50, py=300,
a1=37, a2=1, a3=-1230,
estimation_parameters=['a1', 'multiple misalignments'], n_rows=20, n_cols=30)
model_copy = model.copy()
with self.subTest(misalignment=True):
element = model.to_elem(element, misalignment=True)
self.assertEqual(model, model_copy)
model_new = self.Class.from_elem(element)
self.assertEqual(model, model_new)
with self.subTest(misalignment=False):
element = model.to_elem(element, misalignment=False)
self.assertEqual(model, model_copy)
model_new = self.Class.from_elem(element)
model.estimation_parameters[-1] = 'single misalignment'
model.estimate_multiple_misalignments = False
model.misalignment = np.zeros(3)
self.assertEqual(model, model_new)
class TestOwenModel(TestPinholeModel):
def setUp(self):
self.Class = OwenModel
def test___init__(self):
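# the Owen model intrinsic matrix is laid out as [[kx, kxy, px], [kyx, ky, py]] and, per the property
# tests below, the distortion coefficients are stored as
# [radial2 (e2), radial4 (e4), tangential_y (e5), tangential_x (e6), pinwheel1 (e1), pinwheel2 (e3)]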
model = self.Class(intrinsic_matrix=np.array([[1, 2, 3], [4, 5, 6]]), focal_length=10.5, field_of_view=20.5,
use_a_priori=True, misalignment=[np.zeros(3), np.ones(3)],
distortion_coefficients=np.array([1, 2, 3, 4, 5, 6]),
estimation_parameters='basic intrinsic',
a1=1, a2=2, a3=3)
np.testing.assert_array_equal(model.intrinsic_matrix, [[1, 2, 3], [4, 5, 6]])
self.assertEqual(model.focal_length, 10.5)
self.assertEqual(model.field_of_view, 20.5)
self.assertTrue(model.use_a_priori)
self.assertEqual(model.estimation_parameters, ['basic intrinsic'])
self.assertEqual(model.a1, 1)
self.assertEqual(model.a2, 2)
self.assertEqual(model.a3, 3)
np.testing.assert_array_equal(model.distortion_coefficients, np.arange(1, 7))
model = self.Class(kx=1, ky=2, px=4, py=5, focal_length=10.5, field_of_view=20.5,
use_a_priori=True, misalignment=[np.zeros(3), np.ones(3)], kxy=80, kyx=90,
estimation_parameters=['focal_length', 'px'], n_rows=500, n_cols=600,
e1=1, radial2=2, pinwheel2=3, e4=4, tangential_x=6, e5=5)
np.testing.assert_array_equal(model.intrinsic_matrix, [[1, 80, 4], [90, 2, 5]])
self.assertEqual(model.focal_length, 10.5)
self.assertEqual(model.field_of_view, 20.5)
self.assertTrue(model.use_a_priori)
self.assertEqual(model.estimation_parameters, ['focal_length', 'px'])
self.assertEqual(model.n_rows, 500)
self.assertEqual(model.n_cols, 600)
np.testing.assert_array_equal(model.distortion_coefficients, [2, 4, 5, 6, 1, 3])
def test_kxy(self):
model = self.Class(intrinsic_matrix=np.array([[0, 1, 0], [0, 0, 0]]))
self.assertEqual(model.kxy, 1)
model.kxy = 100
self.assertEqual(model.kxy, 100)
self.assertEqual(model.intrinsic_matrix[0, 1], 100)
def test_kyx(self):
model = self.Class(intrinsic_matrix=np.array([[0, 0, 0], [3, 0, 0]]))
self.assertEqual(model.kyx, 3)
model.kyx = 100
self.assertEqual(model.kyx, 100)
self.assertEqual(model.intrinsic_matrix[1, 0], 100)
def test_e1(self):
model = self.Class(distortion_coefficients=np.array([0, 0, 0, 0, 1, 0]))
self.assertEqual(model.e1, 1)
model.e1 = 100
self.assertEqual(model.e1, 100)
self.assertEqual(model.distortion_coefficients[4], 100)
def test_e2(self):
model = self.Class(distortion_coefficients=np.array([1, 0, 0, 0, 0, 0]))
self.assertEqual(model.e2, 1)
model.e2 = 100
self.assertEqual(model.e2, 100)
self.assertEqual(model.distortion_coefficients[0], 100)
def test_e3(self):
model = self.Class(distortion_coefficients=np.array([0, 0, 0, 0, 0, 1]))
self.assertEqual(model.e3, 1)
model.e3 = 100
self.assertEqual(model.e3, 100)
self.assertEqual(model.distortion_coefficients[5], 100)
def test_e4(self):
model = self.Class(distortion_coefficients=np.array([0, 1, 0, 0, 0, 0]))
self.assertEqual(model.e4, 1)
model.e4 = 100
self.assertEqual(model.e4, 100)
self.assertEqual(model.distortion_coefficients[1], 100)
def test_e5(self):
model = self.Class(distortion_coefficients=np.array([0, 0, 1, 0, 0, 0]))
self.assertEqual(model.e5, 1)
model.e5 = 100
self.assertEqual(model.e5, 100)
self.assertEqual(model.distortion_coefficients[2], 100)
def test_e6(self):
model = self.Class(distortion_coefficients=np.array([0, 0, 0, 1, 0, 0]))
self.assertEqual(model.e6, 1)
model.e6 = 100
self.assertEqual(model.e6, 100)
self.assertEqual(model.distortion_coefficients[3], 100)
def test_pinwheel1(self):
model = self.Class(distortion_coefficients=np.array([0, 0, 0, 0, 1, 0]))
self.assertEqual(model.pinwheel1, 1)
model.pinwheel1 = 100
self.assertEqual(model.pinwheel1, 100)
self.assertEqual(model.distortion_coefficients[4], 100)
def test_radial2(self):
model = self.Class(distortion_coefficients=np.array([1, 0, 0, 0, 0, 0]))
self.assertEqual(model.radial2, 1)
model.radial2 = 100
self.assertEqual(model.radial2, 100)
self.assertEqual(model.distortion_coefficients[0], 100)
def test_pinwheel2(self):
model = self.Class(distortion_coefficients=np.array([0, 0, 0, 0, 0, 1]))
self.assertEqual(model.pinwheel2, 1)
model.pinwheel2 = 100
self.assertEqual(model.pinwheel2, 100)
self.assertEqual(model.distortion_coefficients[5], 100)
def test_radial4(self):
model = self.Class(distortion_coefficients=np.array([0, 1, 0, 0, 0, 0]))
self.assertEqual(model.radial4, 1)
model.radial4 = 100
self.assertEqual(model.radial4, 100)
self.assertEqual(model.distortion_coefficients[1], 100)
def test_tangential_y(self):
model = self.Class(distortion_coefficients=np.array([0, 0, 1, 0, 0, 0]))
self.assertEqual(model.tangential_y, 1)
model.tangential_y = 100
self.assertEqual(model.tangential_y, 100)
self.assertEqual(model.distortion_coefficients[2], 100)
def test_tangential_x(self):
model = self.Class(distortion_coefficients=np.array([0, 0, 0, 1, 0, 0]))
self.assertEqual(model.tangential_x, 1)
model.tangential_x = 100
self.assertEqual(model.tangential_x, 100)
self.assertEqual(model.distortion_coefficients[3], 100)
def test_apply_distortion(self):
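# each distortion coefficient is exercised in isolation against hand-computed expectations,
# e.g. radial2=1.5 maps [1, 0] to [1 * (1 + 1.5 * r**2), 0] = [2.5, 0]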
dist_coefs = [{"radial2": 1.5, "radial4": 0, "tangential_x": 0, "tangential_y": 0,
"pinwheel1": 0, "pinwheel2": 0},
{"radial2": 0, "radial4": 1.5, "tangential_x": 0, "tangential_y": 0,
"pinwheel1": 0, "pinwheel2": 0},
{"radial2": 0, "radial4": 0, "tangential_x": 1.5, "tangential_y": 0,
"pinwheel1": 0, "pinwheel2": 0},
{"radial2": 0, "radial4": 0, "tangential_x": 0, "tangential_y": 1.5,
"pinwheel1": 0, "pinwheel2": 0},
{"radial2": 0, "radial4": 0, "tangential_x": 0, "tangential_y": 0,
"pinwheel1": 1.5, "pinwheel2": 0},
{"radial2": 0, "radial4": 0, "tangential_x": 0, "tangential_y": 0,
"pinwheel1": 0, "pinwheel2": 1.5}]
inputs = [[0, 0], [1, 0], [-1, 0], [1.5, 0], [-1.5, 0], [[1.5], [0]], [[1.5, -1], [0, 0]],
[0, 1], [0, -1], [0, 1.5], [0, -1.5], [[0], [1.5]], [[0, 0], [1.5, -1]], [1, 1]]
solus = [[[0, 0], [2.5, 0], [-2.5, 0], [1.5 + 1.5 ** 4, 0], [-(1.5 + 1.5 ** 4), 0],
[[(1.5 + 1.5 ** 4)], [0]], [[(1.5 + 1.5 ** 4), -2.5], [0, 0]],
[0, 2.5], [0, -2.5], [0, (1.5 + 1.5 ** 4)], [0, -(1.5 + 1.5 ** 4)], [[0], [(1.5 + 1.5 ** 4)]],
[[0, 0], [(1.5 + 1.5 ** 4), -2.5]], [1 + 2 * 1.5, 1 + 2 * 1.5]],
[[0, 0], [2.5, 0], [-2.5, 0], [(1.5 + 1.5 ** 6), 0], [-(1.5 + 1.5 ** 6), 0],
[[(1.5 + 1.5 ** 6)], [0]], [[(1.5 + 1.5 ** 6), -2.5], [0, 0]],
[0, 2.5], [0, -2.5], [0, (1.5 + 1.5 ** 6)], [0, -(1.5 + 1.5 ** 6)], [[0], [(1.5 + 1.5 ** 6)]],
[[0, 0], [(1.5 + 1.5 ** 6), -2.5]], [1 + 4 * 1.5, 1 + 4 * 1.5]],
[[0, 0], [2.5, 0], [0.5, 0], [(1.5 + 1.5 ** 3), 0], [-1.5 + 1.5 ** 3, 0],
[[(1.5 + 1.5 ** 3)], [0]], [[(1.5 + 1.5 ** 3), 0.5], [0, 0]],
[0, 1], [0, -1], [0, 1.5], [0, -1.5], [[0], [1.5]], [[0, 0], [1.5, -1]], [2.5, 2.5]],
[[0, 0], [1, 0], [-1, 0], [1.5, 0], [-1.5, 0], [[1.5], [0]], [[1.5, -1], [0, 0]],
[0, 2.5], [0, 0.5], [0, 1.5 + 1.5 ** 3], [0, -1.5 + 1.5 ** 3], [[0], [1.5 + 1.5 ** 3]],
[[0, 0], [1.5 + 1.5 ** 3, 0.5]], [2.5, 2.5]],
[[0, 0], [1, 1.5], [-1, -1.5], [1.5, 1.5 ** 3], [-1.5, -1.5 ** 3],
[[1.5], [1.5 ** 3]], [[1.5, -1], [1.5 ** 3, -1.5]],
[-1.5, 1], [1.5, -1], [-1.5 ** 3, 1.5], [1.5 ** 3, -1.5], [[-1.5 ** 3], [1.5]],
[[-1.5 ** 3, 1.5], [1.5, -1]],
[1 - np.sqrt(2) * 1.5, 1 + np.sqrt(2) * 1.5]],
[[0, 0], [1, 1.5], [-1, -1.5], [1.5, 1.5 ** 5], [-1.5, -1.5 ** 5],
[[1.5], [1.5 ** 5]], [[1.5, -1], [1.5 ** 5, -1.5]],
[-1.5, 1], [1.5, -1], [-1.5 ** 5, 1.5], [1.5 ** 5, -1.5], [[-1.5 ** 5], [1.5]],
[[-1.5 ** 5, 1.5], [1.5, -1]],
[1 - 2 * np.sqrt(2) * 1.5, 1 + 2 * np.sqrt(2) * 1.5]]]
for dist, sols in zip(dist_coefs, solus):
with self.subTest(**dist):
model = self.Class(**dist)
for inp, solu in zip(inputs, sols):
gnom_dist = model.apply_distortion(np.array(inp))
np.testing.assert_array_almost_equal(gnom_dist, solu)
def test_get_projections(self):
points = [[0, 0, 1], [-0.1, 0.2, 2.2], [[-0.1], [0.2], [2.2]], [[-0.1, 0], [0.2, 0], [2.2, 1]]]
model = self.Class(focal_length=8.7, kx=500, ky=500.5, kxy=1.5, kyx=-1.5, px=1500, py=1500.5,
radial2=1e-3, radial4=-2.2e-5, tangential_y=1e-3, tangential_x=1e-6,
pinwheel1=1e-6, pinwheel2=-2.23e-8, a1=1e-1, a2=1e-6, a3=-3e-7)
temps = [0, 1, -1, 10, -10]
for temp in temps:
with self.subTest(misalignment=None, temp=temp):
for point in points:
pin, dist, pix = model.get_projections(point, temperature=temp)
pin_true = model.focal_length * np.array(point[:2]) / point[2]
dist_true = model.apply_distortion(pin_true)
dist_true *= model.get_temperature_scale(temp)
pix_true = (np.matmul(model.intrinsic_matrix[:, :2], dist_true).T + model.intrinsic_matrix[:, 2]).T
np.testing.assert_array_equal(pin, pin_true)
np.testing.assert_array_equal(dist, dist_true)
np.testing.assert_array_equal(pix, pix_true)
model = self.Class(focal_length=8.7, kx=500, ky=500.5, kxy=1.5, kyx=-1.5, px=1500, py=1500.5,
radial2=1e-3, radial4=-2.2e-5, tangential_y=1e-3, tangential_x=1e-6,
pinwheel1=1e-6, pinwheel2=-2.23e-8, misalignment=[0, 0, np.pi])
with self.subTest(misalignment=[0, 0, np.pi]):
for point in points:
pin, dist, pix = model.get_projections(point)
pin_true = -model.focal_length * np.array(point[:2]) / point[2]
dist_true = model.apply_distortion(pin_true)
pix_true = (np.matmul(model.intrinsic_matrix[:, :2], dist_true).T + model.intrinsic_matrix[:, 2]).T
np.testing.assert_array_almost_equal(pin, pin_true)
np.testing.assert_array_almost_equal(dist, dist_true)
np.testing.assert_array_almost_equal(pix, pix_true)
model = self.Class(focal_length=8.7, kx=500, ky=500.5, kxy=1.5, kyx=-1.5, px=1500, py=1500.5,
radial2=1e-3, radial4=-2.2e-5, tangential_y=1e-3, tangential_x=1e-6,
pinwheel1=1e-6, pinwheel2=-2.23e-8, misalignment=[np.pi, 0, 0])
with self.subTest(misalignment=[np.pi, 0, 0]):
for point in points:
pin, dist, pix = model.get_projections(point)
pin_true = model.focal_length * np.array(point[:2]) / point[2]
pin_true[0] *= -1
dist_true = model.apply_distortion(pin_true)
pix_true = (np.matmul(model.intrinsic_matrix[:, :2], dist_true).T + model.intrinsic_matrix[:, 2]).T
np.testing.assert_array_almost_equal(pin, pin_true)
np.testing.assert_array_almost_equal(dist, dist_true)
np.testing.assert_array_almost_equal(pix, pix_true)
model = self.Class(focal_length=8.7, kx=500, ky=500.5, kxy=1.5, kyx=-1.5, px=1500, py=1500.5,
radial2=1e-3, radial4=-2.2e-5, tangential_y=1e-3, tangential_x=1e-6,
pinwheel1=1e-6, pinwheel2=-2.23e-8, misalignment=[0, np.pi, 0])
with self.subTest(misalignment=[0, np.pi, 0]):
for point in points:
pin, dist, pix = model.get_projections(point)
pin_true = model.focal_length * np.array(point[:2]) / point[2]
pin_true[1] *= -1
dist_true = model.apply_distortion(pin_true)
pix_true = (np.matmul(model.intrinsic_matrix[:, :2], dist_true).T + model.intrinsic_matrix[:, 2]).T
np.testing.assert_array_almost_equal(pin, pin_true)
np.testing.assert_array_almost_equal(dist, dist_true)
np.testing.assert_array_almost_equal(pix, pix_true)
model = self.Class(focal_length=8.7, kx=500, ky=500.5, kxy=1.5, kyx=-1.5, px=1500, py=1500.5,
radial2=1e-3, radial4=-2.2e-5, tangential_y=1e-3, tangential_x=1e-6,
pinwheel1=1e-6, pinwheel2=-2.23e-8, misalignment=[1, 0.2, 0.3])
with self.subTest(misalignment=[1, 0.2, 0.3]):
rot_mat = at.rotvec_to_rotmat([1, 0.2, 0.3]).squeeze()
for point in points:
point_new = rot_mat @ point
pin, dist, pix = model.get_projections(point)
pin_true = model.focal_length * np.array(point_new[:2]) / np.array(point_new[2])
dist_true = model.apply_distortion(pin_true)
pix_true = (np.matmul(model.intrinsic_matrix[:, :2], dist_true).T + model.intrinsic_matrix[:, 2]).T
np.testing.assert_array_almost_equal(pin, pin_true)
np.testing.assert_array_almost_equal(dist, dist_true)
np.testing.assert_array_almost_equal(pix, pix_true)
model = self.Class(focal_length=8.7, kx=500, ky=500.5, kxy=1.5, kyx=-1.5, px=1500, py=1500.5,
radial2=1e-3, radial4=-2.2e-5, tangential_y=1e-3, tangential_x=1e-6,
pinwheel1=1e-6, pinwheel2=-2.23e-8, misalignment=[[1, 0.2, 0.3], [0, 0, np.pi]])
model.estimate_multiple_misalignments = True
with self.subTest(misalignment=[[1, 0.2, 0.3], [0, 0, np.pi]]):
rot_mat = at.rotvec_to_rotmat([1, 0.2, 0.3]).squeeze()
for point in points:
point_new = rot_mat @ point
pin, dist, pix = model.get_projections(point, image=0)
pin_true = model.focal_length * np.array(point_new[:2]) / np.array(point_new[2])
dist_true = model.apply_distortion(pin_true)
pix_true = (np.matmul(model.intrinsic_matrix[:, :2], dist_true).T + model.intrinsic_matrix[:, 2]).T
np.testing.assert_array_almost_equal(pin, pin_true)
np.testing.assert_array_almost_equal(dist, dist_true)
np.testing.assert_array_almost_equal(pix, pix_true)
pin, dist, pix = model.get_projections(point, image=1)
pin_true = -model.focal_length * np.array(point[:2]) / point[2]
dist_true = model.apply_distortion(pin_true)
pix_true = (np.matmul(model.intrinsic_matrix[:, :2], dist_true).T + model.intrinsic_matrix[:, 2]).T
np.testing.assert_array_almost_equal(pin, pin_true)
np.testing.assert_array_almost_equal(dist, dist_true)
np.testing.assert_array_almost_equal(pix, pix_true)
def test_project_onto_image(self):
points = [[0, 0, 1], [-0.1, 0.2, 2.2], [[-0.1], [0.2], [2.2]], [[-0.1, 0], [0.2, 0], [2.2, 1]]]
model = self.Class(focal_length=8.7, kx=500, ky=500.5, kxy=1.5, kyx=-1.5, px=1500, py=1500.5,
radial2=1e-3, radial4=-2.2e-5, tangential_y=1e-3, tangential_x=1e-6,
pinwheel1=1e-6, pinwheel2=-2.23e-8, a1=1, a2=2, a3=-3)
temps = [0, 1, -1, 10, -10]
for temp in temps:
with self.subTest(misalignment=None, temp=temp):
for point in points:
_, __, pix = model.get_projections(point, temperature=temp)
pix_proj = model.project_onto_image(point, temperature=temp)
np.testing.assert_array_equal(pix, pix_proj)
model = self.Class(focal_length=8.7, kx=500, ky=500.5, kxy=1.5, kyx=-1.5, px=1500, py=1500.5,
radial2=1e-3, radial4=-2.2e-5, tangential_y=1e-3, tangential_x=1e-6,
pinwheel1=1e-6, pinwheel2=-2.23e-8, misalignment=[[1, 0.2, 0.3], [0, 0, np.pi]])
model.estimate_multiple_misalignments = True
with self.subTest(misalignment=[[1, 0.2, 0.3], [0, 0, np.pi]]):
for point in points:
_, __, pix = model.get_projections(point, image=0)
pix_proj = model.project_onto_image(point, image=0)
np.testing.assert_array_equal(pix, pix_proj)
_, __, pix = model.get_projections(point, image=1)
pix_proj = model.project_onto_image(point, image=1)
np.testing.assert_array_equal(pix, pix_proj)
def test_compute_pixel_jacobian(self):
def num_deriv(uvec, cmodel, delta=1e-8, image=0, temperature=0) -> np.ndarray:
uvec = np.array(uvec).reshape(3, -1)
pix_true = cmodel.project_onto_image(uvec, image=image, temperature=temperature)
uvec_pert = uvec + [[delta], [0], [0]]
pix_pert_x_f = cmodel.project_onto_image(uvec_pert, image=image, temperature=temperature)
uvec_pert = uvec + [[0], [delta], [0]]
pix_pert_y_f = cmodel.project_onto_image(uvec_pert, image=image, temperature=temperature)
uvec_pert = uvec + [[0], [0], [delta]]
pix_pert_z_f = cmodel.project_onto_image(uvec_pert, image=image, temperature=temperature)
uvec_pert = uvec - [[delta], [0], [0]]
pix_pert_x_b = cmodel.project_onto_image(uvec_pert, image=image, temperature=temperature)
uvec_pert = uvec - [[0], [delta], [0]]
pix_pert_y_b = cmodel.project_onto_image(uvec_pert, image=image, temperature=temperature)
uvec_pert = uvec - [[0], [0], [delta]]
pix_pert_z_b = cmodel.project_onto_image(uvec_pert, image=image, temperature=temperature)
return np.array([(pix_pert_x_f-pix_pert_x_b)/(2*delta),
(pix_pert_y_f-pix_pert_y_b)/(2*delta),
(pix_pert_z_f-pix_pert_z_b)/(2*delta)]).swapaxes(0, -1)
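# num_deriv above approximates each column of the pixel Jacobian with a central difference,
# (f(x + delta) - f(x - delta)) / (2 * delta), which is second-order accurate in delta; the loop
# below compares it against the analytic compute_pixel_jacobian under loose tolerances.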
model_param = {"focal_length": 100.75, "radial2": 1.5e-5, "radial4": 1.5e-5, "tangential_x": 1.5e-5,
"tangential_y": 1.5e-5, "pinwheel1": 1.5e-5, "pinwheel2": 1.5e-5, "kx": 30, "ky": 40,
"kxy": 0.5, "kyx": -0.8, "px": 4005.23, "py": 4005.23,
"misalignment": [[1e-8, 1e-9, 1e-10], [-1e-8, 2e-9, -1e-11], [2e-10, -5e-12, 1e-9]],
"a1": 1e-6, "a2": 2e-7, "a3": 3e-8}
inputs = [np.array([[0.5, 0, 1]]).T,
np.array([[0, 0.5, 1], [0.5, 0.5, 1], [-0.5, 0, 1]]).T,
np.array([[0.1, -0.5, 1], [-0.5, -0.5, 1], [5, 10, 1000.23], [1, 2, 1200.23]]).T]
temperatures = [0, 1, -1, 10.5, -10.5]
model = self.Class(**model_param)
model.estimate_multiple_misalignments = True
for temp in temperatures:
for input in inputs:
for image in range(3):
with self.subTest(image=image, temp=temp, input=input):
jac_ana = model.compute_pixel_jacobian(input, image=image, temperature=temp)
jac_num = num_deriv(input, model, image=image, temperature=temp, delta=1e-6)
np.testing.assert_allclose(jac_ana, jac_num, rtol=1e-3, atol=1e-10)
def test__compute_dcamera_point_dgnomic(self):
def num_deriv(gnomic_locations, cmodel, delta=1e-6) -> np.ndarray:
def g2u(g):
v = np.vstack([g, cmodel.focal_length*np.ones(g.shape[-1])])
return v/np.linalg.norm(v, axis=0, keepdims=True)
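# g2u lifts each 2D gnomic point onto the camera ray [x, y, focal_length] and normalizes it to a
# unit vector, so the central differences below give d(unit vector)/d(gnomic location).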
gnomic_locations = np.asarray(gnomic_locations).reshape(2, -1)
gnom_pert = gnomic_locations + [[delta], [0]]
cam_loc_pert_x_f = g2u(gnom_pert)
gnom_pert = gnomic_locations + [[0], [delta]]
cam_loc_pert_y_f = g2u(gnom_pert)
gnom_pert = gnomic_locations - [[delta], [0]]
cam_loc_pert_x_b = g2u(gnom_pert)
gnom_pert = gnomic_locations - [[0], [delta]]
cam_loc_pert_y_b = g2u(gnom_pert)
return np.array([(cam_loc_pert_x_f -cam_loc_pert_x_b)/(2*delta),
(cam_loc_pert_y_f -cam_loc_pert_y_b)/(2*delta)]).swapaxes(0, -1)
model_param = {"focal_length": 100.75, "radial2": 1.5e-8, "radial4": 1.5e-8, "tangential_x": 1.5e-8,
"tangential_y": 1.5e-8, "pinwheel1": 1.5e-8, "pinwheel2": 1.5e-8, "kx": 300, "ky": 400,
"kxy": 0.5, "kyx": -0.8, "px": 1005.23, "py": 1005.23,
"misalignment": [[1e-8, 1e-9, 1e-10], [-1e-8, 2e-9, -1e-11], [2e-10, -5e-12, 1e-9]],
"a1": 1e-6, "a2": 2e-7, "a3": 3e-8}
inputs = [np.array([[0, 0]]).T,
np.array([[0, 2000], [2000, 0], [2000, 2000]]).T,
np.array([[1000, 1000], [1000, 2000], [2000, 1000], [0, 1000], [1000, 0]]).T]
model = self.Class(**model_param)
model.estimate_multiple_misalignments = True
for input in inputs:
with self.subTest(input=input):
jac_ana = []
for gnom in input.T:
jac_ana.append(
model._compute_dcamera_point_dgnomic(gnom, np.sqrt(np.sum(gnom*gnom) + model.focal_length**2)))
jac_ana = np.array(jac_ana)
jac_num = num_deriv(input, model)
np.testing.assert_almost_equal(jac_ana, jac_num)
def test__compute_dgnomic_ddist_gnomic(self):
def num_deriv(dist_gnomic_locations, cmodel, delta=1e-6) -> np.ndarray:
def dg2g(dg):
gnomic_guess = dg.copy()
# perform the fpa
for _ in np.arange(20):
# get the distorted location assuming the current guess is correct
gnomic_guess_distorted = cmodel.apply_distortion(gnomic_guess)
# subtract off the residual distortion from the gnomic guess
gnomic_guess += dg - gnomic_guess_distorted
# check for convergence
if np.all(np.linalg.norm(gnomic_guess_distorted - dg, axis=0) <= 1e-15):
break
return gnomic_guess
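# dg2g inverts apply_distortion by fixed-point iteration: starting from the distorted location,
# the residual (distorted guess minus target) is repeatedly subtracted from the guess until the
# reapplied distortion matches the requested distorted gnomic location to within ~1e-15.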
dist_gnomic_locations = np.asarray(dist_gnomic_locations).reshape(2, -1)
dist_gnom_pert = dist_gnomic_locations + [[delta], [0]]
gnom_loc_pert_x_f = dg2g(dist_gnom_pert)
dist_gnom_pert = dist_gnomic_locations + [[0], [delta]]
gnom_loc_pert_y_f = dg2g(dist_gnom_pert)
dist_gnom_pert = dist_gnomic_locations - [[delta], [0]]
gnom_loc_pert_x_b = dg2g(dist_gnom_pert)
dist_gnom_pert = dist_gnomic_locations - [[0], [delta]]
gnom_loc_pert_y_b = dg2g(dist_gnom_pert)
return np.array([(gnom_loc_pert_x_f - gnom_loc_pert_x_b)/(2*delta),
(gnom_loc_pert_y_f - gnom_loc_pert_y_b)/(2*delta)]).swapaxes(0, -1)
model_param = {"focal_length": 100.75, "radial2": 1.5e-8, "radial4": 1.5e-8, "tangential_x": 1.5e-8,
"tangential_y": 1.5e-8, "pinwheel1": 1.5e-8, "pinwheel2": 1.5e-8, "kx": 300, "ky": 400,
"kxy": 0.5, "kyx": -0.8, "px": 1005.23, "py": 1005.23,
"misalignment": [[1e-8, 1e-9, 1e-10], [-1e-8, 2e-9, -1e-11], [2e-10, -5e-12, 1e-9]],
"a1": 1e-6, "a2": 2e-7, "a3": 3e-8}
inputs = [np.array([[0, 0]]).T,
np.array([[0, 0.1], [0.1, 0], [0.1, 0.1]]).T,
np.array([[-0.1, 0], [0, -0.1], [-0.1, -0.1], [0.1, -0.1], [-0.1, 0.1]]).T]
model = self.Class(**model_param)
model.estimate_multiple_misalignments = True
for input in inputs:
with self.subTest(input=input):
jac_ana = []
for dist_gnom in input.T:
jac_ana.append(model._compute_dgnomic_ddist_gnomic(dist_gnom))
jac_ana = np.array(jac_ana)
jac_num = num_deriv(input, model)
np.testing.assert_almost_equal(jac_ana, jac_num)
def test_compute_unit_vector_jacobian(self):
def num_deriv(pixels, cmodel, delta=1e-6, image=0, temperature=0) -> np.ndarray:
pixels = np.array(pixels).reshape(2, -1)
pix_pert = pixels + [[delta], [0]]
uvec_pert_x_f = cmodel.pixels_to_unit(pix_pert, image=image, temperature=temperature)
pix_pert = pixels + [[0], [delta]]
uvec_pert_y_f = cmodel.pixels_to_unit(pix_pert, image=image, temperature=temperature)
pix_pert = pixels - [[delta], [0]]
uvec_pert_x_b = cmodel.pixels_to_unit(pix_pert, image=image, temperature=temperature)
pix_pert = pixels - [[0], [delta]]
uvec_pert_y_b = cmodel.pixels_to_unit(pix_pert, image=image, temperature=temperature)
return np.array([(uvec_pert_x_f-uvec_pert_x_b)/(2*delta),
(uvec_pert_y_f-uvec_pert_y_b)/(2*delta)]).swapaxes(0, -1)
model_param = {"focal_length": 100.75, "radial2": 1.5e-8, "radial4": 1.5e-8, "tangential_x": 1.5e-8,
"tangential_y": 1.5e-8, "pinwheel1": 1.5e-8, "pinwheel2": 1.5e-8, "kx": 300, "ky": 400,
"kxy": 0.5, "kyx": -0.8, "px": 1005.23, "py": 1005.23,
"misalignment": [[1e-8, 1e-9, 1e-10], [-1e-8, 2e-9, -1e-11], [2e-10, -5e-12, 1e-9]],
"a1": 1e-6, "a2": 2e-7, "a3": 3e-8}
inputs = [np.array([[0, 0]]).T,
np.array([[0, 2000], [2000, 0], [2000, 2000]]).T,
np.array([[1000, 1000], [1000, 2000], [2000, 1000], [0, 1000], [1000, 0]]).T]
temperatures = [0, 1, -1, 10.5, -10.5]
model = self.Class(**model_param)
model.estimate_multiple_misalignments = True
for temp in temperatures:
for input in inputs:
for image in range(3):
with self.subTest(image=image, temp=temp, input=input):
jac_ana = model.compute_unit_vector_jacobian(input, image=image, temperature=temp)
jac_num = num_deriv(input, model, image=image, temperature=temp, delta=1e-2)
np.testing.assert_allclose(jac_ana, jac_num, rtol=1e-3, atol=1e-10)
def test__compute_ddistortion_dgnomic(self):
def num_deriv(loc, cmodel, delta=1e-8) -> np.ndarray:
loc_pert = np.array(loc) + [delta, 0]
dist_pert_x_f = cmodel.apply_distortion(loc_pert) - loc_pert
loc_pert = np.array(loc) + [0, delta]
dist_pert_y_f = cmodel.apply_distortion(loc_pert) - loc_pert
loc_pert = np.array(loc) - [delta, 0]
dist_pert_x_b = cmodel.apply_distortion(loc_pert) - loc_pert
loc_pert = np.array(loc) - [0, delta]
dist_pert_y_b = cmodel.apply_distortion(loc_pert) - loc_pert
return np.array(
[(dist_pert_x_f - dist_pert_x_b) / (2 * delta), (dist_pert_y_f - dist_pert_y_b) / (2 * delta)]).T
dist_coefs = [{"radial2": 1.5, "radial4": 0, "tangential_x": 0, "tangential_y": 0,
"pinwheel1": 0, "pinwheel2": 0},
{"radial2": 0, "radial4": 1.5, "tangential_x": 0, "tangential_y": 0,
"pinwheel1": 0, "pinwheel2": 0},
{"radial2": 0, "radial4": 0, "tangential_x": 1.5, "tangential_y": 0,
"pinwheel1": 0, "pinwheel2": 0},
{"radial2": 0, "radial4": 0, "tangential_x": 0, "tangential_y": 1.5,
"pinwheel1": 0, "pinwheel2": 0},
{"radial2": 0, "radial4": 0, "tangential_x": 0, "tangential_y": 0,
"pinwheel1": 1.5, "pinwheel2": 0},
{"radial2": 0, "radial4": 0, "tangential_x": 0, "tangential_y": 0,
"pinwheel1": 0, "pinwheel2": 1.5},
{"e1": -1.5, "e2": -1.5, "e3": -1.5, "e4": -1.5, "e5": -1.5, "e6": -1.5}]
inputs = [[1e-6, 1e-6], [1, 0], [-1, 0], [1.5, 0], [-1.5, 0], [0, 1], [0, -1], [0, 1.5], [0, -1.5], [1, 1]]
for dist_coef in dist_coefs:
model = self.Class(**dist_coef)
with self.subTest(**dist_coef):
for inp in inputs:
r = np.sqrt(inp[0] ** 2 + inp[1] ** 2)
r2 = r ** 2
r3 = r ** 3
r4 = r ** 4
num = num_deriv(inp, model)
ana = model._compute_ddistortion_dgnomic(np.array(inp), r, r2, r3, r4)
np.testing.assert_allclose(num, ana, atol=1e-10)
def test__compute_dpixel_ddistorted_gnomic(self):
def num_deriv(loc, cmodel, delta=1e-8, temperature=0) -> np.ndarray:
loc_pert = np.array(loc) + [delta, 0]
loc_pert *= cmodel.get_temperature_scale(temperature)
pix_pert_x_f = cmodel.intrinsic_matrix[:, :2] @ loc_pert + cmodel.intrinsic_matrix[:, 2]
loc_pert = np.array(loc) + [0, delta]
loc_pert *= cmodel.get_temperature_scale(temperature)
pix_pert_y_f = cmodel.intrinsic_matrix[:, :2] @ loc_pert + cmodel.intrinsic_matrix[:, 2]
loc_pert = np.array(loc) - [delta, 0]
loc_pert *= cmodel.get_temperature_scale(temperature)
pix_pert_x_b = cmodel.intrinsic_matrix[:, :2] @ loc_pert + cmodel.intrinsic_matrix[:, 2]
loc_pert = np.array(loc) - [0, delta]
loc_pert *= cmodel.get_temperature_scale(temperature)
pix_pert_y_b = cmodel.intrinsic_matrix[:, :2] @ loc_pert + cmodel.intrinsic_matrix[:, 2]
return np.array(
[(pix_pert_x_f - pix_pert_x_b) / (2 * delta), (pix_pert_y_f - pix_pert_y_b) / (2 * delta)]).T
intrins_coefs = [{"kx": 1.5, "kxy": 0, "ky": 0, "kyx": 0, "px": 0, "py": 0},
{"kx": 0, "kxy": 1.5, "ky": 0, "kyx": 0, "px": 0, "py": 0},
{"kx": 0, "kxy": 0, "ky": 1.5, "kyx": 0, "px": 0, "py": 0},
{"kx": 0, "kxy": 0, "ky": 0, "kyx": 1.5, "px": 0, "py": 0},
{"kx": 0, "kxy": 0, "ky": 0, "kyx": 0, "px": 1.5, "py": 0},
{"kx": 0, "kxy": 0, "ky": 0, "kyx": 0, "px": 0, "py": 1.5},
{"kx": 0, "kxy": 0, "ky": 0, "kyx": 0, "px": 0, "py": 0, "a1": 1.5, "a2": 0, "a3": 0},
{"kx": 0, "kxy": 0, "ky": 0, "kyx": 0, "px": 0, "py": 0, "a1": 0, "a2": 1.5, "a3": 0},
{"kx": 0, "kxy": 0, "ky": 0, "kyx": 0, "px": 0, "py": 0, "a1": 0, "a2": 0, "a3": 1.5},
{"kx": 1.5, "kxy": 1.5, "ky": 1.5, "kyx": 1.5, "px": 1.5, "py": 1.5,
"a1": 1.5, "a2": 1.5, "a3": 1.5}]
inputs = [[1e-6, 1e-6], [1, 0], [-1, 0], [1.5, 0], [-1.5, 0], [0, 1], [0, -1], [0, 1.5], [0, -1.5], [1, 1]]
temps = [0, 1, -1, 10.5, -10.5]
for temp in temps:
for intrins_coef in intrins_coefs:
model = self.Class(**intrins_coef)
with self.subTest(**intrins_coef, temp=temp):
for inp in inputs:
num = num_deriv(inp, model, temperature=temp)
ana = model._compute_dpixel_ddistorted_gnomic(temperature=temp)
np.testing.assert_allclose(num, ana, atol=1e-10)
def test__compute_dpixel_dintrinsic(self):
def num_deriv(loc, cmodel, delta=1e-6) -> np.ndarray:
model_pert = cmodel.copy()
model_pert.kx += delta
pix_pert_kx_f = model_pert.intrinsic_matrix[:, :2] @ loc + model_pert.intrinsic_matrix[:, 2]
model_pert = cmodel.copy()
model_pert.kxy += delta
pix_pert_kxy_f = model_pert.intrinsic_matrix[:, :2] @ loc + model_pert.intrinsic_matrix[:, 2]
model_pert = cmodel.copy()
model_pert.kyx += delta
pix_pert_kyx_f = model_pert.intrinsic_matrix[:, :2] @ loc + model_pert.intrinsic_matrix[:, 2]
model_pert = cmodel.copy()
model_pert.ky += delta
pix_pert_ky_f = model_pert.intrinsic_matrix[:, :2] @ loc + model_pert.intrinsic_matrix[:, 2]
model_pert = cmodel.copy()
model_pert.px += delta
pix_pert_px_f = model_pert.intrinsic_matrix[:, :2] @ loc + model_pert.intrinsic_matrix[:, 2]
model_pert = cmodel.copy()
model_pert.py += delta
pix_pert_py_f = model_pert.intrinsic_matrix[:, :2] @ loc + model_pert.intrinsic_matrix[:, 2]
model_pert = cmodel.copy()
model_pert.kx -= delta
pix_pert_kx_b = model_pert.intrinsic_matrix[:, :2] @ loc + model_pert.intrinsic_matrix[:, 2]
model_pert = cmodel.copy()
model_pert.kxy -= delta
pix_pert_kxy_b = model_pert.intrinsic_matrix[:, :2] @ loc + model_pert.intrinsic_matrix[:, 2]
model_pert = cmodel.copy()
model_pert.kyx -= delta
pix_pert_kyx_b = model_pert.intrinsic_matrix[:, :2] @ loc + model_pert.intrinsic_matrix[:, 2]
model_pert = cmodel.copy()
model_pert.ky -= delta
pix_pert_ky_b = model_pert.intrinsic_matrix[:, :2] @ loc + model_pert.intrinsic_matrix[:, 2]
model_pert = cmodel.copy()
model_pert.px -= delta
pix_pert_px_b = model_pert.intrinsic_matrix[:, :2] @ loc + model_pert.intrinsic_matrix[:, 2]
model_pert = cmodel.copy()
model_pert.py -= delta
pix_pert_py_b = model_pert.intrinsic_matrix[:, :2] @ loc + model_pert.intrinsic_matrix[:, 2]
return np.array([(pix_pert_kx_f - pix_pert_kx_b) / (2 * delta),
(pix_pert_kxy_f - pix_pert_kxy_b) / (2 * delta),
(pix_pert_kyx_f - pix_pert_kyx_b) / (2 * delta),
(pix_pert_ky_f - pix_pert_ky_b) / (2 * delta),
(pix_pert_px_f - pix_pert_px_b) / (2 * delta),
(pix_pert_py_f - pix_pert_py_b) / (2 * delta)]).T
intrins_coefs = [{"kx": 1.5, "kxy": 0, "ky": 0, "kyx": 0, "px": 0, "py": 0},
{"kx": 0, "kxy": 1.5, "ky": 0, "kyx": 0, "px": 0, "py": 0},
{"kx": 0, "kxy": 0, "ky": 1.5, "kyx": 0, "px": 0, "py": 0},
{"kx": 0, "kxy": 0, "ky": 0, "kyx": 1.5, "px": 0, "py": 0},
{"kx": 0, "kxy": 0, "ky": 0, "kyx": 0, "px": 1.5, "py": 0},
{"kx": 0, "kxy": 0, "ky": 0, "kyx": 0, "px": 0, "py": 1.5},
{"kx": 1.5, "kxy": 1.5, "ky": 1.5, "kyx": 1.5, "px": 1.5, "py": 1.5}]
inputs = [[1e-6, 1e-6], [1, 0], [-1, 0], [1.5, 0], [-1.5, 0], [0, 1], [0, -1], [0, 1.5], [0, -1.5], [1, 1]]
for intrins_coef in intrins_coefs:
model = self.Class(**intrins_coef)
with self.subTest(**intrins_coef):
for inp in inputs:
num = num_deriv(inp, model)
ana = model._compute_dpixel_dintrinsic(np.array(inp))
np.testing.assert_allclose(num, ana, atol=1e-10)
def test__compute_ddistorted_gnomic_ddistortion(self):
def num_deriv(loc, cmodel, delta=1e-8) -> np.ndarray:
model_pert = cmodel.copy()
model_pert.radial2 += delta
loc_pert_r2_f = model_pert.apply_distortion(loc)
model_pert = cmodel.copy()
model_pert.radial4 += delta
loc_pert_r4_f = model_pert.apply_distortion(loc)
model_pert = cmodel.copy()
model_pert.tangential_y += delta
loc_pert_ty_f = model_pert.apply_distortion(loc)
model_pert = cmodel.copy()
model_pert.tangential_x += delta
loc_pert_tx_f = model_pert.apply_distortion(loc)
model_pert = cmodel.copy()
model_pert.pinwheel1 += delta
loc_pert_p1_f = model_pert.apply_distortion(loc)
model_pert = cmodel.copy()
model_pert.pinwheel2 += delta
loc_pert_p2_f = model_pert.apply_distortion(loc)
model_pert = cmodel.copy()
model_pert.radial2 -= delta
loc_pert_r2_b = model_pert.apply_distortion(loc)
model_pert = cmodel.copy()
model_pert.radial4 -= delta
loc_pert_r4_b = model_pert.apply_distortion(loc)
model_pert = cmodel.copy()
model_pert.tangential_y -= delta
loc_pert_ty_b = model_pert.apply_distortion(loc)
model_pert = cmodel.copy()
model_pert.tangential_x -= delta
loc_pert_tx_b = model_pert.apply_distortion(loc)
model_pert = cmodel.copy()
model_pert.pinwheel1 -= delta
loc_pert_p1_b = model_pert.apply_distortion(loc)
model_pert = cmodel.copy()
model_pert.pinwheel2 -= delta
loc_pert_p2_b = model_pert.apply_distortion(loc)
return np.array([(loc_pert_r2_f - loc_pert_r2_b) / (2 * delta),
(loc_pert_r4_f - loc_pert_r4_b) / (2 * delta),
(loc_pert_ty_f - loc_pert_ty_b) / (2 * delta),
(loc_pert_tx_f - loc_pert_tx_b) / (2 * delta),
(loc_pert_p1_f - loc_pert_p1_b) / (2 * delta),
(loc_pert_p2_f - loc_pert_p2_b) / (2 * delta)]).T
dist_coefs = [{"radial2": 1.5, "radial4": 0, "tangential_x": 0, "tangential_y": 0,
"pinwheel1": 0, "pinwheel2": 0},
{"radial2": 0, "radial4": 1.5, "tangential_x": 0, "tangential_y": 0,
"pinwheel1": 0, "pinwheel2": 0},
{"radial2": 0, "radial4": 0, "tangential_x": 1.5, "tangential_y": 0,
"pinwheel1": 0, "pinwheel2": 0},
{"radial2": 0, "radial4": 0, "tangential_x": 0, "tangential_y": 1.5,
"pinwheel1": 0, "pinwheel2": 0},
{"radial2": 0, "radial4": 0, "tangential_x": 0, "tangential_y": 0,
"pinwheel1": 1.5, "pinwheel2": 0},
{"radial2": 0, "radial4": 0, "tangential_x": 0, "tangential_y": 0,
"pinwheel1": 0, "pinwheel2": 1.5}]
inputs = [[1e-6, 1e-6], [1, 0], [-1, 0], [1.5, 0], [-1.5, 0], [0, 1], [0, -1], [0, 1.5], [0, -1.5], [1, 1]]
for dist_coef in dist_coefs:
model = self.Class(**dist_coef)
with self.subTest(**dist_coef):
for inp in inputs:
r = np.sqrt(inp[0] ** 2 + inp[1] ** 2)
r2 = r ** 2
r3 = r ** 3
r4 = r ** 4
num = num_deriv(inp, model)
ana = model._compute_ddistorted_gnomic_ddistortion(np.array(inp), r, r2, r3, r4)
np.testing.assert_allclose(num, ana, atol=1e-10)
def test_get_jacobian_row(self):
def num_deriv(loc, cmodel, delta=1e-8, image=0, temperature=0) -> np.ndarray:
model_pert = cmodel.copy()
model_pert.focal_length += delta
pix_pert_f_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.focal_length -= delta
pix_pert_f_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.kx += delta
pix_pert_kx_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.kxy += delta
pix_pert_kxy_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.kyx += delta
pix_pert_kyx_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.ky += delta
pix_pert_ky_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.px += delta
pix_pert_px_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.py += delta
pix_pert_py_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.kx -= delta
pix_pert_kx_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.kxy -= delta
pix_pert_kxy_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.kyx -= delta
pix_pert_kyx_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.ky -= delta
pix_pert_ky_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.px -= delta
pix_pert_px_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.py -= delta
pix_pert_py_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.radial2 += delta
pix_pert_r2_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.radial4 += delta
pix_pert_r4_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.tangential_y += delta
pix_pert_ty_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.tangential_x += delta
pix_pert_tx_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.pinwheel1 += delta
pix_pert_p1_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.pinwheel2 += delta
pix_pert_p2_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.radial2 -= delta
pix_pert_r2_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.radial4 -= delta
pix_pert_r4_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.tangential_y -= delta
pix_pert_ty_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.tangential_x -= delta
pix_pert_tx_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.pinwheel1 -= delta
pix_pert_p1_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.pinwheel2 -= delta
pix_pert_p2_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.a1 += delta
pix_pert_a1_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.a2 += delta
pix_pert_a2_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.a3 += delta
pix_pert_a3_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.a1 -= delta
pix_pert_a1_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.a2 -= delta
pix_pert_a2_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.a3 -= delta
pix_pert_a3_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
delta_m = 1e-6
model_pert = cmodel.copy()
model_pert.misalignment[image][0] += delta_m
pix_pert_mx_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.misalignment[image][1] += delta_m
pix_pert_my_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.misalignment[image][2] += delta_m
pix_pert_mz_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.misalignment[image][0] -= delta_m
pix_pert_mx_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.misalignment[image][1] -= delta_m
pix_pert_my_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.misalignment[image][2] -= delta_m
pix_pert_mz_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
return np.vstack([(pix_pert_f_f - pix_pert_f_b) / (delta * 2),
(pix_pert_kx_f - pix_pert_kx_b) / (delta * 2),
(pix_pert_kxy_f - pix_pert_kxy_b) / (delta * 2),
(pix_pert_kyx_f - pix_pert_kyx_b) / (delta * 2),
(pix_pert_ky_f - pix_pert_ky_b) / (delta * 2),
(pix_pert_px_f - pix_pert_px_b) / (delta * 2),
(pix_pert_py_f - pix_pert_py_b) / (delta * 2),
(pix_pert_r2_f - pix_pert_r2_b) / (delta * 2),
(pix_pert_r4_f - pix_pert_r4_b) / (delta * 2),
(pix_pert_ty_f - pix_pert_ty_b) / (delta * 2),
(pix_pert_tx_f - pix_pert_tx_b) / (delta * 2),
(pix_pert_p1_f - pix_pert_p1_b) / (delta * 2),
(pix_pert_p2_f - pix_pert_p2_b) / (delta * 2),
(pix_pert_a1_f - pix_pert_a1_b) / (delta * 2),
(pix_pert_a2_f - pix_pert_a2_b) / (delta * 2),
(pix_pert_a3_f - pix_pert_a3_b) / (delta * 2),
np.zeros((image * 3, 2)),
(pix_pert_mx_f - pix_pert_mx_b) / (delta_m * 2),
(pix_pert_my_f - pix_pert_my_b) / (delta_m * 2),
(pix_pert_mz_f - pix_pert_mz_b) / (delta_m * 2)]).T
model_param = {"focal_length": 100.75, "radial2": 1.5e-5, "radial4": 1.5e-5, "tangential_x": 1.5e-5,
"tangential_y": 1.5e-5, "pinwheel1": 1.5e-5, "pinwheel2": 1.5e-5, "kx": 30, "ky": 40,
"kxy": 0.5, "kyx": -0.8, "px": 4005.23, "py": 4005.23,
"misalignment": [[1e-8, 1e-9, 1e-10], [-1e-8, 2e-9, -1e-11]],
"a1": 1e-6, "a2": 2e-7, "a3": 3e-8}
inputs = [[0.1, 0, 1], [0, 0.1, 1], [0.1, 0.1, 1], [-0.1, 0, 1], [0, -0.1, 1], [-0.1, -0.1, 1],
[5, 10, 1000.23], [[1], [2], [1200.23]]]
temps = [0, 1, -1, 10.5, -10.5]
model = self.Class(**model_param)
model.estimate_multiple_misalignments = True
for temp in temps:
for inp in inputs:
with self.subTest(temp=temp, inp=inp):
num = num_deriv(inp, model, delta=1e-3, temperature=temp)
ana = model._get_jacobian_row(np.array(inp), 0, 1, temperature=temp)
np.testing.assert_allclose(ana, num, rtol=1e-3, atol=1e-10)
num = num_deriv(inp, model, delta=1e-3, image=1, temperature=temp)
ana = model._get_jacobian_row(np.array(inp), 1, 2, temperature=temp)
np.testing.assert_allclose(ana, num, atol=1e-10, rtol=1e-3)
def test_compute_jacobian(self):
def num_deriv(loc, cmodel, delta=1e-8, image=0, nimages=1, temperature=0) -> np.ndarray:
model_pert = cmodel.copy()
model_pert.focal_length += delta
pix_pert_f_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.focal_length -= delta
pix_pert_f_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.kx += delta
pix_pert_kx_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.kxy += delta
pix_pert_kxy_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.kyx += delta
pix_pert_kyx_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.ky += delta
pix_pert_ky_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.px += delta
pix_pert_px_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.py += delta
pix_pert_py_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.kx -= delta
pix_pert_kx_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.kxy -= delta
pix_pert_kxy_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.kyx -= delta
pix_pert_kyx_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.ky -= delta
pix_pert_ky_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.px -= delta
pix_pert_px_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.py -= delta
pix_pert_py_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.radial2 += delta
pix_pert_r2_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.radial4 += delta
pix_pert_r4_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.tangential_y += delta
pix_pert_ty_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.tangential_x += delta
pix_pert_tx_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.pinwheel1 += delta
pix_pert_p1_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.pinwheel2 += delta
pix_pert_p2_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.radial2 -= delta
pix_pert_r2_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.radial4 -= delta
pix_pert_r4_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.tangential_y -= delta
pix_pert_ty_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.tangential_x -= delta
pix_pert_tx_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.pinwheel1 -= delta
pix_pert_p1_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.pinwheel2 -= delta
pix_pert_p2_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.a1 += delta
pix_pert_a1_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.a2 += delta
pix_pert_a2_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.a3 += delta
pix_pert_a3_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.a1 -= delta
pix_pert_a1_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.a2 -= delta
pix_pert_a2_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.a3 -= delta
pix_pert_a3_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.misalignment[image][0] += delta
pix_pert_mx_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.misalignment[image][1] += delta
pix_pert_my_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.misalignment[image][2] += delta
pix_pert_mz_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.misalignment[image][0] -= delta
pix_pert_mx_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.misalignment[image][1] -= delta
pix_pert_my_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.misalignment[image][2] -= delta
pix_pert_mz_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
return np.vstack([(pix_pert_f_f - pix_pert_f_b) / (delta * 2),
(pix_pert_kx_f - pix_pert_kx_b) / (delta * 2),
(pix_pert_kxy_f - pix_pert_kxy_b) / (delta * 2),
(pix_pert_kyx_f - pix_pert_kyx_b) / (delta * 2),
(pix_pert_ky_f - pix_pert_ky_b) / (delta * 2),
(pix_pert_px_f - pix_pert_px_b) / (delta * 2),
(pix_pert_py_f - pix_pert_py_b) / (delta * 2),
(pix_pert_r2_f - pix_pert_r2_b) / (delta * 2),
(pix_pert_r4_f - pix_pert_r4_b) / (delta * 2),
(pix_pert_ty_f - pix_pert_ty_b) / (delta * 2),
(pix_pert_tx_f - pix_pert_tx_b) / (delta * 2),
(pix_pert_p1_f - pix_pert_p1_b) / (delta * 2),
(pix_pert_p2_f - pix_pert_p2_b) / (delta * 2),
(pix_pert_a1_f - pix_pert_a1_b) / (delta * 2),
(pix_pert_a2_f - pix_pert_a2_b) / (delta * 2),
(pix_pert_a3_f - pix_pert_a3_b) / (delta * 2),
np.zeros((image * 3, 2)),
(pix_pert_mx_f - pix_pert_mx_b) / (delta * 2),
(pix_pert_my_f - pix_pert_my_b) / (delta * 2),
(pix_pert_mz_f - pix_pert_mz_b) / (delta * 2),
np.zeros(((nimages - image - 1) * 3, 2))]).T
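# The zero blocks preserve the parameter ordering of the full Jacobian: misalignment partials for
# images before `image` (image * 3 entries) and after it ((nimages - image - 1) * 3 entries) are
# zero, since each image's rotation-vector parameters only affect that image's projections.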
model_param = {"focal_length": 100.75, "radial2": 1.5e-5, "radial4": 1.5e-5, "tangential_x": 1.5e-5,
"tangential_y": 1.5e-5, "pinwheel1": 1.5e-5, "pinwheel2": 1.5e-5, "kx": 30, "ky": 40,
"kxy": 0.5, "kyx": -0.8, "px": 4005.23, "py": 4005.23,
"misalignment": [[1e-8, 1e-9, 1e-10], [-1e-8, 2e-9, -1e-11], [2e-10, -5e-12, 1e-9]],
"a1": 1e-6, "a2": 2e-7, "a3": 3e-8}
inputs = [np.array([[0.5, 0, 1]]).T,
np.array([[0, 0.5, 1], [0.5, 0.5, 1], [-0.5, 0, 1]]).T,
np.array([[0.1, -0.5, 1], [-0.5, -0.5, 1], [5, 10, 1000.23], [1, 2, 1200.23]]).T]
temperatures = [0, 1, -1, 10.5, -10.5, [1, -10, 10]]
model = self.Class(**model_param, estimation_parameters=['intrinsic', 'temperature dependence',
'multiple misalignments'])
for temp in temperatures:
with self.subTest(temp=temp):
model.use_a_priori = False
jac_ana = model.compute_jacobian(inputs, temperature=temp)
jac_num = []
numim = len(inputs)
for ind, inp in enumerate(inputs):
if isinstance(temp, list):
templ = temp[ind]
else:
templ = temp
for vec in inp.T:
jac_num.append(num_deriv(vec.T, model, delta=1e-4, image=ind, nimages=numim, temperature=templ))
np.testing.assert_allclose(jac_ana, np.vstack(jac_num), rtol=1e-3, atol=1e-10)
model.use_a_priori = True
jac_ana = model.compute_jacobian(inputs, temperature=temp)
jac_num = []
numim = len(inputs)
for ind, inp in enumerate(inputs):
if isinstance(temp, list):
templ = temp[ind]
else:
templ = temp
for vec in inp.T:
jac_num.append(num_deriv(vec.T, model, delta=1e-4, image=ind, nimages=numim, temperature=templ))
jac_num = np.vstack(jac_num)
jac_num = np.pad(jac_num, [(0, jac_num.shape[1]), (0, 0)], 'constant', constant_values=0)
jac_num[-jac_num.shape[1]:] = np.eye(jac_num.shape[1])
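# With use_a_priori enabled, the expected Jacobian gains an identity block appended below the
# measurement rows, one row per estimated parameter, matching how the prior parameter constraints
# are expected to be stacked onto the system.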
np.testing.assert_allclose(jac_ana, jac_num, rtol=1e-3, atol=1e-10)
def test_apply_update(self):
model_param = {"focal_length": 0, "radial2": 0, "radial4": 0, "tangential_x": 0,
"tangential_y": 0, "pinwheel1": 0, "pinwheel2": 0, "kx": 0, "ky": 0,
"kxy": 0, "kyx": 0, "px": 0,
"misalignment": [[0, 0, 0], [0, 0, 0]],
"a1": 0, "a2": 0, "a3": 0}
model = self.Class(**model_param, estimation_parameters=['intrinsic', "temperature dependence",
'multiple misalignments'])
update_vec = np.arange(22)
model.apply_update(update_vec)
keys = list(model_param.keys())
keys.remove('misalignment')
for key in keys:
self.assertEqual(getattr(model, key), update_vec[model.element_dict[key][0]])
for ind, vec in enumerate(update_vec[16:].reshape(-1, 3)):
np.testing.assert_array_almost_equal(at.Rotation(vec).q, at.Rotation(model.misalignment[ind]).q)
def test_pixels_to_gnomic(self):
intrins_param = {"kx": 3000, "ky": 4000, "kxy": 0.5, "kyx": -0.8, "px": 4005.23, 'py': 2000.33,
'a1': 1e-6, 'a2': 1e-7, 'a3': 1e-8}
dist_coefs = [{"radial2": 1.5e-3, "radial4": 0, "tangential_x": 0, "tangential_y": 0,
"pinwheel1": 0, "pinwheel2": 0},
{"radial2": 0, "radial4": 1.5e-3, "tangential_x": 0, "tangential_y": 0,
"pinwheel1": 0, "pinwheel2": 0},
{"radial2": 0, "radial4": 0, "tangential_x": 1.5e-3, "tangential_y": 0,
"pinwheel1": 0, "pinwheel2": 0},
{"radial2": 0, "radial4": 0, "tangential_x": 0, "tangential_y": 1.5e-3,
"pinwheel1": 0, "pinwheel2": 0},
{"radial2": 0, "radial4": 0, "tangential_x": 0, "tangential_y": 0,
"pinwheel1": 1.5e-3, "pinwheel2": 0},
{"radial2": 0, "radial4": 0, "tangential_x": 0, "tangential_y": 0,
"pinwheel1": 0, "pinwheel2": 1.5e-3},
{"radial2": 1.5e-3, "radial4": 1.5e-3, "tangential_x": 1.5e-3, "tangential_y": 1.5e-3,
"pinwheel1": 1.5e-3, "pinwheel2": 1.5e-3}]
pinhole = [[0, 0], [1, 0], [-1, 0], [1.5, 0], [-1.5, 0], [[1.5], [0]], [[1.5, -1], [0, 0]],
[0, 1], [0, -1], [0, 1.5], [0, -1.5], [[0], [1.5]], [[0, 0], [1.5, -1]], [1, 1]]
temperatures = [0, 1, -1, 10.5, -10.5]
for temp in temperatures:
for dist in dist_coefs:
model = self.Class(**dist, **intrins_param)
for gnoms in pinhole:
with self.subTest(**dist, temp=temp, gnoms=gnoms):
mm_dist = model.apply_distortion(np.array(gnoms))
mm_dist *= model.get_temperature_scale(temp)
pix_dist = ((model.intrinsic_matrix[:, :2] @ mm_dist).T + model.intrinsic_matrix[:, 2]).T
mm_undist = model.pixels_to_gnomic(pix_dist, temperature=temp)
np.testing.assert_allclose(mm_undist, gnoms, atol=1e-13)
def test_undistort_pixels(self):
intrins_param = {"kx": 3000, "ky": 4000, "kxy": 0.5, "kyx": -0.8, "px": 4005.23, 'py': 2000.33,
"a1": 1e-3, "a2": 1e-4, "a3": 1e-5}
dist_coefs = [{"radial2": 1.5e-3, "radial4": 0, "tangential_x": 0, "tangential_y": 0,
"pinwheel1": 0, "pinwheel2": 0},
{"radial2": 0, "radial4": 1.5e-3, "tangential_x": 0, "tangential_y": 0,
"pinwheel1": 0, "pinwheel2": 0},
{"radial2": 0, "radial4": 0, "tangential_x": 1.5e-3, "tangential_y": 0,
"pinwheel1": 0, "pinwheel2": 0},
{"radial2": 0, "radial4": 0, "tangential_x": 0, "tangential_y": 1.5e-3,
"pinwheel1": 0, "pinwheel2": 0},
{"radial2": 0, "radial4": 0, "tangential_x": 0, "tangential_y": 0,
"pinwheel1": 1.5e-3, "pinwheel2": 0},
{"radial2": 0, "radial4": 0, "tangential_x": 0, "tangential_y": 0,
"pinwheel1": 0, "pinwheel2": 1.5e-3},
{"radial2": 1.5e-3, "radial4": 1.5e-3, "tangential_x": 1.5e-3, "tangential_y": 1.5e-3,
"pinwheel1": 1.5e-3, "pinwheel2": 1.5e-3}]
pinhole = [[0, 0], [1, 0], [-1, 0], [1.5, 0], [-1.5, 0], [[1.5], [0]], [[1.5, -1], [0, 0]],
[0, 1], [0, -1], [0, 1.5], [0, -1.5], [[0], [1.5]], [[0, 0], [1.5, -1]], [1, 1]]
temperatures = [0, 1, -1, 10.5, -10.5]
for temp in temperatures:
for dist in dist_coefs:
model = self.Class(**dist, **intrins_param)
for gnoms in pinhole:
with self.subTest(**dist, temp=temp, gnoms=gnoms):
gnoms = np.array(gnoms).astype(np.float64)
mm_dist = model.apply_distortion(np.array(gnoms))
mm_dist *= model.get_temperature_scale(temp)
pix_dist = ((model.intrinsic_matrix[:, :2] @ mm_dist).T + model.intrinsic_matrix[:, 2]).T
pix_undist = model.undistort_pixels(pix_dist, temperature=temp)
gnoms *= model.get_temperature_scale(temp)
pix_pinhole = ((model.intrinsic_matrix[:, :2] @ gnoms).T + model.intrinsic_matrix[:, 2]).T
np.testing.assert_allclose(pix_undist, pix_pinhole, atol=1e-13)
def test_pixels_to_unit(self):
intrins_param = {"focal_length": 32.7, "kx": 3000, "ky": 4000, "kxy": 0.5, "kyx": -0.8,
"px": 4005.23, 'py': 2000.33, "a1": 1e-6, "a2": 1e-7, "a3": -3e-5}
dist_coefs = [{"radial2": 1.5e-3, "radial4": 0, "tangential_x": 0, "tangential_y": 0,
"pinwheel1": 0, "pinwheel2": 0},
{"radial2": 0, "radial4": 1.5e-3, "tangential_x": 0, "tangential_y": 0,
"pinwheel1": 0, "pinwheel2": 0},
{"radial2": 0, "radial4": 0, "tangential_x": 1.5e-3, "tangential_y": 0,
"pinwheel1": 0, "pinwheel2": 0},
{"radial2": 0, "radial4": 0, "tangential_x": 0, "tangential_y": 1.5e-3,
"pinwheel1": 0, "pinwheel2": 0},
{"radial2": 0, "radial4": 0, "tangential_x": 0, "tangential_y": 0,
"pinwheel1": 1.5e-3, "pinwheel2": 0},
{"radial2": 0, "radial4": 0, "tangential_x": 0, "tangential_y": 0,
"pinwheel1": 0, "pinwheel2": 1.5e-3},
{"radial2": 1.5e-3, "radial4": 1.5e-3, "tangential_x": 1.5e-3, "tangential_y": 1.5e-3,
"pinwheel1": 1.5e-3, "pinwheel2": 1.5e-3},
{"misalignment": np.array([1e-11, 2e-12, -1e-10])},
{"misalignment": np.array([[1e-11, 2e-12, -1e-10], [-1e-13, 1e-11, 2e-12]]),
"estimation_parameters": "multiple misalignments"}]
camera_vecs = [[0, 0, 1], [0.01, 0, 1], [-0.01, 0, 1], [0, 0.01, 1], [0, -0.01, 1], [0.01, 0.01, 1],
[-0.01, -0.01, 1], [[0.01, -0.01], [-0.01, 0.01], [1, 1]]]
temperatures = [0, 1, -1, 10.5, -10.5]
for temp in temperatures:
for dist in dist_coefs:
model = self.Class(**dist, **intrins_param)
for vec in camera_vecs:
with self.subTest(**dist, temp=temp, vec=vec):
pixel_loc = model.project_onto_image(vec, image=-1, temperature=temp)
unit_vec = model.pixels_to_unit(pixel_loc, image=-1, temperature=temp)
unit_true = np.array(vec).astype(np.float64)
unit_true /= np.linalg.norm(unit_true, axis=0, keepdims=True)
np.testing.assert_allclose(unit_vec, unit_true, atol=1e-13)
def test_overwrite(self):
model1 = self.Class(field_of_view=10, intrinsic_matrix=np.array([[1, 2, 3], [4, 5, 6]]),
distortion_coefficients=np.array([1, 2, 3, 4, 5, 6]), focal_length=60,
misalignment=[[1, 2, 3], [4, 5, 6]], use_a_priori=False,
estimation_parameters=['multiple misalignments'], a1=0, a2=3, a3=5)
model2 = self.Class(field_of_view=20, intrinsic_matrix=np.array([[11, 12, 13], [14, 15, 16]]),
distortion_coefficients=np.array([11, 12, 13, 14, 15, 16]), focal_length=160,
misalignment=[[11, 12, 13], [14, 15, 16]], use_a_priori=True,
estimation_parameters=['single misalignment'], a1=-100, a2=-200, a3=-300)
modeltest = model1.copy()
modeltest.overwrite(model2)
self.assertEqual(modeltest, model2)
modeltest = model2.copy()
modeltest.overwrite(model1)
self.assertEqual(modeltest, model1)
def test_intrinsic_matrix_inv(self):
model = self.Class(kx=5, ky=10, kxy=20, kyx=-30.4, px=100, py=-5)
np.testing.assert_array_almost_equal(
model.intrinsic_matrix @ np.vstack([model.intrinsic_matrix_inv, [0, 0, 1]]),
[[1, 0, 0], [0, 1, 0]])
np.testing.assert_array_almost_equal(
model.intrinsic_matrix_inv @ np.vstack([model.intrinsic_matrix, [0, 0, 1]]),
[[1, 0, 0], [0, 1, 0]])
def test_distort_pixels(self):
model = self.Class(kx=1000, ky=-950.5, px=4500, py=139.32, a1=1e-3, a2=1e-4, a3=1e-5,
kxy=0.5, kyx=-8, radial2=1e-5, radial4=1e-5, pinwheel2=1e-7, pinwheel1=-1e-12,
tangential_x=1e-6, tangential_y=2e-12)
pixels = [[0, 1], [1, 0], [-1, 0], [0, -1], [9000., 200.2],
[[4500, 100, 10.98], [0, 139.23, 200.3]]]
temperatures = [0, 1, -1, 10.5, -10.5]
for pix in pixels:
for temp in temperatures:
with self.subTest(pix=pix, temp=temp):
undist_pix = model.undistort_pixels(pix, temperature=temp)
dist_pix = model.distort_pixels(undist_pix, temperature=temp)
np.testing.assert_allclose(dist_pix, pix, atol=1e-10)
def test_distortion_map(self):
model = self.Class(kx=100, ky=-985.234, px=1000, py=1095, kxy=10, kyx=-5,
e1=1e-6, e2=1e-12, e3=-4e-10, e5=6e-7, e6=-1e-5, e4=1e-7,
a1=1e-6, a2=-1e-7, a3=4e-12)
rows, cols, dist = model.distortion_map((2000, 250), step=10)
rl, cl = np.arange(0, 2000, 10), np.arange(0, 250, 10)
rs, cs = np.meshgrid(rl, cl, indexing='ij')
np.testing.assert_array_equal(rows, rs)
np.testing.assert_array_equal(cols, cs)
distl = model.distort_pixels(np.vstack([cs.flatten(), rs.flatten()]))
np.testing.assert_array_equal(distl - np.vstack([cs.flatten(), rs.flatten()]), dist)
class TestBrownModel(TestPinholeModel):
def setUp(self):
self.Class = BrownModel
# Not supported for this model
test__compute_dgnomic_dfocal_length = None
def test___init__(self):
model = self.Class(intrinsic_matrix=np.array([[1, 2, 3], [4, 5, 6]]), field_of_view=20.5,
use_a_priori=True, misalignment=[np.zeros(3), np.ones(3)],
distortion_coefficients=np.array([1, 2, 3, 4, 5]),
estimation_parameters='basic intrinsic',
a1=1, a2=2, a3=3)
np.testing.assert_array_equal(model.intrinsic_matrix, [[1, 2, 3], [4, 5, 6]])
self.assertEqual(model.focal_length, 1)
self.assertEqual(model.field_of_view, 20.5)
self.assertTrue(model.use_a_priori)
self.assertEqual(model.estimation_parameters, ['basic intrinsic'])
self.assertEqual(model.a1, 1)
self.assertEqual(model.a2, 2)
self.assertEqual(model.a3, 3)
np.testing.assert_array_equal(model.distortion_coefficients, np.arange(1, 6))
model = self.Class(kx=1, fy=2, px=4, py=5, field_of_view=20.5,
use_a_priori=True, misalignment=[np.zeros(3), np.ones(3)], kxy=80,
estimation_parameters=['kx', 'px'], n_rows=500, n_cols=600,
radial2=1, radial4=2, k3=3, p1=4, tiptilt_x=5)
np.testing.assert_array_equal(model.intrinsic_matrix, [[1, 80, 4], [0, 2, 5]])
self.assertEqual(model.focal_length, 1)
self.assertEqual(model.field_of_view, 20.5)
self.assertTrue(model.use_a_priori)
self.assertEqual(model.estimation_parameters, ['kx', 'px'])
self.assertEqual(model.n_rows, 500)
self.assertEqual(model.n_cols, 600)
np.testing.assert_array_equal(model.distortion_coefficients, np.arange(1, 6))
def test_fx(self):
model = self.Class(intrinsic_matrix=np.array([[1, 0, 0], [0, 0, 0]]))
self.assertEqual(model.kx, 1)
model.kx = 100
self.assertEqual(model.kx, 100)
self.assertEqual(model.intrinsic_matrix[0, 0], 100)
def test_fy(self):
model = self.Class(intrinsic_matrix=np.array([[0, 0, 0], [0, 1, 0]]))
self.assertEqual(model.ky, 1)
model.ky = 100
self.assertEqual(model.ky, 100)
self.assertEqual(model.intrinsic_matrix[1, 1], 100)
def test_kxy(self):
model = self.Class(intrinsic_matrix=np.array([[0, 1, 0], [0, 0, 0]]))
self.assertEqual(model.kxy, 1)
model.kxy = 100
self.assertEqual(model.kxy, 100)
self.assertEqual(model.intrinsic_matrix[0, 1], 100)
def test_alpha(self):
model = self.Class(intrinsic_matrix=np.array([[0, 1, 0], [0, 0, 0]]))
self.assertEqual(model.alpha, 1)
model.alpha = 100
self.assertEqual(model.alpha, 100)
self.assertEqual(model.intrinsic_matrix[0, 1], 100)
def test_k1(self):
model = self.Class(distortion_coefficients=np.array([1, 0, 0, 0, 0]))
self.assertEqual(model.k1, 1)
model.k1 = 100
self.assertEqual(model.k1, 100)
self.assertEqual(model.distortion_coefficients[0], 100)
def test_k2(self):
model = self.Class(distortion_coefficients=np.array([0, 1, 0, 0, 0]))
self.assertEqual(model.k2, 1)
model.k2 = 100
self.assertEqual(model.k2, 100)
self.assertEqual(model.distortion_coefficients[1], 100)
def test_k3(self):
model = self.Class(distortion_coefficients=np.array([0, 0, 1, 0, 0]))
self.assertEqual(model.k3, 1)
model.k3 = 100
self.assertEqual(model.k3, 100)
self.assertEqual(model.distortion_coefficients[2], 100)
def test_p1(self):
model = self.Class(distortion_coefficients=np.array([0, 0, 0, 1, 0]))
self.assertEqual(model.p1, 1)
model.p1 = 100
self.assertEqual(model.p1, 100)
self.assertEqual(model.distortion_coefficients[3], 100)
def test_p2(self):
model = self.Class(distortion_coefficients=np.array([0, 0, 0, 0, 1]))
self.assertEqual(model.p2, 1)
model.p2 = 100
self.assertEqual(model.p2, 100)
self.assertEqual(model.distortion_coefficients[4], 100)
def test_radial2(self):
model = self.Class(distortion_coefficients=np.array([1, 0, 0, 0, 0]))
self.assertEqual(model.radial2, 1)
model.radial2 = 100
self.assertEqual(model.radial2, 100)
self.assertEqual(model.distortion_coefficients[0], 100)
def test_radial4(self):
model = self.Class(distortion_coefficients=np.array([0, 1, 0, 0, 0]))
self.assertEqual(model.radial4, 1)
model.radial4 = 100
self.assertEqual(model.radial4, 100)
self.assertEqual(model.distortion_coefficients[1], 100)
def test_radial6(self):
model = self.Class(distortion_coefficients=np.array([0, 0, 1, 0, 0]))
self.assertEqual(model.radial6, 1)
model.radial6 = 100
self.assertEqual(model.radial6, 100)
self.assertEqual(model.distortion_coefficients[2], 100)
def test_tiptilt_y(self):
model = self.Class(distortion_coefficients=np.array([0, 0, 0, 1, 0]))
self.assertEqual(model.tiptilt_y, 1)
model.tiptilt_y = 100
self.assertEqual(model.tiptilt_y, 100)
self.assertEqual(model.distortion_coefficients[3], 100)
def test_tiptilt_x(self):
model = self.Class(distortion_coefficients=np.array([0, 0, 0, 0, 1]))
self.assertEqual(model.tiptilt_x, 1)
model.tiptilt_x = 100
self.assertEqual(model.tiptilt_x, 100)
self.assertEqual(model.distortion_coefficients[4], 100)
def test_apply_distortion(self):
dist_coefs = [{"k1": 1.5, "k2": 0, "k3": 0, "p1": 0, "p2": 0},
{"k1": 0, "k2": 1.5, "k3": 0, "p1": 0, "p2": 0},
{"k1": 0, "k2": 0, "k3": 1.5, "p1": 0, "p2": 0},
{"k1": 0, "k2": 0, "k3": 0, "p1": 1.5, "p2": 0},
{"k1": 0, "k2": 0, "k3": 0, "p1": 0, "p2": 1.5}]
inputs = [[0, 0], [1, 0], [-1, 0], [1.5, 0], [-1.5, 0], [[1.5], [0]], [[1.5, -1], [0, 0]],
[0, 1], [0, -1], [0, 1.5], [0, -1.5], [[0], [1.5]], [[0, 0], [1.5, -1]], [1, 1]]
solus = [[[0, 0], [2.5, 0], [-2.5, 0], [1.5 + 1.5 ** 4, 0], [-(1.5 + 1.5 ** 4), 0],
[[(1.5 + 1.5 ** 4)], [0]], [[(1.5 + 1.5 ** 4), -2.5], [0, 0]],
[0, 2.5], [0, -2.5], [0, (1.5 + 1.5 ** 4)], [0, -(1.5 + 1.5 ** 4)], [[0], [(1.5 + 1.5 ** 4)]],
[[0, 0], [(1.5 + 1.5 ** 4), -2.5]], [1 + 2 * 1.5, 1 + 2 * 1.5]],
[[0, 0], [2.5, 0], [-2.5, 0], [(1.5 + 1.5 ** 6), 0], [-(1.5 + 1.5 ** 6), 0],
[[(1.5 + 1.5 ** 6)], [0]], [[(1.5 + 1.5 ** 6), -2.5], [0, 0]],
[0, 2.5], [0, -2.5], [0, (1.5 + 1.5 ** 6)], [0, -(1.5 + 1.5 ** 6)], [[0], [(1.5 + 1.5 ** 6)]],
[[0, 0], [(1.5 + 1.5 ** 6), -2.5]], [1 + 4 * 1.5, 1 + 4 * 1.5]],
[[0, 0], [2.5, 0], [-2.5, 0], [(1.5 + 1.5 ** 8), 0], [-(1.5 + 1.5 ** 8), 0],
[[(1.5 + 1.5 ** 8)], [0]], [[(1.5 + 1.5 ** 8), -2.5], [0, 0]],
[0, 2.5], [0, -2.5], [0, (1.5 + 1.5 ** 8)], [0, -(1.5 + 1.5 ** 8)], [[0], [(1.5 + 1.5 ** 8)]],
[[0, 0], [(1.5 + 1.5 ** 8), -2.5]], [1 + 8 * 1.5, 1 + 8 * 1.5]],
[[0, 0], [1, 1.5], [-1, 1.5], [1.5, 1.5 ** 3], [-1.5, 1.5 ** 3], [[1.5], [1.5 ** 3]],
[[1.5, -1], [1.5 ** 3, 1.5]],
[0, 1 + 3 * 1.5], [0, -1 + 3 * 1.5], [0, 1.5 + 3 * 1.5 ** 3], [0, -1.5 + 3 * 1.5 ** 3],
[[0], [1.5 + 3 * 1.5 ** 3]],
[[0, 0], [1.5 + 3 * 1.5 ** 3, -1 + 3 * 1.5]], [1 + 2 * 1.5, 1 + 4 * 1.5]],
[[0, 0], [1 + 3 * 1.5, 0], [-1 + 3 * 1.5, 0], [1.5 + 3 * 1.5 ** 3, 0], [-1.5 + 3 * 1.5 ** 3, 0],
[[1.5 + 3 * 1.5 ** 3], [0]], [[1.5 + 3 * 1.5 ** 3, -1 + 3 * 1.5], [0, 0]],
[1.5, 1], [1.5, -1], [1.5 ** 3, 1.5], [1.5 ** 3, -1.5], [[1.5 ** 3], [1.5]],
[[1.5 ** 3, 1.5], [1.5, -1]],
[1 + 4 * 1.5, 1 + 2 * 1.5]]]
for dist, sols in zip(dist_coefs, solus):
with self.subTest(**dist):
model = self.Class(**dist)
for inp, solu in zip(inputs, sols):
gnom_dist = model.apply_distortion(np.array(inp))
np.testing.assert_array_almost_equal(gnom_dist, solu)
def test_get_projections(self):
points = [[0, 0, 1], [-0.1, 0.2, 2.2], [[-0.1], [0.2], [2.2]], [[-0.1, 0], [0.2, 0], [2.2, 1]]]
model = self.Class(fx=4050.5, fy=3050.25, alpha=1.5, px=1500, py=1500.5,
k1=0.5, k2=-0.3, k3=0.15, p1=1e-7, p2=1e-6, a1=1e-1, a2=-1e-6, a3=3e-7)
temps = [0, 1, -1, 10, -10]
for temp in temps:
with self.subTest(misalignment=None, temp=temp):
for point in points:
pin, dist, pix = model.get_projections(point, temperature=temp)
pin_true = np.array(point[:2]) / point[2]
dist_true = model.apply_distortion(pin_true)
dist_true *= model.get_temperature_scale(temp)
pix_true = (np.matmul(model.intrinsic_matrix[:, :2], dist_true).T + model.intrinsic_matrix[:, 2]).T
np.testing.assert_array_equal(pin, pin_true)
np.testing.assert_array_equal(dist, dist_true)
np.testing.assert_array_equal(pix, pix_true)
model = self.Class(fx=4050.5, fy=3050.25, alpha=1.5, px=1500, py=1500.5,
k1=0.5, k2=-0.3, k3=0.15, p1=1e-7, p2=1e-6, misalignment=[0, 0, np.pi])
with self.subTest(misalignment=[0, 0, np.pi]):
for point in points:
pin, dist, pix = model.get_projections(point)
pin_true = -np.array(point[:2]) / point[2]
dist_true = model.apply_distortion(pin_true)
pix_true = (np.matmul(model.intrinsic_matrix[:, :2], dist_true).T + model.intrinsic_matrix[:, 2]).T
np.testing.assert_array_almost_equal(pin, pin_true)
np.testing.assert_array_almost_equal(dist, dist_true)
np.testing.assert_array_almost_equal(pix, pix_true)
model = self.Class(fx=4050.5, fy=3050.25, alpha=1.5, px=1500, py=1500.5,
k1=0.5, k2=-0.3, k3=0.15, p1=1e-7, p2=1e-6, misalignment=[np.pi, 0, 0])
with self.subTest(misalignment=[np.pi, 0, 0]):
for point in points:
pin, dist, pix = model.get_projections(point)
pin_true = np.array(point[:2]) / point[2]
pin_true[0] *= -1
dist_true = model.apply_distortion(pin_true)
pix_true = (np.matmul(model.intrinsic_matrix[:, :2], dist_true).T + model.intrinsic_matrix[:, 2]).T
np.testing.assert_array_almost_equal(pin, pin_true)
np.testing.assert_array_almost_equal(dist, dist_true)
np.testing.assert_array_almost_equal(pix, pix_true)
model = self.Class(fx=4050.5, fy=3050.25, alpha=1.5, px=1500, py=1500.5,
k1=0.5, k2=-0.3, k3=0.15, p1=1e-7, p2=1e-6, misalignment=[0, np.pi, 0])
with self.subTest(misalignment=[0, np.pi, 0]):
for point in points:
pin, dist, pix = model.get_projections(point)
pin_true = np.array(point[:2]) / point[2]
pin_true[1] *= -1
dist_true = model.apply_distortion(pin_true)
pix_true = (np.matmul(model.intrinsic_matrix[:, :2], dist_true).T + model.intrinsic_matrix[:, 2]).T
np.testing.assert_array_almost_equal(pin, pin_true)
np.testing.assert_array_almost_equal(dist, dist_true)
np.testing.assert_array_almost_equal(pix, pix_true)
model = self.Class(fx=4050.5, fy=3050.25, alpha=1.5, px=1500, py=1500.5,
k1=0.5, k2=-0.3, k3=0.15, p1=1e-7, p2=1e-6, misalignment=[1, 0.2, 0.3])
with self.subTest(misalignment=[1, 0.2, 0.3]):
rot_mat = at.rotvec_to_rotmat([1, 0.2, 0.3]).squeeze()
for point in points:
point_new = rot_mat @ point
pin, dist, pix = model.get_projections(point)
pin_true = np.array(point_new[:2]) / np.array(point_new[2])
dist_true = model.apply_distortion(pin_true)
pix_true = (np.matmul(model.intrinsic_matrix[:, :2], dist_true).T + model.intrinsic_matrix[:, 2]).T
np.testing.assert_array_almost_equal(pin, pin_true)
np.testing.assert_array_almost_equal(dist, dist_true)
|
np.testing.assert_array_almost_equal(pix, pix_true)
|
numpy.testing.assert_array_almost_equal
|
import numpy as np
# Data: ((global ID, local ID) = (HITRAN molecule, isotopologue), (natural abundance, molar mass in g/mol))
DataFromHITRAN = [((1,1),(0.997317,18.010565)), ((1,2),(0.002000,20.014811)), ((1,3),(3.718840e-4,19.01478)), #H2O
((2,1),(0.984204,43.98983)), ((2,2),(0.011057,44.993185)), ((2,3),(0.003947,45.994076)), #CO2
((3,1),(0.992901,47.984745)), ((3,2),(0.00398194,49.988991)), ((3,3),(0.00199097,49.988991)), #O3
((5,1),(0.986544,27.994915)), ((5,2),(0.011084,28.99827)), ((5,3),(0.001978,29.999161)), #CO
((6,1),(0.988274,16.0313)), ((6,2),(0.011103,17.034655)), ((6,3),(6.157510e-4,17.037475)), #CH4
((7,1),(0.995262,31.98983)), ((7,2),(0.003991,33.994076)),((7,3),(7.422350e-4,32.994045)), #O2
((22,1),(0.992687,28.006148)),((22,2), (0.007478,29.003182)), #N2
((45,1),(0.999688,2.01565)), ((45,2),(3.114320e-4,3.021825))] #H2
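# One possible (purely illustrative) way to look up a molar mass from the table above, keyed on
# (global molecule ID, local isotopologue ID); the function below may take a different approach:
#   molar_mass = dict(DataFromHITRAN)[(M, I)][1]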
def GetMolecularMass(M,I):
ALLGlobalID =
|
np.array([Item[0][0] for Item in DataFromHITRAN])
|
numpy.array
|
import numpy as np
#import cv2
#import h5py
#import scipy.io as sio
import numpy as np
import glob
import os
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import confusion_matrix,f1_score,accuracy_score,classification_report,recall_score,precision_score
#from sklearn.externals import joblib
import matplotlib.pyplot as plt
#import pandas as pd
from PredictionsLoader import PredictionsLoaderNPY, PredictionsLoaderModel
#====================================
def labels_predictions_filter_transform(label_test,predictions,class_n,
debug=1):
predictions=predictions.argmax(axis=np.ndim(predictions)-1)
predictions=np.reshape(predictions,-1)
label_test=label_test.argmax(axis=np.ndim(label_test)-1)
label_test=np.reshape(label_test,-1)
predictions=predictions[label_test<class_n]
label_test=label_test[label_test<class_n]
if debug>0:
print("Predictions",predictions.shape)
print("Label_test",label_test.shape)
return label_test,predictions
def metrics_get(label_test,predictions,only_basics=False,debug=1):
if debug>0:
print(predictions.shape,predictions.dtype)
print(label_test.shape,label_test.dtype)
metrics={}
metrics['f1_score']=f1_score(label_test,predictions,average=None)
if debug>0:
print("f1_score",metrics['f1_score'])
if only_basics==False:
metrics['f1_score_weighted']=f1_score(label_test,predictions,average='weighted')
metrics['recall']=recall_score(label_test,predictions,average=None)
metrics['precision']=precision_score(label_test,predictions,average=None)
confusion_matrix_=confusion_matrix(label_test,predictions) # used by the debug prints below
if debug>0:
print(confusion_matrix_.sum(axis=1)[:, np.newaxis].diagonal())
print(confusion_matrix_.diagonal())
print(np.sum(confusion_matrix_,axis=1))
print(metrics)
print(confusion_matrix_)
print(metrics['precision'])
print(metrics['recall'])
return metrics
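# Minimal usage sketch (assuming one-hot `label_test` and `predictions` arrays with class_n classes):
#   label_flat, pred_flat = labels_predictions_filter_transform(label_test, predictions, class_n)
#   scores = metrics_get(label_flat, pred_flat, only_basics=True)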
#===== normy3
path='/home/lvc/Jorg/deep_learning/LSTM-Final-Project/cv_data/normy/fcn/seq1/'
#path='/home/lvc/Jorg/deep_learning/LSTM-Final-Project/cv_data/normy/fcn/seq2/'
#path='/home/lvc/Jorg/deep_learning/LSTM-Final-Project/hn_data/normy/fcn_16/'
#path='/home/lvc/Jorg/deep_learning/LSTM-Final-Project/hn_data/normy/fcn_8/'
#path='/home/lvc/Jorg/deep_learning/LSTM-Final-Project/hn_data/normy/fcn_8/'
#======== normy3_check
path='/home/lvc/Jorg/deep_learning/LSTM-Final-Project/cv_data/normy3_check/fcn/seq1/'
#=========== normy3_check2
path='/home/lvc/Jorg/deep_learning/LSTM-Final-Project/hn_data/normy3_check2/fcn/'
path='/home/lvc/Jorg/deep_learning/LSTM-Final-Project/cv_data/normy3_check2/fcn/seq2/'
path='/home/lvc/Jorg/deep_learning/LSTM-Final-Project/cv_data/normy3_check2/fcn/seq1/'
# gold
path='/home/lvc/Jorg/deep_learning/LSTM-Final-Project/cv_data/normy3_check2/fcn/seq2/gold/'
# ====== normy3B
#path='/home/lvc/Jorg/deep_learning/LSTM-Final-Project/cv_data/normy3B/fcn/seq1/'
#path='/home/lvc/Jorg/deep_learning/LSTM-Final-Project/cv_data/normy3B/fcn/seq2/'
path='/home/lvc/Jorg/sbsr/fcn_model/keras_time_semantic_fcn/'
# ======= convlstm playground
path='/home/lvc/Jorg/deep_learning/LSTM-Final-Project/cv_data/convlstm_playground/fcn_original500/'
path='/home/lvc/Jorg/deep_learning/LSTM-Final-Project/cv_data/convlstm_playground/fcn_original/'
# === tranfer
path='/home/lvc/Jorg/igarss/fcn_transfer_learning_for_RS/results/transfer_fcn_seq2_to_seq1/'
# === normy3 check
path='/home/lvc/Jorg/igarss/fcn_transfer_learning_for_RS/results/normy3_check/seq1/fcn/'
# ======== ConvRNN
path='/home/lvc/Jorg/igarss/convrnn_remote_sensing/results/cv/densenet/'
prediction_path=path+'prediction.npy'
#prediction_path='/home/lvc/Jorg/igarss/convrnn_remote_sensing/results/cv/prediction_ConvLSTM_DenseNet_eyesight.npy'
# =========seq2seq
def experiment_analyze(dataset='cv',
prediction_filename='prediction_DenseNetTimeDistributed_blockgoer.npy',
prediction_type='npy', mode='each_date',debug=1):
#path='/home/lvc/Jorg/igarss/convrnn_remote_sensing/results/seq2seq_ignorelabel/'+dataset+'/'
base_path="../../results/convlstm_results/"
path=base_path+dataset+'/'
prediction_path=path+prediction_filename
path_test='../../../../dataset/dataset/'+dataset+'_data/patches_bckndfixed/test/'
print('path_test',path_test)
#prediction_type = 'model'
if prediction_type=='npy':
predictionsLoader = PredictionsLoaderNPY()
predictions, label_test = predictionsLoader.loadPredictions(prediction_path,path+'labels.npy')
elif prediction_type=='model':
model_path=base_path + 'model/'+dataset+'/'+prediction_filename
print('model_path',model_path)
predictionsLoader = PredictionsLoaderModel(path_test)
predictions, label_test = predictionsLoader.loadPredictions(model_path)
# mode='each_date',debug=1):
# path='/home/lvc/Jorg/igarss/convrnn_remote_sensing/results/seq2seq_ignorelabel/'+dataset+'/'
# prediction_path=path+prediction_filename
# predictions=np.load(prediction_path)
# label_test=np.load(path+'labels.npy')
# if debug>0:
# print(predictions.shape)
# print(label_test.shape)
class_n=predictions.shape[-1]
if mode=='each_date':
metrics_t={'f1_score':[],'overall_acc':[],
'average_acc':[]}
label_test_v=label_test.argmax(axis=4).flatten()
label_test_v=label_test_v[label_test_v<class_n]
label_unique=np.unique(label_test_v)
print("label_unique",label_unique)
labels_unique_t=[]
for t in range(label_test.shape[1]):
predictions_t = predictions[:,t,:,:,:]
label_test_t = label_test[:,t,:,:,:]
label_test_t,predictions_t = labels_predictions_filter_transform(
label_test_t, predictions_t, class_n=class_n,
debug=debug)
print("predictions_t",np.unique(
predictions_t).shape)
print("label_test_t",np.unique(
label_test_t).shape)
label_unique_t=np.unique(label_test_t)
predictions_unique_t=np.unique(predictions_t)
classes_t = np.unique(np.concatenate((label_unique_t,predictions_unique_t),0))
##print("classes_t.shape",classes_t.shape)
metrics = metrics_get(label_test_t, predictions_t,
only_basics=True, debug=debug)
##print("metrics['f1_score'].shape",metrics['f1_score'].shape)
#metrics_t['f1_score'].append(metrics['f1_score'])
#metrics_t['overall_acc'].append(metrics['overall_acc'])
metrics_ordered={'f1_score':np.zeros(label_unique.shape)}
valid_classes_counter=0
##print(metrics_ordered['f1_score'])
for clss in range(label_unique.shape[0]):
#print(clss)
if np.any(classes_t==clss): # If this timestep t has class clss
##print("1",valid_classes_counter)
##print("2",classes_t[valid_classes_counter])
##print("3",metrics['f1_score'][valid_classes_counter])
metrics_ordered['f1_score'][clss]=metrics['f1_score'][valid_classes_counter]
valid_classes_counter+=1
if np.any(label_unique_t==clss):
pass
else:
metrics_ordered['f1_score'][clss]=np.nan
metrics_t['f1_score'].append(metrics_ordered['f1_score'])
labels_unique_t.append(label_unique_t)
print("class_n",t,metrics['f1_score'].shape)
print(metrics_t)
return metrics_t
elif mode=='global':
label_test,predictions=labels_predictions_filter_transform(
label_test,predictions, class_n=class_n)
print(np.unique(predictions,return_counts=True))
print(np.unique(label_test,return_counts=True))
metrics=metrics_get(label_test,predictions)
return metrics
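# Hedged example of calling experiment_analyze (the default prediction filename and the
# relative paths above are assumptions of the original script and may not exist elsewhere):
#   per_date = experiment_analyze(dataset='cv', mode='each_date', debug=0)  # dict of per-date metric lists
#   overall  = experiment_analyze(dataset='cv', mode='global', debug=0)     # single metrics dict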
def experiments_analyze(dataset,experiment_list,mode='each_date'):
experiment_metrics=[]
for experiment in experiment_list:
print("Starting experiment:",experiment)
experiment_metrics.append(experiment_analyze(
dataset=dataset,
prediction_filename=experiment,
mode=mode,debug=0))
return experiment_metrics
def experiment_groups_analyze(dataset,experiment_group,mode='each_date'):
save=True
if save==True:
experiment_metrics=[]
for group in experiment_group:
group_metrics=[]
for experiment in group:
print("======determining prediction type")
if experiment[-3:]=='npy':
prediction_type='npy'
elif experiment[-2:]=='h5':
prediction_type='model'
print("Starting experiment: {}. Prediction type: {}".format(experiment,prediction_type))
group_metrics.append(experiment_analyze(
dataset=dataset,
prediction_filename=experiment,
mode=mode,debug=0,
prediction_type=prediction_type))
experiment_metrics.append(group_metrics)
# for model_id in range(len(experiment_metrics[0])):
# for date_id in range(len(experiment_metrics[0][model_id]):
np.save('experiment_metrics.npy',experiment_metrics)
else:
experiment_metrics=np.load('experiment_metrics.npy')
metrics={}
total_metrics=[]
for exp_id in range(len(experiment_metrics[0])):
exp_id=int(exp_id)
print(len(experiment_metrics))
print(len(experiment_metrics[0]))
print(experiment_metrics[0][0])
print(experiment_metrics[0][0]['f1_score'])
print(experiment_metrics[1][0]['f1_score'])
print("exp_id",exp_id)
#[experiment_metrics[int(i)][exp_id]['f1_score'] for i in range(len(experiment_metrics))]
# a=[experiment_metrics[int(i)][exp_id]['f1_score'] for i in range(len(experiment_metrics))]
# print("1",a)
# print("2",np.array(a))
# print("3",np.array(a).shape)
# print("4",a[0][0])
metrics_by_date=[]
for date_id in range(len(experiment_metrics[0][0])):
date_id=int(date_id)
print("1",len(experiment_metrics[0]))
print("2",len(experiment_metrics[0][0]))
print("3",len(experiment_metrics[0][0]['f1_score'][0])) # class f1 score for the first date
print("4",experiment_metrics[0][exp_id]['f1_score'][date_id].shape) # class f1 score for the first date
print("4",experiment_metrics[1][exp_id]['f1_score'][date_id].shape) # class f1 score for the first date
metrics_by_group=[experiment_metrics[int(i)][exp_id]['f1_score'][date_id] for i in range(len(experiment_metrics))]
print("1",np.asarray(metrics_by_group).shape)
print("1",metrics_by_group)
print("2",len(metrics_by_group))
print("3",metrics_by_group[0].shape)
metrics_by_group=np.stack((metrics_by_group[0],metrics_by_group[1]))
print("concatenated metrics_by_group.shape",metrics_by_group.shape)
metrics_average=np.average(metrics_by_group,axis=0)
print("metrics_average.shape",metrics_average.shape)
metrics_by_date.append(metrics_average)
print("len(metrics_by_date)",len(metrics_by_date))
metrics['f1_score']=np.average(np.array(
[experiment_metrics[int(i)][exp_id]['f1_score'] for i in range(len(experiment_metrics))]),
axis=0)
#print("metrics f1 score",metrics['f1_score'])
#metrics['overall_acc']=np.average(np.asarray(
# [experiment_metrics[int(i)][exp_id]['overall_acc'] for i in range(len(experiment_metrics))]),
# axis=0)
#metrics['average_acc']=np.average(np.asarray(
# [experiment_metrics[int(i)][exp_id]['average_acc'] for i in range(len(experiment_metrics))]),
# axis=0)
#total_metrics.append(metrics.copy())
print("total metrics f1 score",metrics)
if dataset=='cv':
important_classes=[0,1,2,3,4,7,9]
important_dates=[0,2,4,5,6,8,10,11,13]
elif dataset=='lm':
important_classes=[0,1,2,3,4,5,6,7]
important_dates=range(metrics['f1_score'].shape[0])
print("metrics['f1_score'].shape",metrics['f1_score'].shape)
metrics['f1_score']=metrics['f1_score'][:,important_classes]
print("metrics['f1_score'].shape",metrics['f1_score'].shape)
metrics['f1_score']=metrics['f1_score'][important_dates,:]
print("metrics['f1_score'].shape",metrics['f1_score'].shape)
# print("metrics['f1_score'].shape",metrics['f1_score'].shape)
metrics['f1_score']*=100
print("Exp id",experiment_group[0][exp_id])
np.savetxt(
"averaged_metrics_"+dataset+"_"+experiment_group[0][exp_id]+".csv",
np.transpose(metrics['f1_score']), delimiter=",",fmt='%1.1f')
print("metrics['f1_score'].shape",metrics['f1_score'].shape)
print("total merics len",len(metrics))
#print(total_metrics)
return metrics
def experiments_plot(metrics,experiment_list,dataset):
t_len=len(metrics[0]['f1_score'])
print("t_len",t_len)
indices = range(t_len) # t_len
X = np.arange(t_len)
exp_id=0
width=0.5
colors=['b','y','c','m','r']
#colors=['#7A9AAF','#293C4B','#FF8700']
#colors=['#4225AC','#1DBBB9','#FBFA17']
##colors=['b','#FBFA17','c']
#colors=['#966A51','#202B3F','#DA5534']
exp_handler=[] # here I save the plot for legend later
exp_handler2=[] # here I save the plot for legend later
exp_handler3=[] # here I save the plot for legend later
figsize=(8,4)
fig, ax = plt.subplots(figsize=figsize)
#fig2, ax2 = plt.subplots(figsize=figsize)
#fig3, ax3 = plt.subplots(figsize=figsize)
fig.subplots_adjust(bottom=0.2)
#fig2.subplots_adjust(bottom=0.2)
#fig3.subplots_adjust(bottom=0.2)
for experiment in experiment_list:
print("experiment",experiment)
print(exp_id)
metrics[exp_id]['f1_score']=np.transpose(
|
np.asarray(metrics[exp_id]['f1_score'])
|
numpy.asarray
|
import numpy as np
class Snake:
"""
An implementation of the game Snake
Example layout for a 4x2 field:
------
|....|
|....|
------
The snake can move on the '.' tiles.
The snake can be controlled by turning left, turning right, or doing nothing (continuing to move straight).
With every step it will move one tile, and it will grow by one tile whenever it eats food.
The game ends if it hits the wall or if it hits itself. The goal is to grow the snake as long as possible, where
the maximum length is n_x x n_y, i.e., in the case of our example 8.
"""
current_direction = 0
# Directions correspond to velocity (vx, vy)
# 0 ... (1, 0)
# 1 ... (-1, 0)
# 2 ... (0, 1)
# 3 ... (0, -1)
current_velocity = [1, 0]
# Snake is the current snake
snake = []
food = []
score = 0
moves = 0
def __init__(self, n_x, n_y):
"""
Args:
n_x (int): number of tiles in x direction (horizontal).
n_y (int): number of tiles in y direction (vertical).
"""
self.n_x = n_x
self.n_y = n_y
# Max score depends on the number of tiles and on the initial length of the snake, which is, for now, hardcoded 1
self.max_score = self.n_x*self.n_y - 1
# Visibility range is the number of tiles in each direction the snake can see when the get_view_obs() function is used
self.visibility_range = 4
def step(self, action):
"""
Perform one step
# Directions correspond to velocity (vx, vy)
# 0 ... (1, 0)
# 1 ... (-1, 0)
# 2 ... (0, 1)
# 3 ... (0, -1)
It returns a new observation after the step is performed.
"""
self.moves += 1
self.current_velocity = self._get_velocity_for_direction(action)
(head_x, head_y) = self.snake[-1]
new_head = (head_x+self.current_velocity[0], head_y+self.current_velocity[1])
ok = self._check_head(new_head)
if not ok:
return self.get_obs(), True, [self.moves]
else:
self.snake.append(new_head)
if self._check_pos_has_food(new_head):
self.score += 1
if self.score == self.max_score:
return self.get_obs(), True, [self.moves]
else:
self.food = [self._gen_food()]
else:
self.snake.pop(0)
return self.get_obs(), False, [self.moves]
def get_actions(self):
return 4
def get_score(self):
return self.score
def set_visibility_range(self, visibility_range):
self.visibility_range = visibility_range
def _check_head(self, head):
if self._check_in_snake(head) and head != self.snake[0]:
return False
if head[0] >= self.n_x or head[0] < 0:
return False
if head[1] >= self.n_y or head[1] < 0:
return False
return True
def _check_pos_has_food(self, pos):
if pos in self.food:
return True
else:
return False
def _check_in_snake(self, coords):
if coords in self.snake:
return True
return False
def _check_pos_is_wall(self, pos):
if pos[0] >= self.n_x or pos[0] < 0:
return True
if pos[1] >= self.n_y or pos[1] < 0:
return True
return False
def reset(self):
""" Reset the game """
self.score = 0
self.moves = 0
self.snake = []
x = np.random.randint(0, self.n_x)
y = np.random.randint(0, self.n_y)
self.snake.append((x, y))
self.food = [self._gen_food()]
return self.get_obs()
def _gen_food(self):
x =
|
np.random.randint(0, self.n_x)
|
numpy.random.randint
|
# plots the three effect size graphs
# generates a dictionary of metabolite name and average value
def generatedct(names, values):
dct0 = {}
for ii in range(0, len(names)):
dct0[names[ii]] = values[ii]
return dct0
def findmaxdif(lst):
tmp = [lst[0][1], abs(int(lst[0][0]) - int(lst[0][2]))]
for ii in lst:
diff0 = abs(int(ii[0]) - int(ii[2]))
if diff0 > tmp[1]:
tmp = [ii[1], diff0]
return tmp
def makedct(excep):
dct = {}
for ii in excep:
dct[ii[1]] = abs(int(ii[0]) - int(ii[2]))
return dct
def getnoise(std0, mean0): # returns the noise (std divided by mean) for each element of the lists
noise0 = []
for ii in range(0, len(std0)):
if mean0[ii] != 0:
noise0.append(std0[ii]/mean0[ii])
elif mean0[ii] == 0:
noise0.append(0)
return noise0
def ttestlst(lst1, lst2): # calculates the t-test from the values of the metabolites
ttest0 = []
for ii in range(0, len(lst1)):
temp = ttest_ind(lst1[ii], lst2[ii])
ttest0.append(temp[1])
return ttest0
def getindices(lst): # returns the indices having p < 0.05
ind0 = []
for ii in range(0, len(lst)):
if lst[ii] < 0.05:
ind0.append(ii)
return ind0
def strtofloat0(lst): # returns a list of floats, given a list of numbers in str type
ans = []
for ii in lst:
tmp = [float(kk) for kk in ii]
ans.append(tmp)
return ans
def getvalues0(lstval0, start1, len1, start2, len2): # returns lists with the values of the groups that we want to compare
arrval0 = np.array(lstval0)
grp1 = arrval0[:, start1:(start1+len1)]
grp2 = arrval0[:, start2:(start2+len2)]
grp1lst = grp1.tolist()
grp2lst = grp2.tolist()
return grp1lst, grp2lst
#
def getMNpval00(lst1, lst2): # calculates the mann-whitney p-val over the two lists, each element in the lists is a list of values to compare
pval00 = []
# print(len(lst1), len(lst2)) # 140 - good
for ii in range(0, len(lst1)):
if (sum(lst1[ii]) + sum(lst2[ii]) == 0): # if all values are 0's give pval = 1, for fdr purposes
print(ii)
pval00.append(1)
if (lst1[ii] != lst2[ii]) and ((sum(lst1[ii]) + sum(lst2[ii])) != 0): # if lst1[ii] is not the same length as lst2[ii] need to check directly not all 0's, so look at sum
anstmp = mannwhitneyu(lst1[ii], lst2[ii], alternative = 'two-sided')
pval00.append(anstmp[1])
return pval00
def fdrandmarkers(lst): # receives a list of p-values, returns the list of markers that pass FDR
fdr0 = statsmodels.stats.multitest.multipletests(lst, alpha=0.1, method='fdr_bh', is_sorted=False, returnsorted=False)
mrks = [] # the markers passing FDR
jj = 1
for ii in fdr0[0]:
if ii == True:
mrks.append(jj)
jj = jj + 1
return mrks
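# Hedged example for fdrandmarkers (values invented): it returns the 1-based indices of the
# p-values that survive Benjamini-Hochberg FDR at alpha=0.1 via statsmodels.
#   pvals = [0.001, 0.2, 0.03, 0.9]
#   fdrandmarkers(pvals)   # e.g. [1, 3] if those entries pass the correction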
def getoverlap(lst1,lst2): # returns the number of elements that appear in both lists
ind = 0
for ii in lst1:
if ii in lst2:
ind = ind + 1
return ind
def commonmrkrs (lst1, lst2): # returns the elements that appear in both lists
common0 = []
for ii in lst1:
if ii in lst2:
common0.append(ii)
return common0
def findmedian(lst): # gets a list, every element is a list of values, returns a list of medians
med0 = []
for ii in lst:
tmp = statistics.median(ii)
med0.append(tmp)
return med0
def checkdirec0(lst): # receives the list of medians and checks which elements have the same trend from young to old
indlst = []
ind = 0
for ii in lst: # here ii[0] is old, ii[1] young, ii[2] AL, ii[3] CR
# if (ii[1] < ii[0] < ii[3]) or (ii[1] > ii[0] > ii[3]): # CR
if (ii[1] < ii[0] < ii[2]) or (ii[1] > ii[0] > ii[2]): # only looking at AL
indlst.append(ind)
ind = ind + 1
return indlst
def permdata0(lst): # receives a list, where each element is a list of values (metabolite values), and returns a permutation of the sample columns (i.e., shuffles the group tags)
lstarr = np.array(lst)
lstarrt = lstarr.transpose()
tmp = np.random.permutation(lstarrt)
tmpt = tmp.transpose()
ans = tmpt.tolist()
return ans
def sortpolarind(indiceslipid, indicespolar, polarlst0): # receives the original list of polar markers, returns the list in the same order as the lipid markers (same order of animals)
indiceslipid1 = []
indicespolar1 = []
for ii in range (0, len(indicespolar)): # standardizing all animals' names/indices to be uppercase
indiceslipid1.append(indiceslipid[ii].upper())
indicespolar1.append(indicespolar[ii].upper())
polarlst0arr = np.array(polarlst0)
polarlst0arrt = polarlst0arr.transpose()
polarlst11 = polarlst0arrt.tolist()
polarsorted = []
for ii in range (0, len(indicespolar)):
if indiceslipid1[ii] == indicespolar1[ii]:
polarsorted.append(polarlst11[ii])
else:
tmp0 = indiceslipid1[ii]
tmp1 = indicespolar1.index(tmp0)
polarsorted.append(polarlst11[tmp1])
polarsortedarr =
|
np.array(polarsorted)
|
numpy.array
|
import numpy as np
import pyeer.eer_info
import pytest
import sklearn.metrics
import audmetric
@pytest.mark.parametrize('truth,prediction,labels,to_string', [
(
np.random.randint(0, 10, size=5),
np.random.randint(0, 10, size=5),
None,
False,
),
(
np.random.randint(0, 10, size=1),
np.random.randint(0, 10, size=1),
list(range(1, 10)),
False,
),
(
np.random.randint(0, 10, size=10),
np.random.randint(0, 10, size=10),
list(range(1, 10)),
False,
),
(
np.random.randint(0, 10, size=10),
np.random.randint(0, 10, size=10),
None,
True,
),
(
np.array([]),
np.array([]),
None,
False,
),
(
np.zeros(10),
np.zeros(10),
None,
False,
),
(
np.arange(10),
np.arange(10),
list(range(1, 10)),
False,
),
(
np.arange(10),
np.arange(1, 11),
list(range(1, 10)),
False,
),
(
np.arange(5),
np.array([1, 2, 3, 4, 6]),
list(range(5)),
False
)
])
def test_accuracy(truth, prediction, labels, to_string):
if to_string:
truth = [str(w) for w in truth]
prediction = [str(w) for w in prediction]
if len(prediction) == 0:
accuracy = np.NaN
else:
if labels:
mask = np.nonzero(
np.logical_and(
np.isin(truth, labels),
np.isin(prediction, labels)
)
)
truth = truth[mask]
prediction = prediction[mask]
accuracy = sklearn.metrics.accuracy_score(truth, prediction)
np.testing.assert_almost_equal(
audmetric.accuracy(truth, prediction, labels=labels),
accuracy,
)
@pytest.mark.parametrize(
'truth, prediction, expected_eer, expected_threshold',
[
(
[1, 1, 0, 0],
[1, 1, 0, 0],
0,
1,
),
(
[True, True, False, False],
[1, 1, 0, 0],
0,
1,
),
(
[True, True, False, False],
[True, True, False, False],
0,
1,
),
(
[1, 1, 0, 0],
[0.9, 0.9, 0.1, 0.1],
0,
0.9,
),
(
[1, 1, 0, 0],
[1, 0.1, 0.1, 0],
0.25,
0.1,
),
(
[1, 1, 0, 0],
[0.8, 0.7, 0.4, 0.1],
0,
0.7,
),
(
[1, 1, 0, 0, 0],
[0.8, 0.7, 0.4, 0.1, 0.1],
0,
0.7,
),
# Non integer truth not allowed
pytest.param(
[0.9, 0.9, 0.1, 0.1],
[0.9, 0.9, 0.1, 0.1],
0,
1,
marks=pytest.mark.xfail(raises=ValueError),
),
]
)
def test_equal_error_rate(truth, prediction, expected_eer, expected_threshold):
eer, stats = audmetric.equal_error_rate(truth, prediction)
# Check expected results
assert type(eer) == float
assert type(stats.threshold) == float
assert eer == expected_eer
assert stats.threshold == expected_threshold
# Compare to pyeer package
truth = np.array(truth).astype(bool)
prediction = np.array(prediction)
pyeer_stats = pyeer.eer_info.get_eer_stats(
prediction[truth],
prediction[~truth],
)
assert eer == pyeer_stats.eer
assert stats.threshold == pyeer_stats.eer_th
def test_equal_error_rate_warnings():
# No imposter scores (division by 0)
truth = np.array([1, 1])
prediction = np.array([1, 1])
warning = 'invalid value encountered in true_divide'
with pytest.warns(RuntimeWarning, match=warning):
eer, stats = audmetric.equal_error_rate(truth, prediction)
pyeer_stats = pyeer.eer_info.get_eer_stats(
prediction[truth],
prediction[~truth],
)
assert eer == pyeer_stats.eer
assert stats.threshold == pyeer_stats.eer_th
# Curves do not overlap
truth = np.array([1, 1, 0])
prediction = np.array([.5, .5, .5])
warning = (
r'false match rate and false non-match rate curves '
r'do not intersect each other'
)
with pytest.warns(RuntimeWarning, match=warning):
eer, stats = audmetric.equal_error_rate(truth, prediction)
pyeer_stats = pyeer.eer_info.get_eer_stats(
prediction[truth],
prediction[~truth],
)
assert eer == pyeer_stats.eer
assert stats.threshold == pyeer_stats.eer_th
@pytest.mark.parametrize(
'truth,prediction,eer', [
([], [], 0),
([[]], [[]], 0),
([[None]], [[]], 1.),
([[None]], [[1]], 1.),
([[None]], [[1, 2]], 1.),
([[0], []], [[1], []], 0.5),
([[0, 1]], [[0]], 0.5),
([[0]], [[0, 1]], 0.5),
([[0, 1], [2]], [[0], [2]], 0.25),
pytest.param(
[[0, 1]], [[0], [2]], 0.,
marks=pytest.mark.xfail(raises=ValueError)
),
('lorem', 'lorm', 0.2),
(['lorem'], ['lorm'], 0.2),
(['lorem', 'ipsum'], ['lorm', 'ipsum'], 0.1),
pytest.param(
['lorem', 'ipsum'], ['lorm'], 0.,
marks=pytest.mark.xfail(raises=ValueError),
)
]
)
def test_event_error_rate(truth, prediction, eer):
np.testing.assert_equal(
audmetric.event_error_rate(truth, prediction),
eer
)
@pytest.mark.parametrize('truth,prediction', [
(
|
np.random.randint(0, 10, size=5)
|
numpy.random.randint
|
# encoding: utf-8
"""
Defines a common implementation of the PyNN API.
Simulator modules are not required to use any of the code herein, provided they
provide the correct interface, but it is suggested that they use as much as is
consistent with good performance (optimisations may require overriding some of
the default definitions given here).
Utility functions and classes:
is_conductance()
check_weight()
check_delay()
Accessing individual neurons:
IDMixin
Common API implementation/base classes:
1. Simulation set-up and control:
setup()
end()
run()
reset()
get_time_step()
get_current_time()
get_min_delay()
get_max_delay()
rank()
num_processes()
2. Creating, connecting and recording from individual neurons:
create()
connect()
set()
initialize()
build_record()
3. Creating, connecting and recording from populations of neurons:
Population
PopulationView
Assembly
Projection
:copyright: Copyright 2006-2013 by the PyNN team, see AUTHORS.
:license: CeCILL, see LICENSE for details.
$Id: common.py 1258 2013-01-31 15:01:25Z apdavison $
"""
import numpy, os
import logging
from warnings import warn
import operator
import tempfile
from pyNN import random, recording, errors, models, standardmodels, core, space, descriptions
from pyNN.recording import files
from itertools import chain
if not 'simulator' in locals():
simulator = None # should be set by simulator-specific modules
DEFAULT_WEIGHT = 0.0
DEFAULT_BUFFER_SIZE = 10000
DEFAULT_MAX_DELAY = 10.0
DEFAULT_TIMESTEP = 0.1
DEFAULT_MIN_DELAY = DEFAULT_TIMESTEP
logger = logging.getLogger("PyNN")
# =============================================================================
# Utility functions and classes
# =============================================================================
def is_conductance(target_cell):
"""
Returns True if the target cell uses conductance-based synapses, False if
it uses current-based synapses, and None if the synapse-basis cannot be
determined.
"""
if hasattr(target_cell, 'local') and target_cell.local and hasattr(target_cell, 'celltype'):
is_conductance = target_cell.celltype.conductance_based
else:
is_conductance = None
return is_conductance
def check_weight(weight, synapse_type, is_conductance):
if weight is None:
weight = DEFAULT_WEIGHT
if core.is_listlike(weight):
weight = numpy.array(weight)
nan_filter = (1 - numpy.isnan(weight)).astype(bool) # weight arrays may contain NaN, which should be ignored
filtered_weight = weight[nan_filter]
all_negative = (filtered_weight <= 0).all()
all_positive = (filtered_weight >= 0).all()
if not (all_negative or all_positive):
raise errors.InvalidWeightError("Weights must be either all positive or all negative")
elif numpy.isreal(weight):
all_positive = weight >= 0
all_negative = weight < 0
else:
raise errors.InvalidWeightError("Weight must be a number or a list/array of numbers.")
if is_conductance or synapse_type == 'excitatory':
if not all_positive:
raise errors.InvalidWeightError("Weights must be positive for conductance-based and/or excitatory synapses")
elif is_conductance == False and synapse_type == 'inhibitory':
if not all_negative:
raise errors.InvalidWeightError("Weights must be negative for current-based, inhibitory synapses")
else: # is_conductance is None. This happens if the cell does not exist on the current node.
logger.debug("Can't check weight, conductance status unknown.")
return weight
def check_delay(delay):
if delay is None:
delay = get_min_delay()
# If the delay is out of the allowed range, we have to throw an error
if delay < get_min_delay() or delay > get_max_delay():
raise errors.ConnectionError("delay (%s) is out of range [%s,%s]" % \
(delay, get_min_delay(), get_max_delay()))
return delay
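# Hedged illustration of the checks above (not executed here): check_weight raises
# InvalidWeightError for mixed-sign weight arrays, e.g.
#   check_weight([0.5, -0.5], 'excitatory', is_conductance=True)   # -> InvalidWeightError
# and check_delay raises ConnectionError for delays outside [min_delay, max_delay].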
# =============================================================================
# Accessing individual neurons
# =============================================================================
class IDMixin(object):
"""
Instead of storing ids as integers, we store them as ID objects,
which allows a syntax like:
p[3,4].tau_m = 20.0
where p is a Population object.
"""
# Simulator ID classes should inherit both from the base type of the ID
# (e.g., int or long) and from IDMixin.
def __getattr__(self, name):
try:
val = self.__getattribute__(name)
except AttributeError:
if name == "parent":
raise Exception("parent is not set")
try:
val = self.get_parameters()[name]
except KeyError:
raise errors.NonExistentParameterError(name,
self.celltype.__class__.__name__,
self.celltype.get_parameter_names())
return val
def __setattr__(self, name, value):
if name == "parent":
object.__setattr__(self, name, value)
elif self.celltype.has_parameter(name):
self.set_parameters(**{name: value})
else:
object.__setattr__(self, name, value)
def set_parameters(self, **parameters):
"""
Set cell parameters, given as a sequence of parameter=value arguments.
"""
# if some of the parameters are computed from the values of other
# parameters, need to get and translate all parameters
if self.local:
if self.is_standard_cell:
computed_parameters = self.celltype.computed_parameters()
have_computed_parameters = numpy.any([p_name in computed_parameters
for p_name in parameters])
if have_computed_parameters:
all_parameters = self.get_parameters()
all_parameters.update(parameters)
parameters = all_parameters
parameters = self.celltype.translate(parameters)
self.set_native_parameters(parameters)
else:
raise errors.NotLocalError("Cannot set parameters for a cell that does not exist on this node.")
def get_parameters(self):
"""Return a dict of all cell parameters."""
if self.local:
parameters = self.get_native_parameters()
if self.is_standard_cell:
parameters = self.celltype.reverse_translate(parameters)
return parameters
else:
raise errors.NotLocalError("Cannot obtain parameters for a cell that does not exist on this node.")
@property
def celltype(self):
return self.parent.celltype
@property
def is_standard_cell(self):
return issubclass(self.celltype.__class__, standardmodels.StandardCellType)
def _set_position(self, pos):
"""
Set the cell position in 3D space.
Cell positions are stored in an array in the parent Population.
"""
assert isinstance(pos, (tuple, numpy.ndarray))
assert len(pos) == 3
self.parent._set_cell_position(self, pos)
def _get_position(self):
"""
Return the cell position in 3D space.
Cell positions are stored in an array in the parent Population, if any,
or within the ID object otherwise. Positions are generated the first
time they are requested and then cached.
"""
return self.parent._get_cell_position(self)
position = property(_get_position, _set_position)
@property
def local(self):
return self.parent.is_local(self)
def inject(self, current_source):
"""Inject current from a current source object into the cell."""
current_source.inject_into([self])
def get_initial_value(self, variable):
"""Get the initial value of a state variable of the cell."""
return self.parent._get_cell_initial_value(self, variable)
def set_initial_value(self, variable, value):
"""Set the initial value of a state variable of the cell."""
self.parent._set_cell_initial_value(self, variable, value)
def as_view(self):
"""Return a PopulationView containing just this cell."""
index = self.parent.id_to_index(self)
return self.parent[index:index+1]
# =============================================================================
# Functions for simulation set-up and control
# =============================================================================
def setup(timestep=DEFAULT_TIMESTEP, min_delay=DEFAULT_MIN_DELAY,
max_delay=DEFAULT_MAX_DELAY, **extra_params):
"""
Initialises/reinitialises the simulator. Any existing network structure is
destroyed.
extra_params contains any keyword arguments that are required by a given
simulator but not by others.
"""
invalid_extra_params = ('mindelay', 'maxdelay', 'dt')
for param in invalid_extra_params:
if param in extra_params:
raise Exception("%s is not a valid argument for setup()" % param)
if min_delay > max_delay:
raise Exception("min_delay has to be less than or equal to max_delay.")
if min_delay < timestep:
raise Exception("min_delay (%g) must be greater than timestep (%g)" % (min_delay, timestep))
def end(compatible_output=True):
"""Do any necessary cleaning up before exiting."""
raise NotImplementedError
def run(simtime):
"""Run the simulation for simtime ms."""
raise NotImplementedError
def reset():
"""
Reset the time to zero, neuron membrane potentials and synaptic weights to
their initial values, and delete any recorded data. The network structure
is not changed, nor is the specification of which neurons to record from.
"""
simulator.reset()
def initialize(cells, variable, value):
assert isinstance(cells, (BasePopulation, Assembly)), type(cells)
cells.initialize(variable, value)
def get_current_time():
"""Return the current time in the simulation."""
return simulator.state.t
def get_time_step():
"""Return the integration time step."""
return simulator.state.dt
def get_min_delay():
"""Return the minimum allowed synaptic delay."""
return simulator.state.min_delay
def get_max_delay():
"""Return the maximum allowed synaptic delay."""
return simulator.state.max_delay
def num_processes():
"""Return the number of MPI processes."""
return simulator.state.num_processes
def rank():
"""Return the MPI rank of the current node."""
return simulator.state.mpi_rank
# =============================================================================
# Low-level API for creating, connecting and recording from individual neurons
# =============================================================================
def build_create(population_class):
def create(cellclass, cellparams=None, n=1):
"""
Create n cells all of the same type.
If n > 1, return a list of cell ids/references.
If n==1, return just the single id.
"""
return population_class(n, cellclass, cellparams) # return the Population or Population.all_cells?
return create
def build_connect(projection_class, connector_class):
def connect(source, target, weight=0.0, delay=None, synapse_type=None,
p=1, rng=None):
"""
Connect a source of spikes to a synaptic target.
source and target can both be individual cells or lists of cells, in
which case all possible connections are made with probability p, using
either the random number generator supplied, or the default rng
otherwise. Weights should be in nA or µS.
"""
if isinstance(source, IDMixin):
source = source.as_view()
if isinstance(target, IDMixin):
target = target.as_view()
connector = connector_class(p_connect=p, weights=weight, delays=delay)
return projection_class(source, target, connector, target=synapse_type, rng=rng)
return connect
def set(cells, param, val=None):
"""
Set one or more parameters of an individual cell or list of cells.
param can be a dict, in which case val should not be supplied, or a string
giving the parameter name, in which case val is the parameter value.
"""
assert isinstance(cells, (BasePopulation, Assembly))
cells.set(param, val)
def build_record(variable, simulator):
def record(source, filename):
"""
Record spikes to a file. source can be an individual cell, a Population,
PopulationView or Assembly.
"""
# would actually like to be able to record to an array and choose later
# whether to write to a file.
if not isinstance(source, (BasePopulation, Assembly)):
source = source.parent
source._record(variable, to_file=filename)
# recorder_list is used by end()
if isinstance(source, BasePopulation):
simulator.recorder_list.append(source.recorders[variable]) # this is a bit hackish - better to add to Population.__del__?
if isinstance(source, Assembly):
for population in source.populations:
simulator.recorder_list.append(population.recorders[variable])
if variable == 'v':
record.__name__ = "record_v"
record.__doc__ = """
Record membrane potential to a file. source can be an individual
cell, a Population, PopulationView or Assembly."""
elif variable == 'gsyn':
record.__name__ = "record_gsyn"
record.__doc__ = """
Record synaptic conductances to a file. source can be an individual
cell, a Population, PopulationView or Assembly."""
return record
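# Hedged illustration (not in the original): a simulator backend would typically expose
#   record      = build_record('spikes', simulator)
#   record_v    = build_record('v', simulator)
#   record_gsyn = build_record('gsyn', simulator)
# so that the module-level record()/record_v()/record_gsyn() functions all share the closure above.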
# =============================================================================
# High-level API for creating, connecting and recording from populations of
# neurons.
# =============================================================================
class BasePopulation(object):
record_filter = None
def __getitem__(self, index):
"""
Return either a single cell (ID object) from the Population, if index
is an integer, or a subset of the cells (PopulationView object), if
index is a slice or array.
Note that __getitem__ is called when using [] access, e.g.
p = Population(...)
p[2] is equivalent to p.__getitem__(2).
p[3:6] is equivalent to p.__getitem__(slice(3, 6))
"""
if isinstance(index, int):
return self.all_cells[index]
elif isinstance(index, (slice, list, numpy.ndarray)):
return PopulationView(self, index)
elif isinstance(index, tuple):
return PopulationView(self, list(index))
else:
raise TypeError("indices must be integers, slices, lists, arrays or tuples, not %s" % type(index).__name__)
def __len__(self):
"""Return the total number of cells in the population (all nodes)."""
return self.size
@property
def local_size(self):
return len(self.local_cells) # would self._mask_local.sum() be faster?
def __iter__(self):
"""Iterator over cell ids on the local node."""
return iter(self.local_cells)
@property
def conductance_based(self):
return self.celltype.conductance_based
def is_local(self, id):
"""
Determine whether the cell with the given ID exists on the local MPI node.
"""
assert id.parent is self
index = self.id_to_index(id)
return self._mask_local[index]
def all(self):
"""Iterator over cell ids on all nodes."""
return iter(self.all_cells)
def __add__(self, other):
"""
A Population/PopulationView can be added to another Population,
PopulationView or Assembly, returning an Assembly.
"""
assert isinstance(other, BasePopulation)
return Assembly(self, other)
def _get_cell_position(self, id):
index = self.id_to_index(id)
return self.positions[:, index]
def _set_cell_position(self, id, pos):
index = self.id_to_index(id)
self.positions[:, index] = pos
def _get_cell_initial_value(self, id, variable):
assert isinstance(self.initial_values[variable], core.LazyArray)
index = self.id_to_local_index(id)
return self.initial_values[variable][index]
def _set_cell_initial_value(self, id, variable, value):
assert isinstance(self.initial_values[variable], core.LazyArray)
index = self.id_to_local_index(id)
self.initial_values[variable][index] = value
def nearest(self, position):
"""Return the neuron closest to the specified position."""
# doesn't always work correctly if a position is equidistant between
# two neurons, i.e. 0.5 should be rounded up, but it isn't always.
# also doesn't take account of periodic boundary conditions
pos = numpy.array([position] * self.positions.shape[1]).transpose()
dist_arr = (self.positions - pos)**2
distances = dist_arr.sum(axis=0)
nearest = distances.argmin()
return self[nearest]
def sample(self, n, rng=None):
"""
Randomly sample n cells from the Population, and return a PopulationView
object.
"""
assert isinstance(n, int)
if not rng:
rng = random.NumpyRNG()
indices = rng.permutation(numpy.arange(len(self)))[0:n]
logger.debug("The %d cells recorded have indices %s" % (n, indices))
logger.debug("%s.sample(%s)", self.label, n)
return PopulationView(self, indices)
def get(self, parameter_name, gather=False):
"""
Get the values of a parameter for every local cell in the population.
"""
# if all the cells have the same value for this parameter, should
# we return just the number, rather than an array?
if hasattr(self, "_get_array"):
values = self._get_array(parameter_name).tolist()
else:
values = [getattr(cell, parameter_name) for cell in self] # list or array?
if gather == True and num_processes() > 1:
all_values = { rank(): values }
all_indices = { rank(): self.local_cells.tolist()}
all_values = recording.gather_dict(all_values)
all_indices = recording.gather_dict(all_indices)
if rank() == 0:
values = reduce(operator.add, all_values.values())
indices = reduce(operator.add, all_indices.values())
idx = numpy.argsort(indices)
values = numpy.array(values)[idx]
return values
def set(self, param, val=None):
"""
Set one or more parameters for every cell in the population. param
can be a dict, in which case val should not be supplied, or a string
giving the parameter name, in which case val is the parameter value.
val can be a numeric value, or list of such (e.g. for setting spike
times).
e.g. p.set("tau_m",20.0).
p.set({'tau_m':20,'v_rest':-65})
"""
#"""
# -- Proposed change to arguments --
#Set one or more parameters for every cell in the population.
#
#Each value may be a single number or a list/array of numbers of the same
#size as the population. If the parameter itself takes lists/arrays as
#values (e.g. spike times), then the value provided may be either a
#single lists/1D array, a list of lists/1D arrays, or a 2D array.
#
#e.g. p.set(tau_m=20.0).
# p.set(tau_m=20, v_rest=[-65.0, -65.3, ... , -67.2])
#"""
if isinstance(param, str):
param_dict = {param: val}
elif isinstance(param, dict):
param_dict = param
else:
raise errors.InvalidParameterValueError
param_dict = self.celltype.checkParameters(param_dict, with_defaults=False)
logger.debug("%s.set(%s)", self.label, param_dict)
if hasattr(self, "_set_array"):
self._set_array(**param_dict)
else:
for cell in self:
cell.set_parameters(**param_dict)
def tset(self, parametername, value_array):
"""
'Topographic' set. Set the value of parametername to the values in
value_array, which must have the same dimensions as the Population.
"""
#"""
# -- Proposed change to arguments --
#'Topographic' set. Each value in parameters should be a function that
#accepts arguments x,y,z and returns a single value.
#"""
if parametername not in self.celltype.get_parameter_names():
raise errors.NonExistentParameterError(parametername, self.celltype, self.celltype.get_parameter_names())
if (self.size,) == value_array.shape: # the values are numbers or non-array objects
local_values = value_array[self._mask_local]
assert local_values.size == self.local_cells.size, "%d != %d" % (local_values.size, self.local_cells.size)
elif len(value_array.shape) == 2: # the values are themselves 1D arrays
if value_array.shape[0] != self.size:
raise errors.InvalidDimensionsError("Population: %d, value_array first dimension: %s" % (self.size,
value_array.shape[0]))
local_values = value_array[self._mask_local] # not sure this works
else:
raise errors.InvalidDimensionsError("Population: %d, value_array: %s" % (self.size,
str(value_array.shape)))
assert local_values.shape[0] == self.local_cells.size, "%d != %d" % (local_values.size, self.local_cells.size)
try:
logger.debug("%s.tset('%s', array(shape=%s, min=%s, max=%s))",
self.label, parametername, value_array.shape,
value_array.min(), value_array.max())
except TypeError: # min() and max() won't work for non-numeric values
logger.debug("%s.tset('%s', non_numeric_array(shape=%s))",
self.label, parametername, value_array.shape)
# Set the values for each cell
if hasattr(self, "_set_array"):
self._set_array(**{parametername: local_values})
else:
for cell, val in zip(self, local_values):
setattr(cell, parametername, val)
def rset(self, parametername, rand_distr):
"""
'Random' set. Set the value of parametername to a value taken from
rand_distr, which should be a RandomDistribution object.
"""
# Note that we generate enough random numbers for all cells on all nodes
# but use only those relevant to this node. This ensures that the
# sequence of random numbers does not depend on the number of nodes,
# provided that the same rng with the same seed is used on each node.
logger.debug("%s.rset('%s', %s)", self.label, parametername, rand_distr)
if isinstance(rand_distr.rng, random.NativeRNG):
self._native_rset(parametername, rand_distr)
else:
rarr = rand_distr.next(n=self.all_cells.size, mask_local=False)
rarr = numpy.array(rarr) # isn't rarr already an array?
assert rarr.size == self.size, "%s != %s" % (rarr.size, self.size)
self.tset(parametername, rarr)
def _call(self, methodname, arguments):
"""
Call the method methodname(arguments) for every cell in the population.
e.g. p.call("set_background","0.1") if the cell class has a method
set_background().
"""
raise NotImplementedError()
def _tcall(self, methodname, objarr):
"""
`Topographic' call. Call the method methodname() for every cell in the
population. The argument to the method depends on the coordinates of
the cell. objarr is an array with the same dimensions as the
Population.
e.g. p.tcall("memb_init", vinitArray) calls
p.cell[i][j].memb_init(vInitArray[i][j]) for all i,j.
"""
raise NotImplementedError()
def randomInit(self, rand_distr):
"""
Set initial membrane potentials for all the cells in the population to
random values.
"""
warn("The randomInit() method is deprecated, and will be removed in a future release. Use initialize('v', rand_distr) instead.")
self.initialize('v', rand_distr)
def initialize(self, variable, value):
"""
Set initial values of state variables, e.g. the membrane potential.
`value` may either be a numeric value (all neurons set to the same
value) or a `RandomDistribution` object (each neuron gets a
different value)
"""
logger.debug("In Population '%s', initialising %s to %s" % (self.label, variable, value))
if isinstance(value, random.RandomDistribution):
initial_value = value.next(n=self.all_cells.size, mask_local=self._mask_local)
if self.local_size > 1:
assert len(initial_value) == self.local_size, "%d != %d" % (len(initial_value), self.local_size)
else:
initial_value = value
self.initial_values[variable] = core.LazyArray(initial_value, shape=(self.local_size,))
if hasattr(self, "_set_initial_value_array"):
self._set_initial_value_array(variable, initial_value)
else:
if isinstance(value, random.RandomDistribution):
for cell, val in zip(self, initial_value):
cell.set_initial_value(variable, val)
else:
for cell in self: # only on local node
cell.set_initial_value(variable, initial_value)
def can_record(self, variable):
"""Determine whether `variable` can be recorded from this population."""
return (variable in self.celltype.recordable)
def _add_recorder(self, variable):
"""Create a new Recorder for the supplied variable."""
assert variable not in self.recorders
if hasattr(self, "parent"):
population = self.grandparent
else:
population = self
logger.debug("Adding recorder for %s to %s" % (variable, self.label))
population.recorders[variable] = population.recorder_class(variable,
population=population)
def _record(self, variable, to_file=True):
"""
Private method called by record() and record_v().
"""
if variable is None: # reset the list of things to record
# note that if _record(None) is called on a view of a population
# recording will be reset for the entire population, not just the view
for recorder in self.recorders.values():
recorder.reset()
self.recorders = {}
else:
if not self.can_record(variable):
raise errors.RecordingError(variable, self.celltype)
logger.debug("%s.record('%s')", self.label, variable)
if variable not in self.recorders:
self._add_recorder(variable)
if self.record_filter is not None:
self.recorders[variable].record(self.record_filter)
else:
self.recorders[variable].record(self.all_cells)
if isinstance(to_file, basestring):
self.recorders[variable].file = to_file
def record(self, to_file=True):
"""
Record spikes from all cells in the Population.
"""
self._record('spikes', to_file)
def record_v(self, to_file=True):
"""
Record the membrane potential for all cells in the Population.
"""
self._record('v', to_file)
def record_gsyn(self, to_file=True):
"""
Record synaptic conductances for all cells in the Population.
"""
self._record('gsyn', to_file)
def printSpikes(self, file, gather=True, compatible_output=True):
"""
Write spike times to file.
file should be either a filename or a PyNN File object.
If compatible_output is True, the format is "spiketime cell_id",
where cell_id is the index of the cell counting along rows and down
columns (and the extension of that for 3-D).
This allows easy plotting of a `raster' plot of spiketimes, with one
line for each cell.
The timestep, first id, last id, and number of data points per cell are
written in a header, indicated by a '#' at the beginning of the line.
If compatible_output is False, the raw format produced by the simulator
is used. This may be faster, since it avoids any post-processing of the
spike files.
For parallel simulators, if gather is True, all data will be gathered
to the master node and a single output file created there. Otherwise, a
file will be written on each node, containing only the cells simulated
on that node.
"""
self.recorders['spikes'].write(file, gather, compatible_output, self.record_filter)
def getSpikes(self, gather=True, compatible_output=True):
"""
Return a 2-column numpy array containing cell ids and spike times for
recorded cells.
Useful for small populations, for example for single neuron Monte-Carlo.
"""
return self.recorders['spikes'].get(gather, compatible_output, self.record_filter)
# if we haven't called record(), this will give a KeyError. A more
# informative error message would be nice.
def print_v(self, file, gather=True, compatible_output=True):
"""
Write membrane potential traces to file.
file should be either a filename or a PyNN File object.
If compatible_output is True, the format is "v cell_id",
where cell_id is the index of the cell counting along rows and down
columns (and the extension of that for 3-D).
The timestep, first id, last id, and number of data points per cell are
written in a header, indicated by a '#' at the beginning of the line.
If compatible_output is False, the raw format produced by the simulator
is used. This may be faster, since it avoids any post-processing of the
voltage files.
For parallel simulators, if gather is True, all data will be gathered
to the master node and a single output file created there. Otherwise, a
file will be written on each node, containing only the cells simulated
on that node.
"""
self.recorders['v'].write(file, gather, compatible_output, self.record_filter)
def get_v(self, gather=True, compatible_output=True):
"""
Return a 2-column numpy array containing cell ids and Vm for
recorded cells.
"""
return self.recorders['v'].get(gather, compatible_output, self.record_filter)
def print_gsyn(self, file, gather=True, compatible_output=True):
"""
Write synaptic conductance traces to file.
file should be either a filename or a PyNN File object.
If compatible_output is True, the format is "t g cell_id",
where cell_id is the index of the cell counting along rows and down
columns (and the extension of that for 3-D).
The timestep, first id, last id, and number of data points per cell are
written in a header, indicated by a '#' at the beginning of the line.
If compatible_output is False, the raw format produced by the simulator
is used. This may be faster, since it avoids any post-processing of the
voltage files.
"""
self.recorders['gsyn'].write(file, gather, compatible_output, self.record_filter)
def get_gsyn(self, gather=True, compatible_output=True):
"""
Return a 3-column numpy array containing cell ids and synaptic
conductances for recorded cells.
"""
return self.recorders['gsyn'].get(gather, compatible_output, self.record_filter)
def get_spike_counts(self, gather=True):
"""
Returns the number of spikes for each neuron.
"""
return self.recorders['spikes'].count(gather, self.record_filter)
def meanSpikeCount(self, gather=True):
"""
Returns the mean number of spikes per neuron.
"""
spike_counts = self.recorders['spikes'].count(gather, self.record_filter)
total_spikes = sum(spike_counts.values())
if rank() == 0 or not gather: # should maybe use allgather, and get the numbers on all nodes
if len(spike_counts) > 0:
return float(total_spikes)/len(spike_counts)
else:
return 0
else:
return numpy.nan
def inject(self, current_source):
"""
Connect a current source to all cells in the Population.
"""
if not self.celltype.injectable:
raise TypeError("Can't inject current into a spike source.")
current_source.inject_into(self)
def save_positions(self, file):
"""
Save positions to file. The output format is id x y z
"""
# first column should probably be indices, not ids. This would make it
# simulator independent.
if isinstance(file, basestring):
file = files.StandardTextFile(file, mode='w')
cells = self.all_cells
result = numpy.empty((len(cells), 4))
result[:,0] = cells
result[:,1:4] = self.positions.T
if rank() == 0:
file.write(result, {'population' : self.label})
file.close()
class Population(BasePopulation):
"""
A group of neurons all of the same type.
"""
nPop = 0
def __init__(self, size, cellclass, cellparams=None, structure=None,
label=None):
"""
Create a population of neurons all of the same type.
size - number of cells in the Population. For backwards-compatibility,
n may also be a tuple giving the dimensions of a grid,
e.g. n=(10,10) is equivalent to n=100 with structure=Grid2D()
cellclass should either be a standardized cell class (a class inheriting
from common.standardmodels.StandardCellType) or a string giving the
name of the simulator-specific model that makes up the population.
cellparams should be a dict which is passed to the neuron model
constructor
structure should be a Structure instance.
label is an optional name for the population.
"""
if not isinstance(size, int): # also allow a single integer, for a 1D population
assert isinstance(size, tuple), "`size` must be an integer or a tuple of ints. You have supplied a %s" % type(size)
# check the things inside are ints
for e in size:
assert isinstance(e, int), "`size` must be an integer or a tuple of ints. Element '%s' is not an int" % str(e)
assert structure is None, "If you specify `size` as a tuple you may not specify structure."
if len(size) == 1:
structure = space.Line()
elif len(size) == 2:
nx, ny = size
structure = space.Grid2D(nx/float(ny))
elif len(size) == 3:
nx, ny, nz = size
structure = space.Grid3D(nx/float(ny), nx/float(nz))
else:
raise Exception("A maximum of 3 dimensions is allowed. What do you think this is, string theory?")
size = reduce(operator.mul, size)
self.size = size
self.label = label or 'population%d' % Population.nPop
self.celltype = cellclass(cellparams)
self._structure = structure or space.Line()
self._positions = None
self._is_sorted = True
# Build the arrays of cell ids
# Cells on the local node are represented as ID objects, other cells by integers
# All are stored in a single numpy array for easy lookup by address
# The local cells are also stored in a list, for easy iteration
self._create_cells(cellclass, cellparams, size)
self.initial_values = {}
for variable, value in self.celltype.default_initial_values.items():
self.initialize(variable, value)
self.recorders = {}
Population.nPop += 1
@property
def local_cells(self):
return self.all_cells[self._mask_local]
@property
def cell(self):
warn("The `Population.cell` attribute is not an official part of the \
API, and its use is deprecated. It will be removed in a future \
release. All uses of `cell` may be replaced by `all_cells`")
return self.all_cells
def id_to_index(self, id):
"""
Given the ID(s) of cell(s) in the Population, return its (their) index
(order in the Population).
>>> assert p.id_to_index(p[5]) == 5
>>> assert p.id_to_index(p.index([1,2,3])) == [1,2,3]
"""
if not numpy.iterable(id):
if not self.first_id <= id <= self.last_id:
raise ValueError("id should be in the range [%d,%d], actually %d" % (self.first_id, self.last_id, id))
return int(id - self.first_id) # this assumes ids are consecutive
else:
if isinstance(id, PopulationView):
id = id.all_cells
id = numpy.array(id)
if (self.first_id > id.min()) or (self.last_id < id.max()):
raise ValueError("ids should be in the range [%d,%d], actually [%d, %d]" % (self.first_id, self.last_id, id.min(), id.max()))
return (id - self.first_id).astype(int) # this assumes ids are consecutive
def id_to_local_index(self, id):
"""
Given the ID(s) of cell(s) in the Population, return its (their) index
(order in the Population), counting only cells on the local MPI node.
"""
if num_processes() > 1:
return self.local_cells.tolist().index(id) # probably very slow
#return numpy.nonzero(self.local_cells == id)[0][0] # possibly faster?
# another idea - get global index, use idx-sum(mask_local[:idx])?
else:
return self.id_to_index(id)
def _get_structure(self):
return self._structure
def _set_structure(self, structure):
assert isinstance(structure, space.BaseStructure)
if structure != self._structure:
self._positions = None # setting a new structure invalidates previously calculated positions
self._structure = structure
structure = property(fget=_get_structure, fset=_set_structure)
# arguably structure should be read-only, i.e. it is not possible to change it after Population creation
@property
def position_generator(self):
def gen(i):
return self.positions[:,i]
return gen
def _get_positions(self):
"""
Try to return self._positions. If it does not exist, create it and then
return it.
"""
if self._positions is None:
self._positions = self.structure.generate_positions(self.size)
assert self._positions.shape == (3, self.size)
return self._positions
def _set_positions(self, pos_array):
assert isinstance(pos_array, numpy.ndarray)
assert pos_array.shape == (3, self.size), "%s != %s" % (pos_array.shape, (3, self.size))
self._positions = pos_array.copy() # take a copy in case pos_array is changed later
self._structure = None # explicitly setting positions destroys any previous structure
positions = property(_get_positions, _set_positions,
"""A 3xN array (where N is the number of neurons in the Population)
giving the x,y,z coordinates of all the neurons (soma, in the
case of non-point models).""")
def describe(self, template='population_default.txt', engine='default'):
"""
Returns a human-readable description of the population.
The output may be customized by specifying a different template
together with an associated template engine (see ``pyNN.descriptions``).
If template is None, then a dictionary containing the template context
will be returned.
"""
context = {
"label": self.label,
"celltype": self.celltype.describe(template=None),
"structure": None,
"size": self.size,
"size_local": len(self.local_cells),
"first_id": self.first_id,
"last_id": self.last_id,
}
if len(self.local_cells) > 0:
first_id = self.local_cells[0]
context.update({
"local_first_id": first_id,
"cell_parameters": first_id.get_parameters(),
})
if self.structure:
context["structure"] = self.structure.describe(template=None)
return descriptions.render(engine, template, context)
class PopulationView(BasePopulation):
"""
A view of a subset of neurons within a Population.
In most ways, Populations and PopulationViews have the same behaviour, i.e.
they can be recorded, connected with Projections, etc. It should be noted
that any changes to neurons in a PopulationView will be reflected in the
parent Population and vice versa.
It is possible to have views of views.
"""
def __init__(self, parent, selector, label=None):
"""
Create a view of a subset of neurons within a parent Population or
PopulationView.
selector - a slice or numpy mask array. The mask array should either be
a boolean array of the same size as the parent, or an
integer array containing cell indices, i.e. if p.size == 5,
PopulationView(p, array([False, False, True, False, True]))
PopulationView(p, array([2,4]))
PopulationView(p, slice(2,5,2))
will all create the same view.
"""
self.parent = parent
self.mask = selector # later we can have fancier selectors, for now we just have numpy masks
self.label = label or "view of %s with mask %s" % (parent.label, self.mask)
# maybe just redefine __getattr__ instead of the following...
self.celltype = self.parent.celltype
# If the mask is a slice, IDs will be consecutive without duplication.
# If not, then we need to remove duplicated IDs
if not isinstance(self.mask, slice):
if isinstance(self.mask, list):
self.mask = numpy.array(self.mask)
if self.mask.dtype is numpy.dtype('bool'):
if len(self.mask) != len(self.parent):
raise Exception("Boolean masks should have the size of Parent Population")
self.mask = numpy.arange(len(self.parent))[self.mask]
if len(numpy.unique(self.mask)) != len(self.mask):
logging.warning("PopulationView can contain only once each ID, duplicated IDs are remove")
self.mask = numpy.unique(self.mask)
self.all_cells = self.parent.all_cells[self.mask] # do we need to ensure this is ordered?
idx = numpy.argsort(self.all_cells)
self._is_sorted = numpy.all(idx == numpy.arange(len(self.all_cells)))
self.size = len(self.all_cells)
self._mask_local = self.parent._mask_local[self.mask]
self.local_cells = self.all_cells[self._mask_local]
self.first_id = numpy.min(self.all_cells)  # numpy.min/max do not require all_cells to be sorted
self.last_id = numpy.max(self.all_cells)
self.recorders = self.parent.recorders
self.record_filter = self.all_cells
@property
def initial_values(self):
# this is going to be complex - if we keep initial_values as a dict,
# need to return a dict-like object that takes account of self.mask
raise NotImplementedError
@property
def structure(self):
return self.parent.structure
# should we allow setting structure for a PopulationView? Maybe if the
# parent has some kind of CompositeStructure?
@property
def positions(self):
return self.parent.positions.T[self.mask].T # make positions N,3 instead of 3,N to avoid all this transposing?
def id_to_index(self, id):
"""
Given the ID(s) of cell(s) in the PopulationView, return its/their
index/indices (order in the PopulationView).
>>> assert id_to_index(p.index(5)) == 5
>>> assert id_to_index(p.index([1,2,3])) == [1,2,3]
"""
if not numpy.iterable(id):
if self._is_sorted:
if id not in self.all_cells:
raise IndexError("ID %s not present in the View" %id)
return numpy.searchsorted(self.all_cells, id)
else:
result = numpy.where(self.all_cells == id)[0]
if len(result) == 0:
raise IndexError("ID %s not present in the View" %id)
else:
return result
else:
if self._is_sorted:
return numpy.searchsorted(self.all_cells, id)
else:
result = numpy.array([])
for item in id:
data = numpy.where(self.all_cells == item)[0]
if len(data) == 0:
raise IndexError("ID %s not present in the View" %item)
elif len(data) > 1:
raise Exception("ID %s is duplicated in the View" %item)
else:
result = numpy.append(result, data)
return result
@property
def grandparent(self):
"""
Returns the parent Population at the root of the tree (since the
immediate parent may itself be a PopulationView).
The name "grandparent" is of course a little misleading, as it could
be just the parent, or the great, great, great, ..., grandparent.
"""
if hasattr(self.parent, "parent"):
return self.parent.grandparent
else:
return self.parent
def describe(self, template='populationview_default.txt', engine='default'):
"""
Returns a human-readable description of the population view.
The output may be customized by specifying a different template
together with an associated template engine (see ``pyNN.descriptions``).
If template is None, then a dictionary containing the template context
will be returned.
"""
context = {"label": self.label,
"parent": self.parent.label,
"mask": self.mask,
"size": self.size}
return descriptions.render(engine, template, context)
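# Example (hedged sketch, not part of the original module): the three equivalent
# selector forms described in PopulationView.__init__ above, plus id_to_index and
# grandparent for a view of a view.  Backend and cell type are illustrative
# assumptions only.
def _example_population_view():
    import numpy
    import pyNN.nest as sim
    sim.setup()
    p = sim.Population(5, sim.IF_cond_exp)
    v1 = sim.PopulationView(p, numpy.array([False, False, True, False, True]))
    v2 = sim.PopulationView(p, numpy.array([2, 4]))
    v3 = sim.PopulationView(p, slice(2, 5, 2))
    assert v1.size == v2.size == v3.size == 2
    # indices are positions within the view, not within the parent
    assert v2.id_to_index(v2.all_cells[1]) == 1
    # a view of a view still reports the root Population as its grandparent
    v4 = sim.PopulationView(v1, slice(0, 1))
    assert v4.grandparent is p
    return v1, v2, v3, v4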
# =============================================================================
class Assembly(object):
"""
A group of neurons, may be heterogeneous, in contrast to a Population where
all the neurons are of the same type.
"""
count = 0
def __init__(self, *populations, **kwargs):
"""
Create an Assembly of Populations and/or PopulationViews.
kwargs may contain a keyword argument 'label'.
"""
if kwargs:
assert kwargs.keys() == ['label']
self.populations = []
for p in populations:
self._insert(p)
self.label = kwargs.get('label', 'assembly%d' % Assembly.count)
assert isinstance(self.label, basestring), "label must be a string or unicode"
Assembly.count += 1
def _insert(self, element):
if not isinstance(element, BasePopulation):
raise TypeError("argument is a %s, not a Population." % type(element).__name__)
if isinstance(element, PopulationView):
if not element.parent in self.populations:
double = False
for p in self.populations:
data = numpy.concatenate((p.all_cells, element.all_cells))
if len(numpy.unique(data))!= len(p.all_cells) + len(element.all_cells):
logging.warning('Adding a PopulationView whose cells are already present in the Assembly is not possible')
double = True #Should we automatically remove duplicated IDs ?
break
if not double:
self.populations.append(element)
else:
logging.warning('Adding a PopulationView to an Assembly that already contains its parent Population is not possible')
elif isinstance(element, BasePopulation):
if not element in self.populations:
self.populations.append(element)
else:
logging.warning('Adding the same Population twice to an Assembly is not possible')
@property
def local_cells(self):
result = self.populations[0].local_cells
for p in self.populations[1:]:
result = numpy.concatenate((result, p.local_cells))
return result
@property
def all_cells(self):
result = self.populations[0].all_cells
for p in self.populations[1:]:
result = numpy.concatenate((result, p.all_cells))
return result
def all(self):
"""Iterator over cell ids on all nodes."""
return iter(self.all_cells)
@property
def _is_sorted(self):
idx = numpy.argsort(self.all_cells)
return numpy.all(idx == numpy.arange(len(self.all_cells)))
@property
def _homogeneous_synapses(self):
syn = is_conductance(self.populations[0].all_cells[0])
for p in self.populations[1:]:
if syn != is_conductance(p.all_cells[0]):
return False
return True
@property
def conductance_based(self):
return all(p.celltype.conductance_based for p in self.populations)
@property
def _mask_local(self):
result = self.populations[0]._mask_local
for p in self.populations[1:]:
result = numpy.concatenate((result, p._mask_local))
return result
@property
def first_id(self):
return numpy.min(self.all_cells)
@property
def last_id(self):
        return numpy.max(self.all_cells)
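# Example (hedged sketch, not part of the original module): grouping two
# homogeneous Populations into one heterogeneous Assembly, as described in the
# Assembly docstring above.  Backend and cell types are illustrative assumptions.
def _example_assembly():
    import pyNN.nest as sim
    sim.setup()
    exc = sim.Population(80, sim.IF_cond_exp)
    inh = sim.Population(20, sim.IF_curr_exp)
    net = sim.Assembly(exc, inh, label="exc+inh")
    # adding the same Population (or a view of it) again would only log a warning
    assert net.all_cells.size == 100
    assert net.first_id <= net.last_id
    return net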
import networkx as nx
import dgl
import random
import torch
import hashlib
import numpy as np
import scipy.sparse as sp
from numpy.linalg import inv
import torch.nn as nn
# PGNN RPE
def single_source_shortest_path_length_range(graph, node_range, cutoff):
dists_dict = {}
for node in node_range:
dists_dict[node] = nx.single_source_shortest_path_length(graph, node, cutoff)
return dists_dict
def all_pairs_shortest_path_length_parallel(graph, cutoff=None, num_workers=4):
    # NOTE: despite its name, this version runs serially; num_workers is currently
    # unused, and shuffling the nodes only matters once the work is split across workers.
    nodes = list(graph.nodes)
    random.shuffle(nodes)
    dists_dict = single_source_shortest_path_length_range(graph, nodes, cutoff)
    return dists_dict
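# Example (hedged sketch, not part of the original source): computing the
# all-pairs shortest-path dictionary for a small toy graph.
def _example_all_pairs():
    g = nx.path_graph(5)   # path 0-1-2-3-4
    dists = all_pairs_shortest_path_length_parallel(g, cutoff=None)
    assert dists[0][4] == 4   # four hops from node 0 to node 4
    return dists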
def precompute_dist_data(edge_index, num_nodes, approximate=0):
graph = nx.Graph()
edge_list = edge_index.transpose(1,0).tolist()
graph.add_edges_from(edge_list)
n = num_nodes
    dists_array = np.zeros((n, n))
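# Hedged sketch (an assumption, not from the original source): precompute_dist_data
# is truncated above, so this shows one plausible way the distance matrix could be
# filled from the shortest-path dictionary, storing 1/(hop count + 1) so that closer
# node pairs receive larger values.
def _fill_dist_array_sketch(graph, n, cutoff=None):
    dists_array = np.zeros((n, n))
    dists_dict = all_pairs_shortest_path_length_parallel(graph, cutoff=cutoff)
    for i, neighbours in dists_dict.items():
        for j, d in neighbours.items():
            dists_array[i, j] = 1.0 / (d + 1)
    return dists_array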