import unittest
from itertools import product
from typing import Optional
import mock
import numpy as np
import pytest
from tensorkit import tensor as T
from tensorkit import *
from tensorkit.distributions import *
from tensorkit.distributions.normal import BaseNormal
from tensorkit.distributions.utils import copy_distribution
from tests.helper import *
class UnitNormalTestCase(TestCase):
def test_construct(self):
for shape, event_ndims, dtype in \
product(([], [2, 3]), range(0, 3), float_dtypes):
if event_ndims > len(shape):
continue
normal = UnitNormal(shape, dtype=dtype, event_ndims=event_ndims)
self.assertEqual(normal.value_shape, shape)
self.assertEqual(normal.dtype, dtype)
self.assertEqual(normal.event_ndims, event_ndims)
assert_equal(normal.mean, np.zeros(shape))
assert_equal(normal.std, np.ones(shape))
assert_equal(normal.logstd, np.zeros(shape))
def test_copy(self):
shape = [2, 3]
normal = UnitNormal(shape=[2, 3], event_ndims=1, dtype=T.float32)
# read out mean, std and logstd, to ensure these cached attrs are generated
assert_equal(normal.mean, np.zeros(shape))
assert_equal(normal.std, np.ones(shape))
assert_equal(normal.logstd, np.zeros(shape))
# same dtype and shape, the cached attrs are copied
normal2 = normal.copy(event_ndims=2)
self.assertIsInstance(normal2, UnitNormal)
self.assertEqual(normal2.dtype, T.float32)
self.assertEqual(normal2.value_shape, [2, 3])
self.assertEqual(normal2.event_ndims, 2)
for key in ('mean', 'std', 'logstd'):
self.assertIs(getattr(normal2, key), getattr(normal, key))
# shape mismatch, no copy cached attrs
normal2 = normal.copy(shape=[3])
self.assertIsInstance(normal2, UnitNormal)
self.assertEqual(normal2.dtype, T.float32)
self.assertEqual(normal2.value_shape, [3])
self.assertEqual(normal2.event_ndims, 1)
for key in ('mean', 'std', 'logstd'):
self.assertIsNot(getattr(normal2, key), getattr(normal, key))
# dtype mismatch, no copy cached attrs
normal2 = normal.copy(dtype=T.float64)
self.assertIsInstance(normal2, UnitNormal)
self.assertEqual(normal2.dtype, T.float64)
self.assertEqual(normal2.value_shape, [2, 3])
self.assertEqual(normal2.event_ndims, 1)
for key in ('mean', 'std', 'logstd'):
self.assertIsNot(getattr(normal2, key), getattr(normal, key))
def test_sample_and_log_prob(self):
for dtype in float_dtypes:
normal = UnitNormal(shape=[2, 3, 4], event_ndims=1, dtype=dtype)
# sample(n_samples=None)
t = normal.sample()
self.assertIsInstance(t, StochasticTensor)
self.assertIs(t.distribution, normal)
self.assertEqual(t.n_samples, None)
self.assertEqual(t.group_ndims, 0)
self.assertEqual(t.reparameterized, True)
self.assertIsInstance(t.tensor, T.Tensor)
self.assertEqual(T.get_dtype(t.tensor), dtype)
self.assertEqual(T.shape(t.tensor), [2, 3, 4])
for log_pdf in [t.log_prob(), normal.log_prob(t)]:
assert_allclose(
log_pdf,
T.random.randn_log_pdf(given=t.tensor, group_ndims=1)
)
# sample(n_samples=5)
t = normal.sample(n_samples=5, group_ndims=-1, reparameterized=False)
self.assertIsInstance(t, StochasticTensor)
self.assertIs(t.distribution, normal)
self.assertEqual(t.n_samples, 5)
self.assertEqual(t.group_ndims, -1)
self.assertEqual(t.reparameterized, False)
self.assertIsInstance(t.tensor, T.Tensor)
self.assertEqual(T.get_dtype(t.tensor), dtype)
self.assertEqual(T.shape(t.tensor), [5, 2, 3, 4])
for log_pdf in [t.log_prob(-1), normal.log_prob(t, -1)]:
assert_allclose(
log_pdf,
T.random.randn_log_pdf(given=t.tensor, group_ndims=0)
)
class _MyBaseNormal(BaseNormal):
_extra_args = ('xyz',)
def __init__(self,
mean: T.Tensor,
std: Optional[T.Tensor] = None,
*,
logstd: Optional[T.Tensor] = None,
reparameterized: bool = True,
event_ndims: int = 0,
device: Optional[str] = None,
validate_tensors: Optional[bool] = None,
xyz: int = 0):
super().__init__(
mean=mean, std=std, logstd=logstd, reparameterized=reparameterized,
event_ndims=event_ndims, device=device, validate_tensors=validate_tensors
)
self.xyz = xyz
class NormalTestCase(TestCase):
def test_construct(self):
mean = np.random.randn(3, 4)
logstd = np.random.randn(2, 3, 4)
std = np.exp(logstd)
for dtype in float_dtypes:
mean_t = T.as_tensor(mean, dtype=dtype)
std_t = T.as_tensor(std, dtype=dtype)
logstd_t = T.as_tensor(logstd, dtype=dtype)
mutual_params = {'std': std_t, 'logstd': logstd_t}
# construct from mean & std/logstd
for key, val in mutual_params.items():
other_key = [k for k in mutual_params if k != key][0]
normal = _MyBaseNormal(mean=mean_t, event_ndims=1, **{key: val})
self.assertEqual(normal.continuous, True)
self.assertEqual(normal.reparameterized, True)
self.assertEqual(normal.min_event_ndims, 0)
self.assertEqual(normal.event_ndims, 1)
self.assertIs(normal.mean, mean_t)
self.assertIs(getattr(normal, key), val)
assert_allclose(
getattr(normal, other_key),
mutual_params[other_key],
rtol=1e-4
)
self.assertEqual(normal._mutual_params, {key: val})
# mean and std/logstd must have the same dtype
for other_dtype in float_dtypes:
if other_dtype != dtype:
other_val = T.cast(val, other_dtype)
with pytest.raises(ValueError,
match=f'`{key}.dtype` != `mean.'
f'dtype`: {other_dtype} vs '
f'{dtype}'):
_ = _MyBaseNormal(mean=mean_t, **{key: other_val})
# must specify either std or logstd, but not both
with pytest.raises(ValueError,
match='Either `std` or `logstd` must be '
'specified, but not both.'):
_ = _MyBaseNormal(mean=mean_t, std=std_t, logstd=logstd_t)
with pytest.raises(ValueError,
match='Either `std` or `logstd` must be '
'specified, but not both.'):
_ = _MyBaseNormal(mean=mean_t, std=None, logstd=None)
# nan test
with pytest.raises(Exception,
match='Infinity or NaN value encountered'):
_ = _MyBaseNormal(mean=T.as_tensor(np.nan, dtype=dtype),
logstd=logstd_t, validate_tensors=True)
for key, val in mutual_params.items():
with pytest.raises(Exception,
match='Infinity or NaN value encountered'):
_ = _MyBaseNormal(mean=mean_t, validate_tensors=True,
**{key: T.as_tensor(np.nan, dtype=dtype)})
normal = _MyBaseNormal(mean=mean_t, std=T.zeros_like(std_t),
validate_tensors=True)
with pytest.raises(Exception,
match='Infinity or NaN value encountered'):
_ = normal.logstd
def test_copy(self):
mean = np.random.randn(3, 4)
logstd = np.random.randn(2, 3, 4)
mean_t = T.as_tensor(mean)
logstd_t = T.as_tensor(logstd)
normal = _MyBaseNormal(mean=mean_t, logstd=logstd_t, event_ndims=1,
xyz=123, reparameterized=False)
self.assertEqual(normal.xyz, 123)
with mock.patch('tensorkit.distributions.normal.copy_distribution',
wraps=copy_distribution) as f_copy:
normal2 = normal.copy(event_ndims=2)
self.assertIsInstance(normal2, _MyBaseNormal)
self.assertEqual(normal2.xyz, 123)
self.assertIs(normal2.mean, normal.mean)
self.assertIs(normal2.logstd, normal.logstd)
self.assertEqual(normal2.event_ndims, 2)
self.assertEqual(normal2.reparameterized, False)
self.assertEqual(f_copy.call_args, ((), {
'cls': _MyBaseNormal,
'base': normal,
'attrs': ('mean', 'reparameterized', 'event_ndims',
'device', 'validate_tensors', 'xyz'),
'mutual_attrs': (('std', 'logstd'),),
'original_mutual_params': {'logstd': normal.logstd},
'overrided_params': {'event_ndims': 2},
}))
def test_Normal(self):
mean = np.random.randn(3, 4)
logstd = np.random.randn(2, 3, 4)
# -*- coding: utf-8 -*-
"""
Created on Mon Jun 1 22:15:48 2015
@author: ajaver
"""
import os
import numpy as np
import pandas as pd
import tables
from scipy.interpolate import RectBivariateSpline
from scipy.interpolate import interp1d
from scipy.signal import savgol_filter
from tierpsy.analysis.ske_create.helperIterROI import getWormROI
from tierpsy.helper.params import min_num_skel_defaults
from tierpsy.helper.misc import TimeCounter, print_flush, save_modified_table
def smoothSkeletons(
skeleton,
length_resampling=131,
smooth_win=11,
pol_degree=3):
xx = savgol_filter(skeleton[:, 0], smooth_win, pol_degree)
yy = savgol_filter(skeleton[:, 1], smooth_win, pol_degree)
ii = np.arange(xx.size)
ii_new = np.linspace(0, xx.size - 1, length_resampling)
fx = interp1d(ii, xx)
fy = interp1d(ii, yy)
xx_new = fx(ii_new)
yy_new = fy(ii_new)
skel_new = np.vstack((xx_new, yy_new)).T
return skel_new
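# Illustrative usage sketch (added; the input skeleton below is hypothetical,
# not part of the original pipeline). Given an (N, 2) array of skeleton x/y
# coordinates, the result is smoothed and resampled to `length_resampling` points:
#
#     skel = np.column_stack((np.linspace(0, 100, 49),
#                             10 * np.sin(np.linspace(0, np.pi, 49))))
#     skel_smoothed = smoothSkeletons(skel, length_resampling=131,
#                                     smooth_win=11, pol_degree=3)
#     assert skel_smoothed.shape == (131, 2)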
def getStraightenWormInt(worm_img, skeleton, half_width, width_resampling):
'''
Code to straighten the worm image.
worm_img - image containing the worm
skeleton - worm skeleton
half_width - half width of the worm; if it is -1, it will be estimated from cnt_widths
width_resampling - number of data points used in the intensity map along the worm width
length_resampling - number of data points used in the intensity map along the worm length
ang_smooth_win - window used to calculate the skeleton angles.
A small value will introduce noise, therefore obtaining bad perpendicular segments.
A large value will over smooth the skeleton, therefore not capturing the correct shape.
'''
assert not np.any(np.isnan(skeleton))
dX = np.diff(skeleton[:, 0])
dY = np.diff(skeleton[:, 1])
skel_angles = np.arctan2(dY, dX)
# -*- coding:utf-8 -*-
import os
import sys
import torch
import argparse
import numpy as np
import torch.nn as nn
import PIL.Image as Image
from torchvision import models
from torchvision import transforms
from torch.nn import functional as F
import cv2
import matplotlib as mpl
import os
os.environ['KMP_DUPLICATE_LIB_OK']='True'
mpl.use('Agg')
import matplotlib.pyplot as plt
sys.path.append('..')
from nn_process import save_jpg
INPUT_DIR = 'D:/Projects/python/Acemap-Paper-X-Ray/input/'
OUTPUT_DIR = 'D:/Projects/python/Acemap-Paper-X-Ray/output/'
def get_nn_model():
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
model = models.resnet18(pretrained=False)
model.avgpool = nn.AdaptiveAvgPool2d((1, 1))
num_ftrs = model.fc.in_features
model.fc = nn.Linear(num_ftrs, 2)
ckp_path = OUTPUT_DIR + 'nn_output/model_best.pth.tar'
checkpoint = torch.load(ckp_path)
d = checkpoint['state_dict']
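# The checkpoint was presumably saved from an nn.DataParallel-wrapped model,
# which prefixes every parameter name with 'module.'; strip that prefix so
# the keys match this plain (single-device) ResNet.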
d = {k.replace('module.', ''): v for k, v in d.items()}
model.load_state_dict(d)
model = model.to(device)
return model
def load_img(path):
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
img = Image.open(path)
trans = transforms.Compose([
transforms.Resize(224),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
])
img = trans(img)
img = img.to(device)
img = torch.unsqueeze(img, 0)
return img
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument("--pdf_path", type=str, help="pdf_path")
args = parser.parse_args()
return args
def draw_heatmap(pdf_path, model, save_path="./heatmap.jpg"):
tmp_dir = INPUT_DIR + 'temp/'
if not os.path.exists(tmp_dir):
os.makedirs(tmp_dir)
tgt_path = tmp_dir + 'image.jpg'
save_jpg(pdf_path, tgt_path, tmp_dir)
img_path = tgt_path
img = load_img(img_path)
model.eval()
features = nn.Sequential(
model.conv1,
model.bn1,
model.relu,
model.maxpool,
model.layer1,
model.layer2,
model.layer3,
model.layer4,
)(img)
pooled = model.avgpool(features).view(1, 512)
output = model.fc(pooled)
def extract(g):
global features_grad
features_grad = g
pred = torch.argmax(output).item()
pred_class = output[:, pred]
features.register_hook(extract)
pred_class.backward()
grads = features_grad
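# Grad-CAM style weighting: average each gradient map down to one scalar per
# channel, scale the corresponding feature map by it, then (below) collapse
# the weighted maps into a single 2-D heatmap and clip negatives.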
pooled_grads = torch.nn.functional.adaptive_avg_pool2d(grads, (1, 1))
pooled_grads = pooled_grads[0]
features = features[0]
for j in range(512):
features[j, ...] *= pooled_grads[j, ...]
heatmap = features.detach().to(torch.device('cpu')).numpy()
heatmap = np.mean(heatmap, axis=0)
heatmap = np.maximum(heatmap, 0)
"""
Augmenters that are based on applying convolution kernels to images.
Do not import directly from this file, as the categorization is not final.
Use instead ::
from imgaug import augmenters as iaa
and then e.g. ::
seq = iaa.Sequential([
iaa.Sharpen((0.0, 1.0)),
iaa.Emboss((0.0, 1.0))
])
List of augmenters:
* Convolve
* Sharpen
* Emboss
* EdgeDetect
* DirectedEdgeDetect
For MotionBlur, see ``blur.py``.
"""
from __future__ import print_function, division, absolute_import
import types
import itertools
import numpy as np
import cv2
import six.moves as sm
from . import meta
import imgaug as ia
from .. import parameters as iap
from .. import dtypes as iadt
# TODO allow 3d matrices as input (not only 2D)
# TODO add _augment_keypoints and other _augment funcs, as these should do
# something for e.g. [[0, 0, 1]]
class Convolve(meta.Augmenter):
"""
Apply a convolution to input images.
dtype support::
* ``uint8``: yes; fully tested
* ``uint16``: yes; tested
* ``uint32``: no (1)
* ``uint64``: no (2)
* ``int8``: yes; tested (3)
* ``int16``: yes; tested
* ``int32``: no (2)
* ``int64``: no (2)
* ``float16``: yes; tested (4)
* ``float32``: yes; tested
* ``float64``: yes; tested
* ``float128``: no (1)
* ``bool``: yes; tested (4)
- (1) rejected by ``cv2.filter2D()``.
- (2) causes error: cv2.error: OpenCV(3.4.2) (...)/filter.cpp:4487:
error: (-213:The function/feature is not implemented)
Unsupported combination of source format (=1), and destination
format (=1) in function 'getLinearFilter'.
- (3) mapped internally to ``int16``.
- (4) mapped internally to ``float32``.
Parameters
----------
matrix : None or (H, W) ndarray or imgaug.parameters.StochasticParameter or callable, optional
The weight matrix of the convolution kernel to apply.
* If ``None``, the input images will not be changed.
* If a 2D numpy array, that array will always be used for all
images and channels as the kernel.
* If a callable, that method will be called for each image
via ``parameter(image, C, random_state)``. The function must
either return a list of ``C`` matrices (i.e. one per channel)
or a 2D numpy array (will be used for all channels) or a
3D ``HxWxC`` numpy array. If a list is returned, each entry may
be ``None``, which will result in no changes to the respective
channel.
name : None or str, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
deterministic : bool, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
random_state : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.bit_generator.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
Examples
--------
>>> import imgaug.augmenters as iaa
>>> matrix = np.array([[0, -1, 0],
>>> [-1, 4, -1],
>>> [0, -1, 0]])
>>> aug = iaa.Convolve(matrix=matrix)
Convolves all input images with the kernel shown in the ``matrix``
variable.
>>> def gen_matrix(image, nb_channels, random_state):
>>> matrix_A = np.array([[0, -1, 0],
>>> [-1, 4, -1],
>>> [0, -1, 0]])
>>> matrix_B = np.array([[0, 1, 0],
>>> [1, -4, 1],
>>> [0, 1, 0]])
>>> if image.shape[0] % 2 == 0:
>>> return [matrix_A] * nb_channels
>>> else:
>>> return [matrix_B] * nb_channels
>>> aug = iaa.Convolve(matrix=gen_matrix)
Convolves images that have an even height with matrix A and images
having an odd height with matrix B.
"""
def __init__(self, matrix=None,
name=None, deterministic=False, random_state=None):
super(Convolve, self).__init__(
name=name, deterministic=deterministic, random_state=random_state)
if matrix is None:
self.matrix = None
self.matrix_type = "None"
elif ia.is_np_array(matrix):
assert matrix.ndim == 2, (
"Expected convolution matrix to have exactly two dimensions, "
"got %d (shape %s)." % (matrix.ndim, matrix.shape))
self.matrix = matrix
self.matrix_type = "constant"
elif ia.is_callable(matrix):
self.matrix = matrix
self.matrix_type = "function"
else:
raise Exception(
"Expected float, int, tuple/list with 2 entries or "
"StochasticParameter. Got %s." % (
type(matrix),))
def _augment_images(self, images, random_state, parents, hooks):
iadt.gate_dtypes(images,
allowed=["bool",
"uint8", "uint16",
"int8", "int16",
"float16", "float32", "float64"],
disallowed=["uint32", "uint64", "uint128", "uint256",
"int32", "int64", "int128", "int256",
"float96", "float128", "float256"],
augmenter=self)
rss = random_state.duplicate(len(images))
for i, image in enumerate(images):
_height, _width, nb_channels = images[i].shape
input_dtype = image.dtype
if image.dtype.type in [np.bool_, np.float16]:
image = image.astype(np.float32, copy=False)
elif image.dtype.type == np.int8:
image = image.astype(np.int16, copy=False)
if self.matrix_type == "None":
matrices = [None] * nb_channels
elif self.matrix_type == "constant":
matrices = [self.matrix] * nb_channels
elif self.matrix_type == "function":
matrices = self.matrix(images[i], nb_channels, rss[i])
if ia.is_np_array(matrices) and matrices.ndim == 2:
matrices = np.tile(
matrices[..., np.newaxis],
(1, 1, nb_channels))
is_valid_list = (isinstance(matrices, list)
and len(matrices) == nb_channels)
is_valid_array = (ia.is_np_array(matrices)
and matrices.ndim == 3
and matrices.shape[2] == nb_channels)
assert is_valid_list or is_valid_array, (
"Callable provided to Convole must return either a "
"list of 2D matrices (one per image channel) "
"or a 2D numpy array "
"or a 3D numpy array where the last dimension's size "
"matches the number of image channels. "
"Got type %s." % (type(matrices),))
if ia.is_np_array(matrices):
# Shape of matrices is currently (H, W, C), but in the
# loop below we need the first axis to be the channel
# index to unify handling of lists of arrays and arrays.
# So we move the channel axis here to the start.
matrices = matrices.transpose((2, 0, 1))
else:
raise Exception("Invalid matrix type")
image_aug = image
for channel in sm.xrange(nb_channels):
if matrices[channel] is not None:
# ndimage.convolve caused problems here cv2.filter2D()
# always returns same output dtype as input dtype
image_aug[..., channel] = cv2.filter2D(
image_aug[..., channel],
-1,
matrices[channel]
)
if input_dtype == np.bool_:
image_aug = image_aug > 0.5
elif input_dtype in [np.int8, np.float16]:
image_aug = iadt.restore_dtypes_(image_aug, input_dtype)
images[i] = image_aug
return images
def get_parameters(self):
return [self.matrix, self.matrix_type]
class Sharpen(Convolve):
"""
Sharpen images and alpha-blend the result with the original input images.
dtype support::
See ``imgaug.augmenters.convolutional.Convolve``.
Parameters
----------
alpha : number or tuple of number or list of number or imgaug.parameters.StochasticParameter, optional
Blending factor of the sharpened image. At ``0.0``, only the original
image is visible, at ``1.0`` only its sharpened version is visible.
* If a number, exactly that value will always be used.
* If a tuple ``(a, b)``, a random value will be sampled from the
interval ``[a, b]`` per image.
* If a list, a random value will be sampled from that list
per image.
* If a ``StochasticParameter``, a value will be sampled from that
parameter per image.
lightness : number or tuple of number or list of number or imgaug.parameters.StochasticParameter, optional
Lightness/brightness of the sharpened image.
Sane values are somewhere in the interval ``[0.5, 2.0]``.
The value ``0.0`` results in an edge map. Values higher than ``1.0``
create bright images. Default value is ``1.0``.
* If a number, exactly that value will always be used.
* If a tuple ``(a, b)``, a random value will be sampled from the
interval ``[a, b]`` per image.
* If a list, a random value will be sampled from that list
per image.
* If a ``StochasticParameter``, a value will be sampled from that
parameter per image.
name : None or str, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
deterministic : bool, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
random_state : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.bit_generator.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
Examples
--------
>>> import imgaug.augmenters as iaa
>>> aug = iaa.Sharpen(alpha=(0.0, 1.0))
Sharpens input images and blends the sharpened image with the input image
using a random blending factor between ``0%`` and ``100%`` (uniformly
sampled).
>>> aug = iaa.Sharpen(alpha=(0.0, 1.0), lightness=(0.75, 2.0))
Sharpens input images with a variable `lightness` sampled uniformly from
the interval ``[0.75, 2.0]`` and with a fully random blending factor
(as in the above example).
"""
def __init__(self, alpha=0, lightness=1,
name=None, deterministic=False, random_state=None):
alpha_param = iap.handle_continuous_param(
alpha, "alpha",
value_range=(0, 1.0), tuple_to_uniform=True, list_to_choice=True)
lightness_param = iap.handle_continuous_param(
lightness, "lightness",
value_range=(0, None), tuple_to_uniform=True, list_to_choice=True)
def _create_matrices(_image, nb_channels, random_state_func):
alpha_sample = alpha_param.draw_sample(
random_state=random_state_func)
assert 0 <= alpha_sample <= 1.0, (
"Expected 'alpha' to be in the interval [0.0, 1.0], "
"got %.4f." % (alpha_sample,))
lightness_sample = lightness_param.draw_sample(
random_state=random_state_func)
matrix_nochange = np.array([
[0, 0, 0],
[0, 1, 0],
[0, 0, 0]
], dtype=np.float32)
matrix_effect = np.array([
[-1, -1, -1],
[-1, 8+lightness_sample, -1],
[-1, -1, -1]
], dtype=np.float32)
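# Because convolution is linear, interpolating between the identity kernel
# and the sharpening kernel is equivalent to alpha-blending the sharpened
# image with the original image.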
matrix = (
(1-alpha_sample) * matrix_nochange
+ alpha_sample * matrix_effect
)
return [matrix] * nb_channels
super(Sharpen, self).__init__(
matrix=_create_matrices, name=name, deterministic=deterministic,
random_state=random_state)
class Emboss(Convolve):
"""
Emboss images and alpha-blend the result with the original input images.
The embossed version pronounces highlights and shadows,
letting the image look as if it was recreated on a metal plate ("embossed").
dtype support::
See ``imgaug.augmenters.convolutional.Convolve``.
Parameters
----------
alpha : number or tuple of number or list of number or imgaug.parameters.StochasticParameter, optional
Blending factor of the embossed image. At ``0.0``, only the original
image is visible, at ``1.0`` only its embossed version is visible.
* If a number, exactly that value will always be used.
* If a tuple ``(a, b)``, a random value will be sampled from the
interval ``[a, b]`` per image.
* If a list, a random value will be sampled from that list
per image.
* If a ``StochasticParameter``, a value will be sampled from that
parameter per image.
strength : number or tuple of number or list of number or imgaug.parameters.StochasticParameter, optional
Parameter that controls the strength of the embossing.
Sane values are somewhere in the interval ``[0.0, 2.0]`` with ``1.0``
being the standard embossing effect. Default value is ``1.0``.
* If a number, exactly that value will always be used.
* If a tuple ``(a, b)``, a random value will be sampled from the
interval ``[a, b]`` per image.
* If a list, then a random value will be sampled from that list
per image.
* If a ``StochasticParameter``, a value will be sampled from the
parameter per image.
name : None or str, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
deterministic : bool, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
random_state : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.bit_generator.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
Examples
--------
>>> import imgaug.augmenters as iaa
>>> aug = iaa.Emboss(alpha=(0.0, 1.0), strength=(0.5, 1.5))
Emboss an image with a strength sampled uniformly from the interval
``[0.5, 1.5]`` and alpha-blend the result with the original input image
using a random blending factor between ``0%`` and ``100%``.
"""
def __init__(self, alpha=0, strength=1,
name=None, deterministic=False, random_state=None):
alpha_param = iap.handle_continuous_param(
alpha, "alpha",
value_range=(0, 1.0), tuple_to_uniform=True, list_to_choice=True)
strength_param = iap.handle_continuous_param(
strength, "strength",
value_range=(0, None), tuple_to_uniform=True, list_to_choice=True)
def _create_matrices(_image, nb_channels, random_state_func):
alpha_sample = alpha_param.draw_sample(
random_state=random_state_func)
assert 0 <= alpha_sample <= 1.0, (
"Expected 'alpha' to be in the interval [0.0, 1.0], "
"got %.4f." % (alpha_sample,))
strength_sample = strength_param.draw_sample(
random_state=random_state_func)
matrix_nochange = np.array([
[0, 0, 0],
[0, 1, 0],
[0, 0, 0]
], dtype=np.float32)
matrix_effect = np.array([
[-1-strength_sample, 0-strength_sample, 0],
[0-strength_sample, 1, 0+strength_sample],
[0, 0+strength_sample, 1+strength_sample]
], dtype=np.float32)
matrix = (
(1-alpha_sample) * matrix_nochange
+ alpha_sample * matrix_effect
)
return [matrix] * nb_channels
super(Emboss, self).__init__(
matrix=_create_matrices, name=name, deterministic=deterministic,
random_state=random_state)
# TODO add tests
# TODO move this to edges.py?
class EdgeDetect(Convolve):
"""
Generate a black & white edge image and alpha-blend it with the input image.
dtype support::
See ``imgaug.augmenters.convolutional.Convolve``.
Parameters
----------
alpha : number or tuple of number or list of number or imgaug.parameters.StochasticParameter, optional
Blending factor of the edge image. At ``0.0``, only the original
image is visible, at ``1.0`` only the edge image is visible.
* If a number, exactly that value will always be used.
* If a tuple ``(a, b)``, a random value will be sampled from the
interval ``[a, b]`` per image.
* If a list, a random value will be sampled from that list
per image.
* If a ``StochasticParameter``, a value will be sampled from that
parameter per image.
name : None or str, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
deterministic : bool, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
random_state : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.bit_generator.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
Examples
--------
>>> import imgaug.augmenters as iaa
>>> aug = iaa.EdgeDetect(alpha=(0.0, 1.0))
Detect edges in an image, mark them as black (non-edge) and white (edges)
and alpha-blend the result with the original input image using a random
blending factor between ``0%`` and ``100%``.
"""
def __init__(self, alpha=0, name=None, deterministic=False,
random_state=None):
alpha_param = iap.handle_continuous_param(
alpha, "alpha",
value_range=(0, 1.0), tuple_to_uniform=True, list_to_choice=True)
def _create_matrices(_image, nb_channels, random_state_func):
alpha_sample = alpha_param.draw_sample(
random_state=random_state_func)
assert 0 <= alpha_sample <= 1.0, (
"Expected 'alpha' to be in the interval [0.0, 1.0], "
"got %.4f." % (alpha_sample,))
matrix_nochange = np.array([
[0, 0, 0],
[0, 1, 0],
[0, 0, 0]
], dtype=np.float32)
matrix_effect = np.array([
[0, 1, 0],
[1, -4, 1],
[0, 1, 0]
], dtype=np.float32)
matrix = (
(1-alpha_sample) * matrix_nochange
+ alpha_sample * matrix_effect
)
return [matrix] * nb_channels
super(EdgeDetect, self).__init__(
matrix=_create_matrices, name=name, deterministic=deterministic,
random_state=random_state)
# TODO add tests
# TODO merge EdgeDetect and DirectedEdgeDetect?
# TODO deprecate and rename to AngledEdgeDetect
# TODO rename arg "direction" to "angle"
# TODO change direction/angle value range to (0, 360)
# TODO move this to edges.py?
class DirectedEdgeDetect(Convolve):
"""
Detect edges from specified angles and alpha-blend with the input image.
This augmenter first detects edges along a certain angle.
Usually, edges are detected in x- or y-direction, while here the edge
detection kernel is rotated to match a specified angle.
The result of applying the kernel is a black (non-edges) and white (edges)
image. That image is alpha-blended with the input image.
dtype support::
See ``imgaug.augmenters.convolutional.Convolve``.
Parameters
----------
alpha : number or tuple of number or list of number or imgaug.parameters.StochasticParameter, optional
Blending factor of the edge image. At ``0.0``, only the original
image is visible, at ``1.0`` only the edge image is visible.
* If a number, exactly that value will always be used.
* If a tuple ``(a, b)``, a random value will be sampled from the
interval ``[a, b]`` per image.
* If a list, a random value will be sampled from that list
per image.
* If a ``StochasticParameter``, a value will be sampled from that
parameter per image.
direction : number or tuple of number or list of number or imgaug.parameters.StochasticParameter, optional
Angle (in degrees) of edges to pronounce, where ``0`` represents
``0`` degrees and ``1.0`` represents 360 degrees (both clockwise,
starting at the top). Default value is ``(0.0, 1.0)``, i.e. pick a
random angle per image.
* If a number, exactly that value will always be used.
* If a tuple ``(a, b)``, a random value will be sampled from the
interval ``[a, b]`` per image.
* If a list, then a random value will be sampled from that list
per image.
* If a ``StochasticParameter``, a value will be sampled from the
parameter per image.
name : None or str, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
deterministic : bool, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
random_state : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.bit_generator.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
Examples
--------
>>> import imgaug.augmenters as iaa
>>> aug = iaa.DirectedEdgeDetect(alpha=1.0, direction=0)
Turn input images into edge images in which edges are detected from
the top side of the image (i.e. the top sides of horizontal edges are
part of the edge image, while vertical edges are ignored).
>>> aug = iaa.DirectedEdgeDetect(alpha=1.0, direction=90/360)
Same as before, but edges are detected from the right. Horizontal edges
are now ignored.
>>> aug = iaa.DirectedEdgeDetect(alpha=1.0, direction=(0.0, 1.0))
Same as before, but edges are detected from a random angle sampled
uniformly from the interval ``[0deg, 360deg]``.
>>> aug = iaa.DirectedEdgeDetect(alpha=(0.0, 0.3), direction=0)
Similar to the previous examples, but here the edge image is alpha-blended
with the input image. The result is a mixture between the edge image and
the input image. The blending factor is randomly sampled between ``0%``
and ``30%``.
"""
def __init__(self, alpha=0, direction=(0.0, 1.0),
name=None, deterministic=False, random_state=None):
alpha_param = iap.handle_continuous_param(
alpha, "alpha",
value_range=(0, 1.0), tuple_to_uniform=True, list_to_choice=True)
direction_param = iap.handle_continuous_param(
direction, "direction",
value_range=None, tuple_to_uniform=True, list_to_choice=True)
def _create_matrices(_image, nb_channels, random_state_func):
alpha_sample = alpha_param.draw_sample(
random_state=random_state_func)
assert 0 <= alpha_sample <= 1.0, (
"Expected 'alpha' to be in the interval [0.0, 1.0], "
"got %.4f." % (alpha_sample,))
direction_sample = direction_param.draw_sample(
random_state=random_state_func)
deg = int(direction_sample * 360) % 360
rad = np.deg2rad(deg)
import numpy as np
#npyfile = np.load('data2005-2007.npy')
def outlierlong(npyfile):
#making a list of all the station names
allstationnames = np.unique(npyfile[1:, 0])
# Copyright 2020 <NAME>. All rights reserved.
# <EMAIL>
# Licensed under the MIT License (except for lines 180-265)
import cv2
import numpy as np
from scipy.signal import argrelextrema
import traceback
from .control import mtx, mtx2
class Vars:
pass
class Camera:
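# Note (added): M composes the camera intrinsics with a flat-ground-plane
# assumption (camera height h) into a homography, so warpImg() produces a
# top-down bird's-eye view and unWarpPts() maps points from that view back
# into image coordinates via M_inv.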
def __init__(self, mtx, h, trans, trans_inv, flip=False):
self.f_u = f_u = mtx[0, 0]
self.f_v = f_v = mtx[1, 1]
if not flip:
self.c_u = c_u = mtx[0, 2]
self.c_v = c_v = mtx[1, 2]
else:
self.c_u = c_u = 639 - mtx[0, 2]
self.c_v = c_v = 479 - mtx[1, 2]
self.h = h
self.M = trans @ np.array([[-h / f_u, 0., h * c_u / f_u],
[0., 0., -h],
[0., -1 / f_v, c_v / f_v]], dtype=np.float32)
# if flip:
# self.M_inv = np.array([[-1, 0, 639],
# [0., 1, 0],
# [0., 0, 1]], dtype=np.float32) @ \
# np.array([[f_u, c_u, 0],
# [0., c_v, h * f_v],
# [0., 1, 0]], dtype=np.float32) @ trans_inv
# else:
# self.M_inv = np.array([[f_u, c_u, 0],
# [0., c_v, h * f_v],
# [0., 1, 0]], dtype=np.float32) @ trans_inv
self.M_inv = np.array([[f_u, c_u, 0],
[0., c_v, h * f_v],
[0., 1, 0]], dtype=np.float32) @ trans_inv
def warpImg(self, img):
return cv2.warpPerspective(img, self.M, (500, 300))
def unWarpPts(self, pts):
return cv2.perspectiveTransform(np.array([pts], dtype=np.float32), self.M_inv)[0]
class LaneDetector:
def __init__(self, cam, name=''):
self.cam: Camera = cam
self.explored = []
self.name = name
def imshow(self, name, img):
return
cv2.imshow(self.name + name, img)
def canny(self, img, par1=200, par2=400):
l = cv2.cvtColor(img, cv2.COLOR_BGR2HLS)[:, :, 1]
blur = cv2.bilateralFilter(l, 7, 10, 20)
edge = cv2.Canny(blur, par1, par2)
return edge
def findLines(self, img):
"""undistorted image => lines"""
# Image Transformation
# edge = cv2.Canny(blur, 300, 500)
edge = self.canny(img, 200, 400)
warp = self.cam.warpImg(edge)
self.imshow('warp', warp)
# Histogram Search
histogram = np.sum(warp, axis=0)
histogram = self.smooth(histogram, 20)
histogram_near = np.sum(warp[270:], axis=0)
histogram_near = self.smooth(histogram_near, 20)
maxima, = argrelextrema(histogram, np.greater)
maxima_near, = argrelextrema(histogram_near, np.greater)
maxima = sorted(np.concatenate((maxima, maxima_near)))
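# Added note: drop peaks closer than 30 px to the previous one, discard
# far-field peaks already present in the near-field histogram, and start the
# sliding-window search from the near-field peaks, each group ordered by
# distance from the warp centre (x = 250).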
maxima = np.delete(maxima, np.argwhere(np.ediff1d(maxima) < 30) + 1)
maxima = np.delete(maxima, np.where(np.isin(maxima, maxima_near)))
maxima = sorted(maxima_near, key=lambda x: abs(x - 250)) + sorted(maxima, key=lambda x: abs(x - 250))
# print(maxima_near, maxima)
# Sliding Windows
height = warp.shape[0]
pts = warp.nonzero()
self.explored = []
result = []
aux = warp.copy()
for start_x in maxima:
line_points = self.follow_line(height, pts, start_x, aux=aux)
# print(line_points)
if line_points is not None:
line_points, centers = line_points
line = self.cam.unWarpPts(line_points)
centers = self.cam.unWarpPts(np.array(centers, dtype=np.float32))
result.append((line_points, line, centers))
self.imshow('aux', aux)
result.sort(key=lambda x: x[0][0, 0])
result = [u[2] for u in result]
return result
def follow_line(self, height, pts, start_x, windows=20, half_width=25, thresh=30, aux=None):
for x_range in self.explored:
if x_range[0] < start_x < x_range[1]:
return
h = height // windows
pts_y = pts[0]
pts_x = pts[1]
cur_x = start_x
point_ids = []
dx = 0
cnt = 0
last_x = None
min_x = start_x
max_x = start_x
min_y = height
max_y = -1
centers = []
skip = -1
for window in range(windows):
y0 = height - (window + 1) * h
y1 = height - window * h
x0 = cur_x - half_width
x1 = cur_x + half_width
if aux is not None:
cv2.rectangle(aux, (int(x0), int(y0)), (int(x1), int(y1)),
(255 * (window / windows), 255 * (windows - window) / windows, 0), 2)
pts_in_window, = ((y0 <= pts_y) & (pts_y < y1) & (x0 <= pts_x) & (pts_x < x1)).nonzero()
point_ids.append(pts_in_window)
if len(pts_in_window) > thresh:
cur_x = np.mean(pts_x[pts_in_window])
for x_range in self.explored:
if x_range[0] < cur_x < x_range[1]:
break
centers.append((cur_x, (y0 + y1) / 2))
if last_x is not None:
dx = cur_x - last_x
last_x = cur_x
cnt += 1
if min_y > y0:
min_y = y0
if max_y < y1:
max_y = y1
if min_x > cur_x:
min_x = cur_x
if max_x < cur_x:
max_x = cur_x
skip = 0
else:
last_x = None
cur_x += dx
if skip >= 0:
skip += 1
if skip > 2:
break
point_ids = np.concatenate(point_ids)
if len(point_ids) < 100 or cnt < 5:
return
x = pts_x[point_ids]
y = pts_y[point_ids]
try:
fit = np.polyfit(y, x, 2)
f = np.poly1d(fit)
line_y = np.arange(min_y, max_y + 15, 15)
line_x = f(line_y)
# print(line_x)
self.explored.append((min_x - half_width / 2, max_x + half_width / 2))
return np.column_stack((np.array(line_x, dtype=np.int), np.array(line_y, dtype=np.int))), centers
except:
traceback.print_exc()
pass
# Lines 212-265 are a copy/modification of https://github.com/scipy/scipy-cookbook/blob/master/ipython/SignalSmooth.ipynb
# Copyright (c) 2001, 2002 Enthought, Inc.
# All rights reserved.
#
# Copyright (c) 2003-2017 SciPy Developers.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# a. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# b. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# c. Neither the name of Enthought nor the names of the SciPy Developers
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS
# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
# OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
# THE POSSIBILITY OF SUCH DAMAGE.
@staticmethod
def smooth(x, window_len=11, window='hanning'):
"""smooth the data using a window with requested size.
This method is based on the convolution of a scaled window with the signal.
The signal is prepared by introducing reflected copies of the signal
(with the window size) in both ends so that transient parts are minimized
in the beginning and end part of the output signal.
input:
x: the input signal
window_len: the dimension of the smoothing window; should be an odd integer
window: the type of window from 'flat', 'hanning', 'hamming', 'bartlett', 'blackman'
flat window will produce a moving average smoothing.
output:
the smoothed signal
example:
t=linspace(-2,2,0.1)
x=sin(t)+randn(len(t))*0.1
y=smooth(x)
see also:
numpy.hanning, numpy.hamming, numpy.bartlett, numpy.blackman, numpy.convolve
scipy.signal.lfilter
TODO: the window parameter could be the window itself if an array instead of a string
NOTE: length(output) != length(input), to correct this: return y[(window_len/2-1):-(window_len/2)] instead of just y.
"""
if x.ndim != 1:
raise ValueError("smooth only accepts 1 dimension arrays.")
if x.size < window_len:
raise ValueError("Input vector needs to be bigger than window size.")
if window_len < 3:
return x
if not window in ['flat', 'hanning', 'hamming', 'bartlett', 'blackman']:
raise ValueError("Window is on of 'flat', 'hanning', 'hamming', 'bartlett', 'blackman'")
s = np.r_[x[window_len - 1:0:-1], x, x[-2:-window_len - 1:-1]]
# print(len(s))
if window == 'flat': # moving average
w = np.ones(window_len, 'd')
"""smp_base.models_actinf
..moduleauthor:: <NAME>, 2016-2017
Active inference models based on :mod:`smp.actinf` project code.
This file contains the models_learners which can be used as adaptive models
of sensorimotor contexts designed for an active inference
approach. Currently implemented models are
- k nearest neighbours (knn)
- sparse online gaussian process models powered by Harold Soh's OTL library (soesgp, storkgp)
- gaussian mixture model based on pypr's gmm (gmm)
- hebbian connected SOM via bruno lara, guido schillaci (hebbsom)
- incremental gaussian mixtures (igmm via juan acevedo-valle)
- SOMs connected with hebbian associative links
TODO:
- consolidate calling convention / api for all model types
-- init with single argument config dictionary
-- predict, fit, sample, conditionals, visualize
-- common test code
- implement missing models
- missing: single hidden layer networks: linear/elm/res with RLS/FORCE/MDN/EH, merge with otl
- missing: imol/models.py
- missing: im/models.py
- missing: smp/models_seq.py
- missing: smp/models_karpmdn.py
- MDN model: florens, karpathy, hardmaru, amjad, cbonnett, edward
- including 'predict_naive' and 'predict_full' methods that would capture returning confidences about the current prediction
- other variables that might be used by the context to modulate exploration, learning and behaviour
- disambiguate static and dynamic (conditional inference types) idim/odim
- consistent sampling from probabilistic models (gmm, hebbsom, ...): sample from prior, stick with last sample's vicinity
- model visualization
- def visualize for all models
- plot current / final som configuration
- plot densities
- hebbsom
- som track residual error from map training
- som use residual for adjusting rbf width
- som extend sampling to sample actual prediction from gaussian with unit's mu and sigma
"""
import pickle
from functools import partial
import numpy as np
import scipy.sparse as sparse
import scipy.stats as stats
import pylab as pl
import matplotlib.gridspec as gridspec
import pandas as pd
from pandas.plotting import scatter_matrix
from smp_base.models import smpModelInit, smpModel
from smp_base.plot_utils import savefig
from smp_base.plot_models import plot_nodes_over_data_1d_components_fig, plot_nodes_over_data_1d_components
# KNN
from sklearn.neighbors import KNeighborsRegressor
# Online Gaussian Processes
try:
from otl_oesgp import OESGP
from otl_storkgp import STORKGP
HAVE_SOESGP = True
except ImportError as e:
print("couldn't import online GP models:", e)
HAVE_SOESGP = False
# Gaussian mixtures PyPR
try:
import pypr.clustering.gmm as gmm
except ImportError as e:
print("Couldn't import pypr.clustering.gmm", e)
# hebbsom
try:
from kohonen.kohonen import Map, Parameters, ExponentialTimeseries, ConstantTimeseries
from kohonen.kohonen import Gas, GrowingGas, GrowingGasParameters, Filter
from kohonen.kohonen import argsample
except ImportError as e:
print("Couldn't import lmjohns3's kohonon SOM lib", e)
# IGMM
try:
from igmm_cond import IGMM_COND
except ImportError as e:
print("Couldn't import IGMM lib", e)
# requirements: otl, kohonen, pypr, igmm
from smp_base.models_reservoirs import LearningRules
import logging
from smp_base.common import get_module_logger
logger = get_module_logger(modulename = 'models_actinf', loglevel = logging.DEBUG)
saveplot = False # True
model_classes = ["KNN", "SOESGP", "STORKGP", "GMM", "HebbSOM", ",IGMM", "all"]
class smpKNN(smpModel):
"""smpKNN
k-NN function approximator smpmodel originally used for the active
inference developmental model but generally reusable.
"""
defaults = {
'idim': 1,
'odim': 1,
'n_neighbors': 5,
'prior': 'random', # ['random', 'linear']
'prior_width': 0.01,
}
@smpModelInit()
def __init__(self, conf):
"""smpKNN.__init__
init
"""
smpModel.__init__(self, conf)
# comply
if not hasattr(self, 'modelsize'):
self.modelsize = 1000 # self.n_neighbors
# the scikit base model
self.fwd = KNeighborsRegressor(n_neighbors = self.n_neighbors)
# the data store
self.X_ = []
self.y_ = []
self.hidden_dist = np.zeros((1, self.n_neighbors))
self.hidden_dist_sum = np.zeros((1, 1))
self.hidden_dist_sum_avg = np.zeros((1, 1))
self.hidden_idx = np.zeros((1, self.n_neighbors))
# bootstrap the model with prior
self.bootstrap()
def get_params(self, *args, **kwargs):
if 'param' in kwargs:
if 'w_norm' in kwargs['param']:
# return np.tile(np.array([(len(self.X_) + len(self.y_))/2.0]), (self.odim, 1))
return np.tile(np.array([len(self.y_)]), (self.odim, 1))
return self.fwd.get_params()
def visualize(self):
pass
def bootstrap(self):
"""smpKNN.bootstrap
Bootstrap the model with some initial dummy samples to prepare it for inference after init
"""
# bootstrap model
self.n_samples_bootstrap = max(10, self.n_neighbors)
logger.info("%s.bootstrapping with %s prior" % (self.__class__.__name__, self.prior))
if self.prior == 'random':
for i in range(self.n_samples_bootstrap):
if self.idim == self.odim:
self.X_.append(np.ones((self.idim, )) * i * 0.1)
self.y_.append(np.ones((self.odim, )) * i * 0.1)
else:
noise_amp = self.prior_width
self.X_.append(np.random.uniform(
-noise_amp, noise_amp, (self.idim,)))
self.y_.append(np.random.uniform(
-noise_amp, noise_amp, (self.odim,)))
elif self.prior == 'linear':
for i in range(self.n_samples_bootstrap):
p_ = -self.prior_width/2.0 + float(i)/self.n_samples_bootstrap
X = np.ones((self.idim, )) * p_ + np.random.uniform(-0.01, 0.01)
y = np.ones((self.odim, )) * p_ + np.random.uniform(-0.01, 0.01)
self.X_.append(X)
self.y_.append(y)
# print(self.X_, self.y_)
self.fwd.fit(self.X_, self.y_)
def predict(self, X):
"""smpKNN.predict
Predict Y using X on the current model state
"""
# FIXME: change scikit to store intermediate query results
# or: fully local predict def
self.hidden_dist, self.hidden_idx = self.fwd.kneighbors(X)
self.hidden_dist_sum = np.mean(self.hidden_dist)
self.hidden_dist_sum_avg = 0.1 * self.hidden_dist_sum + 0.9 * self.hidden_dist_sum_avg
# self.hidden_idx_norm = self.hidden_idx.astype(np.float) * self.hidden_dist_sum_avg/1000.0
self.hidden_idx_norm = self.hidden_idx.astype(np.float) * 1e-3
# logger.debug('hidden dist = %s, idx = %s', self.hidden_dist, self.hidden_idx)
return self.fwd.predict(X)
def fit(self, X, y):
"""smpKNN.fit
Single fit Y to X step. If the input is a batch of data, fit
that entire batch and forgetting existing data in X' and
Y'. If the input is a single data point, append to X' and Y'
and refit the model to that new data.
"""
if X.shape[0] > 1: # batch of data
# self.modelsize = X.shape[0]
return self.fit_batch(X, y)
# logger.debug("%s.fit[%d] len(X_) = %d, len(y_) = %d, modelsize = %d", self.__class__.__name__, self.cnt, len(self.X_), len(self.y_), self.modelsize)
self.cnt += 1
# if len(self.X_) > self.modelsize: return
self.X_.append(X[0,:])
# self.y_.append(self.m[0,:])
# self.y_.append(self.goal[0,:])
self.y_.append(y[0,:])
self.fwd.fit(self.X_, self.y_)
def fit_batch(self, X, y):
"""smpKNN.fit
Batch fit Y to X
"""
self.X_ = X.tolist()
self.y_ = y.tolist()
self.fwd.fit(self.X_, self.y_)
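# Illustrative usage sketch (added; the data below is synthetic and the exact
# smpModelInit conf-handling semantics are assumed): build a conf dict from
# the class defaults, fit on a batch and query a prediction.
#
#     conf = dict(smpKNN.defaults)
#     conf.update({'idim': 2, 'odim': 1})
#     knn = smpKNN(conf)
#     X = np.random.uniform(-1, 1, (100, 2))
#     y = np.sin(X[:, [0]]) * np.cos(X[:, [1]])
#     knn.fit(X, y)                      # batch fit
#     y_hat = knn.predict(np.zeros((1, 2)))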
################################################################################
# ActiveInference OTL library based model, base class implementing predict,
# predict_step (otl can't handle batches), fit, save and load methods
class smpOTLModel(smpModel):
"""smpOTLModel
Sparse online echo state gaussian process function approximator
for active inference
"""
defaults = {
'idim': 1,
'odim': 1,
'otlmodel_type': 'soesgp',
'otlmodel': None,
'memory': 1,
'lag_off': 1,
}
@smpModelInit()
def __init__(self, conf):
# if conf is None: conf = self.defaults
smpModel.__init__(self, conf)
# self.otlmodel_type = "soesgp"
# self.otlmodel = None
# introspection
self.cnt = 0
# explicit short term memory needed for tapping across lag gaps
self.r_l = []
print( "otlmodel.memory", self.memory)
self.r_ = np.zeros((self.modelsize, self.memory))
# self.r_ = np.random.uniform(-1, 1, (self.modelsize, self.memory)) * 1.0
# output variables arrays
self.pred = np.zeros((self.odim, 1))
self.var = np.zeros((self.odim, 1))
# output variables lists
self.pred_l = []
self.var_l = []
def update(self, X_):
# update state
self.otlmodel.update(X_)
# store state
self.r_ = np.roll(self.r_, shift = -1, axis = -1)
self.otlmodel.getState(self.r_l)
tmp = np.array([self.r_l]).T
# print("%s r_ = %s, r[...,[-1] = %s, tmp = %s" % (self.__class__.__name__, self.r_.shape, self.r_[...,[-1]].shape, tmp.shape))
self.r_[...,[-1]] = tmp.copy()
def predict(self, X,rollback = False):
# row vector input
if X.shape[0] > 1: # batch input
ret = np.zeros((X.shape[0], self.odim))
for i in range(X.shape[0]):
ret[i] = self.predict_step(X[i].flatten().tolist(), rollback = rollback)
return ret
else:
X_ = X.flatten().tolist()
return self.predict_step(X_, rollback = rollback)
def predict_step(self, X_, rollback = False):
# update state and store it
self.update(X_)
# predict output variables from state
self.otlmodel.predict(self.pred_l, self.var_l)
# return np.zeros((1, self.odim))
# set prediction variables
self.pred = np.array(self.pred_l)
self.var = np.abs(np.array(self.var_l))
# roll back the reservoir state if rollback on
if rollback:
self.r_ = np.roll(self.r_, shift = 1, axis = -1)
self.otlmodel.setState(self.r_[...,[-1]].copy().flatten().tolist())
self.cnt += 1
return self.pred.reshape((1, self.odim))
def fit(self, X, y, update = True):
"""smpOTLModel.fit
Fit model to data X, y
"""
if self.cnt < self.memory: return
if X.shape[0] > 1: # batch of data
return self.fit_batch(X, y)
if update:
X_ = X.flatten().tolist()
self.update(X_)
# print("X.shape", X.shape, len(X_), X_)
# self.otlmodel.update(X_)
# copy state into predefined structure
# self.otlmodel.getState(self.r)
# consider lag and restore respective state
# print("otlmodel.fit lag_off", self.lag_off)
r_lagged = self.r_[...,[-self.lag_off]]
# print ("r_lagged", r_lagged.shape)
self.otlmodel.setState(r_lagged.flatten().tolist())
# prepare target and fit
# print("soesgp.fit y", type(y))
y_ = y.flatten().tolist()
self.otlmodel.train(y_)
# restore chronologically most recent state
r_lagged = self.r_[...,[-1]]
self.otlmodel.setState(r_lagged.flatten().tolist())
def fit_batch(self, X, y):
for i in range(X.shape[0]):
self.fit(X[[i]], y[[i]])
def save(self, filename):
otlmodel_ = self.otlmodel
self.otlmodel.save(filename + "_%s_model" % self.otlmodel_type)
print("otlmodel", otlmodel_)
self.otlmodel = None
print("otlmodel", otlmodel_)
pickle.dump(self, open(filename, "wb"))
self.otlmodel = otlmodel_
print("otlmodel", self.otlmodel)
@classmethod
def load(cls, filename):
# otlmodel_ = cls.otlmodel
otlmodel_wrap = pickle.load(open(filename, "rb"))
print("%s.load cls.otlmodel filename = %s, otlmodel_wrap.otlmodel_type = %s" % (cls.__name__, filename, otlmodel_wrap.otlmodel_type))
if otlmodel_wrap.otlmodel_type == "soesgp":
otlmodel_cls = OESGP
elif otlmodel_wrap.otlmodel_type == "storkgp":
otlmodel_cls = STORKGP
else:
otlmodel_cls = OESGP
otlmodel_wrap.otlmodel = otlmodel_cls()
print("otlmodel_wrap.otlmodel", otlmodel_wrap.otlmodel)
otlmodel_wrap.otlmodel.load(filename + "_%s_model" % otlmodel_wrap.otlmodel_type)
# print("otlmodel_wrap.otlmodel", dir(otlmodel_wrap.otlmodel))
# cls.bootstrap(otlmodel_wrap)
# otlmodel_wrap.otlmodel = otlmodel_
return otlmodel_wrap
################################################################################
# Sparse Online Echo State Gaussian Process (SOESGP) OTL library model
class smpSOESGP(smpOTLModel):
"""smpSOESGP
Sparse online echo state gaussian process function approximator
for active inference
"""
# # for input modulation style
# defaults = {
# 'idim': 1,
# 'odim': 1,
# 'otlmodel_type': 'soesgp',
# 'otlmodel': None,
# 'modelsize': 300,
# 'input_weight': 2.0,
# 'output_feedback_weight': 0.0,
# 'activation_function': 1,
# 'leak_rate': 0.8, # 0.9,
# 'connectivity': 0.1,
# 'spectral_radius': 0.99, # 0.999,
# # 'kernel_params': [10.0, 10.0], # [2.0, 2.0],
# # 'noise': 0.01,
# # 'kernel_params': [10.0, 10.0], # [2.0, 2.0],
# # 'noise': 1.0, # 0.01,
# 'kernel_params': [2.0, 2.0], # [2.0, 2.0],
# 'noise': 5e-2, # 0.01,
# 'epsilon': 1e-3,
# 'capacity': 100, # 10
# 'random_seed': 101,
# 'visualize': False,
# }
# for self-sampling style
defaults = {
'idim': 1,
'odim': 1,
'otlmodel_type': 'soesgp',
'otlmodel': None,
'memory': 1,
'lag_off': 1,
'modelsize': 200,
'output_feedback_weight': 0.0,
'use_inputs_in_state': False,
'activation_function': 0,
'connectivity': 0.1,
# 'kernel_params': [10.0, 10.0], # [2.0, 2.0],
# 'noise': 0.01,
# 'kernel_params': [10.0, 10.0], # [2.0, 2.0],
# 'noise': 1.0, # 0.01,
# pointmass
'input_weight': 1.0,
'kernel_params': [10.0, 1.5],
'noise': 5e-3, #8e-2, # 0.01,
'leak_rate': 0.1, # 0.9,
'spectral_radius': 0.9,
# # barrel
# 'input_weight': 1.0,
# 'kernel_params': [1.2, 1.2], # [2.0, 2.0],
# 'noise': 1e-2,
# 'leak_rate': 0.9, # 0.9,
# 'spectral_radius': 0.99, # 0.999,
'epsilon': 1e-4,
'capacity': 200, # 10
'random_seed': 106,
'visualize': False,
}
@smpModelInit()
def __init__(self, conf):
smpOTLModel.__init__(self, conf = conf)
# self.otlmodel_type = "soesgp"
self.otlmodel = OESGP()
# self.res_size = 100 # 20
# self.input_weight = 1.0 # 1.0
# self.output_feedback_weight = 0.0
# self.activation_function = 1
# # leak_rate: x <= (1-lr) * input + lr * x
# self.leak_rate = 0.96 # 0.05 # 0.0 # 0.1 # 0.3
# self.connectivity = 0.1
# self.spectral_radius = 0.99
# # covariances
# self.kernel_params = [2.0, 2.0]
# # self.kernel_params = [1.0, 1.0]
# # self.kernel_params = [0.1, 0.1]
# self.noise = 0.05
# self.epsilon = 1e-3
# self.capacity = 100
# self.random_seed = 100 # FIXME: constant?
# self.X_ = []
# self.y_ = []
self.bootstrap()
def bootstrap(self):
from .models_reservoirs import res_input_matrix_random_sparse
self.otlmodel.init(self.idim, self.odim, self.modelsize, self.input_weight,
self.output_feedback_weight, self.activation_function,
self.leak_rate, self.connectivity, self.spectral_radius,
False, self.kernel_params, self.noise, self.epsilon,
self.capacity, self.random_seed)
im = res_input_matrix_random_sparse(self.idim, self.modelsize, 0.2) * self.input_weight
# print("im", type(im))
self.otlmodel.setInputWeights(im.tolist())
################################################################################
# StorkGP OTL based model
class smpSTORKGP(smpOTLModel):
"""smpSTORKGP
Sparse online echo state gaussian process function approximator
for active inference
"""
defaults = {
'idim': 1,
'odim': 1,
'otlmodel_type': 'storkgp',
'otlmodel': None,
'modelsize': 50,
'memory': 1,
'lag_off': 1,
'input_weight': 1.0,
'output_feedback_weight': 0.0,
'activation_function': 1,
'leak_rate': 0.96,
'connectivity': 0.1,
'spectral_radius': 0.99,
'kernel_params': [2.0, 2.0],
'noise': 0.05,
'epsilon': 1e-3,
'capacity': 100,
'random_seed': 100,
'visualize': False,
}
@smpModelInit()
def __init__(self, conf):
smpOTLModel.__init__(self, conf = conf)
# self.otlmodel_type = "storkgp"
self.otlmodel = STORKGP()
# self.res_size = self.modelsize # 100 # 20
self.bootstrap()
def bootstrap(self):
self.otlmodel.init(
self.idim, self.odim,
self.modelsize, # window size
0, # kernel type
[0.5, 0.99, 1.0, self.idim],
1e-4,
1e-4,
100 # seed
)
self.otlmodel.getState(self.r_l)
# print("|self.r_l| = ", len(self.r_l))
self.r_ = np.zeros((len(self.r_l), self.memory))
################################################################################
# inference type multivalued models: GMM, SOMHebb, MDN
# these are somewhat different in operation than the models above
# - fit vs. fit_batch
# - can create conditional submodels
# GMM - gaussian mixture model
class smpGMM(smpModel):
"""smpGMM
Gaussian mixture model based on PyPR's gmm
"""
defaults = {
'idim': 1, 'odim': 1, 'K': 10, 'fit_interval': 100,
'numepisodes': 10, 'visualize': False, 'em_max_iter': 1000}
@smpModelInit()
def __init__(self, conf):
"""smpGMM.__init__
"""
smpModel.__init__(self, conf)
self.cdim = self.idim + self.odim
# data
self.Xy_ = []
self.X_ = []
self.y_ = []
self.Xy = np.zeros((1, self.cdim))
# fitting configuration
# self.fit_interval = 100
self.fitted = False
# number of mixture components
# self.K = K
# list of K component idim x 1 centroid vectors
# self.cen_lst = []
self.cen_lst = [] # np.random.uniform(-1, 1, (self.K,)).tolist()
# list of K component idim x idim covariances
self.cov_lst = [] # [np.eye(self.cdim) * 0.1 for _ in range(self.K)]
# K mixture coeffs
# self.p_k = None
self.p_k = None # [1.0/self.K for _ in range(self.K)]
# log loss after training
self.logL = 0
print("%s.__init__, idim = %d, odim = %d" % (self.__class__.__name__, self.idim, self.odim))
def fit(self, X, y):
"""smpGMM.fit
Single step fit: X, y are single patterns
"""
# print("%s.fit" % (self.__class__.__name__), X.shape, y.shape)
if X.shape[0] == 1:
# single step update, add to internal data and refit if length matches update intervale
self.Xy_.append(np.hstack((X[0], y[0])))
self.X_.append(X[0])
self.y_.append(y[0])
if len(self.Xy_) % self.fit_interval == 0:
# print("len(Xy_)", len(self.Xy_), self.Xy_[99])
# pl.plot(self.Xy_)
# pl.show()
# self.fit_batch(self.Xy)
self.fit_batch(self.X_, self.y_)
else:
# batch fit, just fit model to the input data batch
self.Xy_ += np.hstack((X, y)).tolist()
# self.X_ += X.tolist()
# self.y_ += y.tolist()
# self.Xy = np.hstack((X, y))
# self.Xy = np.asarray(self.Xy_)
# print("X_, y_", self.X_, self.y_)
self.fit_batch(X, y)
def fit_batch(self, X, y):
"""smpGMM.fit_batch
Fit the GMM model with batch data
"""
# print("%s.fit X.shape = %s, y.shape = %s" % (self.__class__.__name__, X.shape, y.shape))
# self.Xy = np.hstack((X[:,3:], y[:,:]))
# self.Xy = np.hstack((X, y))
# self.Xy = np.asarray(self.Xy_)
# self.Xy = Xy
# X = np.asarray(X_)
# y = np.asarray(y_)
self.Xy = np.hstack((X, y))
# self.Xy = np.asarray(self.Xy_)
print("%s.fit_batch self.Xy.shape = %s" % (self.__class__.__name__, self.Xy.shape))
# fit gmm
# max_iter = 10
try:
self.cen_lst, self.cov_lst, self.p_k, self.logL = gmm.em_gm(
self.Xy, K = self.K, max_iter = self.em_max_iter,
verbose = False, iter_call = None)
self.fitted = True
except Exception as e:
print( "%s.fit_batch fit failed with %s" % (self.__class__.__name__, e.args ,))
# sys.exit()
print("%s.fit_batch Log likelihood (how well the data fits the model) = %f" % (self.__class__.__name__, self.logL))
def predict(self, X, rollback = False):
"""smpGMM.predict
Predict Y from X by forwarding to default sample call
"""
return self.sample(X, rollback = rollback)
def sample(self, X, rollback = False):
"""smpGMM.sample
Default sample function
Assumes the input is X with dims = idim located in
        the first part of the conditional inference combined input vector.
        This method constructs the corresponding conditioning input from the reduced input.
"""
print("%s.sample: X.shape = %s, idim = %d" % (self.__class__.__name__, X.shape, self.idim))
assert X.shape[1] == self.idim
# cond = np.zeros((, self.cdim))
uncond = np.empty((X.shape[0], self.odim))
uncond[:] = np.nan
# print("%s.sample: uncond.shape = %s" % (self.__class__.__name__, uncond.shape))
# np.array([np.nan for i in range(self.odim)])
cond = np.hstack((X, uncond))
# cond[:self.idim] = X.copy()
# cond[self.idim:] = np.nan
# print("%s.sample: cond.shape = %s" % (self.__class__.__name__, cond.shape))
if X.shape[0] > 1: # batch
return self.sample_batch(cond)
return self.sample_cond(cond)
def sample_cond(self, X):
"""smpGMM.sample_cond
Single sample from the GMM model with conditioning on single input pattern X
TODO: function conditional_dist, make predict/sample comply with sklearn and use the lowlevel
cond_dist for advanced uses like dynamic conditioning
"""
        # gmm.cond_dist wants a (n, ) shape, not (1, n)
if len(X.shape) > 1:
cond = X[0]
else:
cond = X
# print("%s.sample_cond: cond.shape = %s" % (self.__class__.__name__, cond.shape))
if not self.fitted:
# return np.zeros((3,1))
# model has not been bootstrapped, return random goal
cond_sample = np.random.uniform(-1.0, 1.0, (1, self.odim)) # FIXME hardcoded shape
# cen_con = self.cen_lst
# cov_con = self.cov_lst
# new_p_k = self.p_k
else:
(cen_con, cov_con, new_p_k) = gmm.cond_dist(cond, self.cen_lst, self.cov_lst, self.p_k)
# print( "cen_con", cen_con, "cov_con", cov_con, "p_k", new_p_k)
cond_sample = gmm.sample_gaussian_mixture(cen_con, cov_con, new_p_k, samples = 1)
# print("%s.sample_cond: cond_sample.shape = %s" % (self.__class__.__name__, cond_sample.shape))
return cond_sample
def sample_batch(self, X):
"""smpGMM.sample_batch
        If X has more than one row, return a batch of samples for
every condition row in X
"""
samples = np.zeros((X.shape[0], self.odim))
for i in range(X.shape[0]):
samples[i] = self.sample_cond(X[i])
return samples
# def sample_batch_legacy(self, X, cond_dims = [0], out_dims = [1], resample_interval = 1):
# """smpGMM.sample_batch_legacy
# Sample from gmm model with conditioning batch input X legacy function
# """
# # compute conditional
# sampmax = 20
# numsamplesteps = X.shape[0]
# odim = len(out_dims) # self.idim - X.shape[1]
# self.y_sample_ = np.zeros((odim,))
# self.y_sample = np.zeros((odim,))
# self.y_samples_ = np.zeros((sampmax, numsamplesteps, odim))
# self.y_samples = np.zeros((numsamplesteps, odim))
# self.cond = np.zeros_like(X[0])
# print("%s.sample_batch: y_samples_.shape = %s" % (self.__class__.__name__, self.y_samples_.shape))
# for i in range(numsamplesteps):
# # if i % 100 == 0:
# if i % resample_interval == 0:
# # print("%s.sample_batch: sampling gmm cond prob at step %d" % (self.__class__.__name__, i))
# ref_interval = 1
# # self.cond = self.logs["EP"][(i+ref_interval) % self.logs["EP"].shape[0]] # self.X__[i,:3]
# self.cond = X[(i+ref_interval) % numsamplesteps] # self.X__[i,:3]
# # self.cond = np.array()
# # self.cond[:2] = X_
# # print(self.cond, out_dims, X.shape)
# self.cond[out_dims] = np.nan
# (self.cen_con, self.cov_con, self.new_p_k) = gmm.cond_dist(self.cond, self.cen_lst, self.cov_lst, self.p_k)
# # print "run_hook_e2p_sample gmm.cond_dist:", np.array(self.cen_con).shape, np.array(self.cov_con).shape, self.new_p_k.shape
# samperr = 1e6
# j = 0
# while samperr > 0.1 and j < sampmax:
# self.y_sample = gmm.sample_gaussian_mixture(self.cen_con, self.cov_con, self.new_p_k, samples = 1)
# self.y_samples_[j,i] = self.y_sample
# samperr_ = np.linalg.norm(self.y_sample - X[(i+1) % numsamplesteps,:odim], 2)
# if samperr_ < samperr:
# samperr = samperr_
# self.y_sample_ = self.y_sample
# j += 1
# # print "sample/real err", samperr
# print("sampled", j, "times")
# else:
# # retain samples from last sampling interval boundary
# self.y_samples_[:,i] = self.y_samples_[:,i-1]
# # return sample array
# self.y_samples[i] = self.y_sample_
# return self.y_samples, self.y_samples_
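# Hedged usage sketch (not from the original source): it exercises the two
# fitting modes of smpGMM -- per-sample fit() that accumulates data and refits
# every fit_interval steps, and conditional sampling of Y given X. Parameter
# values are illustrative only.
def demo_gmm_sketch(numsteps=400):
    """Sketch: fit smpGMM on the inverted sinewave data and sample Y | X."""
    X, Y = generate_inverted_sinewave_dataset(N=numsteps)
    mdl = smpGMM(conf=dict(smpGMM.defaults, idim=1, odim=1, K=10, fit_interval=100))
    for i in range(numsteps):
        mdl.fit(X[[i]], Y[[i]])  # accumulate and refit every fit_interval samples
    return mdl.predict(X[:50])   # batch of conditional samples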
# IGMM - incremental gaussian mixture model, from juan
class smpIGMM(smpModel):
"""smpIGMM
    Incremental Gaussian mixture model (IGMM), based on the IGMM_COND implementation
"""
defaults = {'idim': 1, 'odim': 1, 'K': 10, 'numepisodes': 10, 'visualize': False}
@smpModelInit()
def __init__(self, conf):
"""smpIGMM.__init__
"""
smpModel.__init__(self, conf)
self.cdim = self.idim + self.odim
# number of mixture components
# self.K = K
# list of K component idim x 1 centroid vectors
self.cen_lst = []
# list of K component idim x idim covariances
self.cov_lst = []
# K mixture coeffs
self.p_k = None
self.cen_lst = np.random.uniform(-1, 1, (self.K,)).tolist()
# list of K component idim x idim covariances
self.cov_lst = [np.eye(self.cdim) * 0.1 for _ in range(self.K)]
# K mixture coeffs
# self.p_k = None
self.p_k = [1.0/self.K for _ in range(self.K)]
# log loss after training
self.logL = 0
# data
self.Xy_ = []
self.X_ = []
self.y_ = []
self.Xy = np.zeros((1, self.cdim))
# fitting configuration
self.fit_interval = 100
self.fitted = False
self.model = IGMM_COND(min_components=3, forgetting_factor=0.5)
# print("%s.__init__, idim = %d, odim = %d" % (self.__class__.__name__, self.idim, self.odim))
def fit(self, X, y):
"""smpIGMM.fit
Single step fit: X, y are single patterns
"""
# print("%s.fit" % (self.__class__.__name__), X.shape, y.shape)
if X.shape[0] == 1:
            # single step update, add to internal data and refit if length matches the update interval
self.Xy_.append(np.hstack((X[0], y[0])))
self.X_.append(X[0])
self.y_.append(y[0])
if len(self.Xy_) % self.fit_interval == 0:
# print("len(Xy_)", len(self.Xy_), self.Xy_[99])
# pl.plot(self.Xy_)
# pl.show()
# self.fit_batch(self.Xy)
self.fit_batch(self.X_, self.y_)
self.Xy_ = []
self.X_ = []
self.y_ = []
else:
# batch fit, just fit model to the input data batch
self.Xy_ += np.hstack((X, y)).tolist()
# self.X_ += X.tolist()
# self.y_ += y.tolist()
# self.Xy = np.hstack((X, y))
# self.Xy = np.asarray(self.Xy_)
# print("X_, y_", self.X_, self.y_)
self.fit_batch(X, y)
def fit_batch(self, X, y):
"""smpIGMM.fit_batch
Fit the IGMM model with batch data
"""
# print("%s.fit X.shape = %s, y.shape = %s" % (self.__class__.__name__, X.shape, y.shape))
# self.Xy = np.hstack((X[:,3:], y[:,:]))
# self.Xy = np.hstack((X, y))
# self.Xy = np.asarray(self.Xy_)
# self.Xy = Xy
# X = np.asarray(X_)
# y = np.asarray(y_)
self.Xy = np.hstack((X, y))
# self.Xy = np.asarray(self.Xy_)
print("%s.fit_batch self.Xy.shape = %s" % (self.__class__.__name__, self.Xy.shape))
# fit gmm
# self.cen_lst, self.cov_lst, self.p_k, self.logL = gmm.em_gm(self.Xy, K = self.K, max_iter = 1000,
# verbose = False, iter_call = None)
self.model.train(self.Xy)
self.fitted = True
# print("%s.fit_batch Log likelihood (how well the data fits the model) = %f" % (self.__class__.__name__, self.logL))
def predict(self, X):
"""smpIGMM.predict
Predict Y from X by forwarding to default sample call
"""
# print("IGMM.predict X.shape", X.shape, X)
return self.sample(X)
def sample(self, X):
"""smpIGMM.sample
Default sample function
Assumes the input is X with dims = idim located in
        the first part of the conditional inference combined input vector.
        This method constructs the corresponding conditioning input from the reduced input.
"""
# print("%s.sample: X.shape = %s, idim = %d" % (self.__class__.__name__, X.shape, self.idim))
assert X.shape[1] == self.idim
# cond = np.zeros((, self.cdim))
uncond = np.empty((X.shape[0], self.odim))
uncond[:] = np.nan
# print("%s.sample: uncond.shape = %s, %s" % (self.__class__.__name__, uncond.shape, uncond))
cond = np.hstack((X, uncond))
# cond[:self.idim] = X.copy()
# cond[self.idim:] = np.nan
# print("%s.sample: cond.shape = %s, %s" % (self.__class__.__name__, cond.shape, cond))
if X.shape[0] > 1: # batch
return self.sample_batch(cond)
sample = self.sample_cond(cond)
# print("%s.sample sample = %s, X = %s" % (self.__class__.__name__, sample.shape, X.shape))
# FIXME: fix that inference configuration
if sample.shape[1] == self.odim:
return sample
else:
return sample[...,X.shape[1]:]
def sample_cond(self, X):
"""smpIGMM.sample_cond
Single sample from the IGMM model with conditioning on single input pattern X
TODO: function conditional_dist, make predict/sample comply with sklearn and use the lowlevel
cond_dist for advanced uses like dynamic conditioning
"""
if not self.fitted:
# return np.zeros((3,1))
# model has not been bootstrapped, return random prediction
return np.random.uniform(-0.1, 0.1, (1, self.odim)) # FIXME hardcoded shape
        # gmm.cond_dist wants a (n, ) shape, not (1, n)
if len(X.shape) > 1:
cond = X[0]
else:
cond = X
# print("%s.sample_cond: cond.shape = %s" % (self.__class__.__name__, cond.shape))
# (cen_con, cov_con, new_p_k) = gmm.cond_dist(cond, self.cen_lst, self.cov_lst, self.p_k)
# cond_sample = gmm.sample_gaussian_mixture(cen_con, cov_con, new_p_k, samples = 1)
cond_sample = self.model.sample_cond_dist(cond, 1)
# print("%s.sample_cond: cond_sample.shape = %s, %s" % (self.__class__.__name__, cond_sample.shape, cond_sample))
return cond_sample
def sample_batch(self, X):
"""smpIGMM.sample_batch
        If X has more than one row, return a batch of samples for
every condition row in X
"""
samples = np.zeros((X.shape[0], self.odim))
for i in range(X.shape[0]):
samples[i] = self.sample_cond(X[i])
return samples
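# Hedged sketch (added for illustration): smpIGMM exposes the same fit/predict
# surface as smpGMM but trains the incremental IGMM_COND model; a call with
# more than one row goes straight to fit_batch.
def demo_igmm_sketch(numsteps=300):
    """Sketch: batch-fit smpIGMM and draw conditional samples."""
    X, Y = generate_inverted_sinewave_dataset(N=numsteps)
    mdl = smpIGMM(conf=dict(smpIGMM.defaults, idim=1, odim=1, K=10))
    mdl.fit(X, Y)             # X.shape[0] > 1 triggers fit_batch
    return mdl.sample(X[:20])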
################################################################################
# Hebbian SOM model: connect two SOMs with Hebbian links
class smpHebbianSOM(smpModel):
"""smpHebbianSOM class
Hebbian SOM model
FIXME: conf: kohonen/map.Map init distribution and scaling
FIXME: conf: fit_hebb onset delay
FIXME: conf: sampling mode (weights, gaussian(wgts, sigmas), ...
"""
defaults = {
'idim': 1, 'odim': 1, 'numepisodes': 100, 'visualize': False, 'mapsize_e': 10, 'mapsize_p': 10, 'som_lr': 1e-0,
'som_nhs': 3, 'init_range': (-1.0, 1.0)}
@smpModelInit()
def __init__(self, conf):
"""smpHebbianSOM
        Two SOMs coding the input and output space connected by associative Hebbian links
"""
smpModel.__init__(self, conf)
# SOMs training self assessment
self.cnt_fit = 0
self.cnt_predict = 0
self.fitted = False
self.soms_cnt_fit = 0
self.soms_cnt_predict = 0
self.soms_fitted = False
self.hebb_cnt_fit = 0
self.hebb_cnt_predict = 0
self.hebb_fitted = False
self.decay_const = -1e-5
# learning rate proxy
self.ET = ExponentialTimeseries
self.CT = ConstantTimeseries
self.mapsize = 10 ** 2 # 100
# self.mapsize_e = mapsize_e # 100 # int(np.sqrt(self.mapsize)) # max(10, self.idim * 3)
# self.mapsize_p = mapsize_p # 150 # int(np.sqrt(self.mapsize)) # max(10, self.odim * 3)
self.numepisodes_som = self.numepisodes
self.numepisodes_hebb = self.numepisodes
# FIXME: make neighborhood_size decrease with time
# som_lr = som_lr # 1e0
# som_lr = 1e-1 # Haykin, p475
# som_lr = 5e-1
# som_lr = 5e-4
# self.som_nhs = 3 # 1.5
maptype = "som"
# maptype = "gas"
# SOM exteroceptive stimuli 2D input
if maptype == "som":
if self.idim == 1:
mapshape_e = (self.mapsize_e, )
else:
mapshape_e = (self.mapsize_e, self.mapsize_e)
# 1D better?
# mapshape_e = (self.mapsize_e, )
self.kw_e = self.kwargs(
shape = mapshape_e, dimension = self.idim, lr_init = self.som_lr,
neighborhood_size = self.som_nhs, init_variance = 1.0) #, z = 0.001)
# self.kw_e = self.kwargs(shape = (self.mapsize_e, self.mapsize_e), dimension = self.idim, lr_init = 0.5, neighborhood_size = 0.6)
self.som_e = Map(Parameters(**self.kw_e))
elif maptype == "gas":
self.kw_e = self.kwargs_gas(shape = (self.mapsize_e ** 2, ), dimension = self.idim, lr_init = self.som_lr, neighborhood_size = 0.5)
self.som_e = Gas(Parameters(**self.kw_e))
# SOM proprioceptive stimuli 3D input
if maptype == "som":
if self.idim == 1:
mapshape_p = (self.mapsize_p, )
else:
mapshape_p = (int(self.mapsize_p), int(self.mapsize_p))
# 1D better?
mapshape_p = (self.mapsize_p, )
self.kw_p = self.kwargs(shape = mapshape_p, dimension = self.odim, lr_init = self.som_lr,
neighborhood_size = self.som_nhs, init_variance = 0.2) #, z = 0.001)
# self.kw_p = self.kwargs(shape = (int(self.mapsize_p * 1.5), int(self.mapsize_p * 1.5)), dimension = self.odim, lr_init = 0.5, neighborhood_size = 0.7)
self.som_p = Map(Parameters(**self.kw_p))
elif maptype == "gas":
self.kw_p = self.kwargs_gas(shape = (self.mapsize_p ** 2, ), dimension = self.odim, lr_init = self.som_lr, neighborhood_size = 0.5)
self.som_p = Gas(Parameters(**self.kw_p))
print("HebbianSOM mapsize_e,p", self.mapsize_e, self.mapsize_p)
# FIXME: there was a nice trick for node distribution init in _some_ recently added paper
# create "filter" using existing SOM_e, filter computes activation on distance
self.filter_e = Filter(self.som_e, history=lambda: 0.0)
# print("neurons_e", self.filter_e.map.neurons)
self.filter_e.reset()
# print("neurons_e", self.filter_e.map.neurons)
self.filter_e_lr = self.filter_e.map._learning_rate
# kw_f_p = kwargs(shape = (mapsize * 3, mapsize * 3), dimension = 3, neighborhood_size = 0.5, lr_init = 0.1)
# filter_p = Filter(Map(Parameters(**kw_f_p)), history=lambda: 0.01)
# create "filter" using existing SOM_p, filter computes activation on distance
self.filter_p = Filter(self.som_p, history=lambda: 0.0)
self.filter_p.reset()
self.filter_p_lr = self.filter_p.map._learning_rate
# Hebbian links
# hebblink_som = np.random.uniform(-1e-4, 1e-4, (np.prod(som_e._shape), np.prod(som_p._shape)))
# hebblink_filter = np.random.uniform(-1e-4, 1e-4, (np.prod(filter_e.map._shape), np.prod(filter_p.map._shape)))
self.hebblink_som = np.zeros((np.prod(self.som_e._shape), np.prod(self.som_p._shape)))
# self.hebblink_filter = np.zeros((np.prod(self.filter_e.map._shape), np.prod(self.filter_p.map._shape)))
self.hebblink_filter = np.random.normal(0, 1e-6, (np.prod(self.filter_e.map._shape), np.prod(self.filter_p.map._shape)))
# # sparse hebblink
# self.hebblink_filter = sparse.rand(m = np.prod(self.filter_e.map._shape),
# n = np.prod(self.filter_p.map._shape)) * 1e-3
self.hebblink_use_activity = True # use activation or distance
# Hebbian learning rate
if self.hebblink_use_activity:
# self.hebblink_et = ExponentialTimeseries(self.decay_const, 1e-0, 0)
self.hebblink_et = ConstantTimeseries(1e-0)
# self.hebblink_et = ConstantTimeseries(0.0)
else:
self.hebblink_et = ConstantTimeseries(1e-12)
# visualization
if self.visualize:
self.figs.append(plot_nodes_over_data_1d_components_fig(title = self.__class__.__name__, numplots = self.idim + self.odim))
# SOM argument dict
def kwargs(self, shape=(10, 10), z=0.001, dimension=2, lr_init = 1.0, neighborhood_size = 1, init_variance = 1.0):
"""smpHebbianSOM params function for Map"""
return dict(
dimension = dimension,
shape = shape,
neighborhood_size = self.ET(self.decay_const, neighborhood_size, 0.1), # 1.0),
learning_rate=self.ET(self.decay_const, lr_init, 0.0),
# learning_rate=self.CT(lr_init),
noise_variance=z,
init_variance = init_variance)
def kwargs_gas(self, shape=(100,), z=0.001, dimension=3, lr_init = 1.0, neighborhood_size = 1):
"""smpHebbianSOM params function for Gas"""
return dict(
dimension=dimension,
shape=shape,
neighborhood_size = self.ET(self.decay_const, neighborhood_size, 1.0),
learning_rate=self.ET(self.decay_const, lr_init, 0.0),
noise_variance=z)
def visualize_model(self):
"""smpHebbianSOM.visualize_model
Plot the model state visualization
"""
e_nodes, p_nodes = hebbsom_get_map_nodes(self, self.idim, self.odim)
e_nodes_cov = np.tile(np.eye(self.idim) * 0.05, e_nodes.shape[0]).T.reshape((e_nodes.shape[0], self.idim, self.idim))
p_nodes_cov = np.tile(np.eye(self.odim) * 0.05, p_nodes.shape[0]).T.reshape((p_nodes.shape[0], self.odim, self.odim))
X = np.vstack(self.Xhist)
Y = np.vstack(self.Yhist)
# print(X.shape)
plot_nodes_over_data_1d_components(
fig = self.figs[0], X = X, Y = Y, mdl = self,
e_nodes = e_nodes, p_nodes = p_nodes, e_nodes_cov = e_nodes_cov, p_nodes_cov = p_nodes_cov,
saveplot = False
)
def set_learning_rate_constant(self, c = 0.0):
# print("fit_hebb", self.filter_e.map._learning_rate)
self.filter_e.map._learning_rate = self.CT(c)
self.filter_p.map._learning_rate = self.CT(c)
# fix the SOMs with learning rate constant 0
self.filter_e_lr = self.filter_e.map._learning_rate
self.filter_p_lr = self.filter_p.map._learning_rate
def fit_soms(self, X, y):
"""smpHebbianSOM"""
# print("%s.fit_soms fitting X = %s, y = %s" % (self.__class__.__name__, X.shape, y.shape))
# if X.shape[0] != 1, r
# e = EP[i,:dim_e]
# p = EP[i,dim_e:]
self.filter_e.map._learning_rate = self.filter_e_lr
self.filter_p.map._learning_rate = self.filter_p_lr
# don't learn twice
# som_e.learn(e)
# som_p.learn(p)
# TODO for j in numepisodes
if X.shape[0] > 1:
numepisodes = self.numepisodes_som
else:
numepisodes = 1
if X.shape[0] > 100:
print("%s.fit_soms batch fitting of size %d" % (self.__class__.__name__, X.shape[0]))
i = 0
j = 0
eps_convergence = 0.01
# eps_convergence = 0.005
dWnorm_e_ = 1 # short horizon
dWnorm_p_ = 1
dWnorm_e__ = dWnorm_e_ + 2 * eps_convergence # long horizon
dWnorm_p__ = dWnorm_p_ + 2 * eps_convergence
idx_shuffle = np.arange(X.shape[0])
# for j in range(numepisodes):
# (dWnorm_e_ == 0 and dWnorm_p_ == 0) or
# while (dWnorm_e_ > 0.05 and dWnorm_p_ > 0.05):
do_convergence = True
while (do_convergence) and (np.abs(dWnorm_e__ - dWnorm_e_) > eps_convergence and np.abs(dWnorm_p__ - dWnorm_p_) > eps_convergence): # and j < 10:
if j > 0 and j % 10 == 0:
print("%s.fit_soms episode %d / %d" % (self.__class__.__name__, j, numepisodes))
if X.shape[0] == 1:
# print("no convergence")
do_convergence = False
dWnorm_e = 0
dWnorm_p = 0
np.random.shuffle(idx_shuffle)
# print("neurons_e 1", self.filter_e.map.neurons.flatten())
for i in range(X.shape[0]):
# lidx = idx_shuffle[i]
lidx = i
self.filter_e.learn(X[lidx])
dWnorm_e += np.linalg.norm(self.filter_e.map.delta)
self.filter_p.learn(y[lidx])
dWnorm_p += np.linalg.norm(self.filter_p.map.delta)
# print("neurons_e 2", self.filter_e.map.neurons.flatten(), X, X[lidx])
dWnorm_e /= X.shape[0]
dWnorm_e /= self.filter_e.map.numunits
dWnorm_p /= X.shape[0]
dWnorm_p /= self.filter_p.map.numunits
# short
dWnorm_e_ = 0.8 * dWnorm_e_ + 0.2 * dWnorm_e
dWnorm_p_ = 0.8 * dWnorm_p_ + 0.2 * dWnorm_p
# long
dWnorm_e__ = 0.83 * dWnorm_e__ + 0.17 * dWnorm_e_
dWnorm_p__ = 0.83 * dWnorm_p__ + 0.17 * dWnorm_p_
# print("%s.fit_soms batch e |dW| = %f, %f, %f" % (self.__class__.__name__, dWnorm_e, dWnorm_e_, dWnorm_e__))
# print("%s.fit_soms batch p |dW| = %f, %f, %f" % (self.__class__.__name__, dWnorm_p, dWnorm_p_, dWnorm_p__))
j += 1
if True and self.soms_cnt_fit % 100 == 0:
print("%s.fit_soms batch e mean error = %f, min = %f, max = %f" % (
self.__class__.__name__,
np.asarray(self.filter_e.distances_).mean(),
np.asarray(self.filter_e.distances_[-1]).min(),
np.asarray(self.filter_e.distances_).max() ))
print("%s.fit_soms batch p mean error = %f, min = %f, max = %f" % (
self.__class__.__name__,
np.asarray(self.filter_p.distances_).mean(),
np.asarray(self.filter_p.distances_[-1]).min(),
np.asarray(self.filter_p.distances_).max() ))
# print np.argmin(som_e.distances(e)) # , som_e.distances(e)
self.soms_cnt_fit += 1
def fit_hebb(self, X, y):
"""smpHebbianSOM"""
# print("%s.fit_hebb fitting X = %s, y = %s" % (self.__class__.__name__, X.shape, y.shape))
if X.shape[0] == 1 and self.soms_cnt_fit < 200: # 200: # 1500:
return
# numepisodes_hebb = 1
if X.shape[0] > 100:
print("%s.fit_hebb batch fitting of size %d" % (self.__class__.__name__, X.shape[0]))
numsteps = X.shape[0]
################################################################################
# fix the SOMs with learning rate constant 0
self.filter_e_lr = self.filter_e.map._learning_rate
self.filter_p_lr = self.filter_p.map._learning_rate
# print("fit_hebb", self.filter_e.map._learning_rate)
self.filter_e.map._learning_rate = self.CT(0.0)
self.filter_p.map._learning_rate = self.CT(0.0)
e_shape = (np.prod(self.filter_e.map._shape), 1)
p_shape = (np.prod(self.filter_p.map._shape), 1)
eps_convergence = 0.05
z_err_coef_1 = 0.8
z_err_coef_2 = 0.83
z_err_norm_ = 1 # fast
z_err_norm__ = z_err_norm_ + 2 * eps_convergence # slow
Z_err_norm = np.zeros((self.numepisodes_hebb*numsteps,1))
Z_err_norm_ = np.zeros((self.numepisodes_hebb*numsteps,1))
W_norm = np.zeros((self.numepisodes_hebb*numsteps,1))
# # plotting
# pl.ion()
# fig = pl.figure()
# fig2 = pl.figure()
# TODO for j in numepisodes
# j = 0
if X.shape[0] > 1:
numepisodes = self.numepisodes_hebb
else:
numepisodes = 1
i = 0
dWnorm_ = 10.0
j = 0
# for j in range(numepisodes):
do_convergence = True
while do_convergence and z_err_norm_ > eps_convergence and np.abs(z_err_norm__ - z_err_norm_) > eps_convergence: # and j < 20:
if j > 0 and j % 10 == 0:
print("%s.fit_hebb episode %d / %d" % (self.__class__.__name__, j, numepisodes))
if X.shape[0] == 1:
# print("no convergence")
do_convergence = False
for i in range(X.shape[0]):
# just activate
self.filter_e.learn(X[i])
self.filter_p.learn(y[i])
# fetch data induced activity
if self.hebblink_use_activity:
p_ = self.filter_p.activity.reshape(p_shape)
# print(p_.shape)
else:
                    p_ = self.filter_p.distances(y[i]).flatten().reshape(p_shape)
p__ = p_.copy()
# p_ = p_ ** 2
p_ = (p_ == np.max(p_)) * 1.0
e_ = self.filter_e.activity.reshape(e_shape) # flatten()
e__ = e_.copy()
# e_ = e_ ** 2
e_ = (e_ == np.max(e_)) * 1.0
# compute prediction for p using e activation and hebbian weights
if self.hebblink_use_activity:
# print(self.hebblink_filter.T.shape, self.filter_e.activity.reshape(e_shape).shape)
# p_bar = np.dot(self.hebblink_filter.T, self.filter_e.activity.reshape(e_shape))
# e_act = e_.reshape(e_shape)
# e_act
p_bar = np.dot(self.hebblink_filter.T, e_.reshape(e_shape))
# # sparse
# p_bar = self.hebblink_filter.T.dot(e_.reshape(e_shape))
# print("p_bar", type(p_bar))
else:
                    p_bar = np.dot(self.hebblink_filter.T, self.filter_e.distances(X[i]).flatten().reshape(e_shape))
p_bar_ = p_bar.copy()
p_bar = (p_bar == np.max(p_bar)) * 1.0
# print("p_bar", type(p_bar), type(p_bar_))
# # plotting
# ax1 = fig.add_subplot(411)
# ax1.cla()
# ax1.plot(e_ * np.max(e__))
# ax1.plot(e__)
# ax2 = fig.add_subplot(412)
# ax2.cla()
# ax2.plot(p_ * np.max(p_bar_))
# ax2.plot(p__)
# ax2.plot(p_bar * np.max(p_bar_))
# ax2.plot(p_bar_)
# ax3 = fig.add_subplot(413)
# ax3.cla()
# ax3.plot(self.filter_e.distances_[-1])
# ax4 = fig.add_subplot(414)
# ax4.cla()
# ax4.plot(self.filter_p.distances_[-1])
# pl.pause(0.001)
# pl.draw()
# inject activity prediction
p_bar_sum = p_bar.sum()
if p_bar_sum > 0:
p_bar_normed = p_bar / p_bar_sum
else:
p_bar_normed = np.zeros(p_bar.shape)
# compute prediction error: data induced activity - prediction
# print("p_", np.linalg.norm(p_))
# print("p_bar", np.linalg.norm(p_bar))
z_err = p_ - p_bar
idx = np.argmax(p_bar_)
# print("sum E", np.sum(z_err))
# print("idx", p_bar_, idx, z_err[idx])
# z_err = (p_[idx] - p_bar[idx]) * np.ones_like(p_)
# z_err = np.ones_like(p_) *
# print("z_err", z_err)
# z_err = p_bar - p_
# z_err_norm = np.linalg.norm(z_err, 2)
z_err_norm = np.sum(np.abs(z_err))
# if j == 0 and i == 0:
# z_err_norm_ = z_err_norm
# else:
z_err_norm_ = z_err_coef_1 * z_err_norm_ + (1 - z_err_coef_1) * z_err_norm
z_err_norm__ = z_err_coef_2 * z_err_norm__ + (1 - z_err_coef_2) * z_err_norm
w_norm = np.linalg.norm(self.hebblink_filter)
# logidx = (j*numsteps) + i
# Z_err_norm [logidx] = z_err_norm
# Z_err_norm_[logidx] = z_err_norm_
# W_norm [logidx] = w_norm
# z_err = p_bar - self.filter_p.activity.reshape(p_bar.shape)
# print "p_bar.shape", p_bar.shape
# print "self.filter_p.activity.flatten().shape", self.filter_p.activity.flatten().shape
# if i % 100 == 0:
# print("%s.fit_hebb: iter %d/%d: z_err.shape = %s, |z_err| = %f, |W| = %f, |p_bar_normed| = %f" % (self.__class__.__name__, logidx, (self.numepisodes_hebb*numsteps), z_err.shape, z_err_norm_, w_norm, np.linalg.norm(p_bar_normed)))
# d_hebblink_filter = et() * np.outer(self.filter_e.activity.flatten(), self.filter_p.activity.flatten())
eta = self.hebblink_et()
if eta > 0.0:
if False and self.hebblink_use_activity:
# eta = 5e-4
# outer = np.outer(self.filter_e.activity.flatten(), np.clip(z_err, 0, 1))
# outer = np.outer(e_, np.clip(z_err, 0, 1))
# outer = np.outer(e_, p_)
# outer = np.outer(e_, p__ * np.clip(z_err, 0, 1))
# FIXME: this can be optimized with sparsity
# print("e_", e_, e__, p_)
outer = np.outer(e_ * e__, p_)
# print(outer.shape, self.hebblink_filter.shape)
# print("outer", outer)
# print("modulator", z_err[idx])
# d_hebblink_filter = eta * outer * (-1e-3 - z_err[idx])
# d_hebblink_filter = eta * np.outer(z_err, self.filter_e.activity.flatten()).T
# d_hebblink_filter = eta * outer * np.abs((z_err_norm_ - z_err_norm))
# d_hebblink_filter = eta * outer * (z_err_norm - z_err_norm_)
d_hebblink_filter = eta * outer
# # plotting
# f2ax1 = fig2.add_subplot(111)
# f2ax1.imshow(self.hebblink_filter.T, interpolation="none")
# # im = f2ax1.imshow(outer, interpolation="none")
# # f2ax2 = pl.colorbar(im, ax=f2ax1)
# pl.pause(1e-5)
# pl.draw()
elif self.hebblink_use_activity:
e_idx = np.argmax(e_)
p_idx = np.argmax(p_)
# print("e_", e_idx, "p_", p_idx)
d_hebblink_filter = np.zeros_like(self.hebblink_filter)
else:
                        d_hebblink_filter = eta * np.outer(self.filter_e.distances(X[i]), z_err)
# does what?
self.hebblink_filter[e_idx, p_idx] += eta * e__[e_idx]
dWnorm = np.linalg.norm(d_hebblink_filter)
dWnorm_ = 0.8 * dWnorm_ + 0.2 * dWnorm
# print ("dWnorm", dWnorm)
# self.hebblink_filter += d_hebblink_filter
# print("hebblink_filter type", type(self.hebblink_filter))
# print("np.linalg.norm(self.hebblink_filter, 2)", np.linalg.norm(self.hebblink_filter, 2))
self.hebblink_filter /= np.linalg.norm(self.hebblink_filter, 2)
j += 1
if False and self.hebb_cnt_fit % 100 == 0:
# print("hebblink_filter type", type(self.hebblink_filter))
# print(Z_err_norm)
# print("%s.fit_hebb error p/p_bar %f" % (self.__class__.__name__, np.array(Z_err_norm)[:logidx].mean()))
print("%s.fit_hebb |dW| = %f, |W| = %f, mean err = %f / %f" % (self.__class__.__name__, dWnorm_, w_norm, np.min(z_err), np.max(z_err)))
# z_err_norm_, z_err_norm__))
# print("%s.fit_hebb |W| = %f" % (self.__class__.__name__, w_norm))
self.hebb_cnt_fit += 1
def fit(self, X, y):
"""smpHebbianSOM
Fit model to data
"""
# print("%s.fit fitting X = %s, y = %s" % (self.__class__.__name__, X, y))
# if X,y have more than one row, train do batch training on SOMs and links
# otherwise do single step update on both or just the latter?
self.fit_soms(X, y)
self.fit_hebb(X, y)
self.fitted = True
# if self.visualize:
# self.Xhist.append(X)
# self.Yhist.append(y)
# if self.cnt_fit % 100 == 0:
# self.visualize_model()
self.cnt_fit += 1
def predict(self, X):
"""smpHebbianSOM"""
return self.sample(X)
def sample(self, X):
"""smpHebbianSOM.sample"""
# print("%s.sample X.shape = %s, %d" % (self.__class__.__name__, X.shape, 0))
if len(X.shape) == 2 and X.shape[0] > 1: # batch
return self.sample_batch(X)
return self.sample_cond(X)
def sample_cond(self, X):
"""smpHebbianSOM.sample_cond: draw single sample from model conditioned on X"""
# print("%s.sample_cond X.shape = %s, %d" % (self.__class__.__name__, X.shape, 0))
# fix the SOMs with learning rate constant 0
self.filter_e_lr = self.filter_e.map._learning_rate
self.filter_p_lr = self.filter_p.map._learning_rate
# print("fit_hebb", self.filter_e.map._learning_rate)
self.filter_e.map._learning_rate = self.CT(0.0)
self.filter_p.map._learning_rate = self.CT(0.0)
e_shape = (np.prod(self.filter_e.map._shape), 1)
p_shape = (np.prod(self.filter_p.map._shape), 1)
# activate input network
self.filter_e.learn(X)
# pl.plot(self.filter_e.
# propagate activation via hebbian associative links
if self.hebblink_use_activity:
e_ = self.filter_e.activity.reshape((np.prod(self.filter_e.map._shape), 1))
e_ = (e_ == np.max(e_)) * 1.0
e2p_activation = np.dot(self.hebblink_filter.T, e_)
# print("e2p_activation", e2p_activation)
self.filter_p.activity = np.clip((e2p_activation / (np.sum(e2p_activation) + 1e-9)).reshape(self.filter_p.map._shape), 0, np.inf)
else:
            e2p_activation = np.dot(self.hebblink_filter.T, self.filter_e.distances(X).flatten().reshape(e_shape))
# sample the output network with
sidxs = self.filter_p.sample(100)
# print("sidxs", stats.mode(sidxs)[0], sidxs)
# sidx = self.filter_p.sample(1)[0]
# find the mode (most frequent realization) of distribution
sidx = stats.mode(sidxs)[0][0]
e2p_w_p_weights = self.filter_p.neuron(self.filter_p.flat_to_coords(sidx))
# e2p_w_p_weights = self.filter_p.neuron(self.filter_p.flat_to_coords(np.argmax(self.filter_p.activity)))
# ret = np.random.normal(e2p_w_p_weights, self.filter_p.sigmas[sidx], (1, self.odim))
ret = np.random.normal(e2p_w_p_weights, np.sqrt(self.filter_p.sigmas[sidx]), (1, self.odim))
# ret = np.random.normal(e2p_w_p_weights, 0.01, (1, self.odim))
# print("hebbsom sample", sidx, e2p_w_p_weights) # , sidxs) # , self.filter_p.sigmas[sidx])
# ret = e2p_w_p_weights.reshape((1, self.odim))
return ret
def sample_prior(self):
"""smpHebbianSOM.sample_prior
Sample from input map prior distribution
"""
# print("pr")
# pass
# print("prior", self.filter_e.map.prior)
# sidxs = argsample(self.filter_e.map.prior, n = 1)
sidxs = argsample(np.sum(self.filter_e.sigmas, axis = 1), n = 1)
prior_sample_mu = self.filter_e.neuron(self.filter_e.flat_to_coords(sidxs[0]))
# print ('prior_sample_mu', prior_sample_mu.shape, self.filter_e.sigmas[sidxs[0]].shape)
# prior_sample = np.random.normal(prior_sample_mu, self.filter_e.sigmas[sidxs[0]]).reshape((self.idim, 1))
prior_sample = prior_sample_mu.reshape((self.idim, 1))
# print("prior_sample", prior_sample)
return prior_sample
# def sample_cond_legacy(self, X):
# """smpHebbianSOM.sample_cond: sample from model conditioned on X"""
# sampling_search_num = 100
# e_shape = (np.prod(self.filter_e.map._shape), 1)
# p_shape = (np.prod(self.filter_p.map._shape), 1)
# # P_ = np.zeros((X.shape[0], self.odim))
# # E_ = np.zeros((X.shape[0], self.idim))
# e2p_w_p_weights = self.filter_p.neuron(self.filter_p.flat_to_coords(self.filter_p.sample(1)[0]))
# for i in range(X.shape[0]):
# # e = EP[i,:dim_e]
# # p = EP[i,dim_e:]
# e = X[i]
# # print np.argmin(som_e.distances(e)), som_e.distances(e)
# self.filter_e.learn(e)
# # print "self.filter_e.winner(e)", self.filter_e.winner(e)
# # filter_p.learn(p)
# # print "self.filter_e.activity.shape", self.filter_e.activity.shape
# # import pdb; pdb.set_trace()
# if self.hebblink_use_activity:
# e2p_activation = np.dot(self.hebblink_filter.T, self.filter_e.activity.reshape((np.prod(self.filter_e.map._shape), 1)))
# self.filter_p.activity = np.clip((e2p_activation / np.sum(e2p_activation)).reshape(self.filter_p.map._shape), 0, np.inf)
# else:
# e2p_activation = np.dot(self.hebblink_filter.T, self.filter_e.distances(e).flatten().reshape(e_shape))
# # print "e2p_activation.shape, np.sum(e2p_activation)", e2p_activation.shape, np.sum(e2p_activation)
# # print "self.filter_p.activity.shape", self.filter_p.activity.shape
# # print "np.sum(self.filter_p.activity)", np.sum(self.filter_p.activity), (self.filter_p.activity >= 0).all()
# # self.filter_p.learn(p)
# # emodes: 0, 1, 2
# emode = 0 #
# if i % 1 == 0:
# if emode == 0:
# e2p_w_p_weights_ = []
# for k in range(sampling_search_num):
# # filter.sample return the index of the sampled unit
# e2p_w_p_weights = self.filter_p.neuron(self.filter_p.flat_to_coords(self.filter_p.sample(1)[0]))
# e2p_w_p_weights_.append(e2p_w_p_weights)
# pred = np.array(e2p_w_p_weights_)
# # print "pred", pred
# # # if we can compare against something
# # pred_err = np.linalg.norm(pred - p, 2, axis=1)
# # # print "np.linalg.norm(e2p_w_p_weights - p, 2)", np.linalg.norm(e2p_w_p_weights - p, 2)
# # e2p_w_p = np.argmin(pred_err)
# # if not pick any
# e2p_w_p = np.random.choice(pred.shape[0])
# # print("pred_err", e2p_w_p, pred_err[e2p_w_p])
# e2p_w_p_weights = e2p_w_p_weights_[e2p_w_p]
# elif emode == 1:
# if self.hebblink_use_activity:
# e2p_w_p = np.argmax(e2p_activation)
# else:
# e2p_w_p = np.argmin(e2p_activation)
# e2p_w_p_weights = self.filter_p.neuron(self.filter_p.flat_to_coords(e2p_w_p))
# elif emode == 2:
# e2p_w_p = self.filter_p.winner(p)
# e2p_w_p_weights = self.filter_p.neuron(self.filter_p.flat_to_coords(e2p_w_p))
# # P_[i] = e2p_w_p_weights
# # E_[i] = environment.compute_sensori_effect(P_[i])
# # print("e2p shape", e2p_w_p_weights.shape)
# return e2p_w_p_weights.reshape((1, self.odim))
def sample_batch(self, X):
"""smpHebbianSOM.sample_batch: If X has more than one rows, return batch of samples for
every condition row in X"""
samples = np.zeros((X.shape[0], self.odim))
for i in range(X.shape[0]):
samples[i] = self.sample_cond(X[i])
return samples
def sample_batch_legacy(self, X, cond_dims = [0], out_dims = [1], resample_interval = 1):
"""smpHebbianSOM"""
print("%s.sample_batch_legacy data X = %s" % (self.__class__.__name__, X))
sampmax = 20
numsamplesteps = X.shape[0]
odim = len(out_dims) # self.idim - X.shape[1]
self.y_sample_ = np.zeros((odim,))
self.y_sample = np.zeros((odim,))
self.y_samples_ = np.zeros((sampmax, numsamplesteps, odim))
self.y_samples = np.zeros((numsamplesteps, odim))
self.cond = np.zeros_like(X[0])
return self.y_samples, self.y_samples_
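# Hedged usage sketch (not part of the original file): the HebbSOM is trained
# sample by sample; fit_hebb only starts once the SOMs have seen ~200 updates,
# so the loop below warms up both maps before predictions become meaningful.
def demo_hebbsom_sketch(numsteps=500):
    """Sketch: online-train smpHebbianSOM and draw conditional predictions."""
    X, Y = generate_inverted_sinewave_dataset(N=numsteps)
    conf = dict(smpHebbianSOM.defaults, idim=1, odim=1, mapsize_e=10, mapsize_p=10)
    mdl = smpHebbianSOM(conf=conf)
    for i in range(numsteps):
        mdl.fit(X[[i]], Y[[i]])  # update the SOMs first, then the Hebbian links
    return mdl.predict(X[:20])   # batch of conditional samples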
################################################################################
# models_actinf: model testing and plotting code
################################################################################
def hebbsom_get_map_nodes(mdl, idim, odim):
"""hebbsom_get_map_nodes
Get all the nodes of the coupled SOM maps
"""
e_nodes = mdl.filter_e.map.neurons
p_nodes = mdl.filter_p.map.neurons
# print("e_nodes", e_nodes.shape, "p_nodes", p_nodes.shape)
e_nodes = e_nodes.reshape((-1,idim))
p_nodes = p_nodes.reshape((-1,odim))
# print("e_nodes", e_nodes.shape, "p_nodes", p_nodes.shape)
return (e_nodes, p_nodes)
def hebbsom_predict_full(X, Y, mdl):
"""hebbsom_predict_full
Predict using a HebbSOM and return full internal activations as tuple
    - (predictions (samples), distances (SOM distance func), activations (distances after act. func))
"""
distances = []
activities = []
predictions = np.zeros_like(Y)
# have to loop over single steps until we generalize predict function to also yield distances and activities
for h in range(X.shape[0]):
# X_ = (Y[h]).reshape((1, odim))
X_ = X[h]
# print("X_", X_.shape, X_)
# predict proprio 3D from extero 2D
predictions[h] = mdl.predict(X_)
# print("X_.shape = %s, %d" % (X_.shape, 0))
# print("prediction.shape = %s, %d" % (prediction.shape, 0))
distances.append(mdl.filter_e.distances(X_).flatten())
activities.append(mdl.filter_e.activity.flatten())
activities_sorted = activities[-1].argsort()
# print("Y[h]", h, Y[h].shape, prediction.shape)
return (predictions, distances, activities)
def plot_nodes_over_data_scattermatrix(X, Y, mdl, e_nodes, p_nodes, e_nodes_cov, p_nodes_cov, saveplot = False):
"""plot_nodes_over_data_scattermatrix
Plot SOM node locations over input data as scattermatrix all X
comps over all Y comps.
"""
idim = X.shape[1]
odim = Y.shape[1]
numplots = idim + odim
# e_nodes, p_nodes = hebbsom_get_map_nodes(mdl, idim, odim)
dfcols = []
dfcols += ["e_%d" % i for i in range(idim)]
dfcols += ["p_%d" % i for i in range(odim)]
# X_plus_e_nodes = np.vstack((X, e_nodes))
# Y_plus_p_nodes = np.vstack((Y, p_nodes))
# df = pd.DataFrame(np.hstack((X_plus_e_nodes, Y_plus_p_nodes)), columns=dfcols)
df = pd.DataFrame(np.hstack((X, Y)), columns=dfcols)
sm = scatter_matrix(df, alpha=0.2, figsize=(5,5), diagonal="hist")
# print("sm = %s" % (sm))
# loop over i/o components
idims = list(range(idim))
odims = list(range(idim, idim+odim))
for i in range(numplots):
for j in range(numplots):
if i != j and i in idims and j in idims:
# center = np.array()
# x1, x2 = gmm.gauss_ellipse_2d(centroids[i], ccov[i])
sm[i,j].plot(e_nodes[:,j], e_nodes[:,i], "ro", alpha=0.5, markersize=8)
if i != j and i in odims and j in odims:
sm[i,j].plot(p_nodes[:,j-idim], p_nodes[:,i-idim], "ro", alpha=0.5, markersize=8)
# if i != j and i in idims and j in odims:
# sm[i,j].plot(p_nodes[:,j-idim], e_nodes[:,i], "go", alpha=0.5, markersize=8)
# if i != j and i in odims and j in idims:
# sm[i,j].plot(e_nodes[:,j], p_nodes[:,i-idim], "go", alpha=0.5, markersize=8)
# get figure reference from axis and show
fig = sm[0,0].get_figure()
fig.suptitle("Predictions over data scattermatrix (%s)" % (mdl.__class__.__name__))
if saveplot:
filename = "plot_nodes_over_data_scattermatrix_%s.jpg" % (mdl.__class__.__name__,)
savefig(fig, filename)
fig.show()
def plot_nodes_over_data_scattermatrix_hexbin(X, Y, mdl, predictions, distances, activities, saveplot = False):
"""models_actinf.plot_nodes_over_data_scattermatrix_hexbin
    Plot model nodes (if applicable) over the hexbinned data
expanding dimensions as a scattermatrix.
"""
idim = X.shape[1]
odim = Y.shape[1]
numplots = idim * odim + 2
fig = pl.figure()
fig.suptitle("Predictions over data xy scattermatrix/hexbin (%s)" % (mdl.__class__.__name__))
gs = gridspec.GridSpec(idim, odim)
figaxes = []
for i in range(idim):
figaxes.append([])
for o in range(odim):
figaxes[i].append(fig.add_subplot(gs[i,o]))
err = 0
# colsa = ["k", "r", "g", "c", "m", "y"]
# colsb = ["k", "r", "g", "c", "m", "y"]
colsa = ["k" for col in range(idim)]
colsb = ["r" for col in range(odim)]
for i in range(odim): # odim * 2
for j in range(idim):
# pl.subplot(numplots, 1, (i*idim)+j+1)
ax = figaxes[j][i]
# target = Y[h,i]
# X__ = X_[j] # X[h,j]
# err += np.sum(np.square(target - prediction))
# ax.plot(X__, [target], colsa[j] + ".", alpha=0.25, label="target_%d" % i)
# ax.plot(X__, [prediction[0,i]], colsb[j] + "o", alpha=0.25, label="pred_%d" % i)
# ax.plot(X[:,j], Y[:,i], colsa[j] + ".", alpha=0.25, label="target_%d" % i)
ax.hexbin(X[:,j], Y[:,i], gridsize = 20, alpha=0.75, cmap=pl.get_cmap("gray"))
ax.plot(X[:,j], predictions[:,i], colsb[j] + "o", alpha=0.15, label="pred_%d" % i, markersize=8)
# pred1 = mdl.filter_e.neuron(mdl.filter_e.flat_to_coords(activities_sorted[-1]))
# ax.plot(X__, [pred1], "ro", alpha=0.5)
# pred2 = mdl.filter_e.neuron(mdl.filter_e.flat_to_coords(activities_sorted[-2]))
# ax.plot(X__, [pred2], "ro", alpha=0.25)
# print("accum total err = %f" % (err / X.shape[0] / (idim * odim)))
if saveplot:
filename = "plot_nodes_over_data_scattermatrix_hexbin_%s.jpg" % (mdl.__class__.__name__,)
savefig(fig, filename)
fig.show()
def plot_hebbsom_links_distances_activations(X, Y, mdl, predictions, distances, activities, saveplot = False):
"""plot the hebbian link matrix, and all node distances and activities for all inputs"""
hebblink_log = np.log(mdl.hebblink_filter.T + 1.0)
fig = pl.figure()
fig.suptitle("Debugging SOM: hebbian links, distances, activities (%s)" % (mdl.__class__.__name__))
gs = gridspec.GridSpec(4, 1)
# pl.plot(X, Y, "k.", alpha=0.5)
# pl.subplot(numplots, 1, numplots-1)
ax1 = fig.add_subplot(gs[0])
ax1.set_title('hebbian associative links')
# im1 = ax1.imshow(mdl.hebblink_filter, interpolation="none", cmap=pl.get_cmap("gray"))
im1 = ax1.pcolormesh(hebblink_log, cmap=pl.get_cmap("gray"))
ax1.set_xlabel("in (e)")
ax1.set_ylabel("out (p)")
cbar = fig.colorbar(mappable = im1, ax=ax1, orientation="horizontal")
ax2 = fig.add_subplot(gs[1])
ax2.set_title('distances over time')
distarray = np.array(distances)
# print("distarray.shape", distarray.shape)
pcm = ax2.pcolormesh(distarray.T)
cbar = fig.colorbar(mappable = pcm, ax=ax2, orientation="horizontal")
# pl.subplot(numplots, 1, numplots)
ax3 = fig.add_subplot(gs[2])
ax3.set_title('activations propagated via hebbian links')
actarray = np.array(activities)
# print("actarray.shape", actarray.shape)
pcm = ax3.pcolormesh(actarray.T)
cbar = fig.colorbar(mappable = pcm, ax=ax3, orientation="horizontal")
ax4 = fig.add_subplot(gs[3])
ax4.set_title('flattened link table')
ax4.plot(hebblink_log.flatten())
# print("hebblink_log", hebblink_log)
if saveplot:
filename = "plot_hebbsom_links_distances_activations_%s.jpg" % (mdl.__class__.__name__,)
savefig(fig, filename)
fig.show()
def plot_mdn_mues_over_data_scan(X, Y, mdl, saveplot = False):
mues = []
sigs = []
pis = []
print("plot_mdn_mues_over_data_scan: X", X.shape)
fig = pl.figure()
gs = gridspec.GridSpec(2, 2)
dim = Y.shape[1]
xscan = np.linspace(-np.pi, np.pi, 101).reshape((-1, 1))
num_mu = mdl.mixcomps * dim
# num_sig = mixcomps * d ** 2
    num_sig = ((dim ** 2 - dim) // 2 + dim) * mdl.mixcomps
num_pi = mdl.mixcomps
if X.shape[1] > 1:
xscan = np.hstack((xscan, xscan))
print("xscan", xscan.shape)
xscan = X[:100]
for xs in xscan:
# print("xs", xs)
xs = np.atleast_2d(xs)
print("xs", xs)
y = mdl.predict(xs)
# mues.append(mdl.model.z[:mdl.mixcomps,0])
# sigs.append(np.exp(mdl.model.z[mdl.mixcomps:(2*mdl.mixcomps),0]))
# pis.append(mdl.lr.softmax(mdl.model.z[(2*mdl.mixcomps):,0]))
mues.append(mdl.model.z[:num_mu])
sigs.append(np.exp(mdl.model.z[num_mu:num_mu + num_sig]))
pis.append(mdl.lr.softmax(mdl.model.z[-num_pi:]))
# print("xs", xs, "ys", y)
# print("mues", mues)
numpoints = xscan.shape[0]
mues = np.vstack(mues).reshape((numpoints, mdl.mixcomps, dim))
    sigs = np.vstack(sigs).reshape((numpoints, mdl.mixcomps, num_sig // mdl.mixcomps))
pis = np.vstack(pis).reshape((numpoints, mdl.mixcomps))
print("mues", mues.shape)
print("sigs", sigs.shape)
print("pis", pis.shape)
colors = ['r', 'g', 'b', 'c', 'y', 'm']
for h in range(dim):
# ax = fig.add_subplot(dim, 2, h + 1)
ax = fig.add_subplot(gs[h,0])
for i in range(mdl.mixcomps):
for j in range(xscan.shape[0]):
# print("mues", mues[[j],[i]], "pis", pis[j,i])
ax.plot(
xscan[[j]], mues[[j],[i],[h]],
marker = 'o', markerfacecolor = colors[i % len(colors)],
markeredgecolor = colors[i % len(colors)],
alpha = pis[j,i])
# ax.plot(xscan[[j]], mues[[j],[i],[h]] - sigs[[j],[i],[h]], "bo", alpha = pis[j,i], markersize = 2.5)
# ax.plot(xscan[[j]], mues[[j],[i],[h]] + sigs[[j],[i],[h]], "bo", alpha = pis[j,i], markersize = 2.5)
ax = fig.add_subplot(gs[0,1])
if dim == 1:
plot_predictions_over_data(X, Y, mdl, saveplot, ax = ax, datalim = 1000)
else:
plot_predictions_over_data_2D(X, Y, mdl, saveplot, ax = ax, datalim = 1000)
for i in range(mdl.mixcomps):
ax.plot(mues[:,i,0], mues[:,i,1], linestyle = "none", marker = 'o', markerfacecolor = colors[i % len(colors)], alpha = np.mean(pis[:,i]))
# ax.plot(xscan, mues - sigs, "bo", alpha = 0.5, markersize = 2.0)
# ax.plot(xscan, mues + sigs, "bo", alpha = 0.5, markersize = 2.0)
# ax.plot(xscan, mues, "ro", alpha = 0.5)
# ax.plot(mues, xscan, "ro", alpha = 0.5)
if saveplot:
filename = "plot_mdn_mues_over_data_scan_%s.jpg" % (mdl.__class__.__name__,)
savefig(fig, filename)
fig.show()
def plot_predictions_over_data(X, Y, mdl, saveplot = False, ax = None, datalim = 1000):
do_hexbin = False
if X.shape[0] > 4000:
do_hexbin = False # True
X = X[-4000:]
Y = Y[-4000:]
# plot prediction
idim = X.shape[1]
odim = Y.shape[1]
numsamples = 1 # 2
Y_samples = []
for i in range(numsamples):
Y_samples.append(mdl.predict(X))
# print("Y_samples[0]", Y_samples[0])
fig = pl.figure()
fig.suptitle("Predictions over data xy (numsamples = %d, (%s)" % (numsamples, mdl.__class__.__name__))
gs = gridspec.GridSpec(odim, 1)
for i in range(odim):
ax = fig.add_subplot(gs[i])
target = Y[:,i]
if do_hexbin:
ax.hexbin(X, Y, gridsize = 20, alpha=1.0, cmap=pl.get_cmap("gray"))
else:
ax.plot(X, target, "k.", label="Y_", alpha=0.5)
for j in range(numsamples):
prediction = Y_samples[j][:,i]
# print("X", X.shape, "prediction", prediction.shape)
# print("X", X, "prediction", prediction)
if do_hexbin:
ax.hexbin(X[:,i], prediction, gridsize = 30, alpha=0.6, cmap=pl.get_cmap("Reds"))
else:
ax.plot(X[:,i], prediction, "r.", label="Y_", alpha=0.25)
# get limits
xlim = ax.get_xlim()
ylim = ax.get_ylim()
error = target - prediction
mse = np.mean(np.square(error))
mae = np.mean(np.abs(error))
xran = xlim[1] - xlim[0]
yran = ylim[1] - ylim[0]
ax.text(xlim[0] + xran * 0.1, ylim[0] + yran * 0.3, "mse = %f" % mse)
ax.text(xlim[0] + xran * 0.1, ylim[0] + yran * 0.5, "mae = %f" % mae)
if saveplot:
filename = "plot_predictions_over_data_%s.jpg" % (mdl.__class__.__name__,)
savefig(fig, filename)
fig.show()
def plot_predictions_over_data_2D(X, Y, mdl, saveplot = False, ax = None, datalim = 1000):
do_hexbin = False
if X.shape[0] > datalim:
do_hexbin = False # True
X = X[-datalim:]
Y = Y[-datalim:]
# plot prediction
idim = X.shape[1]
odim = Y.shape[1]
numsamples = 1 # 2
Y_samples = []
for i in range(numsamples):
Y_samples.append(mdl.predict(X))
# print("Y_samples[0]", Y_samples[0].shape)
# Y_samples
if ax is None:
fig = pl.figure()
fig.suptitle("Predictions over data xy (numsamples = %d, (%s)" % (numsamples, mdl.__class__.__name__))
gs = gridspec.GridSpec(1, 1)
ax = fig.add_subplot(gs[0])
else:
fig = None
ax.plot(Y[:,0], Y[:,1], 'ko', alpha = 0.1)
ax.plot(Y_samples[0][:,0], Y_samples[0][:,1], 'r.', alpha = 0.1)
ax.set_aspect(1)
# for i in range(odim):
# ax = fig.add_subplot(gs[i])
# target = Y[:,i]
# if do_hexbin:
# ax.hexbin(X, Y, gridsize = 20, alpha=1.0, cmap=pl.get_cmap("gray"))
# else:
# ax.plot(X, target, "k.", label="Y_", alpha=0.5)
# for j in range(numsamples):
# prediction = Y_samples[j][:,i]
# # print("X", X.shape, "prediction", prediction.shape)
# # print("X", X, "prediction", prediction)
# if do_hexbin:
# ax.hexbin(X[:,i], prediction, gridsize = 30, alpha=0.6, cmap=pl.get_cmap("Reds"))
# else:
# ax.plot(X[:,i], prediction, "r.", label="Y_", alpha=0.25)
# # get limits
# xlim = ax.get_xlim()
# ylim = ax.get_ylim()
# error = target - prediction
# mse = np.mean(np.square(error))
# mae = np.mean(np.abs(error))
# xran = xlim[1] - xlim[0]
# yran = ylim[1] - ylim[0]
# ax.text(xlim[0] + xran * 0.1, ylim[0] + yran * 0.3, "mse = %f" % mse)
# ax.text(xlim[0] + xran * 0.1, ylim[0] + yran * 0.5, "mae = %f" % mae)
if fig is not None:
if saveplot:
filename = "plot_predictions_over_data_%s.jpg" % (mdl.__class__.__name__,)
savefig(fig, filename)
fig.show()
def plot_predictions_over_data_ts(X, Y, mdl, saveplot = False):
# plot prediction
idim = X.shape[1]
odim = Y.shape[1]
numsamples = 2
Y_samples = []
print("Xxx", X.shape)
for i in range(numsamples):
Y_samples.append(mdl.predict(X))
print("Y_samples[0]", Y_samples[0])
fig = pl.figure()
fig.suptitle("Predictions over data timeseries (numsamples = %d), (%s)" % (numsamples, mdl.__class__.__name__))
gs = gridspec.GridSpec(odim, 1)
for i in range(odim):
# pl.subplot(odim, 2, (i*2)+1)
ax = fig.add_subplot(gs[i])
target = Y[:,i]
ax.plot(target, "k.", label="Y_", alpha=0.5)
# pl.subplot(odim, 2, (i*2)+2)
# prediction = Y_[:,i]
# pl.plot(target, "k.", label="Y")
mses = []
maes = []
errors = []
for j in range(numsamples):
prediction = Y_samples[j][:,i]
error = target - prediction
errors.append(error)
mse = np.mean(np.square(error))
mae = np.mean(np.abs(error))
mses.append(mse)
maes.append(mae)
# pl.plot(prediction, target, "r.", label="Y_", alpha=0.25)
ax.plot(prediction, "r.", label="Y_", alpha=0.25)
errors = np.asarray(errors)
# print("errors.shape", errors.shape)
aes = np.min(np.abs(errors), axis=0)
ses = np.min(np.square(errors), axis=0)
mae = np.mean(aes)
mse = np.mean(ses)
# get limits
xlim = ax.get_xlim()
ylim = ax.get_ylim()
xran = xlim[1] - xlim[0]
yran = ylim[1] - ylim[0]
ax.text(xlim[0] + xran * 0.1, ylim[0] + yran * 0.3, "mse = %f" % mse)
ax.text(xlim[0] + xran * 0.1, ylim[0] + yran * 0.5, "mae = %f" % mae)
# pl.plot(X[:,i], Y[:,i], "k.", alpha=0.25)
if saveplot:
filename = "plot_predictions_over_data_ts_%s.jpg" % (mdl.__class__.__name__,)
savefig(fig, filename)
fig.show()
def get_class_from_name(name = "KNN"):
"""models_actinf.get_class_from_name
Get a class by a common name string.
"""
if name == "KNN":
cls = smpKNN
elif name == "SOESGP":
cls = smpSOESGP
elif name == "STORKGP":
cls = smpSTORKGP
elif name == "GMM":
cls = partial(smpGMM, K = 20)
elif name == "IGMM":
cls = partial(smpIGMM, K = 20)
elif name == "HebbSOM":
cls = smpHebbianSOM
elif name == 'resRLS':
from smp_base.models_learners import smpSHL
cls = smpSHL
else:
cls = smpKNN
return cls
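# Hedged example (added for illustration): resolve a model class by its common
# name and instantiate it from its own defaults dict; the conf handling follows
# the smpModelInit convention used by the classes above.
def demo_get_class_from_name():
    cls = get_class_from_name("KNN")
    mdl = cls(conf=dict(cls.defaults, idim=1, odim=1))
    return mdl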
def generate_inverted_sinewave_dataset(N = 1000, f = 1.0, p = 0.0, a1 = 1.0, a2 = 0.3):
"""models_actinf.generate_inverted_sinewave_dataset
Generate the inverted sine dataset used in Bishop's (Bishop96)
mixture density paper
Returns:
- matrices X, Y
"""
X = np.linspace(0,1,N)
# FIXME: include phase p
Y = a1 * X + a2 * np.sin(f * (2 * 3.1415926) * X) + np.random.uniform(-0.1, 0.1, N)
X,Y = Y[:,np.newaxis],X[:,np.newaxis]
# pl.subplot(211)
# pl.plot(Y, X, "ko", alpha=0.25)
# pl.subplot(212)
# pl.plot(X, Y, "ko", alpha=0.25)
# pl.show()
return X,Y
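# Hedged example (illustrative only): the inverted sinewave data is the
# standard multimodal benchmark for the mixture models above; any smpModel
# subclass with idim = odim = 1 can be plugged in and inspected with the
# plotting helpers defined in this section.
def demo_inverted_sinewave():
    X, Y = generate_inverted_sinewave_dataset(N=1000, f=1.0)
    mdl = smpGMM(conf=dict(smpGMM.defaults, idim=1, odim=1, K=20))
    mdl.fit(X, Y)
    plot_predictions_over_data(X, Y, mdl, saveplot=False)
    return mdl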
def generate_2devensimpler_component(x):
"""models_actinf.generate_2devensimpler_component
Generate a two-dimensional correspondence dataset to test
covariance learning of the multivariate mixture density learning
rule.
Returns:
- matrix X
"""
y1_1 = np.sin(x * 10.0) * 0.5 + x * 0.3 + x ** 2 * 0.05
    y1_1 += np.random.normal(0, np.abs(x - np.mean(x)))
import logging
from typing import Optional
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from numpy.linalg import LinAlgError
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
handler = logging.StreamHandler()
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
handler.setFormatter(formatter)
logger.addHandler(handler)
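# Hedged usage sketch (added for illustration, defined ahead of the class it
# exercises): L1QR expects a pandas Series/DataFrame pair and a quantile alpha;
# fit(s_max) traces the coefficient path. The synthetic data and parameter
# values below are assumptions, not part of the original module.
def demo_l1qr_sketch(n=200, k=5, alpha=0.5):
    rng = np.random.default_rng(0)
    x = pd.DataFrame(rng.normal(size=(n, k)), columns=['x%d' % i for i in range(k)])
    y = pd.Series(x['x0'] - 0.5 * x['x1'] + rng.normal(scale=0.1, size=n))
    model = L1QR(y=y, x=x, alpha=alpha)
    model.fit(s_max=2.0)
    return model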
class L1QR:
def __init__(self, y: pd.Series, x: pd.DataFrame, alpha: float) -> None:
"""Python implementation of the L1 norm QR algorithm of
<NAME> (2008): L1-Norm Quantile Regression, http://dx.doi.org/10.1198/106186008X289155
Args:
y: Vector of response data
x: Matrix of covariates
alpha: Quantile of interest
"""
self.x = x.to_numpy()
self.y = y.to_numpy()
self.var_names = x.columns
self.alpha = alpha
# set by fit()
        self.beta0: Optional[np.ndarray] = None
        self.beta: Optional[np.ndarray] = None
        self.s: Optional[np.ndarray] = None
self.b0: Optional[pd.Series] = None
self.b: Optional[pd.DataFrame] = None
def fit(self, s_max: float = np.inf) -> None:
"""Estimate the model.
Args:
s_max: Stop the algorithm prematurely when the L1 norm of the slope coefficients reaches s_max
"""
n, k = self.x.shape
if self.y.size != n:
raise Exception('y and x have different number of rows!')
logger.info(f'Initialization lasso quantile regression for n={n}, k={k}, and alpha={self.alpha}')
xc = np.hstack((np.ones((n, 1)), self.x)) # Store x a second time with intercept
eps1 = 10 ** -10 # Some low value
eps2 = 10 ** -10 # Convergence criterion
max_steps = n * np.min((k, n - 1)) # Maximum number of steps for the algorithm
ind_n = np.arange(n) # Index of the observations
ind_k = np.arange(k) # Index of the variables
beta0 = np.zeros((max_steps + 1, 1)) # Stores the estimates of the constant term
beta = np.zeros((max_steps + 1, k)) # Stores the estimates of the slope parameters
s = np.zeros(max_steps + 1) # Stores the penalty parameter
        y_has_duplicates = np.unique(self.y).size != n
        if y_has_duplicates:
logger.info('Adding noise to y because y contains duplicate values')
self.y += np.random.normal(loc=0, scale=10 ** -5, size=self.y.size)
logger.info('Finding initial solution')
# There are actually two cases, first if n*tau is integer, second if tau*n is non-integer.
# Here I assume that in the latter case all the weight is on the first component (see section 2.2)
ini_beta0 = np.sort(self.y)[int(np.floor(self.alpha * n))] # Initial beta0 (see 2.2.1)
ini_beta = np.zeros(k) # Initial beta (see 2.2.1)
ind_e = np.array(int(np.argwhere(self.y == ini_beta0))) # Index of the first point in the elbow
ind_l = ind_n[self.y < self.y[ind_e]] # All points that are left of the elbow
ind_r = ind_n[self.y > self.y[ind_e]] # All points that are right of the elbow
residual = self.y - ini_beta0 # Initial residuals
# Add the first variable to the active set
inactive = ind_k # All variables not in V
tmp_e, tmp_l, tmp_r = ind_e, ind_l, ind_r # Create a copy of the index sets
lambda_var = np.zeros((2, inactive.size)) # First row: sign=1, second row: sign=-1
lambda_var[lambda_var == 0] = -np.inf # Initially set to -inf (want to maximize lambda)
b = np.array([0, 1]) # The 1_0 vector (see p. 171 bottom)
nu_var = np.zeros((2, inactive.size, b.size)) # 3d array: nu for sign=1 in first dimension, sign=-1 in second
for j_idx, j_star in enumerate(inactive):
x_v = xc[:, np.append(0, j_star + 1)]
# Sign of the next variable to include may be either positive or negative
for sign in (1, -1):
index = np.where(sign == 1, 0, 1) # Index in nu_var and lambda_var
# Combination of (2.10) and (2.11)
x0 = np.vstack((np.hstack((1, np.mat(self.x)[tmp_e, j_star])), np.hstack((0, sign))))
try: # Check if x0 has full rank
nu_tmp = np.linalg.solve(x0, b) # Solve system (p. 171 bottom)
nu_var[index, j_idx, :] = nu_tmp
# Store sets that are used to compute -lambda* (p. 172)
x_l = x_v.take(tmp_l, axis=0, mode='clip')
x_r = x_v.take(tmp_r, axis=0, mode='clip')
# Save lambda achieved by the current variable. If sign of last entry != sign then leave at -inf.
if np.sign(nu_tmp[-1]) == sign:
lambda_var[index, j_idx] = -((1 - self.alpha) * np.dot(x_l, nu_tmp).sum() -
self.alpha * np.dot(x_r, nu_tmp).sum())
except LinAlgError:
logger.debug(f'sign: {sign}')
# Select the nu corresponding to the maximum lambda and store the maximum lambda
nu_var = nu_var[lambda_var.argmax(axis=0), np.arange(inactive.size), :]
lambda_var = lambda_var.max(axis=0)
# Store the active variable
ind_v = inactive[lambda_var.argmax()]
# Store initial nu0 and nu
nu0 = nu_var[ind_v, 0]
nu = nu_var[ind_v, 1:]
beta0[0] = ini_beta0
beta[0] = ini_beta
logger.debug(f'Initial beta0: {ini_beta0}')
logger.debug(f'Initial beta: {ini_beta}')
# Main loop
logger.info('Entering main loop')
drop = False
idx = 0
while idx < max_steps:
logger.debug(f'Index: {idx}')
idx += 1
# Calculate how far we need to move (the minimum distance between points and elbow)
if np.atleast_1d(nu).size == 1: # Make sure scalar array is converted to float, causes problems with np.dot
                nu = float(nu)
# (2.14), nu0 + x'*nu where x is without i in elbow
gam = nu0 + np.dot(self.x.take(ind_n[np.in1d(ind_n, ind_e, invert=True)], axis=0).take(ind_v, axis=1), nu)
gam = np.ravel(gam) # Flatten the array
delta1 = np.delete(residual, ind_e, 0) / gam # This is s - s_l in (2.14)
# Check whether all points are in the elbow or if we still need to move on
if np.sum(delta1 <= eps2) == delta1.size:
delta = np.inf
else:
delta = delta1[delta1 > eps1].min()
# Test if we need to remove some variable j from the active set
if idx > 1:
delta2 = np.array(-beta[idx - 1, ind_v] / nu)
if np.sum(delta2 <= eps2) == delta2.size:
tmpz_remove = np.inf
else:
tmpz_remove = delta2[delta2 > eps1].min()
if tmpz_remove < delta:
drop = True
delta = tmpz_remove
else:
drop = False
# Check if we need to continue or if we are done
if delta == np.inf:
logger.info(f'Finished, delta = inf')
break
# Update the shrinkage parameter
s[idx] = s[idx - 1] + delta
# Prepare the next steps depending if we drop a variable or not
if drop:
tmp_delta = delta2[delta2 > eps1] # All deltas larger than eps2
tmp_ind = ind_v[delta2 > eps1] # All V larger than eps2
j1 = tmp_ind[tmp_delta.argmin()] # The index of the variable to kick out
else:
# Find the i that will hit the elbow next
tmp_ind = np.delete(ind_n, ind_e)[
delta1 > eps2] # Remove Elbow from observations and keep non-zero elements
tmp_delta = delta1[delta1 > eps2] # All deltas that are non-zero
i_star = tmp_ind[tmp_delta.argmin()]
# Update beta
beta0[idx] = beta0[idx - 1] + delta * nu0
beta[idx] = beta[idx - 1]
beta[idx, ind_v] = beta[idx - 1, ind_v] + delta * nu
if s[idx] > s_max:
logger.info(f's = {s[idx]:.2f} is large enough')
break
# Reduce residuals not in the elbow by delta*gam
residual[np.in1d(ind_n, ind_e, invert=True)] -= delta * gam
# Check if there are points in either L or R if we do not drop
if (ind_l.size + ind_r.size == 1) & (not drop):
logger.info('No point in L or R')
break
# Add a variable to the active set
# Test if all variables are included. If yes, set lambda_var to -inf and continue with next step
if ind_v.size == k:
lambda_var = np.zeros((2, inactive.size))
lambda_var[lambda_var == 0] = -np.inf
else:
inactive = ind_k[np.in1d(ind_k, ind_v, invert=True)] # All variables not in V
tmp_e, tmp_l, tmp_r = ind_e, ind_l, ind_r # Create a copy of the index sets
if drop:
ind_v = ind_v[ind_v != j1] # Remove the detected variable from V
else:
# Add i_star to the Elbow and remove it from either Left or Right
# (we know that i_star hits the elbow)
tmp_e = np.append(tmp_e, i_star)
tmp_l = tmp_l[tmp_l != i_star]
tmp_r = tmp_r[tmp_r != i_star]
                lambda_var = np.full((2, inactive.size), -np.inf)  # First row: sign=1, second row: sign=-1; start at -inf (we maximize lambda)
nu_var = np.zeros((2, inactive.size, 1 + ind_v.size + 1)) # Store nus in 3d array
b = np.array([0] * (ind_v.size + 1) + [1]) # The 1_0 vector (see p. 171 bottom)
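                # Orientation sketch (interpretation, cf. p. 171): for each candidate j* the
                # system x0 @ nu = b solved below stacks one row per elbow point,
                # (1, x_{i,V}, x_{i,j*}), with right-hand side 0 (the elbow residuals stay
                # put), plus a final row (0, sign(beta_V), sign) with right-hand side 1,
                # which pins the sign of the entering coefficient; nu = (nu0, nu_V, nu_j*).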
for j_idx in range(inactive.size):
j_star = inactive[j_idx] # Select variable j as candidate for the next active variable
# Select all columns of x that are in ind_v and additionally j_star.
# Transposition improves performance as Python stores array in row-major order
x_v = xc.T.take(np.append(0, np.append(ind_v, j_star) + 1), axis=0, mode='clip').T
# Combination of (2.10) and (2.11)
x0 = np.vstack((np.hstack((np.ones((tmp_e.size, 1)),
self.x[tmp_e][:, ind_v].reshape((tmp_e.size, -1)),
self.x[tmp_e, j_star].reshape((tmp_e.size, -1)))),
np.hstack(
(0, np.sign(beta[idx, ind_v]), np.nan)))) # nan is a placeholder for sign
# Sign of the next variable to include may be either positive or negative
for sign in (1, -1):
                        index = 0 if sign == 1 else 1  # Index in nu_var and lambda_var
x0[-1, -1] = sign # Change sign in the x0 matrix
try:
nu_tmp = np.linalg.solve(x0, b) # Solve system (p. 171 bottom)
# If sign of last entry != sign then leave at -inf.
if np.sign(nu_tmp[-1]) == sign:
nu_var[index, j_idx, :] = nu_tmp
                                # Store sets that are used to compute -lambda* (p. 172)
x_l = x_v.take(tmp_l, axis=0, mode='clip')
x_r = x_v.take(tmp_r, axis=0, mode='clip')
lambda_var[index, j_idx] = -((1 - self.alpha) * np.dot(x_l, nu_tmp).sum() -
self.alpha * np.dot(x_r, nu_tmp).sum())
except LinAlgError:
pass
# Select the maximum of each column
nu_var = nu_var[lambda_var.argmax(axis=0), np.arange(inactive.size), :]
lambda_var = lambda_var.max(axis=0)
# Remove an observation from the elbow
            lambda_obs = np.full(tmp_e.size, -np.inf)
nu_obs = np.zeros((1 + ind_v.size, tmp_e.size))
            left_obs = np.zeros(tmp_e.size)  # 1 if we shifted the observation to the left
b = np.array([0] * ind_v.size + [1])
# Store the L and the R observations of x
x_v = xc.T.take(np.append(0, ind_v + 1), axis=0, mode='clip').T
x_r = x_v.take(tmp_r, axis=0, mode='clip')
x_l = x_v.take(tmp_l, axis=0, mode='clip')
# Combination of (2.10) and (2.11), here without an additional variable j
x0_all = np.vstack((np.hstack((np.ones((tmp_e.size, 1)), self.x[tmp_e][:, ind_v].reshape((tmp_e.size, -1)))),
np.hstack((0, np.sign(beta[idx, ind_v])))))
for i in range(tmp_e.size):
x0 = np.delete(x0_all, i, 0) # Delete the ith observation
try:
nu_tmp = | np.linalg.solve(x0, b) | numpy.linalg.solve |
import numpy as np
from scipy.linalg import eigh
from scipy.special import binom
from scipy.integrate import quad
import matplotlib.pyplot as plt
import seaborn as sns
import sciunit
from networkunit.scores import to_precision
import matplotlib.mlab as mlab
import scipy.interpolate as interpolate
class eigenangle(sciunit.Score):
"""
The eigenangle score evaluates whether two correlation matrices have
similar non-random elements by calculating the significance of the angles
between the corresponding eigenvectors.
    Either the binsize or the number of bins must be provided to perform the
    significance test.
"""
score = np.nan
@classmethod
def compute(self, matrix_1, matrix_2, bin_num=None,
binsize=None, t_start=None, t_stop=None, **kwargs):
if bin_num is None:
if binsize is not None \
and (t_start is not None and t_stop is not None):
bin_num = float((t_stop - t_start) / binsize)
else:
                raise ValueError('Too few parameters to compute bin_num!')
N = len(matrix_1)
EWs1, EVs1 = eigh(matrix_1) # returns EWs in ascending order
EWs2, EVs2 = eigh(matrix_2)
EWs1 = EWs1[::-1]
EWs2 = EWs2[::-1]
EVs1 = EVs1.T[::-1]
EVs2 = EVs2.T[::-1]
for count, (ev1, ev2) in enumerate(zip(EVs1, EVs2)):
EVs1[count] = ev1 * np.sign(ev1[np.argmax( | np.absolute(ev1) | numpy.absolute |
import os
import h5py
import numpy as np
import pandas as pd
from sklearn.preprocessing import StandardScaler
def put_static_first(data, col, static_col):
"""Simple function putting the static columns first in the data.
Args:
data: Dict with a data array for each split.
col: Ordered list of the columns in the data.
static_col: List of static columns names.
Returns:
data_inverted : Analog to data with columns reordered in each split.
        col_inverted : Analog to col with column names reordered.
"""
static_index = list(np.where(np.isin(np.array(col), static_col))[0])
n_col = len(col)
non_static_index = [k for k in range(n_col) if k not in static_index]
new_idx = static_index + non_static_index
data_inverted = {}
for split in ['train', 'test', 'val']:
data_inverted[split] = data[split][:, new_idx]
col_inverted = list(np.array(col)[new_idx])
return data_inverted, col_inverted
def clip_dataset(var_range, data, columns):
"""Set each values outside of predefined range to NaN.
Args:
var_range: Dict with associated range [min,max] to each variable name.
data: Dict with a data array for each split.
columns: Ordered list of the columns in the data.
Returns:
        new_data : Data with all out-of-range values replaced by NaN.
"""
new_data = {}
for split in ['train', 'test', 'val']:
clipped_data = data[split][:]
for i, col in enumerate(columns):
if var_range.get(col):
idx = np.sort(np.concatenate([np.argwhere(clipped_data[:, i] > var_range[col][1]),
np.argwhere(clipped_data[:, i] < var_range[col][0])])[:, 0])
clipped_data[idx, i] = np.nan
new_data[split] = clipped_data
return new_data
def finding_cat_features(rep_data, threshold):
"""
    Extracts the indexes and names of categorical features in a pre-built dataset.
Args:
rep_data: Pre-built dataset as a h5py.File(...., 'r').
        threshold: Number of unique values below which an integer variable is considered categorical
Returns:
categorical: List of names containing categorical features.
categorical_idx: List of matching column indexes.
"""
columns = rep_data['data'].attrs['columns']
categorical = []
for i, c in enumerate(columns):
values = rep_data['data']['train'][:, i]
values = values[~np.isnan(values)]
nb_values = len(np.unique(values))
if nb_values <= threshold and np.all(values == values.astype(int)):
categorical.append(c)
categorical_idx = np.sort([np.argwhere(columns == feat)[0, 0] for feat in categorical])
return categorical, categorical_idx
def finding_cat_features_fom_file(rep_data, info_df):
"""
    Extracts the indexes and names of categorical features in a pre-built dataset.
Args:
rep_data: Pre-built dataset as a h5py.File(...., 'r').
info_df: Dataframe with information on each variable.
Returns:
categorical: List of names containing categorical features.
categorical_idx: List of matching column indexes.
"""
columns = rep_data['data'].attrs['columns']
categorical = []
for i, c in enumerate(columns):
if c.split('_')[0] != 'plain':
pass
else:
if info_df[info_df['VariableID'] == c.split('_')[-1]]['Datatype'].values == 'Categorical':
categorical.append(c)
categorical_idx = np.sort([np.argwhere(columns == feat)[0, 0] for feat in categorical])
return categorical, categorical_idx
def get_one_hot(rep_data, cat_names, cat_idx):
"""
One-hots the categorical features in a given pre-built dataset.
Args:
rep_data: Pre-built dataset as a h5py.File(...., 'r').
cat_names: List of names containing categorical features.
cat_idx: List of matching column indexes.
Returns:
        all_categorical_data: Dict with, for each split, the one-hotted categorical columns as a single array.
        col_name: List of names of the matching columns
"""
all_categorical_data = np.concatenate([rep_data['data']['train'][:, cat_idx],
rep_data['data']['test'][:, cat_idx],
rep_data['data']['val'][:, cat_idx]], axis=0)
cat_dict = {}
col_name = []
for i, cat in enumerate(cat_idx):
dum = np.array(pd.get_dummies(all_categorical_data[:, i]))
if dum.shape[-1] <= 2:
dum = dum[:, -1:]
col_name += [cat_names[i].split('_')[-1] + '_cat']
else:
col_name += [cat_names[i].split('_')[-1] + '_cat_' + str(k) for k in range(dum.shape[-1])]
cat_dict[cat] = dum
all_categorical_data_one_h = np.concatenate(list(cat_dict.values()), axis=1)
all_categorical_data = {}
all_categorical_data['train'] = all_categorical_data_one_h[:rep_data['data']['train'].shape[0]]
all_categorical_data['test'] = all_categorical_data_one_h[
rep_data['data']['train'].shape[0]:rep_data['data']['train'].shape[0] +
rep_data['data']['test'].shape[0]]
all_categorical_data['val'] = all_categorical_data_one_h[-rep_data['data']['val'].shape[0]:]
return all_categorical_data, col_name
def scaling_data_common(data_path, threshold=25, scaler=StandardScaler(), static_idx=None, df_ref=None):
"""
    Wrapper which one-hots and scales a pre-built dataset.
Args:
data_path: String with the path to the pre-built non scaled dataset
threshold: Int below which we consider a variable as categorical
scaler: sklearn Scaler to use, default is StandardScaler.
static_idx: List of indexes containing static columns.
df_ref: Reference dataset containing supplementary information on the columns.
Returns:
data_dic: dict with each split as a big array.
        label_dic: dict with, for each split, a labels array in the same order as lookup_table.
        patient_dic: dict containing an array for each split such that each row of the array is of the type
[start_index, stop_index, patient_id].
col: list of the variables names corresponding to each column.
labels_name: list of the tasks name corresponding to labels columns.
"""
rep_data = h5py.File(data_path, 'r')
columns = rep_data['data'].attrs['columns']
train_data = rep_data['data']['train'][:]
test_data = rep_data['data']['test'][:]
val_data = rep_data['data']['val'][:]
# We just extract tasks name to propagate
if 'tasks' in list(rep_data['labels'].attrs.keys()):
labels_name = rep_data['labels'].attrs['tasks']
else:
labels_name = None
# We treat np.inf and np.nan as the same
np.place(train_data, mask=np.isinf(train_data), vals=np.nan)
np.place(test_data, mask=np.isinf(test_data), vals=np.nan)
np.place(val_data, mask= | np.isinf(val_data) | numpy.isinf |
import numpy as np
from numpy.linalg import matrix_power, pinv
from scipy.integrate import quad, solve_ivp
from scipy.linalg import inv
from scipy.special import factorial, binom
from tqdm import tqdm
from functools import lru_cache
from typing import List, Callable, Union, Tuple
from copy import copy
from sympy import Symbol, ImmutableMatrix
from . import helpers_reservoir as hr
from . import picklegzip
from . TimeStepIterator import TimeStep, TimeStepIterator
class DMRError(Exception):
"""Generic error occurring in this module."""
pass
##############################################################################
class DiscreteModelRun():
def __init__(self, times, Bs, xs):
"""
Bs State transition operators for one time step
"""
self.times = times
self.Bs = Bs
self.xs = xs
def acc_net_internal_flux_matrix(self):
Bs = self.Bs
xs = self.xs
return hr.net_Fs_from_discrete_Bs_and_xs(Bs, xs)
def acc_net_external_output_vector(self):
xs = self.xs
Bs = self.Bs
return hr.net_Rs_from_discrete_Bs_and_xs(Bs, xs)
def acc_net_external_input_vector(self):
xs = self.xs
Bs = self.Bs
return hr.net_Us_from_discrete_Bs_and_xs(Bs, xs)
@property
def start_values(self):
return self.xs[0, :]
@property
def nr_pools(self):
return len(self.start_values)
@classmethod
def from_Bs_and_net_Us(cls, start_values, times, Bs, net_Us):
"""
Bs State transition operators for one time step
"""
xs = cls._solve(start_values, Bs, net_Us)
return cls(times, Bs, xs)
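    # Note (sketch): _solve below realises the recursion x_{k+1} = B_k @ x_k + u_k,
    # so e.g. a single pool with constant B = 0.9 and u = 1 approaches the fixed
    # point x = u / (1 - B) = 10.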
@classmethod
def from_Bs_and_Us_2(cls, start_values, times, Bs, Us):
"""
Bs State transition operators for one time step
"""
xs = cls._solve_2(start_values, Bs, Us)
dmr = cls(times, Bs, xs)
dmr.Us = Us
return dmr
@classmethod
def from_fluxes(cls, start_values, times, net_Us, net_Fs, net_Rs):
Bs = cls.reconstruct_Bs_without_xs(
start_values,
net_Us,
net_Fs,
net_Rs
)
return cls.from_Bs_and_net_Us(
start_values,
times,
Bs,
net_Us
)
@classmethod
def from_fluxes_2(cls, start_values, times, Us, Fs, Rs):
Us[np.abs(Us) < 1e-12] = 0.0
Fs[np.abs(Fs) < 1e-12] = 0.0
Rs[np.abs(Rs) < 1e-12] = 0.0
Bs = cls.reconstruct_Bs_without_xs_2(
start_values,
Us,
Fs,
Rs
)
return cls.from_Bs_and_Us_2(
start_values,
times,
Bs,
Us
)
@classmethod
def from_fluxes_and_solution(cls, data_times, xs, net_Fs, net_Rs):
Bs = cls.reconstruct_Bs(xs, net_Fs, net_Rs)
dmr = cls(data_times, Bs, xs)
return dmr
@classmethod
def from_iterator(
cls,
tsit
):
# "unzipping" the tupels
x_0 = tsit.initial_ts.x
n_pools = len(x_0)
Bs, net_Us, times = zip(*((ts.B+np.eye(n_pools), ts.u.reshape(-1), ts.t) for ts in tsit))
# Note:
# 1.) that the Bs of the iterator are momentary Bs whereas
# the Bs of the DiscreteModelRun are net Bs with
# net_B = B+I
#
# 2.) that the time steps also contain
        #     the solution xs (as it is necessary to compute
# the next B and/or u for a nonlinear model) and that we do
# not use it but recompute it later in _solve.
# 3.) that we compute an artificial time series
# from the iterator, whereas we actually
# want to avoid a times argument since we want to
# remove non equidistant time grids anyway.
#
# Points 2 and 3 are moving backwards and actually signal that
# an iterator is the more general description of a
# discrete dynamic system. x_{i+1} = f(x_i,i) (what a
# surprise ;-))
# It seems likely that an iterator should be
# at the heart of the class.
# In case the lists (of Bs us xs # Fs...) are
# available they can be used to
# build special iterators.
# But this is not implemented yet since the first aim is
# to establish the connection
        # the iterator yields a new B and u for the last timestep
        # which is a different convention (dmr has one more x than us or Bs)
return cls.from_Bs_and_net_Us(x_0, times, Bs[:-1], net_Us[:-1])
@classmethod
def from_euler_forward_smooth_reservoir_model(
cls,
srm,
par_dict,
func_dict,
delta_t,
number_of_steps,
start_values
):
# define some symbols to replace
# the time symbol with t=delta_t*it
it = Symbol('it')
t = srm.time_symbol
xi,T,N,x,u = srm.xi_T_N_u_representation(factor_out_xi=False)
B = xi*T*N
sym_B = hr.euler_forward_B_sym(
B,
t,
delta_t,
it
)
sym_B_net= sym_B + ImmutableMatrix.eye(*sym_B.shape)
sym_u = hr.euler_forward_net_u_sym(
u,
t,
delta_t,
it
)
num_B, num_u = map(
lambda expr: hr.numerical_array_func(
x,
it,
expr,
par_dict,
func_dict
),
(sym_B_net, sym_u)
)
return cls.from_B_and_u_funcs(
start_values,
B_func=num_B,
u_func=num_u,
number_of_steps=number_of_steps,
delta_t=par_dict[delta_t]
)
@classmethod
def from_B_and_u_funcs(
cls,
x_0,
B_func,
u_func,
number_of_steps,
delta_t
):
i_min = 0
u_0 = u_func(i_min, x_0)
B_0 = B_func(i_min, x_0)
tsit = TimeStepIterator(
initial_ts= TimeStep(B=B_0,u=u_0,x=x_0,t=0),
B_func=B_func,
u_func=u_func,
number_of_steps = number_of_steps,
delta_t=delta_t
)
return cls.from_iterator(tsit)
def restrict_to_pools(self, pool_nrs: np.ndarray) -> '__class__':
"""Restrict the discrete model run to a subset of pools.
Args:
            pool_nrs: array of pool numbers INSIDE the restricted model,
                all other pools will be considered as OUTSIDE
Returns:
a DMR with ``len(pool_nrs)`` pools
"""
nr_pools = len(pool_nrs)
nr_times = len(self.times)
start_values_restricted = self.start_values[pool_nrs]
net_Us_restricted = np.nan * np.ones((nr_times-1, nr_pools))
net_Us_restricted[:] = self.net_Us[:, pool_nrs]
Bs_restricted = np.nan * np.ones((nr_times-1, nr_pools, nr_pools))
Bs_restricted = self.Bs[:, :, pool_nrs][:, pool_nrs, :]
dmr_restricted = self.__class__.from_Bs_and_net_Us(
start_values_restricted,
self.times,
Bs_restricted,
net_Us_restricted
)
return dmr_restricted
@property
@lru_cache()
def net_Us(self):
n = len(self.Bs)
return np.array(
[
self.xs[k+1]-np.matmul(self.Bs[k], self.xs[k])
for k in range(n)
]
)
@property
def dts(self):
"""
The lengths of the time intervals.
"""
return np.diff(self.times).astype(np.float64)
@property
def dt(self):
"""
The length of the time intervals.
At the moment we assume equidistance without checking
"""
return self.dts[0]
def time_bin_index(
self,
t: float
) -> int:
"""
The index of the bin enclosing the given time
"""
return int(np.floor(t/self.dt))
@classmethod
def from_SmoothModelRun(cls, smr, nr_bin):
# we discard the inner spacing
# of smr.times since it is potentially
# not equidistant
data_times=np.linspace(
smr.times[0],
smr.times[-1],
nr_bin+1
)
return cls(
data_times,
smr.fake_discretized_Bs(data_times),
smr.solve_func()(data_times)
)
@classmethod
def reconstruct_Fs_and_Rs(cls, xs, Bs):
Fs = np.nan * np.ones_like(Bs)
Rs = np.nan * np.ones(Bs.shape[:-1])
for k in range(Bs.shape[0]):
for j in range(Bs.shape[2]):
Fs[k, :, j] = Bs[k, :, j] * xs[k, j]
Rs[k, j] = (1 - Bs[k, :, j].sum()) * xs[k,j]
for j in range(Bs.shape[2]):
Fs[k, j, j] = 0
return Fs, Rs
@classmethod
def reconstruct_Bs(cls, xs, Fs, Rs):
Bs = np.nan * np.ones_like(Fs)
for k in range(len(Rs)):
try:
B = cls.reconstruct_B(xs[k], Fs[k], Rs[k])
Bs[k, :, :] = B
except DMRError as e:
msg = str(e) + 'time step %d' % k
raise(DMRError(msg))
return Bs
@classmethod
def reconstruct_Bs_without_xs(cls, start_values, Us, Fs, Rs):
x = start_values
Bs = np.nan * np.ones_like(Fs)
for k in tqdm(range(len(Rs))):
try:
B = cls.reconstruct_B(x, Fs[k], Rs[k])
Bs[k, :, :] = B
x = B @ x + Us[k]
except DMRError as e:
msg = str(e) + 'time step %d' % k
raise(DMRError(msg))
return Bs
@classmethod
def reconstruct_Bs_without_xs_2(cls, start_values, Us, Fs, Rs):
x = start_values
Bs = np.nan * np.ones_like(Fs)
for k in range(len(Rs)):
try:
B = cls.reconstruct_B_2(x, Fs[k], Rs[k], Us[k])
Bs[k, :, :] = B
x = B @ (x + Us[k])
except DMRError as e:
msg = str(e) + 'time step %d' % k
raise(DMRError(msg))
return Bs
@classmethod
def reconstruct_B(cls, x, F, R):
nr_pools = len(x)
B = np.identity(nr_pools)
if len(np.where(F < 0)[0]) > 0:
raise(DMRError('Negative flux: '))
# construct off-diagonals
for j in range(nr_pools):
if x[j] < 0:
raise(DMRError('Content negative: pool %d, ' % j))
if x[j] != 0:
B[:, j] = F[:, j] / x[j]
else:
B[:, j] = 0
# construct diagonals
for j in range(nr_pools):
if x[j] != 0:
B[j, j] = 1 - (sum(B[:, j]) - B[j, j] + R[j] / x[j])
if B[j, j] < 0:
if np.abs(x[j] - R[j] - F[:, j].sum()) < 1e-07:
# if np.abs(B[j, j]) < 1e-03: # TODO: arbitrary value
B[j, j] = 0
else:
pass
print(B[j, j])
print(x[j], R[j], F[:, j].sum(), F[j, :].sum())
print(x[j] - R[j] - F[:, j].sum())
raise(DMRError('Diag. val < 0: pool %d, ' % j))
else:
B[j, j] = 1
# # correct for negative diagonals
# neg_diag_idx = np.where(np.diag(B)<0)[0]
# for idx in neg_diag_idx:
# print("'repairing' neg diag in pool", idx)
# # scale outfluxes down to empty pool
# col = B[:, idx]
# d = col[idx]
# s = 1-d
# B[:, idx] = B[:, idx] / s
# r = R[idx] / x[idx] / s
# B[idx, idx] = 1 - (sum(B[:, idx]) - B[idx, idx] + r)
return B
@classmethod
def reconstruct_B_2(cls, x, F, R, U):
nr_pools = len(x)
B = np.identity(nr_pools)
if len(np.where(F < 0)[0]) > 0:
raise(DMRError('Negative flux: '))
# construct off-diagonals
for j in range(nr_pools):
if x[j] < 0:
raise(DMRError('Content negative: pool %d, ' % j))
if x[j] + U[j] != 0:
B[:, j] = F[:, j] / (x[j] + U[j])
else:
B[:, j] = 0
# construct diagonals
for j in range(nr_pools):
B[j, j] = 0
# print(B[:, j].sum())
# print(R[j] / (x[j] + U[j]))
if x[j] + U[j] != 0:
B[j, j] = 1 - (sum(B[:, j]) + R[j] / (x[j] + U[j]))
# B[j, j] = ((x[j] + U[j]) * (1 - sum(B[:, j])) - R[j]) / (x[j] + U[j])
if B[j, j] < 0:
# print(409, B[:, j].sum())
# B[j, j] = 0
# y = np.array([B[i, j] * (x[j] + U[j]) for i in range(nr_pools)])
# print(y)
# print()
# print(F[:, j])
# print(y - F[:, j])
# print(sum(B[:, j]))
# print((1-sum(B[:, j])) * (x[j] + U[j]), R[j])
# print(x[j] + U[j], (sum(F[:, j]) + R[j]) / 0.15)
# raise
if np.abs(B[j, j]) < 1e-08:
B[j, j] = 0.0
else:
# pass
print(B[j, j])
print(x[j], U[j], R[j], F[:, j].sum(), F[j, :].sum())
print(x[j] + U[j] - R[j] - F[:, j].sum() + F[j, :].sum())
print(B[:, j])
raise(DMRError('Diag. val < 0: pool %d, ' % j))
else:
B[j, j] = 1
# # correct for negative diagonals
# neg_diag_idx = np.where(np.diag(B)<0)[0]
# for idx in neg_diag_idx:
## print("'repairing' neg diag in pool", idx)
# # scale outfluxes down to empty pool
# col = B[:, idx]
# d = col[idx].sum()
# s = 1-d
## print(s)
# B[:, idx] = B[:, idx] / s
# r = R[idx] / (x[idx] + U[idx]) / s
# B[idx, idx] = 1 - (sum(B[:, idx]) - B[idx, idx] + r)
# if np.abs(B[idx, idx]) < 1e-08:
# B[idx, idx] = 0
#
# print(B[idx, idx], (B @ (x + U)))
return B
# @classmethod
# def reconstruct_Bs(cls, data_times, start_values, Fs, rs, net_Us):
# nr_pools = len(start_values)
# Bs = np.zeros((len(data_times)-1, nr_pools, nr_pools))
#
# x = start_values
# for k in range(len(data_times)-1):
# # B = cls.reconstruct_B(xs[k], Fs[k+shift], rs[k+shift])
# B = cls.reconstruct_B(x, Fs[k], rs[k], k)
# x = B @ x + net_Us[k]
# Bs[k,:,:] = B
# return Bs
def solve(self):
return self.xs
@classmethod
def _solve(cls, start_values, Bs, net_Us):
xs = np.nan*np.ones((len(Bs)+1, len(start_values)))
xs[0, :] = start_values
for k in range(0, len(net_Us)):
#xs[k+1] = Bs[k] @ xs[k] + net_Us[k]
xs[k+1] = np.matmul(Bs[k], xs[k]) + net_Us[k]
return xs
@classmethod
def _solve_2(cls, start_values, Bs, net_Us):
xs = np.nan*np.ones((len(Bs)+1, len(start_values)))
xs[0, :] = start_values
for k in range(0, len(net_Us)):
xs[k+1] = Bs[k] @ (xs[k] + net_Us[k])
return xs
def acc_external_output_vector(self):
n = self.nr_pools
rho = np.array([1-B.sum(0).reshape((n,)) for B in self.Bs])
soln = self.solve()[:-1]
r = rho * soln
return r
def acc_external_output_vector_2(self):
n = self.nr_pools
rho = np.array([1-B.sum(0).reshape((n,)) for B in self.Bs])
soln = self.solve()
r = rho * (soln[:-1] + self.Us)
return r
def acc_internal_flux_matrix(self):
# fixme mm 20-04-2020:
# potential gain by use of sparse matrices
Bs = self.Bs
soln = self.solve()[:-1]
return np.array([Bs[k] * soln[k] for k in range(len(Bs))])
# return value in unit "time steps"
def compute_start_m_factorial_moment(self, order, time_index=0):
Id = np.identity(self.nr_pools)
B = self.Bs[time_index]
x = self.solve()[time_index]
X = x * Id
n = order
fm = factorial(n) * pinv(X) @ matrix_power(B, n)
fm = fm @ matrix_power(pinv(Id-B), n) @ x
return fm
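    # Note (sketch): the expression above is the n-th factorial moment of the pool age
    # distribution in units of time steps,
    #     E[A (A-1) ... (A-n+1)] = n! * X^{-1} @ B^n @ (I - B)^{-n} @ x,  X = diag(x);
    # compute_start_age_moments then combines these (via what appear to be Stirling
    # numbers of the second kind) into raw moments and rescales by dt**n.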
# return value in unit "time steps x dt[0]"
def compute_start_age_moments(self, max_order, time_index=0):
def stirling(n, k):
n1 = n
k1 = k
if n <= 0:
return 1
elif k <= 0:
return 0
elif (n == 0 and k == 0):
return -1
elif n != 0 and n == k:
return 1
elif n < k:
return 0
            else:
                return k1 * stirling(n1 - 1, k1) + stirling(n1 - 1, k1 - 1)
nr_pools = self.nr_pools
# Id = np.identity(nr_pools)
# B0 = self.Bs[time_index]
# x0 = self.solve()[time_index]
# X0 = x0 * Id
start_age_moments = []
dt = self.dts[0]
for n in range(1, max_order+1):
# the old formula is not correct for higher moments
# in discrete time
# start_age_moment = factorial(n) * inv(X0)
# start_age_moment @= matrix_power(inv(Id-B0), n) @ x0
start_m_moment = np.zeros(nr_pools)
for k in range(n+1):
start_m_moment += stirling(n, k) * \
self.compute_start_m_factorial_moment(k, time_index)
start_age_moments.append(start_m_moment*dt**n)
return np.array(start_age_moments)
def fake_xss(self, nr_time_steps):
Id = np.identity(self.nr_pools)
if np.all(self.net_Us == 0):
raise(DMRError("Cannot fake xss, because there are no inputs to the systems"))
mean_U = self.net_Us[:nr_time_steps, ...].mean(axis=0)
mean_B = self.Bs[:nr_time_steps, ...].mean(axis=0)
# fake equilibrium
fake_xss = pinv(Id-mean_B) @ mean_U
return fake_xss
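    # Note (sketch): fake_xss is the fixed point of the time-averaged system, i.e. the
    # x solving x = mean_B @ x + mean_U, hence x* = (I - mean_B)^{-1} @ mean_U
    # (evaluated with the pseudo-inverse above).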
def fake_eq_14C(self, nr_time_steps, F_atm, decay_rate, lim, alpha=None):
if alpha is None:
alpha = hr.ALPHA_14C
# input in age steps ai
p0 = self.fake_start_age_masses(nr_time_steps)
# import matplotlib.pyplot as plt
# fig, axes = plt.subplots(ncols=2, nrows=3, figsize=(18, 18))
# times = np.linspace(0, 1000, 50)
# z = np.array([p0_ai(int(t)) for t in times])
# y = np.array([p0(t) for t in times])
# for k, ax in zip(range(self.nr_pools), axes.flatten()):
# ax.plot(times, y[:, k], label="c")
# ax.plot(times, z[:, k])
# ax.legend()
# fig.show()
# E_a = self.fake_start_age_moments(nr_time_steps, 1).reshape(-1)
eq_14C = np.nan * np.ones((self.nr_pools, ))
for pool in range(self.nr_pools):
# print(np.float(E_a[pool])/365.25, F_atm(np.float(E_a[pool])))
# input in age steps ai, output as mass, not density
p0_pool = lambda ai: p0(ai)[pool]
# def p0_pool_14C(ai):
# res = (
## (F_atm(ai)/1000.0 + 1) *
# ai * p0_pool(ai)
## * np.exp(-decay_rate*ai)
# )
# return res
# input in age (not age indices)
def p0_pool_14C_quad(a):
res = (
(F_atm(a)/1000.0 + 1) *
p0_pool(int(a/self.dt)) / self.dt # make masses to density
# * alpha # makes integration imprecise
* np.exp(-decay_rate*int(a))
)
# print(a, res)
return res
# integration via solve_ivp is fast and successful
res_quad = solve_ivp(
lambda a, y: p0_pool_14C_quad(a),
(0, lim),
np.array([0])
)
# print("quad", res_quad.y.reshape(-1)[-1])#/365.25/self.start_values[pool])
res = res_quad.y.reshape(-1)[-1]
## res = res_quad[0]
# ai = 0
# res = 0
# res2 = 0
# while ai <= 2*lim_ai:
# res += p0_pool_14C(ai)
# res2 += p0_pool(ai)
## print(res, res2)
# ai += 1
# print(res, res2)
eq_14C[pool] = res * alpha
return eq_14C
# return value in unit "time steps"
def fake_start_m_factorial_moment(self, order, nr_time_steps):
Id = np.identity(self.nr_pools)
# fake equilibrium
fake_xss = self.fake_xss(nr_time_steps)
mean_B = self.Bs[:nr_time_steps, ...].mean(axis=0)
B = mean_B
x = fake_xss
X = x * Id
n = order
fm = factorial(n) * pinv(X) @ matrix_power(B, n)
fm = fm @ matrix_power(pinv(Id-B), n) @ x
return fm
# return value in unit "time steps x dt[0]"
def fake_start_age_moments(self, nr_time_steps, up_to_order):
def stirling(n, k):
n1 = n
k1 = k
if n <= 0:
return 1
elif k <= 0:
return 0
elif (n == 0 and k == 0):
return -1
elif n != 0 and n == k:
return 1
elif n < k:
return 0
            else:
                return k1 * stirling(n1 - 1, k1) + stirling(n1 - 1, k1 - 1)
nr_pools = self.nr_pools
# Id = np.identity(nr_pools)
# B0 = self.Bs[time_index]
# x0 = self.solve()[time_index]
# X0 = x0 * Id
start_age_moments = []
dt = self.dts[0]
for n in range(1, up_to_order+1):
# the old formula is not correct for higher moments
# in discrete time
# start_age_moment = factorial(n) * inv(X0)
# start_age_moment @= matrix_power(inv(Id-B0), n) @ x0
start_m_moment = np.zeros(nr_pools)
for k in range(n+1):
start_m_moment += stirling(n, k) * \
self.fake_start_m_factorial_moment(k, nr_time_steps)
start_age_moments.append(start_m_moment*dt**n)
return np.array(start_age_moments)
def age_moment_vector_up_to(self, up_to_order, start_age_moments):
soln = self.solve()
ams = self._solve_age_moment_system(up_to_order, start_age_moments)
res = np.nan * np.ones((ams.shape[0], ams.shape[1]+1, ams.shape[2]))
res[:, 0, :] = soln
res[:, 1:, :] = ams
return res
def age_moment_vector(self, order, start_age_moments):
ams = self._solve_age_moment_system(order, start_age_moments)
amv = ams[:, order-1, :]
return amv
def system_age_moment(self, order, start_age_moments, mask=False):
if not isinstance(mask, bool):
mask_over_time = np.repeat(
mask.reshape(1, -1),
len(self.times),
axis=0
)
else:
mask_over_time = mask
age_moment_vector = self.age_moment_vector(order, start_age_moments)
age_moment_vector[np.isnan(age_moment_vector)] = 0
soln = self.solve()
soln = np.ma.masked_array(soln, mask_over_time)
total_mass = soln.sum(1) # row sum
total_mass[total_mass == 0] = np.nan
system_age_moment = (age_moment_vector*soln).sum(1)/total_mass
return np.array(system_age_moment)
def _solve_age_moment_system(self, max_order, start_age_moments):
n = self.nr_pools
Id = np.identity(n)
ones = np.ones(n)
soln = self.solve()
soln[soln < 1e-12] = 0
# dts = self.dts
def diag_inv_with_zeros(A):
res = np.zeros_like(A)
for k in range(A.shape[0]):
if np.abs(A[k, k]) != 0:
res[k, k] = 1/A[k, k]
else:
# res[k, k] = np.nan
res[k, k] = 0
return res
age_moments = [start_age_moments]
dts = self.dts
for i in tqdm(range(len(self.times)-1)):
vec = np.zeros((max_order, n))
X_np1 = soln[i+1] * Id
X_n = soln[i] * Id
B = self.Bs[i]
for k in range(1, max_order+1):
moment_sum = np.zeros(n)
for j in range(1, k+1):
moment_sum += age_moments[-1][j-1, :].reshape((n,)) \
* binom(k, j) * dts[i]**(k-j)
# vec[k-1, :] = inv(X_np1) @ B @\
vec[k-1, :] = diag_inv_with_zeros(X_np1) @ B @\
X_n @ (moment_sum + ones * dts[i]**k)
age_moments.append(vec)
return np.array(age_moments)
def backward_transit_time_moment(
self,
order: int,
start_age_moments: np.ndarray,
mask: np.ndarray = False
)-> np.ndarray:
"""Compute the ``order`` th backward transit time moment based on the
This is done by computing a weighted sum of of the pool wise
age moments.
For every pool the weight is givem by the fraction of the
of this pools output of the combined output of all pools.
:func:`age_moment_vector`.
Args:
order (int): The order of the backward transit time moment that is
to be computed.
start_age_moments (numpy.ndarray order x nr_pools, optional):
Given initial age moments up to the order of interest.
Can possibly be computed by :func:`moments_from_densities`.
Defaults to None assuming zero initial ages.
mask (nr_pools): pools with True value will be ignored
Returns:
numpy.array: shape (nr_bins,nr_pools)
The ``order`` th backward transit time moment over the time grid.
"""
if not isinstance(mask, bool):
mask_over_time = np.repeat(
mask.reshape(1, -1),
len(self.times)-1,
axis=0
)
else:
mask_over_time = mask
        # the shape of the accumulated output vector r is (nr_bins, nr_pools)
r = self.acc_net_external_output_vector()
r = np.ma.masked_array(r, mask_over_time)
# the shape of the age moment vector is (nr_bins+1,nr_pools)
# so we have to cut it
age_moment_vector = self.age_moment_vector(
order,
start_age_moments
)[:-1,:]
pool_axis = 1
return np.array(
(r*age_moment_vector).sum(axis=pool_axis) / r.sum(axis=pool_axis)
)
def start_age_densities_func(self):
B = self.Bs[0]
u = self.net_Us[0]
dt = self.dts[0]
# assuming constant time step before times[0]
def p(a):
n = int(a // dt)
if a <= 0:
return | np.zeros_like(u) | numpy.zeros_like |
from math import *
import numpy as np
import matplotlib.pyplot as plt
import os
import re
def calculate_damping_coefficient(stiffness, restitution, m1, m2):
ln_rest = log(restitution)
return -2 * ln_rest * (get_reduced_particle_mass(m1, m2) * stiffness / (pi ** 2 + ln_rest ** 2)) ** 0.5
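# Note (sketch): this is the usual linear spring-dashpot damping relation
#     c = -2 * ln(e) * sqrt(m_eff * k / (pi**2 + ln(e)**2)),
# with restitution e and reduced mass m_eff = m1 * m2 / (m1 + m2); e -> 1 gives c -> 0
# (no damping), while small e gives strong damping.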
def get_mass(density, diameter):
return density * pi * diameter ** 3 / 6
def get_reduced_particle_mass(m1, m2):
return m1 * m2 / (m1 + m2)
def get_pos(time, mass, friction_coeff, u_0, g):
return - friction_coeff * g * time ** 2 / 2 + u_0 * time
def get_vel(time, mass, friction_coeff, u_0, g):
return - friction_coeff * g * time + u_0
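# Note (sketch): get_pos/get_vel are the analytic solution for a block sliding with
# Coulomb friction, x(t) = u_0*t - 0.5*mu*g*t**2 and v(t) = u_0 - mu*g*t, which comes
# to rest at t_max = u_0 / (mu * g); plot_pos_and_vel uses t_max below to bound the
# analytic curves before comparing them with the DEM output read from ./data.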
def plot_pos_and_vel():
stiffness = 1e5
density = 2000
diameter = 0.1
mass = get_mass(density, diameter)
restitution = 0.8
friction_coeff = 0.6
g = 9.81
u_0 = 1
damping_coeff = calculate_damping_coefficient(stiffness, restitution, mass, mass)
col_duration = pi * sqrt(mass / stiffness)
# Generate Analytic Solution Data
t_max = u_0 / (friction_coeff * g) # From Velocity equation
# print("t_max = " + str(t_max))
times = np.arange(0, t_max, col_duration / 16)
positions = []
velocities = []
for t in times:
positions.append(get_pos(t, mass, friction_coeff, u_0, g))
velocities.append(get_vel(t, mass, friction_coeff, u_0, g))
times = np.append(times, 0.5)
positions.append(positions[-1])
velocities.append(0)
# Get Simulation Data
data = []
increments = []
data_dir = os.listdir("data")
for filename in data_dir:
        name_match = re.match(r"1_friction_(\d+)_\d+\.txt", filename)
if name_match:
i = int(name_match.group(1))
if increments.count(i) == 0:
increments.append(i)
increments.sort()
for i in increments:
timestep_data = | np.array([]) | numpy.array |
from collections import OrderedDict
from random import sample
import numpy as np
import torch
import torch.optim as optim
from torch import nn as nn
from torch import autograd
from torch.autograd import Variable
import torch.nn.functional as F
import rlkit.torch.pytorch_util as ptu
from rlkit.torch.torch_meta_irl_algorithm import np_to_pytorch_batch
from rlkit.core.eval_util import create_stats_ordered_dict
from rlkit.torch.torch_meta_irl_algorithm import TorchMetaIRLAlgorithm
from rlkit.torch.sac.policies import MakeDeterministic
from rlkit.samplers.util import rollout
from rlkit.torch.sac.policies import PostCondMLPPolicyWrapper
from rlkit.data_management.path_builder import PathBuilder
from gym.spaces import Dict
def concat_trajs(trajs):
new_dict = {}
for k in trajs[0].keys():
if isinstance(trajs[0][k], dict):
new_dict[k] = concat_trajs([t[k] for t in trajs])
else:
new_dict[k] = np.concatenate([t[k] for t in trajs], axis=0)
return new_dict
class NeuralProcessAIRL(TorchMetaIRLAlgorithm):
'''
Meta-AIRL using a neural process
    assuming the context trajectories all have the same length, are flat, and everything is nice
'''
def __init__(
self,
env,
# this is the main policy network that we wrap with
# PostCondWrapperPolicy for get_exploration policy
main_policy,
disc,
train_context_expert_replay_buffer,
train_test_expert_replay_buffer,
test_context_expert_replay_buffer,
test_test_expert_replay_buffer,
np_encoder,
            policy_optimizer, # the RL algorithm that updates the policy
num_disc_updates_per_epoch=160,
num_policy_updates_per_epoch=80,
num_tasks_used_per_update=5,
num_context_trajs_for_training=3,
num_test_trajs_for_training=3,
policy_batch_size_per_task=256,
# for each task, for each context, infer post, for each post sample, generate some eval trajs
num_tasks_per_eval=10,
num_diff_context_per_eval_task=2,
num_context_trajs_for_eval=3,
num_eval_trajs_per_post_sample=2,
num_context_trajs_for_exploration=3,
encoder_lr=1e-3,
encoder_optimizer_class=optim.Adam,
disc_lr=1e-3,
disc_optimizer_class=optim.Adam,
use_grad_pen=True,
grad_pen_weight=10,
plotter=None,
render_eval_paths=False,
**kwargs
):
super().__init__(
env=env,
train_context_expert_replay_buffer=train_context_expert_replay_buffer,
train_test_expert_replay_buffer=train_test_expert_replay_buffer,
test_context_expert_replay_buffer=test_context_expert_replay_buffer,
test_test_expert_replay_buffer=test_test_expert_replay_buffer,
**kwargs
)
self.main_policy = main_policy
self.encoder = np_encoder
self.disc = disc
self.rewardf_eval_statistics = None
self.policy_optimizer = policy_optimizer
self.encoder_optimizer = encoder_optimizer_class(
self.encoder.parameters(),
lr=encoder_lr,
)
self.disc_optimizer = disc_optimizer_class(
self.disc.parameters(),
lr=disc_lr,
)
self.num_policy_updates_per_epoch = num_policy_updates_per_epoch
self.num_disc_updates_per_epoch = num_disc_updates_per_epoch
self.num_tasks_used_per_update = num_tasks_used_per_update
self.num_context_trajs_for_training = num_context_trajs_for_training
self.num_test_trajs_for_training = num_test_trajs_for_training
self.policy_batch_size_per_task = policy_batch_size_per_task
self.num_tasks_per_eval = num_tasks_per_eval
self.num_diff_context_per_eval_task = num_diff_context_per_eval_task
self.num_context_trajs_for_eval = num_context_trajs_for_eval
self.num_eval_trajs_per_post_sample = num_eval_trajs_per_post_sample
self.num_context_trajs_for_exploration = num_context_trajs_for_exploration
# things we need for computing the discriminator objective
self.bce = nn.BCEWithLogitsLoss()
total_samples = self.max_path_length * self.num_tasks_used_per_update * (self.num_context_trajs_for_training + self.num_test_trajs_for_training)
self.bce_targets = torch.cat(
[
torch.ones(total_samples, 1),
torch.zeros(total_samples, 1)
],
dim=0
)
self.bce_targets = Variable(self.bce_targets)
if ptu.gpu_enabled():
self.bce.cuda()
self.bce_targets = self.bce_targets.cuda()
self.use_grad_pen = use_grad_pen
self.grad_pen_weight = grad_pen_weight
def get_exploration_policy(self, task_identifier):
list_of_trajs = self.train_context_expert_replay_buffer.sample_trajs_from_task(
task_identifier,
self.num_context_trajs_for_exploration,
)
post_dist = self.encoder([list_of_trajs])
# z = post_dist.sample()
z = post_dist.mean
z = z.cpu().data.numpy()[0]
return PostCondMLPPolicyWrapper(self.main_policy, z)
def get_eval_policy(self, task_identifier, mode='meta_test'):
if mode == 'meta_train':
rb = self.train_context_expert_replay_buffer
else:
rb = self.test_context_expert_replay_buffer
list_of_trajs = rb.sample_trajs_from_task(
task_identifier,
self.num_context_trajs_for_eval,
)
post_dist = self.encoder([list_of_trajs])
# z = post_dist.sample()
z = post_dist.mean
z = z.cpu().data.numpy()[0]
return PostCondMLPPolicyWrapper(self.main_policy, z)
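    # Note (sketch): exploration and evaluation policies are built the same way --
    # encode a few expert context trajectories into a posterior over the task latent,
    # take its mean, and condition the shared policy on it, roughly:
    #
    #   post_dist = self.encoder([list_of_trajs])
    #   z = post_dist.mean.cpu().data.numpy()[0]
    #   policy = PostCondMLPPolicyWrapper(self.main_policy, z)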
def _get_disc_training_batch(self):
# k = list(self.replay_buffer.task_replay_buffers.keys())[0]
# print('\nReplay Buffer')
# print(len(self.replay_buffer.task_replay_buffers[k]._traj_endpoints))
# print('\nTrain Context')
# print(len(self.train_context_expert_replay_buffer.task_replay_buffers[k]._traj_endpoints))
# print('\nTest Context')
# print(len(self.train_test_expert_replay_buffer.task_replay_buffers[k]._traj_endpoints))
# context batch is a list of list of dicts
context_batch, task_identifiers_list = self.train_context_expert_replay_buffer.sample_trajs(
self.num_context_trajs_for_training,
num_tasks=self.num_tasks_used_per_update,
keys=['observations', 'actions']
)
flat_context_batch = [traj for task_trajs in context_batch for traj in task_trajs]
context_pred_batch = concat_trajs(flat_context_batch)
test_batch, _ = self.train_test_expert_replay_buffer.sample_trajs(
self.num_test_trajs_for_training,
task_identifiers=task_identifiers_list,
keys=['observations', 'actions']
)
flat_test_batch = [traj for task_trajs in test_batch for traj in task_trajs]
test_pred_batch = concat_trajs(flat_test_batch)
# get the test batch for the tasks from policy buffer
policy_test_batch_0, _ = self.replay_buffer.sample_trajs(
self.num_context_trajs_for_training,
task_identifiers=task_identifiers_list,
keys=['observations', 'actions']
)
flat_policy_batch_0 = [traj for task_trajs in policy_test_batch_0 for traj in task_trajs]
policy_test_pred_batch_0 = concat_trajs(flat_policy_batch_0)
policy_test_batch_1, _ = self.replay_buffer.sample_trajs(
self.num_test_trajs_for_training,
task_identifiers=task_identifiers_list,
keys=['observations', 'actions']
)
flat_policy_batch_1 = [traj for task_trajs in policy_test_batch_1 for traj in task_trajs]
policy_test_pred_batch_1 = concat_trajs(flat_policy_batch_1)
policy_test_pred_batch = {
'observations': np.concatenate((policy_test_pred_batch_0['observations'], policy_test_pred_batch_1['observations']), axis=0),
'actions': np.concatenate((policy_test_pred_batch_0['actions'], policy_test_pred_batch_1['actions']), axis=0)
}
# if we want to handle envs with different traj lengths we need to do
# something smarter with how we repeat z
traj_len = flat_context_batch[0]['observations'].shape[0]
assert all(t['observations'].shape[0] == traj_len for t in flat_context_batch), "Not handling different traj lens"
assert all(t['observations'].shape[0] == traj_len for t in flat_test_batch), "Not handling different traj lens"
assert all(t['observations'].shape[0] == traj_len for t in flat_policy_batch_0), "Not handling different traj lens"
assert all(t['observations'].shape[0] == traj_len for t in flat_policy_batch_1), "Not handling different traj lens"
return context_batch, context_pred_batch, test_pred_batch, policy_test_pred_batch, traj_len
def _get_policy_training_batch(self):
# context batch is a list of list of dicts
context_batch, task_identifiers_list = self.train_context_expert_replay_buffer.sample_trajs(
self.num_context_trajs_for_training,
num_tasks=self.num_tasks_used_per_update,
keys=['observations', 'actions']
)
# get the test batch for the tasks from policy buffer
policy_batch, _ = self.replay_buffer.sample_random_batch(
self.policy_batch_size_per_task,
task_identifiers_list=task_identifiers_list
)
policy_obs = np.concatenate([d['observations'] for d in policy_batch], axis=0) # (N_tasks * batch_size) x Dim
policy_acts = np.concatenate([d['actions'] for d in policy_batch], axis=0) # (N_tasks * batch_size) x Dim
policy_terminals = np.concatenate([d['terminals'] for d in policy_batch], axis=0) # (N_tasks * batch_size) x Dim
policy_next_obs = np.concatenate([d['next_observations'] for d in policy_batch], axis=0) # (N_tasks * batch_size) x Dim
policy_batch = dict(
observations=policy_obs,
actions=policy_acts,
terminals=policy_terminals,
next_observations=policy_next_obs
)
return context_batch, policy_batch
def _do_training(self):
'''
'''
# train the discriminator (and the encoder)
# print('$$$$$$$$$')
# print(self.num_disc_updates_per_epoch)
for i in range(self.num_disc_updates_per_epoch):
self.encoder_optimizer.zero_grad()
self.disc_optimizer.zero_grad()
context_batch, context_pred_batch, test_pred_batch, policy_test_pred_batch, traj_len = self._get_disc_training_batch()
# convert it to a pytorch tensor
# note that our objective says we should maximize likelihood of
# BOTH the context_batch and the test_batch
exp_obs_batch = np.concatenate((context_pred_batch['observations'], test_pred_batch['observations']), axis=0)
exp_obs_batch = Variable(ptu.from_numpy(exp_obs_batch), requires_grad=False)
exp_acts_batch = np.concatenate((context_pred_batch['actions'], test_pred_batch['actions']), axis=0)
exp_acts_batch = Variable(ptu.from_numpy(exp_acts_batch), requires_grad=False)
policy_obs_batch = Variable(ptu.from_numpy(policy_test_pred_batch['observations']), requires_grad=False)
policy_acts_batch = Variable(ptu.from_numpy(policy_test_pred_batch['actions']), requires_grad=False)
post_dist = self.encoder(context_batch)
# z = post_dist.sample() # N_tasks x Dim
z = post_dist.mean
# z_reg_loss = 0.0001 * z.norm(2, dim=1).mean()
z_reg_loss = 0.0
# make z's for expert samples
context_pred_z = z.repeat(1, traj_len * self.num_context_trajs_for_training).view(
-1,
z.size(1)
)
test_pred_z = z.repeat(1, traj_len * self.num_test_trajs_for_training).view(
-1,
z.size(1)
)
z_batch = torch.cat([context_pred_z, test_pred_z], dim=0)
positive_obs_batch = torch.cat([exp_obs_batch, z_batch], dim=1)
positive_acts_batch = exp_acts_batch
# make z's for policy samples
z_policy = z_batch
negative_obs_batch = torch.cat([policy_obs_batch, z_policy], dim=1)
negative_acts_batch = policy_acts_batch
# compute the loss for the discriminator
obs_batch = torch.cat([positive_obs_batch, negative_obs_batch], dim=0)
acts_batch = torch.cat([positive_acts_batch, negative_acts_batch], dim=0)
disc_logits = self.disc(obs_batch, acts_batch)
disc_preds = (disc_logits > 0).type(torch.FloatTensor)
# disc_percent_policy_preds_one = disc_preds[z.size(0):].mean()
disc_loss = self.bce(disc_logits, self.bce_targets)
accuracy = (disc_preds == self.bce_targets).type(torch.FloatTensor).mean()
if self.use_grad_pen:
eps = Variable(torch.rand(positive_obs_batch.size(0), 1), requires_grad=True)
if ptu.gpu_enabled(): eps = eps.cuda()
# old and probably has a bad weird effect on the encoder
# difference is that before I was also taking into account norm of grad of disc
# wrt the z
# interp_obs = eps*positive_obs_batch + (1-eps)*negative_obs_batch
# permute the exp_obs_batch (not just within a single traj, but overall)
# This is actually a really tricky question how to permute the batches
# 1) permute within each of trajectories
# z's will be matched, colors won't be matched anyways
# 2) permute within trajectories corresponding to a single context set
# z's will be matched, colors will be "more unmatched"
# 3) just shuffle everything up
# Also, the z's need to be handled appropriately
interp_obs = eps*exp_obs_batch + (1-eps)*policy_obs_batch
# interp_z = z_batch.detach()
# interp_obs = torch.cat([interp_obs, interp_z], dim=1)
interp_obs.detach()
# interp_obs.requires_grad = True
interp_actions = eps*positive_acts_batch + (1-eps)*negative_acts_batch
interp_actions.detach()
# interp_actions.requires_grad = True
gradients = autograd.grad(
outputs=self.disc(torch.cat([interp_obs, z_batch.detach()], dim=1), interp_actions).sum(),
inputs=[interp_obs, interp_actions],
# grad_outputs=torch.ones(exp_specs['batch_size'], 1).cuda(),
create_graph=True, retain_graph=True, only_inputs=True
)
# print(gradients[0].size())
# z_norm = gradients[0][:,-50:].norm(2, dim=1)
# print('Z grad norm: %.4f +/- %.4f' % (torch.mean(z_norm), torch.std(z_norm)))
# print(gradients[0][:,-50:].size())
# o_norm = gradients[0][:,:-50].norm(2, dim=1)
# o_norm = gradients[0].norm(2, dim=1)
# print('Obs grad norm: %.4f +/- %.4f' % (torch.mean(o_norm), torch.std(o_norm)))
# print(gradients[0].size())
# print(gradients[0][:,:50].norm(2, dim=1))
total_grad = torch.cat([gradients[0], gradients[1]], dim=1)
# print(total_grad.size())
gradient_penalty = ((total_grad.norm(2, dim=1) - 1) ** 2).mean()
# another form of grad pen
# gradient_penalty = (total_grad.norm(2, dim=1) ** 2).mean()
disc_loss = disc_loss + gradient_penalty * self.grad_pen_weight
total_reward_loss = z_reg_loss + disc_loss
total_reward_loss.backward()
self.disc_optimizer.step()
self.encoder_optimizer.step()
# print(self.disc.fc0.bias[0])
# print(self.encoder.traj_encoder.traj_enc_mlp.fc0.bias[0])
# train the policy
# print('--------')
# print(self.num_policy_updates_per_epoch)
for i in range(self.num_policy_updates_per_epoch):
context_batch, policy_batch = self._get_policy_training_batch()
policy_batch = np_to_pytorch_batch(policy_batch)
post_dist = self.encoder(context_batch)
# z = post_dist.sample() # N_tasks x Dim
z = post_dist.mean
z = z.detach()
# repeat z to have the right size
z = z.repeat(1, self.policy_batch_size_per_task).view(
self.num_tasks_used_per_update * self.policy_batch_size_per_task,
-1
).detach()
# now augment the obs with the latent sample z
policy_batch['observations'] = torch.cat([policy_batch['observations'], z], dim=1)
policy_batch['next_observations'] = torch.cat([policy_batch['next_observations'], z], dim=1)
# compute the rewards
# If you compute log(D) - log(1-D) then you just get the logits
policy_rewards = self.disc(policy_batch['observations'], policy_batch['actions']).detach()
policy_batch['rewards'] = policy_rewards
# rew_more_than_zero = (rewards > 0).type(torch.FloatTensor).mean()
# print(rew_more_than_zero.data[0])
# do a policy update (the zeroing of grads etc. should be handled internally)
# print(policy_rewards.size())
self.policy_optimizer.train_step(policy_batch)
# print(self.main_policy.fc0.bias[0])
if self.eval_statistics is None:
"""
Eval should set this to None.
This way, these statistics are only computed for one batch.
"""
self.eval_statistics = OrderedDict()
self.eval_statistics['Disc Loss'] = np.mean(ptu.get_numpy(disc_loss))
self.eval_statistics['Disc Acc'] = np.mean(ptu.get_numpy(accuracy))
# self.eval_statistics['Disc Percent Policy Preds 1'] = np.mean(ptu.get_numpy(disc_percent_policy_preds_one))
self.eval_statistics['Disc Rewards Mean'] = np.mean(ptu.get_numpy(policy_rewards))
self.eval_statistics['Disc Rewards Std'] = np.std(ptu.get_numpy(policy_rewards))
self.eval_statistics['Disc Rewards Max'] = np.max(ptu.get_numpy(policy_rewards))
self.eval_statistics['Disc Rewards Min'] = np.min(ptu.get_numpy(policy_rewards))
# self.eval_statistics['Disc Rewards GT Zero'] = np.mean(ptu.get_numpy(rew_more_than_zero))
z_norm = z.norm(2, dim=1).mean()
self.eval_statistics['Z Norm'] = np.mean(ptu.get_numpy(z_norm))
if self.policy_optimizer.eval_statistics is not None:
self.eval_statistics.update(self.policy_optimizer.eval_statistics)
def evaluate(self, epoch):
super().evaluate(epoch)
self.policy_optimizer.eval_statistics = None
def obtain_eval_samples(self, epoch, mode='meta_train'):
self.training_mode(False)
if mode == 'meta_train':
params_samples = self.train_task_params_sampler.sample_unique(self.num_tasks_per_eval)
else:
params_samples = self.test_task_params_sampler.sample_unique(self.num_tasks_per_eval)
all_eval_tasks_paths = []
for task_params, obs_task_params in params_samples:
cur_eval_task_paths = []
self.env.reset(task_params=task_params, obs_task_params=obs_task_params)
task_identifier = self.env.task_identifier
for _ in range(self.num_diff_context_per_eval_task):
eval_policy = self.get_eval_policy(task_identifier, mode=mode)
for _ in range(self.num_eval_trajs_per_post_sample):
cur_eval_path_builder = PathBuilder()
observation = self.env.reset(task_params=task_params, obs_task_params=obs_task_params)
terminal = False
while (not terminal) and len(cur_eval_path_builder) < self.max_path_length:
if isinstance(self.obs_space, Dict):
if self.policy_uses_pixels:
agent_obs = observation['pixels']
else:
agent_obs = observation['obs']
else:
agent_obs = observation
action, agent_info = eval_policy.get_action(agent_obs)
next_ob, raw_reward, terminal, env_info = (self.env.step(action))
if self.no_terminal:
terminal = False
reward = raw_reward
terminal = np.array([terminal])
reward = | np.array([reward]) | numpy.array |
"""This script creates the patched dataset"""
import sys
import glob
import json
from tqdm import tqdm
import numpy as np
from PIL import Image
import multiprocessing
from datetime import datetime
from joblib import Parallel, delayed
from scipy.interpolate import interp1d
from scipy.ndimage import generic_filter
from multiprocessing import Process, Manager
from settings import NUM_TO_LABEL, CLASSES
from utils import calculate_num_crops
import os
from utils import get_train_validation_split
from settings import (
SEGMENTATIONS_DIRECTORY,
ANONYMIZED_DATA_DIRECTORY,
RECORD_DIRECTORY,
DATASET_SPECS,
RANDOM_VALIDATION_SPLIT,
)
np.random.seed(2019)
NUM_CLASSES = len(NUM_TO_LABEL)
to_range_256 = interp1d([0, NUM_CLASSES - 1], [0, 255])
to_range_num_classes = interp1d([0, 255], [0, NUM_CLASSES - 1])
LABEL_TO_NUM = {v: k for k, v in NUM_TO_LABEL.items()}
SEGMENTATIONS_LIST = sorted(glob.glob(os.path.join(SEGMENTATIONS_DIRECTORY, "*.png")))
def process_image(target_folder, image_addrs, stuff_addrs, mode, crop_size,
crops_per_class):
""" given an image, generates patches and saves them
Parameters:
-----------
writer: writer object
Path to file
image_addrs: str
Path to image
stuff_addrs: str
Path to annotations
i: int
image number in the dataset
mode: str
train, val or test
Returns:
--------
crops_of_each_label: array_like
if mode is 'train', number of crops with central pixel of each
label type. If mode is 'test', 1.
pixels_of_each_label: array_like
number of pixels of each label among the crops generated
"""
# Open image and array
img = np.array(Image.open(image_addrs))
label = np.array(Image.open(stuff_addrs))
img_ID = image_addrs.split("/")[-1][:-4]
# Make sure is int16
img = img.astype(np.uint16)
annotations = label.astype(np.uint8)
# Define width and height
width = img.shape[0]
height = img.shape[1]
# Define variables to save labels information
crops_of_each_label = np.zeros(NUM_CLASSES)
pixels_of_each_label = np.zeros(NUM_CLASSES)
    if mode == 'train':
# create one list per each label with the positions
positions = [[] for _ in range(NUM_CLASSES)]
for pixel_col in range(width):
for pixel_row in range(height):
label = annotations[pixel_col, pixel_row]
positions[label].append([pixel_col, pixel_row])
# define dict
positions_dict = {}
for pos, _ in enumerate(positions):
if positions[pos]:
positions_dict[str(pos)] = positions[pos]
# list of labels contained in this image
unique_labels = list(np.unique(annotations))
# remove background and mammary gland
if ["mammary_gland"] in CLASSES:
if LABEL_TO_NUM['background'] in unique_labels:
unique_labels.remove(LABEL_TO_NUM['background'])
if LABEL_TO_NUM["mammary_gland"] in unique_labels:
unique_labels.remove(LABEL_TO_NUM['mammary_gland'])
for unique_label in unique_labels:
for crop_number in range(crops_per_class):
# Sample random pixel of class unique_label
sampled_pixel = np.random.randint(low=0, high=len(
positions_dict.get(str(unique_label))))
# Get pixel coordinates
coordinates = positions_dict.get(
str(unique_label))[sampled_pixel]
# Find upper left corner of the crop
x_coordinate = np.clip(
coordinates[0] - (crop_size // 2), 0, width)
y_coordinate = np.clip(
coordinates[1] - (crop_size // 2), 0, height)
# Check coordinates not too close from right or bottom side
if x_coordinate + crop_size >= width:
x_coordinate = width - crop_size
if y_coordinate + crop_size >= height:
y_coordinate = height - crop_size
# Get crop
img_crop = img[x_coordinate:x_coordinate + crop_size,
y_coordinate:y_coordinate + crop_size]
annotation_crop = annotations[
x_coordinate:x_coordinate + crop_size,
y_coordinate: y_coordinate + crop_size]
                # Save img and mask patches in folder
img_crop = Image.fromarray(img_crop.astype(np.uint16))
annotation_crop = Image.fromarray(annotation_crop.astype(np.uint8))
img_crop.save(os.path.join(target_folder, 'images',
'{}-{}-{}.png'.format(img_ID,unique_label, crop_number)))
annotation_crop.save(os.path.join(target_folder, 'masks',
'{}-{}-{}.png'.format(img_ID,unique_label, crop_number)))
# Increase the number of crops of type unique_label
crops_of_each_label[unique_label] += 1
else:
overlapping = 0
img = Image.fromarray(img.astype(np.uint16))
annotations = Image.fromarray(annotations.astype(np.uint8))
# save full images
full_img_save_path = os.path.join(RECORD_DIRECTORY, 'images_full', '{}.png'.format(img_ID))
full_mask_save_path = os.path.join(RECORD_DIRECTORY, 'masks_full', '{}.png'.format(img_ID))
img.save(full_img_save_path)
annotations.save(full_mask_save_path)
# get image and segments and start the patching
x_max, y_max = img.size
path_list = []
x0 = 0
while (x0 + crop_size) < (x_max + crop_size):
y0 = 0
while (y0 + crop_size) < (y_max + crop_size):
## if patch exceeds img size then pad
if ((y0 + crop_size) - y_max > 0) or ((x0 + crop_size) - x_max > 0):
cropped_img = Image.fromarray(np.zeros((crop_size, crop_size), dtype=np.uint16))
cropped_mask = Image.fromarray(np.ones((crop_size, crop_size), dtype=np.uint8)*LABEL_TO_NUM['background'])
x1 = x0 + crop_size
y1 = y0 + crop_size
area = (x0, y0, x1, y1)
str_area = 'x'.join(map(str, area))
if (y0 + crop_size) - y_max > 0:
y1 = y_max
if (x0 + crop_size) - x_max > 0:
x1 = x_max
area = (x0, y0, x1, y1)
t_cropped_img = img.crop(area)
t_cropped_mask = annotations.crop(area)
cropped_img.paste(t_cropped_img)
cropped_mask.paste(t_cropped_mask)
unique_labels = list(np.unique(cropped_mask))
# remove blank images
if [LABEL_TO_NUM['background']] != unique_labels:
img_crop_path = os.path.join(target_folder, 'images','{}-{}.png'.format(img_ID, str_area))
mask_crop_path = os.path.join(target_folder, 'masks','{}-{}.png'.format(img_ID, str_area))
cropped_img.save(img_crop_path)
cropped_mask.save(mask_crop_path)
else:
area = (x0, y0, x0 + crop_size, y0 + crop_size)
str_area = 'x'.join(map(str, area))
cropped_img = img.crop(area)
cropped_mask = annotations.crop(area)
unique_labels = list( | np.unique(cropped_mask) | numpy.unique |
#!/usr/bin/env python2
from moke import *
from itertools import izip, chain
from multiprocessing import Pool
import pickle
import numpy as np
import scipy.stats as ss
from sklearn import decomposition, cross_validation, grid_search, linear_model, metrics
from sklearn.decomposition.nmf import nnls
from pysam import Samfile
MINBAMS = 3
def load_epi(epi):
"""(internal) load epi file
"""
chk_exit(*inp_file(path(epi)))
with open(epi) as fh:
marks = fh.readline().strip().split("\t")
h = np.loadtxt(fh, delimiter="\t")
return (marks, h)
def load_arr(arr):
"""(internal) load arr file
"""
chk_exit(*inp_file(path(arr)))
with open(arr) as fh:
marks = fh.readline().strip().split("\t")
x = np.loadtxt(fh, delimiter="\t")
return (marks, x)
def load_arrs(arrs):
"""(internal) load multiple arr files, assumes same marks (columns)
"""
xs = []
for arr in arrs:
marks, x = load_arr(arr)
xs.append(x)
return (marks, xs)
## statistical functions
def sparsevec(x):
"""(internal) Calculates the sparsity of a vector.
"""
eps = np.finfo(x.dtype).eps if 'int' not in str(x.dtype) else 1e-9
n = x.shape[0]
x1 = np.sqrt(n) - (np.abs(x).sum() + eps) / (np.sqrt(np.multiply(x, x).sum()) + eps)
x2 = np.sqrt(n) - 1
return x1 / x2
def sparsemat(X):
"""(internal) Calculates the average sparsity of a matrx.
"""
return np.mean([sparsevec(x) for x in X])
def dsig(a, lq, loc, uq):
"""(internal) Double sigmoid function to normalize features (columns).
see:
<NAME>., <NAME>. and <NAME>. Score normalization in multimodal biometric systems.
Pattern Recognition 38, 2270-2285 (2005).
"""
a = np.asanyarray(a, dtype="f")
alpha_l = loc - lq
alpha_r = uq - loc
a = a - loc
lsel = (a < 0.)
rsel = (a >= 0.)
if alpha_l:
a[lsel] = np.divide(a[lsel], -0.5 * alpha_l)
if alpha_r:
a[rsel] = np.divide(a[rsel], -0.5 * alpha_r)
np.exp(a, a)
np.add(a, 1, a)
np.power(a, -1, a)
return a
def scarr(arr, method):
"""(internal) Normalizes features of array (samples x features) through sigmoid scaling or whitening.
"""
if method.startswith("sig"):
hi = float(method.split("sig")[1]) / 100
data = np.array(arr, dtype=float).T
qs = ss.mstats.mquantiles(data, (0.0, 0.0, hi), axis=1).T
for row, lq, mu, uq in izip(data, qs[0], qs[1], qs[2]):
row[:] = (dsig(row, lq, mu, uq) - 0.5) * 2.
elif method == "whiten":
data = np.array(arr, dtype=float).T
dev = np.std(data, axis=1, ddof=1)[np.newaxis].T
dev[dev == 0.] = np.nan
data /= dev
else:
raise ValueError("unknown method")
return data.T
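# Usage sketch (hypothetical array X of shape samples x features):
#   scarr(X, "sig95")  -> each feature squashed into (-1, 1), with its 95th
#                         percentile acting as the upper knee of the sigmoid
#   scarr(X, "whiten") -> each feature divided by its sample standard deviation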
def scapair(raw, method):
"""(internal) Normalizes paired featues (columns) of array (sample x features) through (currently)
the DESeq method. It computes size factors by adjusting medians. see:
<NAME>. & <NAME>. Differential expression analysis for sequence count data.
Genome Biology 11, R106 (2010).
"""
def size_factors(counts):
counts = counts[np.alltrue(counts, axis=1)]
        logcounts = np.log(counts)
# -*- coding: utf-8 -*-
"""
Created on Mon Oct 29 15:29:14 2018
@author: Pooja
"""
#testing model of doubles on singles and vice versa
#import tensorflow as tf
#import keras
import os
import numpy as np
import cv2
from keras.utils import np_utils
from sklearn.model_selection import train_test_split
from keras.models import load_model
from sklearn.metrics import confusion_matrix
path_uiuc = 'C:\\Users\\Sanmoy\\Desktop\\pooja\\paper read\\sports\\dataset\\UIUC2\\'
path_44 = 'C:\\Users\\Sanmoy\\Desktop\\pooja\\paper read\\sports\\dataset\\UIUC2\\44D1\\'
mod = load_model(path_uiuc + 'tcn_models\\tcn_s_64_adam_1.h5')
#mod.predict()
x = list()
y = list()
#reading color image - changin to gray and subtracting mean
vidlist = os.listdir(path_44)
it = len(vidlist)
for i in range(it):
s1 = vidlist[i]
_, typ = s1.split('_')
if typ == '3':
y.append(3)
elif typ == '0':
y.append(0)
elif typ == '1':
y.append(1)
elif typ == '2':
y.append(2)
elif typ == '4':
continue
#y.append(4)
elif typ == '5':
y.append(5)
else:
print('Error!' + str(typ))
os.chdir(path_44 + s1)
img_list = os.listdir()
it0 = len(img_list)
temp = list()
dim = (32,32)
for j in range(it0):
img = cv2.imread(img_list[j])
img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
img = cv2.resize(img, dim, interpolation = cv2.INTER_CUBIC)
mn1 = np.mean(img)
st1 = np.std(img)
img = (img.copy() - mn1) / st1
img = img.copy().flatten()
temp.append(img)
temp = np.asarray(temp)
x.append(temp)
X = np.asarray(x)
Y = np.asarray(y)
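# A possible next step (sketch only; not part of this excerpt -- the batch size and
# use of np.argmax are assumptions, since the evaluation code is not shown here):
#   preds = mod.predict(X, batch_size=16)
#   print(confusion_matrix(Y, np.argmax(preds, axis=1)))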
"""
This file is part of pyS5p
https://github.com/rmvanhees/pys5p.git
The class ICMio provides read access to S5p Tropomi ICM_CA_SIR products
Copyright (c) 2017-2021 SRON - Netherlands Institute for Space Research
All Rights Reserved
License: BSD-3-Clause
"""
from datetime import datetime, timedelta
from pathlib import Path, PurePosixPath
from setuptools_scm import get_version
import h5py
import numpy as np
# - global parameters ------------------------------
# - local functions --------------------------------
# - class definition -------------------------------
class ICMio():
"""
This class should offer all the necessary functionality to read Tropomi
ICM_CA_SIR products
Attributes
----------
fid : h5py.File
filename : string
bands : string
Methods
-------
coverage_time
Returns start and end of the measurement coverage time.
creation_time
Returns creation date of this product
orbit
Returns value of revolution counter.
processor_version
Returns version of the L01b processor used to generate this product.
close()
Close resources
find(msm_class)
Find a measurement as <processing-class name>.
select(msm_type: str, msm_path=None)
Select a measurement as <processing class>_<ic_id>.
get_attr(attr_name)
Obtain value of an HDF5 file attribute.
get_ref_time(band=None)
Returns reference start time of measurements.
get_delta_time(band=None)
Returns offset from the reference start time of measurement.
get_instrument_settings(band=None)
Returns instrument settings of measurement.
get_exposure_time(band=None)
Returns pixel exposure time of the measurements, which is calculated
from the parameters 'int_delay' and 'int_hold' for SWIR.
get_housekeeping_data(band=None)
Returns housekeeping data of measurements.
get_msmt_keys(band=None)
Read msmt_keys from the analysis groups.
get_msm_attr(msm_dset, attr_name, band=None)
Returns attribute of measurement dataset 'msm_dset'.
get_geo_data(band=None, geo_dset='satellite_latitude,satellite_longitude')
Returns data of selected datasets from the GEODATA group.
get_msm_data(msm_dset, band='78', *, read_raw=False, columns=None,
fill_as_nan=True)
Read datasets from a measurement selected by class-method 'select'
read_direct_msm(msm_dset, dest_sel=None, dest_dtype=None, fill_as_nan=False)
The faster implementation of class method 'get_msm_data'.
set_housekeeping_data(data, band=None)
Returns housekeeping data of measurements.
set_msm_data(msm_dset, data, band='78')
Alter dataset from a measurement selected using function 'select'.
Notes
-----
Examples
--------
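    A minimal usage sketch (the file name, measurement type and dataset name
    below are placeholders, not taken from a real product):

    >>> with ICMio('S5P_TEST_ICM_CA_SIR_example.h5') as icm:
    ...     bands = icm.select('BACKGROUND_MODE_1063')
    ...     if bands:
    ...         data = icm.get_msm_data('signal_avg', band=bands)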
"""
def __init__(self, icm_product, readwrite=False):
"""
Initialize access to an ICM product
Parameters
----------
icm_product : string
full path to in-flight calibration measurement product
readwrite : boolean
open product in read-write mode (default is False)
"""
if not Path(icm_product).is_file():
raise FileNotFoundError(f'{icm_product} does not exist')
# initialize class-attributes
self.__rw = readwrite
self.__msm_path = None
self.__patched_msm = []
self.filename = icm_product
self.bands = None
# open ICM product as HDF5 file
if readwrite:
self.fid = h5py.File(icm_product, "r+")
else:
self.fid = h5py.File(icm_product, "r")
def __repr__(self):
class_name = type(self).__name__
return f'{class_name}({self.filename!r}, readwrite={self.__rw!r})'
def __iter__(self):
for attr in sorted(self.__dict__):
if not attr.startswith("__"):
yield attr
# def __del__(self):
# """
# called when the object is destroyed
# """
# self.close()
def __enter__(self):
"""
method called to initiate the context manager
"""
return self
def __exit__(self, exc_type, exc_value, traceback):
"""
method called when exiting the context manager
"""
self.close()
return False # any exception is raised by the with statement.
def close(self):
"""
Before closing the product, we make sure that the output product
describes what has been altered by the S/W. To keep any change
traceable.
as attributes of this group, we write:
- dateStamp ('now')
- Git-version of S/W
- list of patched datasets
- auxiliary datasets used by patch-routines
"""
if self.fid is None:
return
self.bands = None
if self.__patched_msm:
# pylint: disable=no-member
sgrp = self.fid.require_group("METADATA/SRON_METADATA")
sgrp.attrs['dateStamp'] = datetime.utcnow().isoformat()
sgrp.attrs['git_tag'] = get_version(root='..',
relative_to=__file__)
if 'patched_datasets' not in sgrp:
dtype = h5py.special_dtype(vlen=str)
dset = sgrp.create_dataset('patched_datasets',
(len(self.__patched_msm),),
maxshape=(None,), dtype=dtype)
dset[:] = np.asarray(self.__patched_msm)
else:
dset = sgrp['patched_datasets']
dset.resize(dset.shape[0] + len(self.__patched_msm), axis=0)
                dset[-len(self.__patched_msm):] = np.asarray(self.__patched_msm)
self.fid.close()
self.fid = None
# ---------- RETURN VERSION of the S/W ----------
def find(self, msm_class) -> list:
"""
find a measurement as <processing-class name>
Parameters
----------
msm_class : string
processing-class name without ICID
Returns
-------
out : list of strings
String with msm_type as used by ICMio.select
"""
res = []
grp_list = ['ANALYSIS', 'CALIBRATION', 'IRRADIANCE', 'RADIANCE']
for ii in '12345678':
for name in grp_list:
grp_name = f'BAND{ii}_{name}'
if grp_name in self.fid:
gid = self.fid[grp_name]
res += [s for s in gid if s.startswith(msm_class)]
return list(set(res))
# -------------------------
def select(self, msm_type: str, msm_path=None) -> str:
"""
Select a measurement as <processing class>_<ic_id>
Parameters
----------
msm_type : string
Name of measurement group
msm_path : {'BAND%_ANALYSIS', 'BAND%_CALIBRATION',
'BAND%_IRRADIANCE', 'BAND%_RADIANCE'}
Name of path in HDF5 file to measurement group
Returns
-------
string
String with spectral bands found in product or empty
Attributes
----------
bands : string
Available spectral bands (or empty)
__msm_path : string
Full name of selected group in file (or None)
"""
self.bands = ''
self.__msm_path = None
        # if a path is given, then only determine the available spectral bands
        # else determine both the path and the available spectral bands
if msm_path is None:
grp_list = ['ANALYSIS', 'CALIBRATION', 'IRRADIANCE', 'RADIANCE']
for ii in '12345678':
for name in grp_list:
grp_path = PurePosixPath(f'BAND{ii}_{name}', msm_type)
if str(grp_path) in self.fid:
msm_path = f'BAND%_{name}'
self.bands += ii
else:
if not msm_path.startswith('BAND%'):
raise ValueError('msm_path should start with BAND%')
for ii in '12345678':
grp_path = PurePosixPath(msm_path.replace('%', ii), msm_type)
if str(grp_path) in self.fid:
self.bands += ii
# return in case no data was found
if self.bands:
self.__msm_path = PurePosixPath(msm_path, msm_type)
return self.bands
# ---------- Functions that work before MSM selection ----------
@property
def orbit(self) -> int:
"""
Returns reference orbit number
"""
if 'reference_orbit' in self.fid.attrs:
return int(self.fid.attrs['reference_orbit'])
return None
@property
def processor_version(self) -> str:
"""
Returns version of the L01b processor
"""
if 'processor_version' not in self.fid.attrs:
return None
res = self.fid.attrs['processor_version']
if isinstance(res, bytes):
# pylint: disable=no-member
return res.decode('ascii')
return res
@property
def coverage_time(self) -> tuple:
"""
Returns start and end of the measurement coverage time
"""
if 'time_coverage_start' not in self.fid.attrs \
or 'time_coverage_end' not in self.fid.attrs:
return None
res1 = self.fid.attrs['time_coverage_start']
if isinstance(res1, bytes):
# pylint: disable=no-member
res1 = res1.decode('ascii')
res2 = self.fid.attrs['time_coverage_end']
if isinstance(res2, bytes):
# pylint: disable=no-member
res2 = res2.decode('ascii')
return (res1, res2)
@property
def creation_time(self) -> str:
"""
        Returns creation date of this product
"""
grp = self.fid['/METADATA/ESA_METADATA/earth_explorer_header']
dset = grp['fixed_header/source']
return dset.attrs['Creation_Date'].split(b'=')[1].decode('ascii')
def get_attr(self, attr_name):
"""
Obtain value of an HDF5 file attribute
Parameters
----------
attr_name : string
name of the attribute
"""
if attr_name not in self.fid.attrs:
return None
res = self.fid.attrs[attr_name]
if isinstance(res, bytes):
return res.decode('ascii')
return res
# ---------- Functions that only work after MSM selection ----------
def get_ref_time(self, band=None) -> datetime:
"""
Returns reference start time of measurements
Parameters
----------
band : None or {'1', '2', '3', ..., '8'}
Select one of the band present in the product.
Default is 'None' which returns the first available band
"""
ref_time = datetime(2010, 1, 1, 0, 0, 0)
if not self.__msm_path:
return ref_time
if band is None:
band = self.bands[0]
elif band not in self.bands:
raise ValueError('band not found in product')
msm_path = str(self.__msm_path).replace('%', band)
msm_type = self.__msm_path.name
if msm_type in ['ANALOG_OFFSET_SWIR', 'LONG_TERM_SWIR']:
grp = self.fid[msm_path]
dset = grp[msm_type.lower() + '_group_keys']
group_keys = dset['group'][:]
for name in group_keys:
grp_path = PurePosixPath(f'BAND{band}_CALIBRATION',
name.decode('ascii'),
'OBSERVATIONS')
grp = self.fid[str(grp_path)]
ref_time += timedelta(seconds=int(grp['time'][0]))
elif msm_type in ['DPQF_MAP', 'NOISE']:
grp_path = PurePosixPath(msm_path).parent / 'ANALOG_OFFSET_SWIR'
grp = self.fid[str(grp_path)]
dset = grp['analog_offset_swir_group_keys']
group_keys = dset['group'][:]
for name in group_keys:
grp_path = PurePosixPath(f'BAND{band}_CALIBRATION',
name.decode('ascii'),
'OBSERVATIONS')
grp = self.fid[str(grp_path)]
ref_time += timedelta(seconds=int(grp['time'][0]))
else:
grp_path = PurePosixPath(msm_path, 'OBSERVATIONS')
grp = self.fid[str(grp_path)]
ref_time += timedelta(seconds=int(grp['time'][0]))
return ref_time
def get_delta_time(self, band=None):
"""
Returns offset from the reference start time of measurement
Parameters
----------
band : None or {'1', '2', '3', ..., '8'}
Select one of the band present in the product.
Default is 'None' which returns the first available band
"""
if not self.__msm_path:
return None
if band is None:
band = self.bands[0]
elif band not in self.bands:
raise ValueError('band not found in product')
msm_path = str(self.__msm_path).replace('%', band)
msm_type = self.__msm_path.name
res = None
if msm_type in ['ANALOG_OFFSET_SWIR', 'LONG_TERM_SWIR']:
grp = self.fid[msm_path]
dset = grp[msm_type.lower() + '_group_keys']
group_keys = dset['group'][:]
for name in group_keys:
grp_path = PurePosixPath(f'BAND{band}_CALIBRATION',
name.decode('ascii'),
'OBSERVATIONS')
grp = self.fid[str(grp_path)]
if res is None:
res = grp['delta_time'][0, :].astype(int)
else:
res = np.append(res, grp['delta_time'][0, :].astype(int))
elif msm_type in ['DPQF_MAP', 'NOISE']:
grp_path = PurePosixPath(msm_path).parent / 'ANALOG_OFFSET_SWIR'
grp = self.fid[str(grp_path)]
dset = grp['analog_offset_swir_group_keys']
group_keys = dset['group'][:]
for name in group_keys:
grp_path = PurePosixPath(f'BAND{band}_CALIBRATION',
name.decode('ascii'),
'OBSERVATIONS')
                grp = self.fid[str(grp_path)]
if res is None:
res = grp['delta_time'][0, :].astype(int)
else:
res = np.append(res, grp['delta_time'][0, :].astype(int))
else:
grp_path = PurePosixPath(msm_path, 'OBSERVATIONS')
grp = self.fid[str(grp_path)]
res = grp['delta_time'][0, :].astype(int)
return res
def get_instrument_settings(self, band=None) -> np.ndarray:
"""
Returns instrument settings of measurement
Parameters
----------
band : None or {'1', '2', '3', ..., '8'}
Select one of the band present in the product.
Default is 'None' which returns the first available band
"""
if not self.__msm_path:
return None
if band is None:
band = self.bands[0]
elif band not in self.bands:
raise ValueError('band not found in product')
msm_path = str(self.__msm_path).replace('%', band)
msm_type = self.__msm_path.name
res = None
if msm_type in ['ANALOG_OFFSET_SWIR', 'LONG_TERM_SWIR']:
grp = self.fid[msm_path]
dset = grp[msm_type.lower() + '_group_keys']
group_keys = dset['group'][:]
for name in group_keys:
grp_path = PurePosixPath(f'BAND{band}_CALIBRATION',
name.decode('ascii'),
'INSTRUMENT')
grp = self.fid[str(grp_path)]
if res is None:
res = grp['instrument_settings'][:]
else:
res = np.append(res, grp['instrument_settings'][:])
elif msm_type == 'DPQF_MAP':
grp_path = PurePosixPath(msm_path).parent / 'ANALOG_OFFSET_SWIR'
grp = self.fid[str(grp_path)]
dset = grp['analog_offset_swir_group_keys']
group_keys = dset['group'][:]
for name in group_keys:
grp_path = PurePosixPath(f'BAND{band}_CALIBRATION',
name.decode('ascii'),
'INSTRUMENT')
                grp = self.fid[str(grp_path)]
if res is None:
res = grp['instrument_settings'][:]
else:
res = np.append(res, grp['instrument_settings'][:])
elif msm_type == 'NOISE':
grp = self.fid[msm_path]
dset = grp[msm_type.lower() + '_msmt_keys']
icid = dset['icid'][dset.size // 2]
grp_path = PurePosixPath(f'BAND{band}_CALIBRATION',
f'BACKGROUND_RADIANCE_MODE_{icid:04d}',
'INSTRUMENT')
grp = self.fid[str(grp_path)]
res = grp['instrument_settings'][:]
else:
grp_path = PurePosixPath(msm_path, 'INSTRUMENT')
grp = self.fid[str(grp_path)]
res = grp['instrument_settings'][:]
return res
def get_exposure_time(self, band=None) -> list:
"""
Returns pixel exposure time of the measurements, which is calculated
from the parameters 'int_delay' and 'int_hold' for SWIR.
Parameters
----------
band : None or {'1', '2', '3', ..., '8'}
Select one of the band present in the product
Default is 'None' which returns the first available band
"""
if band is None:
band = self.bands[0]
elif band not in self.bands:
raise ValueError('band not found in product')
# obtain instrument settings
instr_arr = self.get_instrument_settings(band)
if instr_arr is None:
return None
# calculate exact exposure time
res = []
for instr in instr_arr:
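            # SWIR bands (7 and 8): the exposure time is reconstructed from the
            # 'int_delay' and 'int_hold' register values; the 1.25e-6 factor is
            # assumed to be the detector clock period in seconds. Other bands
            # report their 'exposure_time' setting directly.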
if int(band) > 6:
res.append(1.25e-6 * (65540
- instr['int_delay'] + instr['int_hold']))
else:
res.append(instr['exposure_time'])
return res
def get_housekeeping_data(self, band=None):
"""
Returns housekeeping data of measurements
Parameters
----------
band : None or {'1', '2', '3', ..., '8'}
Select one of the band present in the product.
Default is 'None' which returns the first available band
"""
if not self.__msm_path:
return None
if band is None:
band = self.bands[0]
elif band not in self.bands:
raise ValueError('band not found in product')
msm_path = str(self.__msm_path).replace('%', band)
msm_type = self.__msm_path.name
res = None
if msm_type in ['ANALOG_OFFSET_SWIR', 'LONG_TERM_SWIR']:
grp = self.fid[msm_path]
dset = grp[msm_type.lower() + '_group_keys']
group_keys = dset['group'][:]
for name in group_keys:
grp_path = PurePosixPath(f'BAND{band}_CALIBRATION',
name.decode('ascii'),
'INSTRUMENT')
grp = self.fid[str(grp_path)]
if res is None:
res = np.squeeze(grp['housekeeping_data'])
else:
res = np.append(res, np.squeeze(grp['housekeeping_data']))
elif msm_type in ['DPQF_MAP', 'NOISE']:
grp_path = PurePosixPath(msm_path).parent / 'ANALOG_OFFSET_SWIR'
grp = self.fid[str(grp_path)]
dset = grp['analog_offset_swir_group_keys']
group_keys = dset['group'][:]
for name in group_keys:
                grp_path = PurePosixPath(f'BAND{band}_CALIBRATION',
name.decode('ascii'),
'INSTRUMENT')
grp = self.fid[str(grp_path)]
if res is None:
res = np.squeeze(grp['housekeeping_data'])
else:
res = np.append(res, np.squeeze(grp['housekeeping_data']))
else:
grp_path = PurePosixPath(msm_path, 'INSTRUMENT')
grp = self.fid[str(grp_path)]
res = np.squeeze(grp['housekeeping_data'])
return res
# -------------------------
def get_msmt_keys(self, band=None):
"""
Read msmt_keys from the analysis groups
Parameters
----------
band : None or {'1', '2', '3', ..., '8'}
Select one of the band present in the product.
Default is 'None' which returns the first available band
Returns
-------
[ANALOG_OFFSET_SWIR] analog_offset_swir_group_keys
[LONG_TERM_SWIR] long_term_swir_group_keys
[NOISE] noise_msmt_keys
else None
"""
if not self.__msm_path:
return None
if band is None:
band = self.bands[0]
elif band not in self.bands:
raise ValueError('band not found in product')
msm_path = str(self.__msm_path).replace('%', band)
msm_type = self.__msm_path.name
if msm_type in ['ANALOG_OFFSET_SWIR', 'LONG_TERM_SWIR']:
grp = self.fid[msm_path]
return np.squeeze(grp[msm_type.lower() + '_group_keys'])
if msm_type == 'NOISE':
grp = self.fid[msm_path]
return np.squeeze(grp[msm_type.lower() + '_msmt_keys'])
return None
# -------------------------
def get_msm_attr(self, msm_dset, attr_name, band=None):
"""
Returns attribute of measurement dataset "msm_dset"
Parameters
----------
msm_dset : string
Name of measurement dataset
attr_name : string
Name of the attribute
band : None or {'1', '2', '3', ..., '8'}
Select one of the band present in the product.
Default is 'None' which returns the first available band
Returns
-------
out : scalar or numpy array
value of attribute "attr_name"
"""
if not self.__msm_path:
return None
if band is None:
band = self.bands[0]
elif band not in self.bands:
raise ValueError('band not found in product')
for dset_grp in ['OBSERVATIONS', 'ANALYSIS', '']:
ds_path = PurePosixPath(str(self.__msm_path).replace('%', band),
dset_grp, msm_dset)
if str(ds_path) not in self.fid:
continue
if attr_name in self.fid[str(ds_path)].attrs:
attr = self.fid[str(ds_path)].attrs[attr_name]
if isinstance(attr, bytes):
return attr.decode('ascii')
return attr
return None
def get_geo_data(self, band=None,
geo_dset='satellite_latitude,satellite_longitude'):
"""
Returns data of selected datasets from the GEODATA group
Parameters
----------
geo_dset : string
Name(s) of datasets in the GEODATA group, comma separated
Default is 'satellite_latitude,satellite_longitude'
band : None or {'1', '2', '3', ..., '8'}
Select one of the band present in the product
Default is 'None' which returns the first available band
Returns
-------
out : dictionary
dictionary data of selected datasets from the GEODATA group
names of dictionary are taken from parameter geo_dset
"""
if not self.__msm_path:
return None
if band is None:
band = str(self.bands[0])
elif band not in self.bands:
raise ValueError('band not found in product')
msm_path = str(self.__msm_path).replace('%', band)
msm_type = self.__msm_path.name
res = {}
if msm_type in ['ANALOG_OFFSET_SWIR', 'LONG_TERM_SWIR']:
grp = self.fid[msm_path]
dset = grp[msm_type.lower() + '_group_keys']
group_keys = dset['group'][:]
for name in group_keys:
grp_path = PurePosixPath(f'BAND{band}_CALIBRATION',
name.decode('ascii'),
'GEODATA')
grp = self.fid[str(grp_path)]
for key in geo_dset.split(','):
res[key] = np.squeeze(grp[key])
elif msm_type in ['DPQF_MAP', 'NOISE']:
grp_path = PurePosixPath(msm_path).parent / 'ANALOG_OFFSET_SWIR'
grp = self.fid[str(grp_path)]
dset = grp['analog_offset_swir_group_keys']
group_keys = dset['group'][:]
for name in group_keys:
grp_path = PurePosixPath(f'BAND{band}_CALIBRATION',
name.decode('ascii'),
'GEODATA')
grp = self.fid[str(grp_path)]
for key in geo_dset.split(','):
                    res[key] = np.squeeze(grp[key])
import math
import numpy as np
from sklearn.manifold import Isomap
from sklearn.preprocessing import StandardScaler
def data_from_ts(ts_values, window_size):
len_ts = len(ts_values)
len_networks = math.ceil(len_ts / window_size)
networks = []
print(len_networks)
for i in range(0, len_ts, window_size):
window = ts_values[i:i + window_size]
new_network = compute_network(window)
networks.append(new_network)
return networks
def compute_network(window):
len_window = len(window)
network = np.zeros((len_window, len_window))
for i in range(len_window):
network[:, i] = np.sqrt(np.array(abs(window[i] - window) ** 2, dtype=np.float64))
return network
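# Note: for a 1-D window of scalars, sqrt(|w_i - w_j| ** 2) is simply |w_i - w_j|,
# so compute_network() returns the pairwise absolute-difference matrix of the window.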
def reduce_networks(networks):
sparsifyed_networks = []
for i in range(len(networks)):
net = networks[i]
iso_net = get_iso_net(net, 4, 2)
reduced_net = compute_multi_net(iso_net)
# normalize
        # TODO: the elements on the main diagonal should actually be equal, but they are not
scaler = StandardScaler()
scaler.fit(net)
a1 = scaler.transform(net)
scaler2 = StandardScaler()
scaler2.fit(reduced_net)
a2 = scaler2.transform(reduced_net)
difference = a1 - a2
sparsify_net = spar_net(reduced_net, difference)
# print('sparsify net: {}'.format(sparsify_net))
sparsifyed_networks.append(sparsify_net)
return sparsifyed_networks
def compute_multi_net(iso_net):
len_ts = iso_net.shape[0]
distance_matrix = np.zeros((len_ts, len_ts))
dim_ts = iso_net.ndim
for x in range(len_ts):
        distance = np.zeros(len_ts)
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test Model Agnostic graph handler."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Standard Imports
import numpy as np
import tensorflow as tf
from tensorflow_model_analysis.eval_saved_model import testutil
from tensorflow_model_analysis.model_agnostic_eval import model_agnostic_predict
class ModelAgnosticPredictTest(testutil.TensorflowModelAnalysisTest):
def testValidation(self):
# Test no feature spec.
with self.assertRaisesRegexp(
ValueError, 'ModelAgnosticConfig must have feature_spec set.'):
model_agnostic_predict.ModelAgnosticConfig(
label_keys=['label'],
prediction_keys=['probabilities'],
feature_spec=None)
# Test no prediction keys.
feature_map = {
'age':
tf.FixedLenFeature([], tf.int64),
'language':
tf.VarLenFeature(tf.string),
'probabilities':
tf.FixedLenFeature([2], tf.int64, default_value=[9, 9]),
'label':
tf.FixedLenFeature([], tf.int64)
}
with self.assertRaisesRegexp(
ValueError, 'ModelAgnosticConfig must have prediction keys set.'):
model_agnostic_predict.ModelAgnosticConfig(
label_keys=['label'], prediction_keys=[], feature_spec=feature_map)
# Test no label keys.
with self.assertRaisesRegexp(
ValueError, 'ModelAgnosticConfig must have label keys set.'):
model_agnostic_predict.ModelAgnosticConfig(
label_keys=[],
prediction_keys=['predictions'],
feature_spec=feature_map)
# Test prediction key not in feature spec.
with self.assertRaisesRegexp(
ValueError, 'Prediction key not_prob not defined in feature_spec.'):
model_agnostic_predict.ModelAgnosticConfig(
label_keys=['label'],
prediction_keys=['not_prob'],
feature_spec=feature_map)
# Test label key not in feature spec.
with self.assertRaisesRegexp(
ValueError, 'Label key not_label not defined in feature_spec.'):
model_agnostic_predict.ModelAgnosticConfig(
label_keys=['not_label'],
prediction_keys=['probabilities'],
feature_spec=feature_map)
def testExtractFplExampleGraph(self):
# Set up some examples with some Sparseness.
examples = [
self._makeExample(
age=0, language='english', probabilities=[0.2, 0.8], label=1),
self._makeExample(age=1, language='chinese', label=0),
self._makeExample(age=2, probabilities=[0.1, 0.9], label=1),
self._makeExample(
language='chinese', probabilities=[0.8, 0.2], label=0),
]
# Set up the expected results on two of the fields. Note that directly
# entire FPLs will fail in numpy comparison.
expected_age = [np.array([0]), np.array([1]), np.array([2]), np.array([3])]
expected_language = [
tf.SparseTensorValue(
indices=np.array([[0, 0]]),
values=np.array([b'english'], dtype=np.object),
dense_shape=np.array([1, 1])),
tf.SparseTensorValue(
indices=np.array([[0, 0]]),
values=np.array([b'chinese'], dtype=np.object),
dense_shape=np.array([1, 1])),
tf.SparseTensorValue(
indices=np.array([], dtype=np.int64).reshape([0, 2]),
values=np.array([], dtype=np.object),
dense_shape=np.array([1, 0])),
tf.SparseTensorValue(
indices=np.array([[0, 0]]),
values=np.array([b'chinese'], dtype=np.object),
            dense_shape=np.array([1, 1])),
import numpy as np
from scipy import interpolate
from primitives.planning.planners import SkeletonPlanning
from primitives.formation.control import FormationControl
class PrimitiveManager(object):
def __init__(self, state_manager):
self.config = state_manager.config
self.state_manager = state_manager
self.planning = SkeletonPlanning(self.state_manager.config,
self.state_manager.grid_map)
self.formation = FormationControl()
return None
def set_parameters(self, primitive_info):
"""Set up the parameters of the premitive execution
Parameters
----------
primitive_info: dict
A dictionary containing information about vehicles
and primitive realted parameters.
"""
# Update vehicles
self.vehicles_id = primitive_info['vehicles_id']
if primitive_info['vehicle_type'] == 'uav':
self.vehicles = [
self.state_manager.uav[j] for j in self.vehicles_id
]
else:
self.vehicles = [
self.state_manager.ugv[j] for j in self.vehicles_id
]
self.n_vehicles = len(self.vehicles)
# Primitive parameters
self.primitive_id = primitive_info['primitive_id']
self.formation_type = primitive_info['formation_type']
self.end_pos = primitive_info['end_pos']
self.count = 0
return None
def make_vehicles_idle(self):
for vehicle in self.vehicles:
vehicle.idle = True
return None
def make_vehicles_nonidle(self):
for vehicle in self.vehicles:
vehicle.idle = False
return None
def get_centroid(self):
centroid = []
for vehicle in self.vehicles:
centroid.append(vehicle.current_pos)
centroid = np.mean(np.asarray(centroid), axis=0)
return centroid[0:2] # only x and y
def convert_pixel_ordinate(self, point, ispixel):
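        # Affine map between pixel and Cartesian coordinates: 0.42871 appears to be
        # the map resolution (world units per pixel) and (145, 115) the pixel offset
        # of the world origin -- both read off the constants below, not documented.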
if not ispixel:
converted = [point[0] / 0.42871 + 145, point[1] / 0.42871 + 115]
else:
converted = [(point[0] - 145) * 0.42871,
(point[1] - 115) * 0.42871]
return converted
def get_spline_points(self):
# Perform planning and fit a spline
self.start_pos = self.centroid_pos
pixel_start = self.convert_pixel_ordinate(self.start_pos,
ispixel=False)
pixel_end = self.convert_pixel_ordinate(self.end_pos, ispixel=False)
path = self.planning.find_path(pixel_start, pixel_end, spline=False)
# Convert to cartesian co-ordinates
points = np.zeros((len(path), 2))
for i, point in enumerate(path):
points[i, :] = self.convert_pixel_ordinate(point, ispixel=True)
# Depending on the distance select number of points of the path
segment_length = np.linalg.norm(self.start_pos - self.end_pos)
        n_steps = int(np.floor(segment_length / 200 * 250))  # np.linspace expects an integer sample count
if points.shape[0] > 3:
tck, u = interpolate.splprep(points.T)
unew = np.linspace(u.min(), u.max(), n_steps)
x_new, y_new = interpolate.splev(unew, tck)
# points = interpcurve(250, x_new, y_new)
# x_new, y_new = points[:, 0], points[:, 1]
else:
# Find unique points
points = np.array(list(set(tuple(p) for p in points)))
f = interpolate.interp1d(points[:, 0], points[:, 1])
x_new = np.linspace(points[0, 0], points[-1, 0], 10)
y_new = f(x_new)
new_points = np.array([x_new, y_new]).T
return new_points, points
def execute_primitive(self, p_simulation):
"""Perform primitive execution
"""
primitives = [self.planning_primitive, self.formation_primitive]
done = primitives[self.primitive_id - 1]()
# Step the simulation
p_simulation.stepSimulation()
return done
def planning_primitive(self):
"""Performs path planning primitive
"""
# Make vehicles non idle
self.make_vehicles_nonidle()
done_rolling = False
if self.count == 0:
# First point of formation
self.centroid_pos = self.get_centroid()
self.next_pos = self.centroid_pos
done = self.formation_primitive()
if done:
self.count = 1
self.new_points, points = self.get_spline_points()
else:
self.centroid_pos = self.get_centroid()
distance = np.linalg.norm(self.centroid_pos - self.end_pos)
if len(self.new_points) > 2 and distance > 5:
self.next_pos = self.new_points[0]
                self.new_points = np.delete(self.new_points, 0, 0)
import os
import vtk
import qt
import slicer
import numpy as np
import logging
from slicer.ScriptedLoadableModule import ScriptedLoadableModule, ScriptedLoadableModuleWidget, ScriptedLoadableModuleLogic, ScriptedLoadableModuleTest
# PinholeCameraCalibration
class PinholeCameraCalibration(ScriptedLoadableModule):
def __init__(self, parent):
ScriptedLoadableModule.__init__(self, parent)
self.parent.title = "PinholeCamera Calibration"
self.parent.categories = ["Computer Vision"]
self.parent.dependencies = ["PinholeCameras", "PointToLineRegistration", "Annotations"]
self.parent.contributors = ["<NAME> (Robarts Research Institute)"]
self.parent.helpText = """This module utilizes OpenCV camera calibration functions to perform intrinsic calibration and calibration to an external tracker using a tracked, calibrated stylus. """ + self.getDefaultModuleDocumentationLink()
self.parent.acknowledgementText = """This module was developed with support from the Natural Sciences and Engineering Research Council of Canada, the Canadian Foundation for Innovation, and the Virtual Augmentation and Simulation for Surgery and Therapy laboratory, Western University."""
# PinholeCameraCalibrationWidget
class PinholeCameraCalibrationWidget(ScriptedLoadableModuleWidget):
@staticmethod
def get(widget, objectName):
if widget.objectName == objectName:
return widget
else:
for w in widget.children():
resulting_widget = PinholeCameraCalibrationWidget.get(w, objectName)
if resulting_widget:
return resulting_widget
return None
@staticmethod
def areSameVTK4x4(a, b):
for i in range(0, 4):
for j in range(0, 4):
if a.GetElement(i,j) != b.GetElement(i,j):
return False
return True
@staticmethod
def vtk4x4ToNumpy(vtk4x4):
if vtk4x4 is None:
return
val = np.asmatrix(np.eye(4, 4, dtype=np.float64))
for i in range(0, 4):
for j in range(0, 4):
val[i, j] = vtk4x4.GetElement(i, j)
return val
@staticmethod
def vtk3x3ToNumpy(vtk3x3):
if vtk3x3 is None:
return
val = np.asmatrix(np.eye(3, 3, dtype=np.float64))
for i in range(0, 3):
for j in range(0, 3):
val[i, j] = vtk3x3.GetElement(i, j)
return val
@staticmethod
def loadPixmap(param, x, y):
iconPath = os.path.join(os.path.dirname(slicer.modules.pinholecameracalibration.path), 'Resources/Icons/', param + ".png")
icon = qt.QIcon(iconPath)
return icon.pixmap(icon.actualSize(qt.QSize(x, y)))
def __init__(self, parent):
ScriptedLoadableModuleWidget.__init__(self, parent)
global SVC_OPENCV2_AVAILABLE
global SVC_ARUCO_AVAILABLE
try:
global cv2
import cv2
SVC_OPENCV2_AVAILABLE = True
except ImportError:
SVC_OPENCV2_AVAILABLE = False
if not SVC_OPENCV2_AVAILABLE:
logging.error("OpenCV2 python interface not available.")
return
try:
global aruco
import cv2.aruco as aruco
SVC_ARUCO_AVAILABLE = True
except ImportError:
SVC_ARUCO_AVAILABLE = False
if not SVC_ARUCO_AVAILABLE:
logging.warning("Aruco python interface not available.")
self.logic = PinholeCameraCalibrationLogic()
self.markupsLogic = slicer.modules.markups.logic()
self.canSelectFiducials = True
self.isManualCapturing = False
self.rayList = []
self.invertImage = False
self.markupsNode = None
self.centerFiducialSelectionNode = None
self.copyNode = None
self.imageGridNode = None
self.trivialProducer = None
self.widget = None
self.videoCameraIntrinWidget = None
self.videoCameraSelector = None
self.inputsContainer = None
self.trackerContainer = None
self.intrinsicsContainer = None
self.autoSettingsContainer = None
self.checkerboardContainer = None
self.flagsContainer = None
self.checkerboardFlags = None
self.circleGridFlags = None
self.arucoContainer = None
self.arucoMarkerSizeSpinBox = None
self.arucoMarkerSeparationSpinBox = None
self.charucoContainer = None
self.charucoSquareSizeSpinBox = None
self.charucoMarkerSizeSpinBox = None
# Observer tags
self.stylusTipTransformObserverTag = None
self.pointModifiedObserverTag = None
# Inputs
self.imageSelector = None
self.stylusTipTransformSelector = None
self.stylusTipTransformNode = None
self.okPixmap = PinholeCameraCalibrationWidget.loadPixmap('icon_Ok', 20, 20)
self.notOkPixmap = PinholeCameraCalibrationWidget.loadPixmap('icon_NotOk', 20, 20)
# Tracker
self.manualButton = None
self.semiAutoButton = None
self.manualModeButton = None
self.autoModeButton = None
self.semiAutoModeButton = None
self.autoButton = None
self.resetButton = None
self.resetPtLButton = None
self.trackerResultsLabel = None
self.captureCountSpinBox = None
self.stylusTipTransformStatusLabel = None
# Intrinsics
self.capIntrinsicButton = None
self.intrinsicCheckerboardButton = None
self.intrinsicCircleGridButton = None
self.intrinsicArucoButton = None
self.intrinsicCharucoButton = None
self.adaptiveThresholdButton = None
self.normalizeImageButton = None
self.filterQuadsButton = None
self.fastCheckButton = None
self.symmetricButton = None
self.asymmetricButton = None
self.squareSizeDoubleSpinBox = None
self.clusteringButton = None
self.invertImageButton = None
self.arucoDictComboBox = None
self.arucoDictContainer = None
self.calibrateButton = None
self.columnsSpinBox = None
self.rowsSpinBox = None
# Results
self.labelResult = None
self.labelPointsCollected = None
self.videoCameraOriginInReference = None
self.stylusTipToPinholeCamera = vtk.vtkMatrix4x4()
self.IdentityMatrix = vtk.vtkMatrix4x4()
def setup(self):
ScriptedLoadableModuleWidget.setup(self)
if not SVC_OPENCV2_AVAILABLE:
self.layout.addWidget(qt.QLabel("OpenCV2 python is required and not available. Check installation/configuration of SlicerOpenCV."))
else:
# Load the UI From file
scriptedModulesPath = eval('slicer.modules.%s.path' % self.moduleName.lower())
scriptedModulesPath = os.path.dirname(scriptedModulesPath)
path = os.path.join(scriptedModulesPath, 'Resources', 'UI', 'q' + self.moduleName + 'Widget.ui')
self.widget = slicer.util.loadUI(path)
self.layout.addWidget(self.widget)
# Camera UI
layout = PinholeCameraCalibrationWidget.get(self.widget, "gridLayout")
placeholder = PinholeCameraCalibrationWidget.get(self.widget, "placeholder")
layout.removeWidget(placeholder)
self.videoCameraIntrinWidget = slicer.qMRMLPinholeCameraIntrinsicsWidget()
self.videoCameraIntrinWidget.setMRMLScene(slicer.mrmlScene)
layout.addWidget(self.videoCameraIntrinWidget, 0, 0)
# Workaround for PinholeCamera selector
self.videoCameraSelector = self.videoCameraIntrinWidget.children()[1].children()[1]
# Inputs
self.imageSelector = PinholeCameraCalibrationWidget.get(self.widget, "comboBox_ImageSelector")
self.stylusTipTransformSelector = PinholeCameraCalibrationWidget.get(self.widget, "comboBox_StylusTipSelector")
# Tracker calibration members
self.inputsContainer = PinholeCameraCalibrationWidget.get(self.widget, "collapsibleButton_Inputs")
self.trackerContainer = PinholeCameraCalibrationWidget.get(self.widget, "collapsibleButton_Tracker")
self.intrinsicsContainer = PinholeCameraCalibrationWidget.get(self.widget, "collapsibleButton_Intrinsics")
self.checkerboardContainer = PinholeCameraCalibrationWidget.get(self.widget, "widget_ContainerCheckerboard")
self.flagsContainer = PinholeCameraCalibrationWidget.get(self.widget, "widget_ContainerFlags")
self.checkerboardFlags = PinholeCameraCalibrationWidget.get(self.widget, "widget_CheckerboardFlags")
self.circleGridFlags = PinholeCameraCalibrationWidget.get(self.widget, "widget_CircleGridFlags")
self.manualButton = PinholeCameraCalibrationWidget.get(self.widget, "pushButton_Manual")
self.semiAutoButton = PinholeCameraCalibrationWidget.get(self.widget, "pushButton_SemiAuto")
self.autoButton = PinholeCameraCalibrationWidget.get(self.widget, "pushButton_Automatic")
self.manualModeButton = PinholeCameraCalibrationWidget.get(self.widget, "radioButton_Manual")
self.semiAutoModeButton = PinholeCameraCalibrationWidget.get(self.widget, "radioButton_SemiAuto")
self.autoModeButton = PinholeCameraCalibrationWidget.get(self.widget, "radioButton_Automatic")
self.autoSettingsContainer = PinholeCameraCalibrationWidget.get(self.widget, "groupBox_AutoSettings")
self.resetPtLButton = PinholeCameraCalibrationWidget.get(self.widget, "pushButton_resetPtL")
self.trackerResultsLabel = PinholeCameraCalibrationWidget.get(self.widget, "label_TrackerResultsValue")
self.captureCountSpinBox = PinholeCameraCalibrationWidget.get(self.widget, "spinBox_captureCount")
self.stylusTipTransformStatusLabel = PinholeCameraCalibrationWidget.get(self.widget, "label_StylusTipToCamera_Status")
# Intrinsic calibration members
self.capIntrinsicButton = PinholeCameraCalibrationWidget.get(self.widget, "pushButton_CaptureIntrinsic")
self.resetButton = PinholeCameraCalibrationWidget.get(self.widget, "pushButton_Reset")
self.intrinsicCheckerboardButton = PinholeCameraCalibrationWidget.get(self.widget, "radioButton_IntrinsicCheckerboard")
self.intrinsicCircleGridButton = PinholeCameraCalibrationWidget.get(self.widget, "radioButton_IntrinsicCircleGrid")
self.intrinsicArucoButton = PinholeCameraCalibrationWidget.get(self.widget, "radioButton_IntrinsicAruco")
self.intrinsicCharucoButton = PinholeCameraCalibrationWidget.get(self.widget, "radioButton_IntrinsicCharuco")
self.columnsSpinBox = PinholeCameraCalibrationWidget.get(self.widget, "spinBox_Columns")
self.rowsSpinBox = PinholeCameraCalibrationWidget.get(self.widget, "spinBox_Rows")
self.squareSizeDoubleSpinBox = PinholeCameraCalibrationWidget.get(self.widget, "doubleSpinBox_SquareSize")
self.adaptiveThresholdButton = PinholeCameraCalibrationWidget.get(self.widget, "checkBox_AdaptiveThreshold")
self.normalizeImageButton = PinholeCameraCalibrationWidget.get(self.widget, "checkBox_NormalizeImage")
self.filterQuadsButton = PinholeCameraCalibrationWidget.get(self.widget, "checkBox_FilterQuads")
self.fastCheckButton = PinholeCameraCalibrationWidget.get(self.widget, "checkBox_FastCheck")
self.symmetricButton = PinholeCameraCalibrationWidget.get(self.widget, "radioButton_SymmetricGrid")
self.asymmetricButton = PinholeCameraCalibrationWidget.get(self.widget, "radioButton_AsymmetricGrid")
self.clusteringButton = PinholeCameraCalibrationWidget.get(self.widget, "checkBox_Clustering")
self.invertImageButton = PinholeCameraCalibrationWidget.get(self.widget, "checkBox_ImageInvert")
self.arucoDictComboBox = PinholeCameraCalibrationWidget.get(self.widget, "comboBox_arucoDict")
self.arucoDictContainer = PinholeCameraCalibrationWidget.get(self.widget, "widget_arucoDictContainer")
self.arucoContainer = PinholeCameraCalibrationWidget.get(self.widget, "widget_arucoArgs")
self.arucoMarkerSizeSpinBox = PinholeCameraCalibrationWidget.get(self.widget, "doubleSpinBox_markerSize")
self.arucoMarkerSeparationSpinBox = PinholeCameraCalibrationWidget.get(self.widget, "doubleSpinBox_markerSeparation")
self.charucoContainer = PinholeCameraCalibrationWidget.get(self.widget, "widget_charucoArgs")
self.charucoSquareSizeSpinBox = PinholeCameraCalibrationWidget.get(self.widget, "doubleSpinBox_charucoSquareSize")
self.charucoMarkerSizeSpinBox = PinholeCameraCalibrationWidget.get(self.widget, "doubleSpinBox_charucoMarkerSize")
self.calibrateButton = PinholeCameraCalibrationWidget.get(self.widget, "pushButton_Calibrate")
# Results
self.labelResult = PinholeCameraCalibrationWidget.get(self.widget, "label_ResultValue")
self.labelPointsCollected = PinholeCameraCalibrationWidget.get(self.widget, "label_PointsCollected")
# Disable capture as image processing isn't active yet
self.trackerContainer.setEnabled(False)
self.intrinsicsContainer.setEnabled(False)
# UI file method does not do mrml scene connections, do them manually
self.videoCameraIntrinWidget.setMRMLScene(slicer.mrmlScene)
self.imageSelector.setMRMLScene(slicer.mrmlScene)
self.stylusTipTransformSelector.setMRMLScene(slicer.mrmlScene)
# Inputs
self.imageSelector.connect("currentNodeChanged(vtkMRMLNode*)", self.onImageSelected)
self.stylusTipTransformSelector.connect("currentNodeChanged(vtkMRMLNode*)", self.onStylusTipTransformSelected)
# Connections
self.capIntrinsicButton.connect('clicked(bool)', self.onIntrinsicCapture)
self.resetButton.connect('clicked(bool)', self.onReset)
self.intrinsicCheckerboardButton.connect('clicked(bool)', self.onIntrinsicModeChanged)
self.intrinsicCircleGridButton.connect('clicked(bool)', self.onIntrinsicModeChanged)
self.intrinsicArucoButton.connect('clicked(bool)', self.onIntrinsicModeChanged)
self.intrinsicCharucoButton.connect('clicked(bool)', self.onIntrinsicModeChanged)
self.rowsSpinBox.connect('valueChanged(int)', self.onPatternChanged)
self.columnsSpinBox.connect('valueChanged(int)', self.onPatternChanged)
self.arucoMarkerSizeSpinBox.connect('valueChanged(double)', self.onPatternChanged)
self.arucoMarkerSeparationSpinBox.connect('valueChanged(double)', self.onPatternChanged)
self.charucoSquareSizeSpinBox.connect('valueChanged(double)', self.onPatternChanged)
self.charucoMarkerSizeSpinBox.connect('valueChanged(double)', self.onPatternChanged)
self.captureCountSpinBox.connect('valueChanged(int)', self.onCaptureCountChanged)
self.arucoDictComboBox.connect('currentIndexChanged(int)', self.onArucoDictChanged)
self.calibrateButton.connect('clicked(bool)', self.onCalibrateButtonClicked)
self.manualButton.connect('clicked(bool)', self.onManualButton)
self.semiAutoButton.connect('clicked(bool)', self.onSemiAutoButton)
self.autoButton.connect('clicked(bool)', self.onAutoButton)
self.manualModeButton.connect('clicked(bool)', self.onProcessingModeChanged)
self.autoModeButton.connect('clicked(bool)', self.onProcessingModeChanged)
self.resetPtLButton.connect('clicked(bool)', self.onResetPtL)
self.adaptiveThresholdButton.connect('clicked(bool)', self.onFlagChanged)
self.normalizeImageButton.connect('clicked(bool)', self.onFlagChanged)
self.filterQuadsButton.connect('clicked(bool)', self.onFlagChanged)
self.fastCheckButton.connect('clicked(bool)', self.onFlagChanged)
self.symmetricButton.connect('clicked(bool)', self.onFlagChanged)
self.asymmetricButton.connect('clicked(bool)', self.onFlagChanged)
self.clusteringButton.connect('clicked(bool)', self.onFlagChanged)
self.invertImageButton.connect('stateChanged(int)', self.onInvertImageChanged)
# Choose red slice only
lm = slicer.app.layoutManager()
lm.setLayout(slicer.vtkMRMLLayoutNode.SlicerLayoutOneUpRedSliceView)
# Populate Aruco Dict combobox
entries = dir(cv2.aruco)
for entry in entries:
if entry.find("DICT_") != -1:
# Crop off initial DICT_ for easy keyboard jumping
self.arucoDictComboBox.addItem(entry[entry.find('DICT_')+5:])
# Force the dictionary updating in the logic
self.onArucoDictChanged()
# Initialize pattern, etc..
self.onIntrinsicModeChanged()
# Refresh Apply button state
self.updateUI()
self.onProcessingModeChanged()
def cleanup(self):
self.onReset()
self.onResetPtL()
self.capIntrinsicButton.disconnect('clicked(bool)', self.onIntrinsicCapture)
self.intrinsicCheckerboardButton.disconnect('clicked(bool)', self.onIntrinsicModeChanged)
self.intrinsicCircleGridButton.disconnect('clicked(bool)', self.onIntrinsicModeChanged)
self.intrinsicArucoButton.disconnect('clicked(bool)', self.onIntrinsicModeChanged)
self.intrinsicCharucoButton.disconnect('clicked(bool)', self.onIntrinsicModeChanged)
self.rowsSpinBox.disconnect('valueChanged(int)', self.onPatternChanged)
self.columnsSpinBox.disconnect('valueChanged(int)', self.onPatternChanged)
self.arucoMarkerSizeSpinBox.disconnect('valueChanged(double)', self.onPatternChanged)
self.arucoMarkerSeparationSpinBox.disconnect('valueChanged(double)', self.onPatternChanged)
self.charucoSquareSizeSpinBox.disconnect('valueChanged(double)', self.onPatternChanged)
self.charucoMarkerSizeSpinBox.disconnect('valueChanged(double)', self.onPatternChanged)
self.captureCountSpinBox.disconnect('valueChanged(int)', self.onCaptureCountChanged)
self.arucoDictComboBox.disconnect('currentIndexChanged(int)', self.onArucoDictChanged)
self.calibrateButton.disconnect('clicked(bool)', self.onCalibrateButtonClicked)
self.imageSelector.disconnect("currentNodeChanged(vtkMRMLNode*)", self.onImageSelected)
    self.stylusTipTransformSelector.disconnect("currentNodeChanged(vtkMRMLNode*)", self.onStylusTipTransformSelected)
self.manualButton.disconnect('clicked(bool)', self.onManualButton)
self.semiAutoButton.disconnect('clicked(bool)', self.onSemiAutoButton)
self.autoButton.disconnect('clicked(bool)', self.onAutoButton)
self.manualModeButton.disconnect('clicked(bool)', self.onProcessingModeChanged)
self.autoModeButton.disconnect('clicked(bool)', self.onProcessingModeChanged)
self.resetPtLButton.disconnect('clicked(bool)', self.onResetPtL)
self.adaptiveThresholdButton.disconnect('clicked(bool)', self.onFlagChanged)
self.normalizeImageButton.disconnect('clicked(bool)', self.onFlagChanged)
self.filterQuadsButton.disconnect('clicked(bool)', self.onFlagChanged)
self.fastCheckButton.disconnect('clicked(bool)', self.onFlagChanged)
self.symmetricButton.disconnect('clicked(bool)', self.onFlagChanged)
self.asymmetricButton.disconnect('clicked(bool)', self.onFlagChanged)
self.clusteringButton.disconnect('clicked(bool)', self.onFlagChanged)
self.invertImageButton.disconnect('stateChanged(int)', self.onInvertImageChanged)
def onReset(self):
self.logic.resetIntrinsic()
self.labelResult.text = "Reset."
    # reset the intrinsics to the identity matrix (vtkMatrix3x3.Identity() modifies in place and returns None)
    identityMatrix = vtk.vtkMatrix3x3()
    identityMatrix.Identity()
    self.videoCameraIntrinWidget.GetCurrentNode().SetAndObserveIntrinsicMatrix(identityMatrix)
self.videoCameraIntrinWidget.GetCurrentNode().SetNumberOfDistortionCoefficients(5)
for i in range(0,5):
self.videoCameraIntrinWidget.GetCurrentNode().SetDistortionCoefficientValue(i, 0.0)
def onResetPtL(self):
self.rayList = []
self.logic.resetMarkerToSensor()
self.trackerResultsLabel.text = "Reset."
def onImageSelected(self):
# Set red slice to the copy node
if self.imageSelector.currentNode() is not None:
slicer.app.layoutManager().sliceWidget('Red').sliceLogic().GetSliceCompositeNode().SetBackgroundVolumeID(self.imageSelector.currentNode().GetID())
slicer.app.layoutManager().sliceWidget('Red').sliceLogic().FitSliceToAll()
slicer.app.layoutManager().sliceWidget('Red').sliceController().rotateSliceToLowestVolumeAxes() # If the image is not RAS aligned, we want to show it to the user anyways
# Check pixel spacing, x and y must be 1px = 1mm in order for markups to produce correct pixel locations
spacing = self.imageSelector.currentNode().GetImageData().GetSpacing()
if spacing[0] != 1.0 or spacing[1] != 1.0:
self.labelResult.text = "Image does not have 1.0 spacing in x or y, markup fiducials will not represent pixels exactly! Cannot proceed."
self.canSelectFiducials = False
else:
self.canSelectFiducials = True
self.updateUI()
def onCaptureCountChanged(self):
countString = str(self.logic.countMarkerToSensor()) + "/" + str(self.captureCountSpinBox.value) + " points captured."
string = ""
result = False
if self.logic.countMarkerToSensor() >= self.captureCountSpinBox.value:
result, videoCameraToImage, string = self.calcRegAndBuildString()
if result:
self.trackerResultsLabel.text = countString + " " + string
else:
self.trackerResultsLabel.text = countString
def onFlagChanged(self):
flags = 0
if self.intrinsicCheckerboardButton.checked:
if self.adaptiveThresholdButton.checked:
flags = flags + cv2.CALIB_CB_ADAPTIVE_THRESH
if self.normalizeImageButton.checked:
flags = flags + cv2.CALIB_CB_NORMALIZE_IMAGE
if self.filterQuadsButton.checked:
flags = flags + cv2.CALIB_CB_FILTER_QUADS
if self.fastCheckButton.checked:
flags = flags + cv2.CALIB_CB_FAST_CHECK
else:
if self.symmetricButton.checked:
flags = flags + cv2.CALIB_CB_SYMMETRIC_GRID
if self.asymmetricButton.checked:
flags = flags + cv2.CALIB_CB_ASYMMETRIC_GRID
if self.clusteringButton.checked:
flags = flags + cv2.CALIB_CB_CLUSTERING
self.logic.setFlags(flags)
def onInvertImageChanged(self, value):
self.invertImage = self.invertImageButton.isChecked()
def onPatternChanged(self, value):
self.onIntrinsicModeChanged()
def onIntrinsicCapture(self):
vtk_im = self.imageSelector.currentNode().GetImageData()
rows, cols, _ = vtk_im.GetDimensions()
components = vtk_im.GetNumberOfScalarComponents()
sc = vtk_im.GetPointData().GetScalars()
im = vtk.util.numpy_support.vtk_to_numpy(sc)
im = im.reshape(cols, rows, components)
if self.intrinsicCheckerboardButton.checked:
ret = self.logic.findCheckerboard(im, self.invertImage)
_count = 0
for i in range(0, len(self.logic.imagePoints)):
_count = _count + len(self.logic.imagePoints[i])
self.labelPointsCollected.text = _count
elif self.intrinsicCircleGridButton.checked:
ret = self.logic.findCircleGrid(im, self.invertImage)
_count = 0
for i in range(0, len(self.logic.imagePoints)):
_count = _count + len(self.logic.imagePoints[i])
self.labelPointsCollected.text = _count
elif self.intrinsicArucoButton.checked:
ret = self.logic.findAruco(im, self.invertImage)
_count = 0
for i in range(0, len(self.logic.arucoCorners)):
_count = _count + len(self.logic.arucoCorners[i])
self.labelPointsCollected.text = _count
elif self.intrinsicCharucoButton.checked:
ret = self.logic.findCharuco(im, self.invertImage)
_count = 0
for i in range(0, len(self.logic.charucoCorners)):
_count = _count + len(self.logic.charucoCorners[i])
self.labelPointsCollected.text = _count
else:
pass
if ret:
self.labelResult.text = "Success (" + str(self.logic.countIntrinsics()) + ")"
else:
self.labelResult.text = "Failure."
def onIntrinsicModeChanged(self):
if self.intrinsicCheckerboardButton.checked:
self.checkerboardContainer.enabled = True
self.squareSizeDoubleSpinBox.enabled = True
self.flagsContainer.enabled = True
self.checkerboardFlags.enabled = True
self.circleGridFlags.enabled = False
self.arucoDictContainer.enabled = False
self.arucoContainer.enabled = False
self.charucoContainer.enabled = False
self.logic.calculateObjectPattern(self.rowsSpinBox.value, self.columnsSpinBox.value, 'checkerboard', self.squareSizeDoubleSpinBox.value, 0)
elif self.intrinsicCircleGridButton.checked:
self.checkerboardContainer.enabled = True
self.squareSizeDoubleSpinBox.enabled = False
self.flagsContainer.enabled = True
self.checkerboardFlags.enabled = False
self.circleGridFlags.enabled = True
self.arucoDictContainer.enabled = False
self.arucoContainer.enabled = False
self.charucoContainer.enabled = False
self.logic.calculateObjectPattern(self.rowsSpinBox.value, self.columnsSpinBox.value, 'circlegrid', self.squareSizeDoubleSpinBox.value, 0)
elif self.intrinsicArucoButton.checked:
self.checkerboardContainer.enabled = True
self.squareSizeDoubleSpinBox.enabled = False
self.flagsContainer.enabled = False
self.checkerboardFlags.enabled = False
self.circleGridFlags.enabled = False
self.arucoDictContainer.enabled = True
self.arucoContainer.enabled = True
self.charucoContainer.enabled = False
self.logic.calculateObjectPattern(self.rowsSpinBox.value, self.columnsSpinBox.value, 'aruco', self.arucoMarkerSizeSpinBox.value, self.arucoMarkerSeparationSpinBox.value)
elif self.intrinsicCharucoButton.checked:
self.checkerboardContainer.enabled = True
self.squareSizeDoubleSpinBox.enabled = False
self.flagsContainer.enabled = False
self.checkerboardFlags.enabled = False
self.circleGridFlags.enabled = False
self.arucoDictContainer.enabled = True
self.arucoContainer.enabled = False
self.charucoContainer.enabled = True
self.logic.calculateObjectPattern(self.rowsSpinBox.value, self.columnsSpinBox.value, 'charuco', self.charucoSquareSizeSpinBox.value, self.charucoMarkerSizeSpinBox.value)
else:
pass
def onStylusTipTransformSelected(self):
if self.stylusTipTransformObserverTag is not None:
self.stylusTipTransformNode.RemoveObserver(self.stylusTipTransformObserverTag)
self.stylusTipTransformObserverTag = None
self.stylusTipTransformNode = self.stylusTipTransformSelector.currentNode()
if self.stylusTipTransformNode is not None:
self.stylusTipTransformObserverTag = self.stylusTipTransformNode.AddObserver(slicer.vtkMRMLTransformNode.TransformModifiedEvent, self.onStylusTipTransformModified)
self.updateUI()
def onCalibrateButtonClicked(self):
done, error, mtx, dist = self.logic.calibratePinholeCamera()
if done:
self.videoCameraIntrinWidget.GetCurrentNode().SetAndObserveIntrinsicMatrix(mtx)
self.videoCameraIntrinWidget.GetCurrentNode().SetNumberOfDistortionCoefficients(dist.GetNumberOfValues())
for i in range(0, dist.GetNumberOfValues()):
self.videoCameraIntrinWidget.GetCurrentNode().SetDistortionCoefficientValue(i, dist.GetValue(i))
self.videoCameraIntrinWidget.GetCurrentNode().SetReprojectionError(error)
self.labelResult.text = "Calibration reprojection error: " + str(error) + "."
@vtk.calldata_type(vtk.VTK_OBJECT)
def onStylusTipTransformModified(self, caller, event):
mat = vtk.vtkMatrix4x4()
self.stylusTipTransformNode.GetMatrixTransformToWorld(mat)
if PinholeCameraCalibrationWidget.areSameVTK4x4(mat, self.IdentityMatrix):
self.stylusTipTransformStatusLabel.setPixmap(self.notOkPixmap)
self.manualButton.enabled = False
else:
self.stylusTipTransformStatusLabel.setPixmap(self.okPixmap)
self.manualButton.enabled = True
def updateUI(self):
self.capIntrinsicButton.enabled = self.imageSelector.currentNode() is not None \
and self.videoCameraSelector.currentNode() is not None
self.intrinsicsContainer.enabled = self.imageSelector.currentNode() is not None \
and self.videoCameraSelector.currentNode() is not None
self.trackerContainer.enabled = self.imageSelector.currentNode() is not None \
and self.stylusTipTransformSelector.currentNode() is not None \
and self.videoCameraSelector.currentNode() is not None \
and self.canSelectFiducials
def onProcessingModeChanged(self):
if self.manualModeButton.checked:
self.manualButton.setVisible(True)
self.semiAutoButton.setVisible(False)
self.autoButton.setVisible(False)
self.autoSettingsContainer.setVisible(False)
elif self.semiAutoModeButton.checked:
self.manualButton.setVisible(False)
self.semiAutoButton.setVisible(True)
      self.autoButton.setVisible(False)
self.autoSettingsContainer.setVisible(True)
else:
self.manualButton.setVisible(False)
self.semiAutoButton.setVisible(False)
self.autoButton.setVisible(True)
self.autoSettingsContainer.setVisible(True)
def endManualCapturing(self):
self.isManualCapturing = False
self.manualButton.setText('Capture')
# Resume playback
slicer.app.layoutManager().sliceWidget('Red').sliceLogic().GetSliceCompositeNode().SetBackgroundVolumeID(self.centerFiducialSelectionNode.GetID())
slicer.mrmlScene.RemoveNode(self.copyNode)
self.copyNode = None
# Re-enable UI
self.inputsContainer.setEnabled(True)
self.resetPtLButton.setEnabled(True)
def onManualButton(self):
if self.isManualCapturing:
# Cancel button hit
self.endManualCapturing()
slicer.modules.annotations.logic().StopPlaceMode()
return()
# Record tracker data at time of freeze and store
self.stylusTipTransformSelector.currentNode().GetMatrixTransformToWorld(self.stylusTipToPinholeCamera)
# Make a copy of the volume node (aka freeze cv capture) to allow user to play with detection parameters or click on center
self.centerFiducialSelectionNode = slicer.mrmlScene.GetNodeByID(slicer.app.layoutManager().sliceWidget('Red').sliceLogic().GetSliceCompositeNode().GetBackgroundVolumeID())
self.copyNode = slicer.mrmlScene.CopyNode(self.centerFiducialSelectionNode)
imData = vtk.vtkImageData()
imData.DeepCopy(self.centerFiducialSelectionNode.GetImageData())
self.copyNode.SetAndObserveImageData(imData)
self.copyNode.SetName('FrozenImage')
slicer.app.layoutManager().sliceWidget('Red').sliceLogic().GetSliceCompositeNode().SetBackgroundVolumeID(self.copyNode.GetID())
# Initiate fiducial selection
self.markupsNode = slicer.vtkMRMLMarkupsFiducialNode()
slicer.mrmlScene.AddNode(self.markupsNode)
self.markupsNode.SetName('SphereCenter')
self.markupsLogic.SetActiveListID(self.markupsNode)
self.markupsLogic.StartPlaceMode(False)
self.pointModifiedObserverTag = self.markupsNode.AddObserver(slicer.vtkMRMLMarkupsNode.PointModifiedEvent, self.onPointModified)
# Disable input changing while capture is active
self.inputsContainer.setEnabled(False)
self.resetPtLButton.setEnabled(False)
self.isManualCapturing = True
self.manualButton.setText('Cancel')
@vtk.calldata_type(vtk.VTK_INT)
def onPointModified(self, caller, event, callData):
if callData is None:
            return
if self.markupsNode.GetNthControlPointPositionStatus(callData) == slicer.vtkMRMLMarkupsNode.PositionDefined:
self.endManualCapturing()
# Calculate point and line pair
arr = [0, 0, 0]
self.markupsNode.GetNthControlPointPosition(callData, arr)
point = np.zeros((1, 1, 2), dtype=np.float64)
point[0, 0, 0] = abs(arr[0])
point[0, 0, 1] = abs(arr[1])
# Get PinholeCamera parameters
mtx = PinholeCameraCalibrationWidget.vtk3x3ToNumpy(self.videoCameraSelector.currentNode().GetIntrinsicMatrix())
if self.videoCameraSelector.currentNode().GetNumberOfDistortionCoefficients() != 0:
dist = np.asarray(np.zeros((1, self.videoCameraSelector.currentNode().GetNumberOfDistortionCoefficients()), dtype=np.float64))
for i in range(0, self.videoCameraSelector.currentNode().GetNumberOfDistortionCoefficients()):
dist[0, i] = self.videoCameraSelector.currentNode().GetDistortionCoefficientValue(i)
else:
dist = np.asarray([], dtype=np.float64)
tip_cam = [self.stylusTipToPinholeCamera.GetElement(0, 3), self.stylusTipToPinholeCamera.GetElement(1, 3), self.stylusTipToPinholeCamera.GetElement(2, 3)]
# Origin - defined in camera, typically 0,0,0
origin_sen = np.asarray( | np.zeros((3, 1), dtype=np.float64) | numpy.zeros |
import numpy as np
class ActHeatSeeking:
def __init__(self, actionSpace, lowerBoundAngle, upperBoundAngle, calculateAngle):
self.actionSpace = actionSpace
self.lowerBoundAngle = lowerBoundAngle
self.upperBoundAngle = upperBoundAngle
self.calculateAngle = calculateAngle
def __call__(self, heatSeekingDirection):
heatActionAngle = {mvmtVector: self.calculateAngle(heatSeekingDirection, mvmtVector)
for mvmtVector in self.actionSpace}
angleWithinRange = lambda angle: self.lowerBoundAngle <= angle < self.upperBoundAngle
movementAnglePair = zip(self.actionSpace, heatActionAngle.values())
angleFilter = {movement: angleWithinRange(angle) for movement, angle in movementAnglePair}
chosenActions = [action for action, index in zip(angleFilter.keys(), angleFilter.values()) if index]
unchosenFilter = [action in chosenActions for action in self.actionSpace]
unchosenActions = [action for action, index in zip(self.actionSpace, unchosenFilter) if not index]
return [chosenActions, unchosenActions]
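# ActHeatSeeking partitions the action space by the angle between each movement vector
# and the heat-seeking direction: actions whose angle lies in [lowerBoundAngle, upperBoundAngle)
# are returned as "chosen", the remaining actions as "unchosen".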
class HeatSeekingPolicy:
def __init__(self, rationalityParam, actHeatSeeking):
self.rationalityParam = rationalityParam
self.actHeatSeeking = actHeatSeeking
def __call__(self, heatSeekingDirection):
chosenActions, unchosenActions = self.actHeatSeeking(heatSeekingDirection)
chosenActionsLik = {action: self.rationalityParam / len(chosenActions) for action in chosenActions}
unchosenActionsLik = {action: (1 - self.rationalityParam) / len(unchosenActions) for action in unchosenActions}
heatSeekingActionLik = {**chosenActionsLik, **unchosenActionsLik}
return heatSeekingActionLik
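# HeatSeekingPolicy spreads probability mass rationalityParam uniformly over the chosen
# actions and the remaining 1 - rationalityParam over the unchosen ones, e.g.
# rationalityParam = 0.9 with 2 chosen and 2 unchosen actions gives a likelihood of
# 0.45 for each chosen action and 0.05 for each unchosen one.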
class WolfPolicy:
def __init__(self, getAgentPosition, heatSeekingPolicy):
self.getAgentPosition = getAgentPosition
self.heatSeekingPolicy = heatSeekingPolicy
def __call__(self, mind, state, allAgentsAction):
wolfID = mind.index('wolf')
sheepID = mind.index('sheep')
wolfPos = self.getAgentPosition(wolfID, state)
sheepPos = self.getAgentPosition(sheepID, state)
heatSeekingDirection = | np.array(sheepPos) | numpy.array |
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
import tensorflow as tf
from tensorflow.keras.preprocessing.sequence import pad_sequences
from data_lit import Data_Augmentation
import numpy as np
#physical_devices = tf.config.list_physical_devices('GPU')
#tf.config.experimental.set_memory_growth(physical_devices[0], True)
class Initializer:
def __init__(self, units, train_tokenizer, max_length_train, label_tokenizer, encoder, decoder):
self.data = Data_Augmentation()
self.train_tokenizer = train_tokenizer
        self.max_len = max_length_train
self.units = units
self.label_tokenizer = label_tokenizer
self.enc = encoder
self.dec = decoder
# Remove the <start> and <end> tags from the sentences
def Expand(self, sentence):
return sentence.split("<start>")[-1].split("<end>")[0]
# proceed for real time prediction.
'''
sentence: is the sentence given by the chatbot user
'''
def test(self, sentence):
sentence = self.data.preprocess_sentence(sentence)
whole = [] # collect the " " split sentence words
for i in sentence.split(' '):
            # a KeyError is raised if a user word is missing from the training vocabulary; return a fallback reply
try:
self.train_tokenizer.word_index[i]
except Exception as e:
return('Please say it clearly')
whole.append(self.train_tokenizer.word_index[i])
sentence = pad_sequences([whole], maxlen=self.max_len, padding='post')
sentence = tf.convert_to_tensor(sentence)
enc_hidden_start = [tf.zeros((1, self.units))] # initial hidden state provide to the encoder
enc_hidden, enc_output = self.enc(sentence, enc_hidden_start)
dec_output = enc_output
dec_input = tf.expand_dims([self.label_tokenizer.word_index['<start>']], 0)
answer = '' # store the answer string
# loop for predict word by word from the decoder
for i in range(1, self.max_len):
pred, dec_output, attention_weight = self.dec(dec_input, dec_output, enc_hidden)
answer += self.label_tokenizer.index_word[ | np.argmax(pred[0]) | numpy.argmax |
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
import tensorflow as tf
import numpy as np
from reco_utils.recommender.deeprec.IO.iterator import BaseIterator
__all__ = ["DKNTextIterator"]
class DKNTextIterator(BaseIterator):
"""Data loader for the DKN model.
DKN requires a special type of data format, where each instance contains a label, the candidate news article,
and user's clicked news article. Articles are represented by title words and title entities. Words and entities
are aligned.
Iterator will not load the whole data into memory. Instead, it loads data into memory
per mini-batch, so that large files can be used as input data.
"""
def __init__(self, hparams, graph, col_spliter=" ", ID_spliter="%"):
"""Initialize an iterator. Create necessary placeholders for the model.
Args:
            hparams (obj): Global hyper-parameters. Some key settings such as #_feature and #_field are there.
graph (obj): the running graph. All created placeholder will be added to this graph.
col_spliter (str): column spliter in one line.
ID_spliter (str): ID spliter in one line.
"""
self.col_spliter = col_spliter
self.ID_spliter = ID_spliter
self.batch_size = hparams.batch_size
self.doc_size = hparams.doc_size
self.graph = graph
with self.graph.as_default():
self.labels = tf.placeholder(tf.float32, [None, 1], name="label")
self.candidate_news_index_batch = tf.placeholder(
tf.int64, [self.batch_size, self.doc_size], name="candidate_news_index"
)
self.candidate_news_val_batch = tf.placeholder(
tf.int64, [self.batch_size, self.doc_size], name="candidate_news_val"
)
self.click_news_indices = tf.placeholder(
tf.int64, [None, 2], name="click_news_indices"
)
self.click_news_values = tf.placeholder(
tf.int64, [None], name="click_news_values"
)
self.click_news_weights = tf.placeholder(
tf.float32, [None], name="click_news_weights"
)
self.click_news_shape = tf.placeholder(
tf.int64, [None], name="dnn_feat_shape"
)
self.candidate_news_entity_index_batch = tf.placeholder(
tf.int64,
[self.batch_size, self.doc_size],
name="candidate_news_entity_index",
)
self.click_news_entity_values = tf.placeholder(
tf.int64, [None], name="click_news_entity"
)
def parser_one_line(self, line):
"""Parse one string line into feature values.
Args:
line (str): a string indicating one instance
Returns:
list: Parsed results including label, candidate_news_index, candidate_news_val, click_news_index, click_news_val,
candidate_news_entity_index, click_news_entity_index, impression_id
"""
impression_id = None
words = line.strip().split(self.ID_spliter)
if len(words) == 2:
impression_id = words[1].strip()
cols = words[0].strip().split(self.col_spliter)
label = float(cols[0])
candidate_news_index = []
candidate_news_val = []
click_news_index = []
click_news_val = []
candidate_news_entity_index = []
click_news_entity_index = []
for news in cols[1:]:
tokens = news.split(":")
if tokens[0] == "CandidateNews":
# word index start by 0
for item in tokens[1].split(","):
candidate_news_index.append(int(item))
candidate_news_val.append(float(1))
elif "clickedNews" in tokens[0]:
for item in tokens[1].split(","):
click_news_index.append(int(item))
click_news_val.append(float(1))
elif tokens[0] == "entity":
for item in tokens[1].split(","):
candidate_news_entity_index.append(int(item))
elif "entity" in tokens[0]:
for item in tokens[1].split(","):
click_news_entity_index.append(int(item))
else:
raise ValueError("data format is wrong")
return (
label,
candidate_news_index,
candidate_news_val,
click_news_index,
click_news_val,
candidate_news_entity_index,
click_news_entity_index,
impression_id,
)
def load_data_from_file(self, infile):
"""Read and parse data from a file.
Args:
infile (str): text input file. Each line in this file is an instance.
Returns:
obj: An iterator that will yields parsed results, in the format of graph feed_dict.
"""
candidate_news_index_batch = []
candidate_news_val_batch = []
click_news_index_batch = []
click_news_val_batch = []
candidate_news_entity_index_batch = []
click_news_entity_index_batch = []
label_list = []
impression_id_list = []
cnt = 0
with tf.gfile.GFile(infile, "r") as rd:
while True:
line = rd.readline()
if not line:
break
label, candidate_news_index, candidate_news_val, click_news_index, click_news_val, candidate_news_entity_index, click_news_entity_index, impression_id = self.parser_one_line(
line
)
candidate_news_index_batch.append(candidate_news_index)
candidate_news_val_batch.append(candidate_news_val)
click_news_index_batch.append(click_news_index)
click_news_val_batch.append(click_news_val)
candidate_news_entity_index_batch.append(candidate_news_entity_index)
click_news_entity_index_batch.append(click_news_entity_index)
label_list.append(label)
impression_id_list.append(impression_id)
cnt += 1
if cnt >= self.batch_size:
res = self._convert_data(
label_list,
candidate_news_index_batch,
candidate_news_val_batch,
click_news_index_batch,
click_news_val_batch,
candidate_news_entity_index_batch,
click_news_entity_index_batch,
)
yield self.gen_feed_dict(res)
candidate_news_index_batch = []
candidate_news_val_batch = []
click_news_index_batch = []
click_news_val_batch = []
candidate_news_entity_index_batch = []
click_news_entity_index_batch = []
label_list = []
impression_id_list = []
cnt = 0
def _convert_data(
self,
label_list,
candidate_news_index_batch,
candidate_news_val_batch,
click_news_index_batch,
click_news_val_batch,
candidate_news_entity_index_batch,
click_news_entity_index_batch,
):
"""Convert data into numpy arrays that are good for further model operation.
Args:
label_list (list): a list of ground-truth labels.
candidate_news_index_batch (list): the candidate news article's words indices
candidate_news_val_batch (list): the candidate news article's word values. For now the values are always 1.0
click_news_index_batch (list): words indices for user's clicked news articles
click_news_val_batch (list): words values for user's clicked news articles. For now the values are always 1.0
candidate_news_entity_index_batch (list): the candidate news article's entities indices
click_news_entity_index_batch (list): the user's clicked news article's entities indices
Returns:
dict: A dictionary, contains multiple numpy arrays that are convenient for further operation.
"""
instance_cnt = len(label_list)
click_news_indices = []
click_news_values = []
click_news_weights = []
click_news_shape = [instance_cnt, -1]
click_news_entity_values = []
batch_max_len = 0
for i in range(instance_cnt):
m = len(click_news_index_batch[i])
batch_max_len = m if m > batch_max_len else batch_max_len
for j in range(m):
click_news_indices.append([i, j])
click_news_values.append(click_news_index_batch[i][j])
click_news_weights.append(click_news_val_batch[i][j])
click_news_entity_values.append(click_news_entity_index_batch[i][j])
click_news_shape[1] = batch_max_len
res = {}
res["labels"] = np.asarray([[label] for label in label_list], dtype=np.float32)
res["candidate_news_index_batch"] = np.asarray(
candidate_news_index_batch, dtype=np.int64
)
res["candidate_news_val_batch"] = np.asarray(
candidate_news_val_batch, dtype=np.float32
)
res["click_news_indices"] = np.asarray(click_news_indices, dtype=np.int64)
res["click_news_values"] = np.asarray(click_news_values, dtype=np.int64)
res["click_news_weights"] = | np.asarray(click_news_weights, dtype=np.float32) | numpy.asarray |
"""Tests for graphmode_tensornetwork."""
import tensorflow as tf
import numpy as np
import pytest
from tensornetwork.backends.numpy import numpy_backend
np_randn_dtypes = [np.float32, np.float16, np.float64]
np_dtypes = np_randn_dtypes + [np.complex64, np.complex128]
def test_tensordot():
backend = numpy_backend.NumPyBackend()
a = backend.convert_to_tensor(2 * np.ones((2, 3, 4)))
b = backend.convert_to_tensor(np.ones((2, 3, 4)))
actual = backend.tensordot(a, b, ((1, 2), (1, 2)))
expected = np.array([[24.0, 24.0], [24.0, 24.0]])
np.testing.assert_allclose(expected, actual)
def test_reshape():
backend = numpy_backend.NumPyBackend()
a = backend.convert_to_tensor(np.ones((2, 3, 4)))
actual = backend.shape_tuple(backend.reshape(a, np.array((6, 4, 1))))
assert actual == (6, 4, 1)
def test_transpose():
backend = numpy_backend.NumPyBackend()
a = backend.convert_to_tensor(
np.array([[[1., 2.], [3., 4.]], [[5., 6.], [7., 8.]]]))
actual = backend.transpose(a, [2, 0, 1])
expected = np.array([[[1.0, 3.0], [5.0, 7.0]], [[2.0, 4.0], [6.0, 8.0]]])
np.testing.assert_allclose(expected, actual)
def test_shape_concat():
backend = numpy_backend.NumPyBackend()
a = backend.convert_to_tensor(2 * np.ones((1, 3, 1)))
b = backend.convert_to_tensor(np.ones((1, 2, 1)))
expected = backend.shape_concat((a, b), axis=1)
actual = np.array([[[2.0], [2.0], [2.0], [1.0], [1.0]]])
np.testing.assert_allclose(expected, actual)
def test_shape_tensor():
backend = numpy_backend.NumPyBackend()
a = backend.convert_to_tensor(np.ones([2, 3, 4]))
assert isinstance(backend.shape_tensor(a), tuple)
actual = backend.shape_tensor(a)
expected = np.array([2, 3, 4])
np.testing.assert_allclose(expected, actual)
def test_shape_tuple():
backend = numpy_backend.NumPyBackend()
a = backend.convert_to_tensor(np.ones([2, 3, 4]))
actual = backend.shape_tuple(a)
assert actual == (2, 3, 4)
def test_shape_prod():
backend = numpy_backend.NumPyBackend()
a = backend.convert_to_tensor(2 * np.ones([1, 2, 3, 4]))
actual = np.array(backend.shape_prod(a))
assert actual == 2**24
def test_sqrt():
backend = numpy_backend.NumPyBackend()
a = backend.convert_to_tensor(np.array([4., 9.]))
actual = backend.sqrt(a)
expected = np.array([2, 3])
np.testing.assert_allclose(expected, actual)
def test_diag():
backend = numpy_backend.NumPyBackend()
a = backend.convert_to_tensor(np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]))
with pytest.raises(TypeError):
assert backend.diag(a)
b = backend.convert_to_tensor(np.array([1.0, 2, 3]))
actual = backend.diag(b)
expected = np.array([[1.0, 0.0, 0.0], [0.0, 2.0, 0.0], [0.0, 0.0, 3.0]])
np.testing.assert_allclose(expected, actual)
def test_convert_to_tensor():
backend = numpy_backend.NumPyBackend()
array = np.ones((2, 3, 4))
actual = backend.convert_to_tensor(array)
expected = np.ones((2, 3, 4))
assert isinstance(actual, type(expected))
np.testing.assert_allclose(expected, actual)
def test_trace():
backend = numpy_backend.NumPyBackend()
a = backend.convert_to_tensor( | np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]) | numpy.array |
#!/usr/bin/python
'''
This function generates the data for a constant-velocity 2D motion example.
The motion is divided in 5 segments, passing through the points:
- (200,-100) to (100,100)
- (100,100) to (100,300)
- (100,300) to (-200,300)
- (-200,300) to (-200,-200)
- (-200,-200) to (0,0)
Q are the elements of the process noise diagonal covariance matrix (only for position)
R are the elements of the measurement noise diagonal covariance matrix
z is the [distance; orientation] data measured from the origin
true_data are the true values of the position and speed
example of use
python generate_data_2D.py 10 10 10 1e-3
'''
import sys
import numpy as np
import matplotlib.pyplot as plt
Q1 = float(sys.argv[1])
Q2 = float(sys.argv[2])
R1 = float(sys.argv[3])
R2 = float(sys.argv[4])
nSegments = 5
points = np.array([[200, -100],
[100, 100],
[100, 300],
[-200, 300],
[-200, -200],
[0,0]], dtype=float)
dp = np.diff(points, axis=0)
dist = dp**2
dist = np.round(np.sqrt(dist[:,0] + dist[:,1])) # distance
ang = np.arctan2(dp[:, 1], dp[:, 0]) # orientation
ang = np.array([ang]).T
NumberOfDataPoints = int(np.sum(dist))
T = 0.5 # [s] Sampling time interval
v_set = 2 * np.hstack((np.cos(ang), np.sin(ang)))
idx = 0
v = np.kron(np.ones((int(dist[idx]), 1)), v_set[idx, :])
for idx in range(1, nSegments):
v = np.vstack((v, np.kron(np.ones((int(dist[idx]), 1)), v_set[idx, :])))
# ==motion generation====================================================
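# State vector layout is [x, vx, y, vy]: A keeps the current positions, B injects the
# piecewise-constant segment velocities (scaled by the sampling time T for the position rows),
# and G maps the process noise onto position and velocity for each axis.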
A = np.array([[1, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 1, 0],
[0, 0, 0, 0]], dtype=float)
B = np.array([[T, 0],
[1, 0],
[0, T],
[0, 1]], dtype=float)
G = np.array([[T**2/2, 0],
[T, 0],
[0, T**2/2],
[0, T]], dtype=float)
w_x = np.random.normal(0.0, np.sqrt(Q1), NumberOfDataPoints) # noise in x-direction
w_y = np.random.normal(0.0, np.sqrt(Q2), NumberOfDataPoints) # noise in y-direction
w = np.hstack((np.array([w_x]).T, np.array([w_y]).T))  # stack x- and y-direction noise as columns
x = | np.zeros((NumberOfDataPoints, 4)) | numpy.zeros |
# Copyright 2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
'''dataset'''
import os
import random
import cv2
import numpy as np
import mindspore
import mindspore.dataset as ds
import mindspore.dataset.vision.c_transforms as C2
import mindspore.dataset.transforms.c_transforms as C
mindspore.set_seed(1)
def prepare_image_cv2(im):
im -= np.array((104.00698793, 116.66876762, 122.67891434))
im = np.transpose(im, (2, 0, 1)) # (H x W x C) to (C x H x W)
return im
def prepare_label_cv2(im):
im = np.transpose(im, (2, 0, 1)) # (H x W x C) to (C x H x W)
return im
def get_imageId_from_fileName(filename, id_iter):
"""Get imageID from fileName if fileName is int, else return id_iter."""
filename = os.path.splitext(filename)[0]
if filename.isdigit():
return int(filename)
return id_iter
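# rand_crop_pad randomly rescales an image/label pair by a factor in [1.0, 1.5], then crops
# (or pads) it to a fixed 481x321 patch, returning the patch, the label and a mask that
# marks the valid (non-padded) region.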
def rand_crop_pad(data, label):
scale1 = 1.0
scale2 = 1.5
img_w = 481
img_h = 321
sc = np.random.uniform(scale1, scale2)
new_h, new_w = int(sc * data.shape[0]), int(sc * data.shape[1])
data = cv2.resize(data, (new_w, new_h), interpolation=cv2.INTER_CUBIC)
label = cv2.resize(label, (new_w, new_h), interpolation=cv2.INTER_NEAREST)
mask = np.ones((1, new_h, new_w), np.float32)
if data.shape[1] < img_w:
w = img_w - data.shape[1]
data = np.pad(data, ((0, 0), (0, w), (0, 0)), 'constant', constant_values=(0, 0))
label = np.pad(label, ((0, 0), (0, w), (0, 0)), 'constant', constant_values=(1, 1))
        mask = np.pad(mask, ((0, 0), (0, 0), (0, w)), 'constant', constant_values=(0, 0))
width1 = 0
width2 = img_w
else:
width1 = random.randint(0, data.shape[1] - img_w)
width2 = width1 + img_w
if data.shape[0] < img_h:
h = img_h - data.shape[0]
data = np.pad(data, ((0, h), (0, 0), (0, 0)), 'constant', constant_values=(0, 0))
label = np.pad(label, ((0, h), (0, 0), (0, 0)), 'constant', constant_values=(1, 1))
        mask = np.pad(mask, ((0, 0), (0, h), (0, 0)), 'constant', constant_values=(0, 0))
height1 = 0
height2 = img_h
else:
height1 = random.randint(0, data.shape[0] - img_h)
height2 = height1 + img_h
data = data[height1:height2, width1:width2, :]
label = label[height1:height2, width1:width2, :]
mask = mask[:, height1:height2, width1:width2]
return data, label, mask
def create_dataset(data_path, is_training=True, is_shuffle=True, batch_size=1, repeat_size=1,
device_num=1, rank=0, num_parallel_workers=24):
"""create dataset for train or test"""
dataset = HED_Dataset_t(data_path, is_training)
print("total patch numbers per epoch", len(dataset))
if is_training:
trans1 = [
C2.Normalize(mean=[104.00698793, 116.66876762, 122.67891434], std=[1, 1, 1]),
C2.HWC2CHW(),
C.TypeCast(mindspore.float32)
]
dataloader = ds.GeneratorDataset(dataset, ['image', 'label', 'mask'], num_parallel_workers=num_parallel_workers,
shuffle=is_shuffle, num_shards=device_num, shard_id=rank)
# apply map operations on images
dataloader = dataloader.map(input_columns='image', operations=trans1,
num_parallel_workers=num_parallel_workers)
dataloader = dataloader.map(input_columns='label', operations=C.TypeCast(mindspore.float32),
num_parallel_workers=num_parallel_workers)
dataloader = dataloader.map(input_columns='mask', operations=C.TypeCast(mindspore.float32),
num_parallel_workers=num_parallel_workers)
dataloader = dataloader.batch(batch_size, drop_remainder=True)
else:
dataset = HED_Dataset_e(data_path, is_training)
dataloader = ds.GeneratorDataset(dataset, ['test', 'label'], num_parallel_workers=8, shuffle=is_shuffle)
dataloader = dataloader.map(input_columns='test', operations=C.TypeCast(mindspore.float32))
dataloader = dataloader.map(input_columns='label', operations=C.TypeCast(mindspore.float32))
dataloader = dataloader.batch(batch_size, drop_remainder=False)
# apply DatasetOps
dataloader = dataloader.repeat(repeat_size)
return dataloader
class HED_Dataset_t():
'''hed_dataset'''
def __init__(self, dataset_path, is_training=True):
if not os.path.exists(dataset_path):
raise RuntimeError("the input image dir {} is invalid!".format(dataset_path))
self.dataset_path = dataset_path
self.is_training = is_training
with open(self.dataset_path, 'r') as data_f:
self.filelist = data_f.readlines()
def __getitem__(self, index):
if self.is_training:
img_file, lb_file = self.filelist[index].split()
lb = np.array(cv2.imread(lb_file), dtype=np.float32)
lb[lb < 127.5] = 0.0
lb[lb >= 127.5] = 1.0
img_file = np.array(cv2.imread(img_file), dtype=np.float32)
if img_file.shape[0] > img_file.shape[1]:
img_file = np.rot90(img_file, 1).copy()
if lb.shape[0] > lb.shape[1]:
lb = np.rot90(lb, 1).copy()
img_file, lb, mask = rand_crop_pad(img_file, lb)
if lb.ndim == 3:
lb = np.squeeze(lb[:, :, 0])
assert lb.ndim == 2
lb = lb[np.newaxis, :, :]
return img_file, lb, mask
def __len__(self):
return len(self.filelist)
class HED_Dataset_e():
'''hed_dataset'''
def __init__(self, dataset_path, is_training=True):
if not os.path.exists(dataset_path):
raise RuntimeError("the input image dir {} is invalid!".format(dataset_path))
self.dataset_path = dataset_path
self.is_training = is_training
with open(self.dataset_path, 'r') as data_f:
self.filelist = data_f.readlines()
def __getitem__(self, index):
if not self.is_training:
img_file, lb = self.filelist[index].split()
img_file = np.array(cv2.imread(img_file), dtype=np.float32)
if img_file.shape[0] > img_file.shape[1]:
img_file = | np.rot90(img_file, 1) | numpy.rot90 |
#!/usr/bin/env python3
# -*- coding=utf-8 -*-
# Project: dspus
# injections.py
# Created by @wenchieh on <1/12/2020>
__author__ = 'wenchieh'
# sys
from random import sample
# third-party libs
import numpy as np
from scipy import linalg
from scipy.sparse import *
import random  # stdlib random, imported after the wildcard so scipy.sparse.random does not shadow it
# parameters in injection -
# spike(M, N, Dspike, C),
# gap(M, N, D0, Dgap, C)
def injectSpike(Nall, M, N, Dspike, C):
Nstart, i = Nall, Nall
injectEs = list()
injectUs, injectVs = range(Nall, Nall + M, 1), range(Nall, Nall + N, 1)
for m in range(M):
# standard normal distribution
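        # (Marsaglia polar method: draw v1, v2 uniform in [-1, 1) until w = v1^2 + v2^2 <= 1,
        #  then v1 * sqrt(-2 ln(w) / w) is a standard normal sample used to jitter the out-degree)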
v1, v2, w = 0.0, 0.0, 2.0
while w > 1.0:
v1 = random.random() * 2.0 - 1.0
v2 = random.random() * 2.0 - 1.0
w = v1 * v1 + v2 * v2
outd = int(Dspike + v1 * np.sqrt(-2.0 * np.log(w) / w))
if outd < 0: outd = Dspike
outdC = int(outd * C)
outdN = outd - outdC
Ns, Cs = set(), set()
for d in range(outdN):
            Ns.add(Nstart + M + np.random.randint(N))
for d in range(outdC):
            Cs.add(np.random.randint(Nall))
for j in Ns:
injectEs.append([i, j])
for j in Cs:
injectEs.append([i, j])
i += 1
return len(injectEs), injectEs, injectUs, injectVs
def injectGap(Nall, M, N, D0, Dgap, C):
injectEs = list()
injectUs, injectVs = range(Nall, Nall + M, 1), range(Nall, Nall + N, 1)
Nstart, i = Nall, Nall
Md = int(1.0 * M / (Dgap - D0 + 1))
for outd in range(D0, Dgap, 1):
for m in range(Md):
outdC = int(outd * C)
outdN = outd - outdC
Ns, Cs = set(), set()
for d in range(outdN):
                Ns.add(Nstart + M + np.random.randint(N))
for d in range(outdC):
                Cs.add(np.random.randint(Nall))
for j in Ns:
injectEs.append([i, j])
for j in Cs:
injectEs.append([i, j])
i += 1
return len(injectEs), injectEs, injectUs, injectVs
def genEvenDenseBlock(A, B, p):
m = []
for i in range(A):
a = np.random.binomial(1, p, B)
m.append(a)
return np.array(m)
def genHyperbolaDenseBlock(A, B, alpha, tau):
'this is from hyperbolic paper: i^\alpha * j^\alpha > \tau'
m = np.empty([A, B], dtype=int)
for i in range(A):
for j in range(B):
if (i+1)**alpha * (j+1)**alpha > tau:
m[i,j] = 1
else:
m[i,j] = 0
return m
def genDiHyperRectBlocks(A1, B1, A2, B2, alpha=-0.5, tau=None, p=1):
if tau is None:
tau = A1**alpha * B1**alpha
m1 = genEvenDenseBlock(A1, B1, p=p)
m2 = genHyperbolaDenseBlock(A2, B2, alpha, tau)
M = linalg.block_diag(m1, m2)
return M
def addnosie(M, A, B, p, black=True, A0=0, B0=0):
v = 1 if black else 0
for i in range(A-A0):
a = np.random.binomial(1, p, B-B0)
for j in a.nonzero()[0]:
M[A0+i,B0+j]=v
return M
# inject a clique of size m0 by n0 with density p.
# the last parameter `testIdx` determines the camouflage type.
# testIdx = 1: random camouflage, with camouflage density set so each fraudster outputs approximately equal number of fraudulent and camouflage edges
# testIdx = 2: random camouflage, with double the density as in the previous setting
# testIdx = 3: biased camouflage, more likely to add camouflage to high degree column
#
# def injectCliqueCamo(M, m0, n0, p, testIdx):
# (m,n) = M.shape
# M2 = M.copy().tolil()
#
# colSum = np.squeeze(M2.sum(axis = 0).A)
# colSumPart = colSum[n0:n]
# colSumPartPro = np.int_(colSumPart)
# colIdx = np.arange(n0, n, 1)
# population = np.repeat(colIdx, colSumPartPro, axis = 0)
#
# for i in range(m0):
# # inject clique
# for j in range(n0):
# if random.random() < p:
# M2[i,j] = 1
# # inject camo
# if testIdx == 1:
# thres = p * n0 / (n - n0)
# for j in range(n0, n):
# if random.random() < thres:
# M2[i,j] = 1
# if testIdx == 2:
# thres = 2 * p * n0 / (n - n0)
# for j in range(n0, n):
# if random.random() < thres:
# M2[i,j] = 1
# # biased camo
# if testIdx == 3:
# colRplmt = random.sample(population, int(n0 * p))
# M2[i,colRplmt] = 1
#
# return M2.tocsc()
# inject a clique of size m0 by n0 with density p.
# the last parameter `testIdx` determines the camouflage type.
# testIdx = 1: random camouflage, with camouflage density set so each fraudster outputs approximately equal number of fraudulent and camouflage edges
# testIdx = 2: random camouflage, with double the density as in the precious setting
# testIdx = 3: biased camouflage, more likely to add camouflage to high degree column
def injectCliqueCamo(M, m0, n0, p, testIdx):
(m, n) = M.shape
injectEs = list()
injectUs, injectVs = np.arange(m0), np.arange(n0)
if testIdx in [3, 4]: # popular biased camouflage
colSum = np.squeeze(M.sum(axis = 0).A)
colSumPart = colSum[n0:n]
colSumPartPro = np.int_(colSumPart)
colIdx = np.arange(n0, n, 1)
population = np.repeat(colIdx, colSumPartPro, axis = 0)
for i in range(m0):
# inject clique
for j in range(n0):
if np.random.random() < p:
injectEs.append([i,j])
if testIdx == 0:
continue
# inject random camo
if testIdx == 1:
thres = p * n0 / (n - n0)
for j in range(n0, n):
if np.random.random() < thres:
injectEs.append([i,j])
if testIdx == 2:
thres = 2 * p * n0 / (n - n0)
for j in range(n0, n):
if np.random.random() < thres:
injectEs.append([i,j])
# biased camo
if testIdx == 3:
colRplmt = sample(population, int(n0 * p))
for j in colRplmt:
injectEs.append([i,j])
if testIdx == 4:
colRplmt = sample(population, int(2* n0 * p))
for j in colRplmt:
injectEs.append([i,j])
return len(injectEs), injectEs, injectUs, injectVs
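# Minimal usage sketch (hypothetical sizes): inject a 50x50 fraud block with density 0.6 and
# random camouflage (testIdx=1) into a scipy.sparse matrix M, e.g.
#   nE, edges, rows, cols = injectCliqueCamo(M, 50, 50, 0.6, 1)
# testIdx=3/4 additionally read M so the camouflage can be biased towards popular columns.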
# inject appended m0 by n0 camouflages to background graph M (cpy & paste patterns)
# add new nodes and edges
def injectAppendCPsCamo(M, m0, n0, p, camos):
(m, n) = M.shape
injectEs = list()
injectUs, injectVs = np.arange(m0) + m, np.arange(n0) + n
col_sum = np.squeeze(M.sum(axis = 0).A)
col_sumpro = np.int_(col_sum)
col_idx = np.arange(n)
pops = np.repeat(col_idx, col_sumpro, axis = 0)
# inject dependent block
for i in injectUs:
for j in injectVs:
pe = random.random()
if pe < p: injectEs.append([i, j])
if camos == 0: pass # no camo
if camos == 1:
# random camo
thres = p * n0 / (n - n0)
for j in range(n):
pe = random.random()
if pe < thres: injectEs.append([i, j])
if camos == 2:
# popular biased camo
col_pops = random.sample(pops, int(n0 * p))
for j in col_pops: injectEs.append([i, j])
return len(injectEs), injectEs, injectUs, injectVs
# pick nodes in original graph and add new edges
def injectPromotCamo(M, ms, ns, p, camos):
(m, n) = M.shape
M2 = M.copy()
m0, n0 = len(ms), len(ns)
injectEs = list()
injectUs, injectVs = np.asarray(ms, dtype=int), np.asarray(ns, dtype=int)
if camos in [3, 4, 5]:
col_sum = np.squeeze(M2.sum(axis = 0).A)
col_idx = np.setdiff1d(np.arange(n, dtype=int), injectVs)
col_sumpart = col_sum[col_idx]
pops = np.repeat(col_idx, np.int_(col_sumpart), axis = 0)
for i in injectUs:
# inject clique
for j in injectVs:
if random.random() < p and M2[i, j] == 0:
M2[i, j] = 1
injectEs.append([i, j])
if camos == 0:
continue
if camos == 1:
# random camo
thres = p * n0 / (n - n0)
for j in range(n):
pe = random.random()
if pe < thres and M2[i, j] == 0:
M2[i, j] = 1
injectEs.append([i, j])
if camos == 2:
# random camo
thres = 2 * p * n0 / (n - n0)
for j in range(n):
pe = random.random()
if pe < thres and M2[i, j] == 0:
M2[i, j] = 1
injectEs.append([i, j])
if camos in [3, 4, 5]:
# popular biased camo
n0p = 0
if camos == 4: n0p = 0.5 * n0 *p
elif camos == 3: n0p = n0 * p
elif camos == 5: n0p = 2 * n0 * p
col_pops = random.sample(pops, int(n0p))
for j in col_pops:
if M2[i, j] == 0:
M2[i, j] = 1
injectEs.append([i, j])
return M2, injectEs, injectUs, injectVs
def injectFraudConstObjs(M, ms, ns, p, testIdx):
M2 = M.copy()
injectEs = list()
injectUs = | np.asarray(ms, dtype=int) | numpy.asarray |
import numpy as np
from numpy import genfromtxt
from tensorflow.python.saved_model import builder as saved_model_builder
from pathlib import Path
import tensorflow as tf
import pickle
import tflearn
import codecs
import os
import time
modelPath="G:/work/nlp/datasets/yelp/yelp_dataset_challenge_round9/tflearn_model"
basePath="G:/work/nlp/datasets/yelp/yelp_dataset_challenge_round9/split/";
def load_file(path, sentence_path, output_value):
data={};
print("Reading CSV : " + path);
inputs = genfromtxt(path, delimiter=' ');
num_rows = inputs.shape[0];
outputs = np.full(num_rows, output_value);
print("Reading Text : " + sentence_path);
file = codecs.open(sentence_path, 'r',encoding='utf-8');
lines=file.readlines();
data['inputs']=inputs;
data['outputs'] = outputs;
data['sentences'] = np.array(lines, dtype=object);
return data;
def transform_to_one_hot(values):
val_size = values.size;
val_max = values.max();
ret=np.zeros((val_size, val_max+1));
ret[np.arange(val_size),values] = 1
return ret;
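# e.g. transform_to_one_hot(np.array([0, 2])) returns [[1, 0, 0], [0, 0, 1]];
# the width of the one-hot matrix is values.max() + 1.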
def shuffle_data(data):
x=data['inputs'];
y=data['outputs'];
s=data['sentences'];
num_examples=x.shape[0];
indices = np.random.permutation(num_examples)
training_inputs = x[indices, :]
training_outputs = y[indices, :]
training_sentences = s[indices]
data['inputs']=training_inputs;
data['outputs']=training_outputs;
data['sentences'] = training_sentences;
x=data['test_inputs'];
y=data['test_outputs'];
s=data['test_sentences'];
num_examples = x.shape[0];
indices = np.random.permutation(num_examples)
test_inputs = x[indices, :]
test_outputs = y[indices, :]
test_sentences = s[indices]
data['test_inputs']=test_inputs;
data['test_outputs']=test_outputs;
data['test_sentences'] = test_sentences;
return data;
def load_data_from_csv():
data = {}
negative_file = basePath + "1_star_training.csv";
positive_file = basePath + "5_star_training.csv";
negative_sentence_file = basePath + "1_star_training.txt";
positive_sentence_file = basePath + "5_star_training.txt";
negative_data = load_file(negative_file,negative_sentence_file,0);
positive_data = load_file(positive_file,positive_sentence_file ,2);
test_negative_file = basePath + "1_star_test.csv";
test_positive_file = basePath + "5_star_test.csv";
test_negative_sentence_file = basePath + "1_star_test.txt";
test_positive_sentence_file = basePath + "5_star_test.txt";
test_negative_data = load_file(test_negative_file,test_negative_sentence_file,0);
test_positive_data = load_file(test_positive_file,test_positive_sentence_file ,2);
data['inputs'] = np.append(negative_data['inputs'],positive_data['inputs'],axis=0);
data['sentences'] = np.append(negative_data['sentences'],positive_data['sentences'],axis=0);
data['outputs'] = np.append(negative_data['outputs'], positive_data['outputs']);
data['outputs'] = transform_to_one_hot(data['outputs']);
data['test_inputs'] = np.append(test_negative_data['inputs'],test_positive_data['inputs'],axis=0);
data['test_sentences'] = | np.append(test_negative_data['sentences'],test_positive_data['sentences'],axis=0) | numpy.append |
#https://pythonbasics.org/webserver/
import os
import sys
import glob
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
from tensorflow import keras
import numpy as np
import base64
from http.server import BaseHTTPRequestHandler, HTTPServer
import time
hostName = "localhost"
serverPort = 8080
def identify_image(fn):
image = keras.preprocessing.image.load_img(fn, color_mode="grayscale")
input_arr = keras.preprocessing.image.img_to_array(image)
input_arr = np.array([input_arr])
input_arr = | np.abs(input_arr - 255.0) | numpy.abs |
import cv2
import logging
import numpy as np
import nibabel as nib
from skimage.measure import label
from skimage.morphology import binary_closing, cube
from fetal_brain_mask.model import Unet
logger = logging.getLogger(__name__)
class MaskingTool:
def __init__(self):
self.model = Unet()
def mask_tensor(self, data, smoothen=True):
# axes have to be switched from (256,256,x) to (x,256,256)
data = np.moveaxis(data, -1, 0)
# normalize each image slice
data = np.array([self.normalize_uint8(islice) for islice in data], dtype=np.uint16)
data = data[..., np.newaxis]
resize_needed = False
original_shape = (data.shape[2], data.shape[1])
if data.shape[1] != 256 or data.shape[2] != 256:
data = self.resize_data(data)
resize_needed = True
# do prediction
data = self.model.predict_mask(data)
if smoothen:
# it would be better for this to be put in its own plugin
data = binary_closing(np.squeeze(data), cube(2))
try:
labels = label(data)
data = (labels == np.argmax(np.bincount(labels.flat)[1:]) + 1).astype(np.uint16)
except Exception as e:
logger.error(e)
                logger.error('Failed to apply smoothing')
if resize_needed:
data = self.resize_data(data.astype(np.uint16), target=original_shape)
# remove extra dimension
data = np.squeeze(data)
# return result into shape (256,256, X)
data = | np.moveaxis(data, 0, -1) | numpy.moveaxis |
import copy
import datetime
import json
import math
import multiprocessing
import numpy as np
import os
import pandas as pd
import pydotplus
import random
import re
import time
from math import *
from sklearn import metrics
_CLUSTER_DATA = './bike_sharing_data/mydata'
RATEDATA = './bike_sharing_data/mydata/'
rateName = 'rental_return_rate_cluster_6_month_678_timedelta_5.json'
# STATION_STATUS = './station_status_by_id'
def getMonthCluster():
cluster = '6'
month = 678
return month, cluster
def getCluster():
with open(os.path.join(_CLUSTER_DATA, 'clusters.json'), 'r') as f:
clusters = json.load(f)
del clusters['5']['402']
del clusters['5']['491']
return clusters
def getRateData():
with open(os.path.join(RATEDATA, rateName), 'r') as f:
rateData = json.load(f)
return rateData
def getPositionAndStations_id():
clusters = getCluster()
month, cluster = getMonthCluster()
use_cluster = clusters[cluster]
stations_id = []
position = {}
for key, values in use_cluster.items():
stations_id.append(key)
position[key] = values['position']
return position, stations_id
def getInitialInfo():
month, cluster = getMonthCluster()
pattern2 = re.compile('^cluster_[0-9]+_')
filelist2 = os.listdir(_CLUSTER_DATA)
for filename in filelist2:
if filename == 'cluster_6_month_678_initialStationInfo.json':
cluster1 = filename.split('_')[1]
month1 = filename.split('_')[3]
if cluster1 == str(cluster) and month1 == str(month):
print(filename)
with open(os.path.join(_CLUSTER_DATA, filename), 'r') as f:
initialInfo = json.load(f)
return initialInfo
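# haversine_array returns the great-circle distance in km between two (lat, lng) points given
# in degrees; manhattan_distance below approximates street distance as the sum of the two
# axis-aligned haversine legs.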
def haversine_array(lat1, lng1, lat2, lng2):
lat1, lng1, lat2, lng2 = map(np.radians, (lat1, lng1, lat2, lng2))
# print lat1, lng1, lat2, lng2
a = lat1 - lat2
b = lng1 - lng2
s = 2 * asin(sqrt(pow(sin(a / 2), 2) + cos(lat1) * cos(lat2) * pow(sin(b / 2), 2)))
earth_radius = 6378.137
s = s * earth_radius
if s < 0:
return round(-s, 3)
else:
return round(s, 3)
def manhattan_distance(lat1, lng1, lat2, lng2):
a = haversine_array(lat1, lng1, lat1, lng2)
b = haversine_array(lat1, lng1, lat2, lng1)
return a + b
def getNeighbor(stations_id, position):
neighbor = {}
maxDis = 0
for station_id1 in stations_id:
nei = []
for station_id2 in stations_id:
d = manhattan_distance(position[str(station_id1)][0], position[str(station_id1)][1],
position[str(station_id2)][0], position[str(station_id2)][1])
if 0.6 > d > 0:
nei.append(str(station_id2))
if d > maxDis:
maxDis = d
neighbor[str(station_id1)] = nei
return neighbor
def getOlderNeighbor(stations_id, position):
neighbor = {}
maxDis = 0
for station_id1 in stations_id:
nei = []
for station_id2 in stations_id:
d = manhattan_distance(position[str(station_id1)][0], position[str(station_id1)][1],
position[str(station_id2)][0], position[str(station_id2)][1])
if 0.9 > d > 0:
nei.append(str(station_id2))
if d > maxDis:
maxDis = d
neighbor[str(station_id1)] = nei
return neighbor
def getMonthDayAndHour(): # month, day and hour used in this experiment
mon = 8
day = 99
hour = 7
return mon, day, hour
def getStation_status():
monDay = {'6': 30, '7': 31, '8': 31}
mon, day, hour = getMonthDayAndHour()
initialByDay = {}
totalDocksDict = {}
initialInfo = getInitialInfo()
position, stations_id = getPositionAndStations_id()
for station_id, values in initialInfo.items():
totD = values['totalDocks']
totalDocksDict[str(station_id)] = totD
for day in range(0, monDay[str(mon)]):
sta = {}
for station_id, values in initialInfo.items():
inf = values['info']
monInf = inf[str(mon)]
sta[str(station_id)] = monInf[day]
initialByDay[str(day + 1)] = sta
station_status = {}
for day in range(0, monDay[str(mon)]):
station_status1 = {}
for station_id in stations_id:
stationInf = initialByDay[str(day + 1)][str(station_id)][str(day + 1)][str(hour)]
station_status1[str(station_id)] = stationInf
station_status[str(day + 1)] = station_status1
return station_status, totalDocksDict
###########################
# MCTS algorithm
class BikeSystem(object):
def __init__(self, availStations=[]):
self.availStations = copy.deepcopy(availStations)
def update(self, station_id):
self.availStations.remove(str(station_id))
class MCTS(object):
def __init__(self, availStations, time=6, max_actions=1000):
self.availStations = availStations
self.calculation_time = float(time)
self.max_actions = max_actions
self.confident = 8
self.equivalence = 10000 # calc beta
self.max_depth = 1
self.fileCount = 0
def get_action(self, rootStationId, starttime, neighbor, rateData, station_status, totalDocksDict, day,
olderNeighbor): # rootStationId: current truck parking station
position, stations_id = getPositionAndStations_id()
if len(self.availStations) == 1:
return self.availStations[0]
self.visited_times = {} # key: station_id, value: visited times
simulations = 0
begin = time.time()
Q = {str(sta_id): -99999 for sta_id in self.availStations} # recalculation Q value
balanceBikeNums = {str(sta_id): 0 for sta_id in self.availStations}
countmax = 0
count = 0
expandStaSet = set()
# self.fileCount = 0
while simulations < self.max_actions + 1:
availStations_copy = copy.deepcopy(self.availStations)
countmax, count = self.run_simulation(availStations_copy, rootStationId, Q, starttime, balanceBikeNums,
neighbor,
simulations,
expandStaSet, countmax, count, rateData, station_status,
totalDocksDict, day, olderNeighbor, position)
simulations += 1
# select the station with the maximum Q value
maxQ, selected_station_id = self.select_one_station(Q, starttime, rateData, totalDocksDict, station_status, day,
rootStationId)
print("total simulations=", simulations)
print("Time spent in the simulation process:", str(time.time() - begin))
print('Maximum number of access to uct:' + str(countmax))
print('Total number of access to uct:' + str(count))
print('Maximum Q:', maxQ)
print('Maximum depth searched:', self.max_depth)
return selected_station_id
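    # select_one_station simulates each candidate station's inventory up to the current time and
    # restricts the argmax of Q to stations whose real-time bike count falls outside their service
    # level (i.e. stations that actually need rebalancing); if all Q values are still at the
    # -99999 initial value it falls back to the geographically nearest such station.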
def select_one_station(self, Q, starttime, rateData, totalDocksDict, station_status, day, rootStationId):
notInServiceLevalStas = []
t_interval = starttime / 5
mon = 8
hour = 7
month = str(mon) if int(mon) >= 10 else '0' + str(mon)
day1 = str(day) if int(day) >= 10 else '0' + str(day)
date = '2017-' + str(month) + '-' + str(day1)
date = datetime.datetime.strptime(date, '%Y-%m-%d')
for sta in self.availStations:
rateDict = rateData[str(sta)]
if date.weekday() < 5:
rental_rate_0 = rateDict['rental_rate_0']
return_rate_0 = rateDict['return_rate_0']
elif date.weekday() < 7:
rental_rate_0 = rateDict['rental_rate_1']
return_rate_0 = rateDict['return_rate_1']
availableBikes = station_status[str(day)][str(sta)]['availableBikes']
availableDocks = station_status[str(day)][str(sta)]['availableDocks']
totalDocks = totalDocksDict[str(sta)]
for i in np.arange(0,
int(t_interval)): # real-time bikes docks
deltaNum = rental_rate_0[i] - return_rate_0[i]
if deltaNum > 0:
availableBikes = float(availableBikes) - deltaNum
if availableBikes < 0:
availableBikes = 0
availableDocks = float(availableDocks) + deltaNum
if availableDocks > float(totalDocks):
availableBikes = 0
availableDocks = float(totalDocks)
else:
availableDocks = float(availableDocks) - abs(deltaNum)
if availableDocks < 0:
availableDocks = 0
availableBikes = float(availableBikes) + abs(deltaNum)
if availableBikes > float(totalDocks):
availableDocks = 0
availableBikes = float(totalDocks)
realBikes = availableBikes
realDocks = availableDocks
totalDocks = totalDocksDict[str(sta)]
serviceLevel = []
for docks in range(1, int(totalDocks)):
availableBikes = int(totalDocks) - docks
availableDocks = docks
flag = 0
for j in np.arange(int(t_interval), int(t_interval) + 24):
if j >= 48:
break
else:
deltaNum = rental_rate_0[j] - return_rate_0[j]
if deltaNum > 0:
availableBikes = float(availableBikes) - deltaNum
if availableBikes <= 1:
flag = 1
break
availableDocks = float(availableDocks) + deltaNum
if availableDocks >= float(totalDocks) - 1:
flag = 1
break
else:
availableDocks = float(availableDocks) - abs(deltaNum)
if availableDocks <= 1:
flag = 1
break
availableBikes = float(availableBikes) + abs(deltaNum)
if availableBikes >= float(totalDocks) - 1:
flag = 1
break
if flag == 0:
serviceLevel.append(int(totalDocks) - int(docks))
if serviceLevel == [] or float(realBikes) < min(serviceLevel) or float(realBikes) > max(serviceLevel):
notInServiceLevalStas.append(sta)
if not notInServiceLevalStas:
maxQ, sta_id = max((Q[str(sta_id)], sta_id) for sta_id in self.availStations)
else:
maxQ, sta_id = max((Q[str(sta_id)], sta_id) for sta_id in notInServiceLevalStas)
if maxQ == -99999:
minDis = 10000
print(notInServiceLevalStas)
position, stations_id = getPositionAndStations_id()
for sta in notInServiceLevalStas:
dis = manhattan_distance(position[str(rootStationId)][0], position[str(rootStationId)][1],
position[str(sta)][0], position[str(sta)][1])
if dis < minDis:
minDis = dis
sta_id = sta
# maxQ, sta_id = max((Q[str(sta_id)], sta_id) for sta_id in self.availStations)
if sta_id == '238':
print(Q)
print('Q[238]:' + str(Q['238']))
return maxQ, sta_id
def run_simulation(self, availStations, rootStationId,
Q, starttime, balanceBikeNums, neighbor, simulations, expandStaSet, countmax,
count2, rateData, station_status, totalDocksDict, day, olderNeighbor,
position): # conduct run_simulation and get a path
"""
MCTS main process
"""
visited_times = self.visited_times
# availStations = bikeSystem.availStations
visited_paths = []
cumulativeDis = [] # The total travel distance of the truck
expand = True
selectedSta = rootStationId
dropNum = 0
pickNum = 0
# simulation
count = 0
countRequestFlag = 0
neiStaQ = []
for t in range(1, self.max_actions + 1):
lastStation = selectedSta
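            # Selection step: once every remaining station has been visited at least once, pick the
            # station maximizing the UCT score Q + sqrt(confident * ln(total_visits) / visits);
            # otherwise a station is drawn at random from the remaining ones (expansion).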
if all(visited_times.get(station_id) for station_id in availStations): # UCB
log_total = log(sum(visited_times[str(sta_id)] for sta_id in availStations))
value, sta_id = max((
Q[str(sta_id)] + sqrt(self.confident * log_total / visited_times[str(sta_id)]),
sta_id)
for sta_id in
availStations)
selectedSta = sta_id
count += 1
count2 += 1
else:
availNeighbor = [sta_id for sta_id in neighbor[str(lastStation)] if sta_id in availStations]
if len(availNeighbor) and random.random() < 0:
selectedSta = random.choice(availNeighbor)
else:
selectedSta = random.choice(availStations)
# bikeSystem.update(selectedSta)
availStations.remove(str(selectedSta))
# Expand
if expand is True and str(selectedSta) not in visited_times:
expand = False
visited_times[str(selectedSta)] = 0
expandStaSet.add(str(selectedSta))
if t > self.max_depth:
self.max_depth = t
visited_paths.append(selectedSta)
is_full = not len(availStations)
isRequest, endtime, dropNum0, pickNum0, real_bikes, real_docks = self.getRequest(lastStation, selectedSta,
Q, starttime,
cumulativeDis, rateData,
station_status,
totalDocksDict, day,
position)
starttime = endtime
if isRequest:
availselectedStaNeighbor = [sta_id for sta_id in olderNeighbor[str(selectedSta)] if
sta_id in availStations]
# neiStaQ = {str(sta):0 for sta in availselectedStaNeighbor}
for neiSta in availselectedStaNeighbor:
cumulativeDisCopy = copy.deepcopy(cumulativeDis)
diss = []
dis = manhattan_distance(position[str(selectedSta)][0], position[str(selectedSta)][1],
position[str(neiSta)][0], position[str(neiSta)][1])
# cumulativeDisCopy.append(dis)
cumulativeDisCopy.append(dis)
                    v = 7  # truck speed in m/s (~25 km/h)
t = dis * 1000 / v
t_arrive = starttime + round(t / 60)
t_interval = round(t_arrive / 5)
serviceLevel, real_bikess, real_dockss = self.getServiceLevel(neiSta, t_interval,
rateData, station_status,
totalDocksDict, day)
dropNum = 0
pickNum = 0
if not serviceLevel: # return>>rental
pickNum = real_bikes
else:
minBikes = min(serviceLevel)
maxBikes = max(serviceLevel)
if minBikes <= real_bikes <= maxBikes:
pass
else:
if real_bikes < minBikes:
dropNum = minBikes - real_bikes # TN
if real_bikes > maxBikes:
pickNum = real_bikes - maxBikes
balanceBikeNumss = dropNum + pickNum
flag = -1
if dropNum > 0:
flag = 0
elif pickNum > 0:
flag = 1
neiStaQ.append(self.getScore(cumulativeDisCopy, balanceBikeNumss, real_bikess, real_dockss, flag))
if is_full or isRequest:
break
if count > countmax:
countmax = count
# Back-propagation
balanceBikeNums[str(selectedSta)] = dropNum0 + pickNum0
flag = -1
if dropNum0 > 0:
flag = 0
elif pickNum0 > 0:
flag = 1
# if selectedSta=='229':
# print('real_docks:'+str(real_docks))
for sta_id in visited_paths:
if sta_id not in visited_times:
continue
visited_times[str(sta_id)] += 1
if isRequest:
if not neiStaQ:
neiStaQ.append(0)
score = self.getScore(cumulativeDis, balanceBikeNums[str(selectedSta)],
real_bikes, real_docks, flag) + np.mean(neiStaQ)
Q[str(sta_id)] = (abs(Q[str(sta_id)]) * (visited_times[str(sta_id)] - 1) +
score) / visited_times[str(sta_id)]
Q[str(sta_id)] = round(Q[str(sta_id)], 4)
log_dir = './bike_sharing_data/mydata/log/' + str(self.fileCount + 1)
if not os.path.exists(log_dir):
os.makedirs(log_dir)
with open(log_dir + '/' + str(simulations + 1) + '.json',
'w') as f:
json.dump(Q, f)
return countmax, count2
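    # getScore combines a travel-distance score (shorter trips score higher), a rebalanced-quantity
    # score (more bikes moved scores higher) and an urgency bonus of 10 when the station is nearly
    # empty while needing a drop-off, or nearly full while needing a pick-up.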
def getScore(self, cumulativeDis, balanceNums, real_bikes, real_docks, flag):
disScore = 0
numScore = 0
score = 0
if sum(cumulativeDis) <= 300:
disScore = 10
elif sum(cumulativeDis) <= 600:
disScore = 6
elif sum(cumulativeDis) <= 1000:
disScore = 4
elif sum(cumulativeDis) <= 1500:
disScore = 2
elif sum(cumulativeDis) <= 2000:
disScore = 0
else:
disScore = -5
# dis = sum(cumulativeDis)
# if dis>=3000:
# disScore = -10
# elif dis>=2000:
# disScore = 20-10*(dis/1000)
# elif dis>=0:
# disScore = 10-5*(dis/1000)
if balanceNums == 0:
numScore = 0
elif balanceNums <= 3:
numScore = 2
elif balanceNums <= 6:
numScore = 4
elif balanceNums <= 10:
numScore = 6
else:
numScore = 10
# if balanceNums >=10:
# numScore = 10
# else:
# numScore = balanceNums
urgencyScore = 0
if flag == 0 and real_bikes <= 1:
urgencyScore = 10
elif flag == 1 and real_docks <= 1:
urgencyScore = 10
elif flag == -1:
return 0
score = 0.5 * disScore + 0.5 * numScore + urgencyScore
return score
def getRequest(self, lastStation, selectedSta, Q, starttime, cumulativeDis, rateData, station_status,
totalDocksDict, day, position):
dis = manhattan_distance(position[str(lastStation)][0], position[str(lastStation)][1],
position[str(selectedSta)][0], position[str(selectedSta)][1])
cumulativeDis.append(round(dis * 1000, 3))
noise = abs(np.random.normal(loc=0.0, scale=2))
        v = 7  # truck speed in m/s (~25 km/h)
t = dis * 1000 / v #
t_arrive = starttime + round(t / 60)
t_interval = round(t_arrive / 5)
serviceLevel, real_bikes, real_docks = self.getServiceLevel(selectedSta, t_interval, rateData, station_status,
totalDocksDict, day)
dropNum = 0
pickNum = 0
endtime = t_arrive
if not serviceLevel: # return>>rental
endtime = t_arrive + real_bikes * 0.3 + noise
pickNum = real_bikes
return True, endtime, dropNum, pickNum, real_bikes, real_docks
else:
minBikes = min(serviceLevel)
maxBikes = max(serviceLevel)
if minBikes <= real_bikes <= maxBikes:
endtime = t_arrive + noise
return False, endtime, dropNum, pickNum, real_bikes, real_docks
else:
if real_bikes < minBikes:
dropNum = minBikes - real_bikes
endtime = t_arrive + dropNum * 0.3 + noise # drop/take time (30s)
if real_bikes > maxBikes:
pickNum = real_bikes - maxBikes
endtime = t_arrive + pickNum * 0.3 + noise
return True, endtime, dropNum, pickNum, real_bikes, real_docks
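    # getServiceLevel rolls the station's rental/return rates forward in 5-minute intervals: it
    # first estimates the real-time bikes/docks at arrival time, then returns the set of bike
    # counts for which the station neither runs empty nor full over the next 24 intervals
    # (about two hours), together with the estimated real-time bikes and docks.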
def getServiceLevel(self, selectedSta, t_interval, rateData, station_status, totalDocksDict, day):
# mon,day,hour = getMonthDayAndHour()
mon = 8
hour = 7
rateDict = rateData[str(selectedSta)]
t_intervalFlag = 0
if hour == 7:
t_intervalFlag = 0
elif hour == 8:
t_intervalFlag = 12
elif hour == 9:
t_intervalFlag = 24
month = str(mon) if int(mon) >= 10 else '0' + str(mon)
day1 = str(day) if int(day) >= 10 else '0' + str(day)
date = '2017-' + str(month) + '-' + str(day1)
date = datetime.datetime.strptime(date, '%Y-%m-%d')
if date.weekday() < 5:
rental_rate_0 = rateDict['rental_rate_0']
return_rate_0 = rateDict['return_rate_0']
elif date.weekday() < 7:
rental_rate_0 = rateDict['rental_rate_1']
return_rate_0 = rateDict['return_rate_1']
iniBikes = station_status[str(day)][str(selectedSta)]['availableBikes']
iniDocks = station_status[str(day)][str(selectedSta)]['availableDocks']
totalDocks = totalDocksDict[str(selectedSta)]
serviceLevel = []
availableBikes = iniBikes
# print('selectedSta:'+str(selectedSta))
availableDocks = iniDocks
# print('selectedSta:'+str(selectedSta))
# print('iniBikes:'+str(iniBikes))
for i in np.arange(int(t_intervalFlag), int(t_interval) + int(t_intervalFlag)): # real-time bikes docks
deltaNum = 0
deltaNum = rental_rate_0[i] - return_rate_0[i]
if float(availableBikes) < 1.0:
pass # rental_lost += deltNum
if float(availableDocks) < 1.0:
pass # return_lost += deltNum
if deltaNum > 0:
availableBikes = float(availableBikes) - deltaNum
if availableBikes < 0:
availableBikes = 0
availableDocks = float(availableDocks) + deltaNum
if availableDocks > float(totalDocks):
availableBikes = 0
availableDocks = float(totalDocks)
else:
availableDocks = float(availableDocks) - abs(deltaNum)
if availableDocks < 0:
availableDocks = 0
availableBikes = float(availableBikes) + abs(deltaNum)
if availableBikes > float(totalDocks):
availableDocks = 0
availableBikes = float(totalDocks)
realBikes = availableBikes
realDocks = availableDocks
for docks in range(1, int(totalDocks)):
availableBikes = int(totalDocks) - docks
availableDocks = docks
flag = 0
for j in np.arange(int(t_intervalFlag) + int(t_interval), int(t_interval) + int(t_intervalFlag) + 24):
deltaNum = 0
if j >= 48:
break
else:
try:
deltaNum = rental_rate_0[j] - return_rate_0[j]
except:
print('raredata error! j:' + str(j))
if deltaNum > 0:
availableBikes = float(availableBikes) - deltaNum
if availableBikes <= 1:
flag = 1
# print('availableBikes:'+str(availableBikes))
break
availableDocks = float(availableDocks) + deltaNum
if availableDocks >= float(totalDocks) - 1:
flag = 1
# print('availaableDocks:'+str(availableDocks))
break
else:
availableDocks = float(availableDocks) - abs(deltaNum)
if availableDocks <= 1:
# print('availableDocks:'+str(availableDocks))
flag = 1
break
availableBikes = float(availableBikes) + abs(deltaNum)
if availableBikes >= float(totalDocks) - 1:
# print('availableBikes:'+str(availableBikes))
flag = 1
break
if flag == 0:
serviceLevel.append(int(totalDocks) - int(docks))
return serviceLevel, math.floor(float(realBikes)), math.floor(float(realDocks))
def start(availStations, neighbor, lostNums, visitedPath, cumulativeDis, startStation, balanceNum, mutex, realtimeBikes,
day, olderNeighbor):
print("start running, the process number is %d" % (os.getpid()))
mcts = MCTS(availStations)
selectedSta = startStation
starttime = 0
rateData = getRateData()
station_status, totalDocksDict = getStation_status()
# visitedPath = []
# cumulativeDis = []
info = {}
visitedPath.append(selectedSta)
totalLost = 0
print('start station:' + str(selectedSta))
# lostNums = {}
isRequest, starttime, dropNum, pickNum, rentalLost, returnLost, realbikes = getRequest(selectedSta, selectedSta,
starttime,
cumulativeDis, rateData,
station_status,
totalDocksDict, day)
lostNums[str(selectedSta)] = float(rentalLost) + float(returnLost)
totalLost += lostNums[str(selectedSta)]
info['time'] = starttime
info['realbikes'] = realbikes
realtimeBikes[str(selectedSta)] = info
if int(dropNum) > 0:
balanceNum[str(selectedSta)] = -int(dropNum)
elif int(pickNum) > 0:
balanceNum[str(selectedSta)] = int(pickNum)
else:
balanceNum[str(selectedSta)] = 0
if isRequest:
print('sub-process:pid=%d' % os.getpid())
print('balance station:' + str(selectedSta) + ' dropNum:' + str(dropNum) + ' pickNum:' + str(pickNum))
print('customer loss:' + str(lostNums[str(selectedSta)]))
print('current time:' + str(starttime) + ' min')
print('travel distance:')
print(cumulativeDis)
# bikeSystem.update(selectedSta)
availStations.remove(str(selectedSta))
mcts.fileCount = 0
while 1:
lastSta = selectedSta
info = {}
mutex.acquire()
if not len(availStations):
            print('There are no stations that need to be balanced')
lostNums['totalLost'] = totalLost
mutex.release()
break
selectedSta = mcts.get_action(lastSta, starttime, neighbor, rateData, station_status, totalDocksDict, day,
olderNeighbor)
mcts.fileCount += 1
print('through station:' + str(selectedSta))
# bikeSystem.update(selectedSta)
availStations.remove(str(selectedSta))
mutex.release()
visitedPath.append(selectedSta)
isRequest, starttime, dropNum, pickNum, rentalLost, returnLost, realbikes = getRequest(lastSta, selectedSta,
starttime,
cumulativeDis, rateData,
station_status,
totalDocksDict, day)
lostNums[str(selectedSta)] = float(rentalLost) + float(returnLost)
totalLost += lostNums[str(selectedSta)]
info['time'] = starttime
info['realbikes'] = realbikes
realtimeBikes[str(selectedSta)] = info
if int(dropNum) > 0:
balanceNum[str(selectedSta)] = -int(dropNum)
elif int(pickNum) > 0:
balanceNum[str(selectedSta)] = int(pickNum)
else:
balanceNum[str(selectedSta)] = 0
if isRequest:
print('sub-process:pid=%d' % os.getpid())
print('balance station:' + str(selectedSta) + ' dropNum:' + str(dropNum) + ' pickNum:' + str(pickNum))
print('customer loss:' + str(lostNums[str(selectedSta)]))
print('current time:' + str(starttime) + ' min')
print('travel distance:')
print(cumulativeDis)
if not len(availStations):
print('There are no stations need to be balanced')
lostNums['totalLost'] = totalLost
break
print('****************************************************')
def getRequest(lastStation, selectedSta, starttime, cumulativeDis, rateData, station_status, totalDocksDict, day):
position, stations_id = getPositionAndStations_id()
dis = manhattan_distance(position[str(lastStation)][0], position[str(lastStation)][1],
position[str(selectedSta)][0],
position[str(selectedSta)][1])
cumulativeDis.append(round(dis * 1000, 3))
noise = abs(np.random.normal(loc=0.0, scale=2))
v = 7 # truck speed in m/s
t = dis * 1000 / v
t_arrive = starttime + t // 60
t_interval = t_arrive // 5
dropNum = 0
pickNum = 0
realbikes = 0
serviceLevel, real_bikes, real_docks, rentalLost, returnLost = getServiceLevel(selectedSta, t_interval, rateData,
station_status, totalDocksDict, day)
if not serviceLevel: # return>>rental
print('serviceLevel is null')
endtime = t_arrive + real_bikes * 0.3 + noise
pickNum = real_bikes
realbikes = 0
return True, endtime, dropNum, pickNum, rentalLost, returnLost, realbikes
else:
minBikes = min(serviceLevel)
maxBikes = max(serviceLevel)
endtime = t_arrive
if minBikes <= real_bikes <= maxBikes:
endtime = t_arrive + noise
if selectedSta == '127':
print('dropNum:' + str(dropNum))
print('pickNum:' + str(pickNum))
realbikes = real_bikes
return False, endtime, dropNum, pickNum, rentalLost, returnLost, realbikes
else:
if real_bikes < minBikes:
dropNum = minBikes - real_bikes
endtime = t_arrive + dropNum * 0.3 + noise
if real_bikes > maxBikes:
pickNum = real_bikes - maxBikes
endtime = t_arrive + pickNum * 0.3 + noise
if selectedSta == '127':
print('dropNum:' + str(dropNum))
print('pickNum:' + str(pickNum))
if pickNum != 0:
realbikes = maxBikes
elif dropNum != 0:
realbikes = minBikes
return True, endtime, dropNum, pickNum, rentalLost, returnLost, realbikes
def getServiceLevel(selectedSta, t_interval, rateData, station_status, totalDocksDict, day):
# mon,day,hour = getMonthDayAndHour()
mon = 8
hour = 7
rateDict = rateData[str(selectedSta)]
t_intervalFlag = 0
if hour == 7:
t_intervalFlag = 0
elif hour == 8:
t_intervalFlag = 12
elif hour == 9:
t_intervalFlag = 24
month = str(mon) if int(mon) >= 10 else '0' + str(mon)
day1 = str(day) if int(day) >= 10 else '0' + str(day)
date = '2017-' + str(month) + '-' + str(day1)
date = datetime.datetime.strptime(date, '%Y-%m-%d')
if date.weekday() < 5:
rental_rate_0 = rateDict['rental_rate_0']
return_rate_0 = rateDict['return_rate_0']
elif date.weekday() < 7:
rental_rate_0 = rateDict['rental_rate_1']
return_rate_0 = rateDict['return_rate_1']
iniBikes = station_status[str(day)][str(selectedSta)]['availableBikes']
iniDocks = station_status[str(day)][str(selectedSta)]['availableDocks']
totalDocks = totalDocksDict[str(selectedSta)]
serviceLevel = []
availableBikes = iniBikes
availableDocks = iniDocks
if selectedSta == '127':
print('iniBikes:' + str(availableBikes))
print('iniDocks:' + str(availableDocks))
print('t_interval:' + str(t_interval))
print(totalDocks)
rentalLost = 0
returnLost = 0
for i in np.arange(int(t_intervalFlag), int(t_interval) + int(t_intervalFlag)): # real-time bikes docks
deltaNum = 0
deltaNum = rental_rate_0[i] - return_rate_0[i]
if float(availableBikes) < 1.0 and deltaNum > 0:
rentalLost += deltaNum
pass # rental_lost += deltNum
if float(availableDocks) < 1.0 and deltaNum < 0:
returnLost += abs(deltaNum)
pass # return_lost += deltNum
if deltaNum > 0:
availableBikes = float(availableBikes) - deltaNum
if availableBikes < 0:
availableBikes = 0
availableDocks = float(availableDocks) + deltaNum
if availableDocks > float(totalDocks):
availableBikes = 0
availableDocks = float(totalDocks)
else:
availableDocks = float(availableDocks) - abs(deltaNum)
if availableDocks < 0:
availableDocks = 0
availableBikes = float(availableBikes) + abs(deltaNum)
if availableBikes > float(totalDocks):
availableDocks = 0
availableBikes = float(totalDocks)
if selectedSta == '127':
print('realBikes:' + str(availableBikes))
print('realDocks:' + str(availableDocks))
realBikes = availableBikes
realDocks = availableDocks
for docks in range(1, int(totalDocks)):
availableBikes = int(totalDocks) - docks
availableDocks = docks
flag = 0
for j in np.arange(int(t_intervalFlag) + int(t_interval), int(t_interval) + int(t_intervalFlag) + 24):
deltaNum = 0
if j >= 48:
break
else:
deltaNum = rental_rate_0[j] - return_rate_0[j]
if deltaNum > 0:
availableBikes = float(availableBikes) - deltaNum
if availableBikes <= 1:
flag = 1
# print('availableBikes:'+str(availableBikes))
break
availableDocks = float(availableDocks) + deltaNum
if availableDocks >= float(totalDocks) - 1:
flag = 1
# print('availableDocks:'+str(availableDocks))
break
else:
availableDocks = float(availableDocks) - abs(deltaNum)
if availableDocks <= 1:
# print('availableDocks:'+str(availableDocks))
flag = 1
break
availableBikes = float(availableBikes) + abs(deltaNum)
if availableBikes >= float(totalDocks) - 1:
# print('availableBikes:'+str(availableBikes))
flag = 1
break
if flag == 0:
serviceLevel.append(int(totalDocks) - int(docks))
if selectedSta == '127':
print(serviceLevel)
return serviceLevel, math.floor(float(realBikes)), math.floor(float(realDocks)), rentalLost, returnLost
def mctsAlgorithm():
experiment_path = './bike_sharing_data/mydata/experiment_result2'
# month, day, hour = getMonthDayAndHour()
month = 8
hour = 7
day1 = [i for i in range(1, 32)]
day2 = [5, 6, 12, 13, 19, 20, 26, 27] # The weekend of August!
days = [i for i in day1 if i not in day2]
# 11 -> 1
for day in days:
position, stations_id = getPositionAndStations_id()
availStations = stations_id
availStations = multiprocessing.Manager().list(availStations)
realtimeBikes = multiprocessing.Manager().dict()
lostNums1 = multiprocessing.Manager().dict()
visitedPath1 = multiprocessing.Manager().list()
cumulativeDis1 = multiprocessing.Manager().list()
balanceNum1 = multiprocessing.Manager().dict()
lostNums2 = multiprocessing.Manager().dict()
visitedPath2 = multiprocessing.Manager().list()
cumulativeDis2 = multiprocessing.Manager().list()
balanceNum2 = multiprocessing.Manager().dict()
neighbor = getNeighbor(stations_id, position)
olderNeighbor = getOlderNeighbor(stations_id, position)
startStation1 = '237'
startStation2 = '369'
mutex = multiprocessing.Lock()
p1 = multiprocessing.Process(target=start, args=(
availStations, neighbor, lostNums1, visitedPath1, cumulativeDis1, startStation1, balanceNum1, mutex,
realtimeBikes, day, olderNeighbor))
p2 = multiprocessing.Process(target=start, args=(
availStations, neighbor, lostNums2, visitedPath2, cumulativeDis2, startStation2, balanceNum2, mutex,
realtimeBikes, day, olderNeighbor))
p1.start()
p2.start()
p1.join()
p2.join()
print('customer loss:' + str(lostNums1))
print('through station:' + str(visitedPath1))
print('balanced number:' + str(balanceNum1))
print('travel distance:' + str(cumulativeDis1))
print('customer loss:' + str(lostNums2))
print('through station:' + str(visitedPath2))
print('balanced number:' + str(balanceNum2))
print('travel distance:' + str(cumulativeDis2))
print('pre-process:pid=%d' % os.getpid())
print('real status of stations:' + str(realtimeBikes))
filename = 'result_month_' + str(month) + '_day_' + str(day) + '_hour_' + str(hour) + '.json'
realtimeBikes1 = {}
for sta, dicts in realtimeBikes.items():
realtimeBikes1[str(sta)] = dicts
experimentResult = {}
resultTruck1 = {}
resultTruck2 = {}
lostNums11 = {}
balanceNum11 = {}
for sta, num in lostNums1.items():
lostNums11[str(sta)] = num
for sta, num in balanceNum1.items():
balanceNum11[str(sta)] = num
resultTruck1['lostUsers'] = lostNums11
resultTruck1['visitedPath'] = list(visitedPath1)
resultTruck1['balanceNum'] = balanceNum11
resultTruck1['travelDis'] = list(cumulativeDis1)
lostNums22 = {}
balanceNum22 = {}
for sta, num in lostNums2.items():
lostNums22[str(sta)] = num
for sta, num in balanceNum2.items():
balanceNum22[str(sta)] = num
resultTruck2['lostUsers'] = lostNums22
resultTruck2['visitedPath'] = list(visitedPath2)
resultTruck2['balanceNum'] = balanceNum22
resultTruck2['travelDis'] = list(cumulativeDis2)
experimentResult['truck1'] = resultTruck1
experimentResult['truck2'] = resultTruck2
experimentResult['afterBalanceRealBikes'] = realtimeBikes1
experiment_path = './bike_sharing_data/mydata/experiment_result2/epsilon_0'
if not os.path.exists(experiment_path):
os.makedirs(experiment_path)
with open(os.path.join(experiment_path, filename), 'w') as f:
json.dump(experimentResult, f)
print('day' + str(day) + 'finished!')
def noRepositionStart(lostNums):
starttime = 0
position, stations_id = getPositionAndStations_id()
rateData = getRateData()
station_status, totalDocksDict = getStation_status()
# mon,day2,hour = getMonthDayAndHour()
mon = 8
for day in range(1, 32):
totalLost = 0
lost = {}
for station_id in stations_id:
rateDict = rateData[str(station_id)]
month = str(mon) if int(mon) >= 10 else '0' + str(mon)
day1 = str(day) if int(day) >= 10 else '0' + str(day)
date = '2017-' + str(month) + '-' + str(day1)
date = datetime.datetime.strptime(date, '%Y-%m-%d')
if date.weekday() < 5:
rental_rate_0 = rateDict['rental_rate_0']
return_rate_0 = rateDict['return_rate_0']
elif date.weekday() < 7:
rental_rate_0 = rateDict['rental_rate_1']
return_rate_0 = rateDict['return_rate_1']
iniBikes = station_status[str(day)][str(station_id)]['availableBikes']
iniDocks = station_status[str(day)][str(station_id)]['availableDocks']
totalDocks = totalDocksDict[str(station_id)]
availableBikes = iniBikes
availableDocks = iniDocks
rentalLost = 0
returnLost = 0
for i in np.arange(0, 48):
deltaNum = 0
deltaNum = rental_rate_0[i] - return_rate_0[i]
if deltaNum > 0 and (deltaNum - float(availableBikes)) > 0:
rentalLost += (deltaNum - float(availableBikes))
pass # rental_lost += deltNum
if deltaNum < 0 and (abs(deltaNum) - float(availableDocks)) > 0:
returnLost += (abs(deltaNum) - float(availableDocks))
pass # return_lost += deltNum
if deltaNum > 0:
availableBikes = float(availableBikes) - deltaNum
if availableBikes < 0:
availableBikes = 0
availableDocks = float(availableDocks) + deltaNum
if availableDocks > float(totalDocks):
availableBikes = 0
availableDocks = float(totalDocks)
else:
availableDocks = float(availableDocks) - abs(deltaNum)
if availableDocks < 0:
availableDocks = 0
availableBikes = float(availableBikes) + abs(deltaNum)
if availableBikes > float(totalDocks):
availableDocks = 0
availableBikes = float(totalDocks)
lost[str(station_id)] = rentalLost + returnLost
totalLost += lost[str(station_id)]
lost['totalLost'] = totalLost
print(totalLost)
lostNums[str(day)] = lost
def noReposition():
experiment_path = './bike_sharing_data/mydata/noReposition'
if not os.path.exists(experiment_path):
os.makedirs(experiment_path)
# month,day,hour = getMonthDayAndHour()
month = 8
hour = 7
lostNums = {}
noRepositionStart(lostNums)
print(lostNums)
filename = 'noRepositionLost_month_' + str(month) + '_hour_' + str(78910) + '.json'
with open(os.path.join(experiment_path, filename), 'w') as f:
json.dump(lostNums, f)
def staticRepositionStart(lostNums):
position, stations_id = getPositionAndStations_id()
rateData = getRateData()
station_status, totalDocksDict = getStation_status()
mon, day, hour = getMonthDayAndHour()
for day in range(1, 32):
totalLost = 0
lost = {}
for station_id in stations_id:
rateDict = rateData[str(station_id)]
month = str(mon) if int(mon) >= 10 else '0' + str(mon)
day1 = str(day) if int(day) >= 10 else '0' + str(day)
date = '2017-' + str(month) + '-' + str(day1)
date = datetime.datetime.strptime(date, '%Y-%m-%d')
if date.weekday() < 5:
rental_rate_0 = rateDict['rental_rate_0']
return_rate_0 = rateDict['return_rate_0']
elif date.weekday() < 7:
rental_rate_0 = rateDict['rental_rate_1']
return_rate_0 = rateDict['return_rate_1']
totalDocks = totalDocksDict[str(station_id)]
serviceLevel = []
for docks in range(1, int(totalDocks)):
availableBikes = int(totalDocks) - docks
availableDocks = docks
flag = 0
for j in np.arange(0, 19):
deltaNum = 0
deltaNum = rental_rate_0[j] - return_rate_0[j]
if deltaNum > 0:
availableBikes = float(availableBikes) - deltaNum
if availableBikes <= 1:
flag = 1
# print('availableBikes:'+str(availableBikes))
break
availableDocks = float(availableDocks) + deltaNum
if availableDocks >= float(totalDocks) - 1:
flag = 1
# print('availableDocks:'+str(availableDocks))
break
else:
availableDocks = float(availableDocks) - abs(deltaNum)
if availableDocks <= 1:
# print('availableDocks:'+str(availableDocks))
flag = 1
break
availableBikes = float(availableBikes) + abs(deltaNum)
if availableBikes >= float(totalDocks) - 1:
# print('availableBikes:'+str(availableBikes))
flag = 1
break
if flag == 0:
serviceLevel.append(int(totalDocks) - int(docks))
iniBikes = random.choice(serviceLevel)
iniDocks = int(totalDocks) - iniBikes
availableBikes = iniBikes
availableDocks = iniDocks
# if station_id == '127':
# print('iniBikes:' + str(availableBikes))
# print('iniDocks:' + str(availableDocks))
# print(totalDocks)
rentalLost = 0
returnLost = 0
for i in np.arange(0, 48): # real-time bikes docks
deltaNum = 0
deltaNum = rental_rate_0[i] - return_rate_0[i]
if deltaNum > 0 and (deltaNum - float(availableBikes)) > 0:
rentalLost += (deltaNum - float(availableBikes))
pass # rental_lost += deltNum
if deltaNum < 0 and (abs(deltaNum) - float(availableDocks)) > 0:
returnLost += (abs(deltaNum) - float(availableDocks))
pass # return_lost += deltNum
if deltaNum > 0:
availableBikes = float(availableBikes) - deltaNum
if availableBikes < 0:
availableBikes = 0
availableDocks = float(availableDocks) + deltaNum
if availableDocks > float(totalDocks):
availableBikes = 0
availableDocks = float(totalDocks)
else:
availableDocks = float(availableDocks) - abs(deltaNum)
if availableDocks < 0:
availableDocks = 0
availableBikes = float(availableBikes) + abs(deltaNum)
if availableBikes > float(totalDocks):
availableDocks = 0
availableBikes = float(totalDocks)
lost[str(station_id)] = rentalLost + returnLost
totalLost += lost[str(station_id)]
lost['totalLost'] = totalLost
print(totalLost)
lostNums[str(day)] = lost
def staticReposition():
experiment_path = './bike_sharing_data/mydata/staticReposition'
if not os.path.exists(experiment_path):
os.makedirs(experiment_path)
month, day, hour = getMonthDayAndHour()
lostNums = {}
staticRepositionStart(lostNums)
print(lostNums)
filename = 'staticRepositionLost_month_' + str(month) + '_hour_' + str(78910) + '.json'
with open(os.path.join(experiment_path, filename), 'w') as f:
json.dump(lostNums, f)
def nearestNeihborRepositionStart(startStation, availStations, mutex, realtimeBikes, day, beforeBalancedTotalLost):
position, stations_id = getPositionAndStations_id()
rateData = getRateData()
station_status, totalDocksDict = getStation_status()
# mon, day, hour = getMonthDayAndHour()
mon = 8
hour = 7
dropStation = []
pickStation = []
balanceStas = []
for sta in availStations:
iniBikes = station_status[str(day)][str(sta)]['availableBikes']
iniDocks = station_status[str(day)][str(sta)]['availableDocks']
if int(iniBikes) < 5:
dropStation.append(str(sta))
balanceStas.append(str(sta))
if int(iniDocks) < 5:
pickStation.append(str(sta))
balanceStas.append(str(sta))
# balanceSta = startStation
starttime = 0
v = 7
while True:
if starttime > 80:
break
info = {}
diss = []
minDis = 10
pickNum = 0
dropNum = 0
print('balanceStas' + str(balanceStas))
if not balanceStas:
break
mutex.acquire()
balanceStas = [s for s in balanceStas if s in availStations]
if not balanceStas:
mutex.release()
break
for sta in balanceStas:
dis = manhattan_distance(position[str(startStation)][0], position[str(startStation)][1], position[sta][0],
position[sta][1])
if dis < minDis:
minDis = dis
balanceSta = sta
startStation = balanceSta
availStations.remove(str(balanceSta))
mutex.release()
rateDict = rateData[str(balanceSta)]
month = str(mon) if int(mon) >= 10 else '0' + str(mon)
day1 = str(day) if int(day) >= 10 else '0' + str(day)
date = '2017-' + str(month) + '-' + str(day1)
date = datetime.datetime.strptime(date, '%Y-%m-%d')
if date.weekday() < 5:
rental_rate_0 = rateDict['rental_rate_0']
return_rate_0 = rateDict['return_rate_0']
elif date.weekday() < 7:
rental_rate_0 = rateDict['rental_rate_1']
return_rate_0 = rateDict['return_rate_1']
totalDocks = totalDocksDict[str(balanceSta)]
t_travel = dis * 1000 / v
t_min = math.ceil(t_travel / 60)
t = starttime + t_min
t_interval = t / 5
availableBikes = station_status[str(day)][str(balanceSta)]['availableBikes']
availableDocks = station_status[str(day)][str(balanceSta)]['availableDocks']
rentalLost = 0
returnLost = 0
for i in np.arange(0, int(t_interval)): # real-time bikes docks
deltaNum = 0
deltaNum = rental_rate_0[i] - return_rate_0[i]
if float(availableBikes) < 1.0 and deltaNum > 0:
rentalLost += deltaNum
pass # rental_lost += deltNum
if float(availableDocks) < 1.0 and deltaNum < 0:
returnLost += abs(deltaNum)
pass # return_lost += deltNum
if deltaNum > 0:
availableBikes = float(availableBikes) - deltaNum
if availableBikes < 0:
availableBikes = 0
availableDocks = float(availableDocks) + deltaNum
if availableDocks > float(totalDocks):
availableBikes = 0
availableDocks = float(totalDocks)
else:
availableDocks = float(availableDocks) - abs(deltaNum)
if availableDocks < 0:
availableDocks = 0
availableBikes = float(availableBikes) + abs(deltaNum)
if availableBikes > float(totalDocks):
availableDocks = 0
availableBikes = float(totalDocks)
realBikes = availableBikes
realDocks = availableDocks
mutex.acquire()
beforeBalancedTotalLost.value = beforeBalancedTotalLost.value + returnLost + rentalLost
mutex.release()
noise = abs(np.random.normal(loc=0.0, scale=2))
if balanceSta in dropStation:
if float(realBikes) >= 12:
endtime = t + noise
dropNum = 0
info['realbikes'] = realBikes
else:
dropNum = 12 - int(realBikes)
endtime = t + dropNum * 0.3 + noise
info['realbikes'] = 12
if balanceSta in pickStation:
if float(realDocks) >= 12:
endtime = t + noise
pickNum = 0
info['realbikes'] = float(totalDocks) - float(realDocks)
else:
pickNum = 12 - int(realDocks)
endtime = t + pickNum * 0.3 + noise
info['realbikes'] = float(totalDocks) - 12
info['time'] = endtime
realtimeBikes[str(balanceSta)] = info
staLost = {}
starttime = endtime
print('drop:' + str(dropNum))
print('pick:' + str(pickNum))
print('distance:' + str(minDis))
print('starttime:' + str(starttime))
print(realtimeBikes)
balanceStas = []
pickStation = []
dropStation = []
for sta in availStations:
t_interval = starttime / 5
iniBikes = station_status[str(day)][str(sta)]['availableBikes']
iniDocks = station_status[str(day)][str(sta)]['availableDocks']
totalDocks = totalDocksDict[str(sta)]  # use this station's capacity, not the last balanced one
availableBikes = iniBikes
availableDocks = iniDocks
rateDict = rateData[str(sta)]
month = str(mon) if int(mon) >= 10 else '0' + str(mon)
day1 = str(day) if int(day) >= 10 else '0' + str(day)
date = '2017-' + str(month) + '-' + str(day1)
date = datetime.datetime.strptime(date, '%Y-%m-%d')
if date.weekday() < 5:
rental_rate_0 = rateDict['rental_rate_0']
return_rate_0 = rateDict['return_rate_0']
elif date.weekday() < 7:
rental_rate_0 = rateDict['rental_rate_1']
return_rate_0 = rateDict['return_rate_1']
for i in np.arange(0, int(t_interval)): # real-time bikes docks
deltaNum = rental_rate_0[i] - return_rate_0[i]
if deltaNum > 0:
availableBikes = float(availableBikes) - deltaNum
if availableBikes < 0:
availableBikes = 0
availableDocks = float(availableDocks) + deltaNum
if availableDocks > float(totalDocks):
availableBikes = 0
availableDocks = float(totalDocks)
else:
availableDocks = float(availableDocks) - abs(deltaNum)
if availableDocks < 0:
availableDocks = 0
availableBikes = float(availableBikes) + abs(deltaNum)
if availableBikes > float(totalDocks):
availableDocks = 0
availableBikes = float(totalDocks)
realBikes = availableBikes
realDocks = availableDocks
if float(realBikes) < 5:
dropStation.append(str(sta))
balanceStas.append(str(sta))
if float(realDocks) < 5:
pickStation.append(str(sta))
balanceStas.append(str(sta))
# getNearestNeighborLost(realtimeBikes,rateData,totalDocksDict,lostNums,station_status)
# print(dropStation)
# print(pickStation)
# print(diss)
def getNearestNeighborLost(realtimeBikes, day):
rateData = getRateData()
station_status, totalDocksDict = getStation_status()
# mon,day,hour = getMonthDayAndHour()
mon = 8
hour = 7
position, stations_id = getPositionAndStations_id()
balancedSta = []
totalLost = 0
lostNums = {}
for sta, values in realtimeBikes.items():
balancedSta.append(sta)
rentalLost = 0
returnLost = 0
time = values['time']
realbikes = values['realbikes']
time_interval = time / 5
rateDict = rateData[str(sta)]
month = str(mon) if int(mon) >= 10 else '0' + str(mon)
day1 = str(day) if int(day) >= 10 else '0' + str(day)
date = '2017-' + str(month) + '-' + str(day1)
date = datetime.datetime.strptime(date, '%Y-%m-%d')
if date.weekday() < 5:
rental_rate_0 = rateDict['rental_rate_0']
return_rate_0 = rateDict['return_rate_0']
elif date.weekday() < 7:
rental_rate_0 = rateDict['rental_rate_1']
return_rate_0 = rateDict['return_rate_1']
totalDocks = int(totalDocksDict[str(sta)])
availableBikes = realbikes
availableDocks = float(totalDocks) - float(realbikes)
for i in np.arange(int(time_interval), 48): # real-time bikes docks
deltaNum = 0
deltaNum = rental_rate_0[i] - return_rate_0[i]
if deltaNum > 0 and (deltaNum - float(availableBikes)) > 0:
rentalLost += (deltaNum - float(availableBikes))
pass # rental_lost += deltNum
if deltaNum < 0 and (abs(deltaNum) - float(availableDocks)) > 0:
returnLost += (abs(deltaNum) - float(availableDocks))
pass # return_lost += deltNum
if deltaNum > 0:
availableBikes = float(availableBikes) - deltaNum
if availableBikes < 0:
availableBikes = 0
availableDocks = float(availableDocks) + deltaNum
if availableDocks > float(totalDocks):
availableBikes = 0
availableDocks = float(totalDocks)
else:
availableDocks = float(availableDocks) - abs(deltaNum)
if availableDocks < 0:
availableDocks = 0
availableBikes = float(availableBikes) + abs(deltaNum)
if availableBikes > float(totalDocks):
availableDocks = 0
availableBikes = float(totalDocks)
lostNums[str(sta)] = rentalLost + returnLost
totalLost += lostNums[str(sta)]
leftStations = [sta for sta in stations_id if sta not in balancedSta]
for sta in leftStations:
rateDict = rateData[str(sta)]
month = str(mon) if int(mon) >= 10 else '0' + str(mon)
day1 = str(day) if int(day) >= 10 else '0' + str(day)
date = '2017-' + str(month) + '-' + str(day1)
date = datetime.datetime.strptime(date, '%Y-%m-%d')
if date.weekday() < 5:
rental_rate_0 = rateDict['rental_rate_0']
return_rate_0 = rateDict['return_rate_0']
elif date.weekday() < 7:
rental_rate_0 = rateDict['rental_rate_1']
return_rate_0 = rateDict['return_rate_1']
iniBikes = station_status[str(day)][str(sta)]['availableBikes']
iniDocks = station_status[str(day)][str(sta)]['availableDocks']
totalDocks = totalDocksDict[str(sta)]
availableBikes = iniBikes
availableDocks = iniDocks
rentalLost = 0
returnLost = 0
for i in np.arange(0, 48): # real-time bikes docks
deltaNum = 0
deltaNum = rental_rate_0[i] - return_rate_0[i]
if deltaNum > 0 and (deltaNum - float(availableBikes)) > 0:
rentalLost += (deltaNum - float(availableBikes))
pass # rental_lost += deltNum
if deltaNum < 0 and (abs(deltaNum) - float(availableDocks)) > 0:
returnLost += (abs(deltaNum) - float(availableDocks))
pass # return_lost += deltNum
if deltaNum > 0:
availableBikes = float(availableBikes) - deltaNum
if availableBikes < 0:
availableBikes = 0
availableDocks = float(availableDocks) + deltaNum
if availableDocks > float(totalDocks):
availableBikes = 0
availableDocks = float(totalDocks)
else:
availableDocks = float(availableDocks) - abs(deltaNum)
if availableDocks < 0:
availableDocks = 0
availableBikes = float(availableBikes) + abs(deltaNum)
if availableBikes > float(totalDocks):
availableDocks = 0
availableBikes = float(totalDocks)
lostNums[str(sta)] = rentalLost + returnLost
totalLost += lostNums[str(sta)]
lostNums['totalLost'] = totalLost
print(totalLost)
return lostNums
def nearestNeihborReposition():
experiment_path = './bike_sharing_data/mydata/nearestNeihborReposition'
if not os.path.exists(experiment_path):
os.makedirs(experiment_path)
# month, day, hour = getMonthDayAndHour()
mon = 8
hour = 7
for day in range(1, 32):
realtimeBikes = multiprocessing.Manager().dict()
position, stations_id = getPositionAndStations_id()
availStations = multiprocessing.Manager().list(stations_id)
beforeBalancedTotalLost = multiprocessing.Value("d", 0)
startStation1 = '237'
startStation2 = '369'
lostNums = {}
mutex = multiprocessing.Lock()
p1 = multiprocessing.Process(target=nearestNeihborRepositionStart, args=(
startStation1, availStations, mutex, realtimeBikes, day, beforeBalancedTotalLost))
p2 = multiprocessing.Process(target=nearestNeihborRepositionStart, args=(
startStation2, availStations, mutex, realtimeBikes, day, beforeBalancedTotalLost))
p1.start()
p2.start()
p1.join(9)
p2.join(9)
print(realtimeBikes)
lostNums = getNearestNeighborLost(realtimeBikes, day)
lostNums['totalLost'] += beforeBalancedTotalLost.value
print(lostNums)
filename = 'nearestNeihborRepositionLost_month_' + str(mon) + '_day_' + str(day) + '_hour_' + str(
78910) + '.json'
with open(os.path.join(experiment_path, filename), 'w') as f:
json.dump(lostNums, f)
print('day' + str(day) + 'finished!')
def nearestNeihborBaseServiceLevelRepositionStart(startStation, availStations, mutex, realtimeBikes,
visitedPath, visitedDis, balanceNum, beforeBalancedTotalLost, day):
position, stations_id = getPositionAndStations_id()
rateData = getRateData()
station_status, totalDocksDict = getStation_status()
# mon, day, hour = getMonthDayAndHour()
mon = 8
hour = 7
dropStation = []
pickStation = []
balanceStas = []
for sta in stations_id:
iniBikes = station_status[str(day)][str(sta)]['availableBikes']
iniDocks = station_status[str(day)][str(sta)]['availableDocks']
totalDocks = totalDocksDict[str(sta)]
serviceLevel = []
rateDict = rateData[str(sta)]
month = str(mon) if int(mon) >= 10 else '0' + str(mon)
day1 = str(day) if int(day) >= 10 else '0' + str(day)
date = '2017-' + str(month) + '-' + str(day1)
date = datetime.datetime.strptime(date, '%Y-%m-%d')
if date.weekday() < 5:
rental_rate_0 = rateDict['rental_rate_0']
return_rate_0 = rateDict['return_rate_0']
elif date.weekday() < 7:
rental_rate_0 = rateDict['rental_rate_1']
return_rate_0 = rateDict['return_rate_1']
for docks in range(1, int(totalDocks)):
availableBikes = int(totalDocks) - docks
availableDocks = docks
flag = 0
for j in np.arange(0, 19):
deltaNum = 0
if j >= 48:
break
else:
deltaNum = rental_rate_0[j] - return_rate_0[j]
if deltaNum > 0:
availableBikes = float(availableBikes) - deltaNum
if availableBikes <= 1:
flag = 1
break
availableDocks = float(availableDocks) + deltaNum
if availableDocks >= float(totalDocks) - 1:
flag = 1
break
else:
availableDocks = float(availableDocks) - abs(deltaNum)
if availableDocks <= 1:
flag = 1
break
availableBikes = float(availableBikes) + abs(deltaNum)
if availableBikes >= float(totalDocks) - 1:
flag = 1
break
if flag == 0:
serviceLevel.append(int(totalDocks) - int(docks))
if not serviceLevel:
pickStation.append(str(sta))
balanceStas.append(str(sta))
else:
if float(iniBikes) < min(serviceLevel):
dropStation.append(str(sta))
balanceStas.append(str(sta))
if float(iniDocks) > max(serviceLevel):
pickStation.append(str(sta))
balanceStas.append(str(sta))
# balanceSta = startStation
visitedPath.append(startStation)
starttime = 0
v = 7
while True:
info = {}
minDis = 10
pickNum = 0
dropNum = 0
print('balanceStas' + str(balanceStas))
if not balanceStas:
break
mutex.acquire()
balanceStas = [s for s in balanceStas if s in availStations]
if not balanceStas:
mutex.release()
break
for sta in balanceStas:
dis = manhattan_distance(position[str(startStation)][0], position[str(startStation)][1], position[sta][0],
position[sta][1])
if dis < minDis:
minDis = dis
balanceSta = sta
startStation = balanceSta
availStations.remove(str(balanceSta))
mutex.release()
rateDict = rateData[str(balanceSta)]
month = str(mon) if int(mon) >= 10 else '0' + str(mon)
day1 = str(day) if int(day) >= 10 else '0' + str(day)
date = '2017-' + str(month) + '-' + str(day1)
date = datetime.datetime.strptime(date, '%Y-%m-%d')
if date.weekday() < 5:
rental_rate_0 = rateDict['rental_rate_0']
return_rate_0 = rateDict['return_rate_0']
elif date.weekday() < 7:
rental_rate_0 = rateDict['rental_rate_1']
return_rate_0 = rateDict['return_rate_1']
totalDocks = totalDocksDict[str(balanceSta)]
t_travel = dis * 1000 / v
t_min = math.ceil(t_travel / 60)
t = starttime + t_min
t_interval = t / 5
availableBikes = station_status[str(day)][str(balanceSta)]['availableBikes']
availableDocks = station_status[str(day)][str(balanceSta)]['availableDocks']
rentalLost = 0
returnLost = 0
for i in np.arange(0, int(t_interval)): # real-time bikes docks
deltaNum = rental_rate_0[i] - return_rate_0[i]
if float(availableBikes) < 1.0 and deltaNum > 0:
rentalLost += deltaNum
pass # rental_lost += deltNum
if float(availableDocks) < 1.0 and deltaNum < 0:
returnLost += abs(deltaNum)
pass # return_lost += deltNum
if deltaNum > 0:
availableBikes = float(availableBikes) - deltaNum
if availableBikes < 0:
availableBikes = 0
availableDocks = float(availableDocks) + deltaNum
if availableDocks > float(totalDocks):
availableBikes = 0
availableDocks = float(totalDocks)
else:
availableDocks = float(availableDocks) - abs(deltaNum)
if availableDocks < 0:
availableDocks = 0
availableBikes = float(availableBikes) + abs(deltaNum)
if availableBikes > float(totalDocks):
availableDocks = 0
availableBikes = float(totalDocks)
mutex.acquire()
beforeBalancedTotalLost.value = beforeBalancedTotalLost.value + rentalLost + returnLost
mutex.release()
realBikes = availableBikes
realDocks = availableDocks
totalDocks = totalDocksDict[str(balanceSta)]
serviceLevel = []
for docks in range(1, int(totalDocks)):
availableBikes = int(totalDocks) - docks
availableDocks = docks
flag = 0
for j in np.arange(0, 19):
#!/usr/bin/env python
# ------------------------------------------------------------------------------------------------------%
# Created by "<NAME>" at 18:22, 06/04/2020 %
# %
# Email: <EMAIL> %
# Homepage: https://www.researchgate.net/profile/Thieu_Nguyen6 %
# Github: https://github.com/thieunguyen5991 %
# -------------------------------------------------------------------------------------------------------%
from sklearn.metrics import mean_squared_error
from numpy import reshape, add, matmul
from time import time
from model.root.root_base import RootBase
import utils.MathUtil as my_math
class RootHybridMlnn(RootBase):
"""
This is the root class of all hybrid multi-layer neural networks (meta-heuristic + MLNN).
"""
def __init__(self, root_base_paras=None, root_hybrid_paras=None):
RootBase.__init__(self, root_base_paras)
self.domain_range = root_hybrid_paras["domain_range"]
self.activations = root_hybrid_paras["activations"]
if root_hybrid_paras["hidden_size"][1]:
self.hidden_size = root_hybrid_paras["hidden_size"][0]
else:
self.hidden_size = 2 * root_base_paras["sliding"] * root_base_paras["feature_size"] + 1
self.problem_size, self.epoch = None, None
def _setting__(self):
## New discovery
self._activation1__ = getattr(my_math, self.activations[0])
self._activation2__ = getattr(my_math, self.activations[1])
self.input_size, self.output_size = self.X_train.shape[1], self.y_train.shape[1]
self.w1_size = self.input_size * self.hidden_size
self.b1_size = self.hidden_size
self.w2_size = self.hidden_size * self.output_size
self.b2_size = self.output_size
self.problem_size = self.w1_size + self.b1_size + self.w2_size + self.b2_size
def _forecasting__(self):
hidd = self._activation1__(add(matmul(self.X_test, self.model["w1"]), self.model["b1"]))
y_pred = self._activation2__(add(matmul(hidd, self.model["w2"]), self.model["b2"]))
real_inverse = self.scaler.inverse_transform(self.y_test)
pred_inverse = self.scaler.inverse_transform(reshape(y_pred, self.y_test.shape))
return real_inverse, pred_inverse, self.y_test, y_pred
def _running__(self):
self.time_system = time()
self._preprocessing_2d__()
self._setting__()
self.time_total_train = time()
self._training__()
self.model = self._get_model__(self.solution)
self.time_total_train = round(time() - self.time_total_train, 4)
self.time_epoch = round(self.time_total_train / self.epoch, 4)
self.time_predict = time()
y_true_unscaled, y_pred_unscaled, y_true_scaled, y_pred_scaled = self._forecasting__()
self.time_predict = round(time() - self.time_predict, 8)
self.time_system = round(time() - self.time_system, 4)
self._save_results__(y_true_unscaled, y_pred_unscaled, y_true_scaled, y_pred_scaled, self.loss_train, self.n_runs)
## Helper functions
def _get_model__(self, individual=None):
w1 = reshape(individual[:self.w1_size], (self.input_size, self.hidden_size))
b1 = reshape(individual[self.w1_size:self.w1_size + self.b1_size], (-1, self.hidden_size))
w2 = reshape(individual[self.w1_size + self.b1_size: self.w1_size + self.b1_size + self.w2_size], (self.hidden_size, self.output_size))
b2 = reshape(individual[self.w1_size + self.b1_size + self.w2_size:], (-1, self.output_size))
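# --- Illustrative sketch (not part of the original class) ---------------------
# The hybrid models above encode all MLP weights as one flat vector so that a
# metaheuristic can optimise them directly. The helper below shows, under
# assumed sizes and activations, how such a flat candidate is unpacked into
# (w1, b1, w2, b2) and used for a forward pass; all names and values here are
# made up for demonstration only.
def _demo_decode_flat_solution():
    import numpy as np
    input_size, hidden_size, output_size = 4, 3, 1
    sizes = [input_size * hidden_size, hidden_size, hidden_size * output_size, output_size]
    solution = np.random.uniform(-1, 1, sum(sizes))   # flat candidate produced by the optimizer
    w1 = solution[:sizes[0]].reshape(input_size, hidden_size)
    b1 = solution[sizes[0]:sizes[0] + sizes[1]].reshape(1, hidden_size)
    w2 = solution[sizes[0] + sizes[1]:sum(sizes[:3])].reshape(hidden_size, output_size)
    b2 = solution[sum(sizes[:3]):].reshape(1, output_size)
    X = np.random.rand(5, input_size)                 # dummy input batch
    hidden = np.tanh(X @ w1 + b1)                     # assumed first activation
    y_pred = hidden @ w2 + b2                         # assumed linear output layer
    return y_pred.shape                               # -> (5, 1)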
r"""@package motsfinder.numutils
Miscellaneous numerical utilities and helpers.
@b Examples
```
>>> binomial(5, 3)
10
```
"""
from contextlib import contextmanager
import warnings
from scipy.linalg import LinAlgWarning
from scipy.integrate import fixed_quad, IntegrationWarning
from scipy.interpolate import interp1d
from scipy import optimize
import numpy as np
import sympy as sp
__all__ = [
"nan_mat",
"clip",
"linear_interp",
"binomial",
"binomial_coeffs",
"inf_norm1d",
"raise_all_warnings",
"try_quad_tolerances",
"bracket_root",
"find_root",
"find_all_roots",
"interpolate_root",
"extrapolate_root",
"IntegrationResult",
"IntegrationResults",
"NumericalError",
]
_golden = 1.61803398874989 # (1+sqrt(5))/2, the "golden ratio"
class NumericalError(Exception):
r"""Exception raised for problems with numerical evaluation.
For example, a tensor field class based on numerical data may raise this
(or a subclass) if evaluation outside the numerical domain is requested.
"""
pass
def nan_mat(shape):
r"""Create a matrix of NaN values."""
T = np.empty(shape)
T[:] = np.nan
return T
def clip(x, x_min, x_max):
r"""Confine a value to an interval."""
return max(x_min, min(x_max, x))
def linear_interp(x, x1, x2, y1, y2, extrapolate=True):
r"""Linearly interpolate between two numbers.
@param x
Abscissa to interpolate to.
@param x1,x2
Abscissas of the two data points to interpolate between.
@param y1,y2
Ordinates of the two data points to interpolate between.
"""
if not extrapolate:
x = clip(x, x1, x2)
return (y2-y1) * (x-x1)/(x2-x1) + y1
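# Illustrative usage of linear_interp (values are made up for demonstration):
# interpolating between the points (0, 0) and (10, 5), with and without
# extrapolation beyond x2.
def _demo_linear_interp():
    assert linear_interp(4, 0, 10, 0, 5) == 2.0
    assert linear_interp(12, 0, 10, 0, 5) == 6.0                      # extrapolates past x2
    assert linear_interp(12, 0, 10, 0, 5, extrapolate=False) == 5.0   # clipped to x2 first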
def binomial(n, k):
r"""Compute the binomial coefficient n choose k."""
return int(sp.binomial(n, k))
def binomial_coeffs(n):
r"""Compute all binomial coefficients n choose k for 0 <= k <= n.
The result is a list of integers
\f[
{n \choose 0}, {n \choose 1}, \ldots, {n \choose n}.
\f]
"""
return _BinomialCoeffs.all_coeffs(n)
class _BinomialCoeffs():
r"""Helper class to simply cache the coefficient lists.
This is used by binomial_coeffs() to re-use once computed lists.
"""
__binomial_coeffs = []
@classmethod
def all_coeffs(cls, n):
r"""Generate and cache the results for binomial_coeffs()."""
while len(cls.__binomial_coeffs) <= n:
nn = len(cls.__binomial_coeffs)
coeffs = [binomial(nn, k) for k in range(nn + 1)]
cls.__binomial_coeffs.append(coeffs)
return cls.__binomial_coeffs[n]
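# Illustrative check (not part of the original module): binomial() and
# binomial_coeffs() reproduce rows of Pascal's triangle; repeated calls reuse
# the cached coefficient lists.
def _demo_binomial():
    assert binomial(5, 3) == 10
    assert binomial_coeffs(4) == [1, 4, 6, 4, 1]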
def inf_norm1d(f1, f2=None, domain=None, Ns=50, xatol=1e-12):
r"""Compute the L^inf norm of f1-f2.
The `scipy.optimize.brute` method is used to find a candidate close to the
global maximum difference. This is then taken as starting point for a
search for the local maximum difference. Setting the number of samples
`Ns` high enough should lead to the global maximum difference being found.
@param f1
First function. May also be a `NumericExpression`.
@param f2
Second function. May also be a `NumericExpression`. If not given,
simply finds the maximum absolute value of `f1`.
@param domain
Domain ``[a, b]`` inside which to search for the maximum difference.
By default, `f1` is queried for the domain.
@param Ns
Number of initial samples for the `scipy.optimize.brute` call. In case
``Ns <= 2``, the `brute()` step is skipped an a local extremum is
found inside the given `domain`. Default is `50`.
@return A pair ``(x, delta)``, where `x` is the point at which the maximum
difference was found and `delta` is the difference at that point.
"""
if domain is None:
domain = f1.domain
if not callable(f1):
f1 = f1.evaluator()
if f2 is None:
f2 = lambda x: 0.0
if not callable(f2):
f2 = f2.evaluator()
domain = list(map(float, domain))
a, b = domain
func = lambda x: (
-float(abs(f1(float(x))-f2(float(x)))) if a <= x <= b else 0.
)
if Ns <= 2:
bounds = [a, b]
else:
x0 = optimize.brute(func, [domain], Ns=Ns, finish=None)
step = (b-a)/(Ns-1)
bounds = [x0-step, x0+step]
res = optimize.minimize_scalar(
func, bounds=bounds, method='bounded',
options=dict(xatol=xatol),
)
return res.x, -res.fun
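# Illustrative usage (assumed functions and domain): locate the maximum
# absolute difference between two callables on [0, pi]. The brute-force stage
# samples Ns points, then a bounded scalar minimisation refines the candidate.
def _demo_inf_norm1d():
    import numpy as np
    f = np.sin
    g = lambda x: x                                   # small-angle approximation of sin
    x_max, delta = inf_norm1d(f, g, domain=[0, np.pi], Ns=100)
    return x_max, delta                               # max of |sin(x) - x| is attained at x = pi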
def try_quad_tolerances(func, args=(), kwargs=None, tol_min=1e-11,
tol_max=1e-2, tol_steps=None, verbose=False):
r"""Try to run a given function with increasing tolerance until integration succeeds.
@param func
Callable performing the integration. This should issue or raise an
`IntegrationWarning` for too low tolerances. It is called as
``func(tol, *args, **kwargs)``.
@param args
Optional additional positional arguments for `func`.
@param kwargs
Optional additional keyword arguments for `func`.
@param tol_min
Minimal tolerance to try first. Default is `1e-11`.
@param tol_max
Maximum tolerance to allow. If `func` fails for this tolerance, no
more trials are done and the `IntegrationWarning` warning is raised.
Default is `1e-2`.
@param tol_steps
How many steps to try when going from `tol_min` to `tol_max`. Should
be at least two. Default is to go roughly through each order of
magnitude.
@param verbose
If `True`, print the tolerances as they are tried out. Default is
`False`.
"""
if tol_min > tol_max:
raise ValueError("minimal tolerance greater than maximum tolerance")
tol_min = np.log10(tol_min)
tol_max = np.log10(tol_max)
if tol_steps is None:
tol_steps = max(2, int(round(tol_max-tol_min) + 1))
tols = np.logspace(tol_min, tol_max, tol_steps)
with raise_all_warnings():
for tol in tols:
if verbose:
print("Trying with tol=%s" % tol)
try:
return func(tol, *args, **(kwargs or dict()))
except IntegrationWarning:
if verbose:
print("... failed with tol=%s" % tol)
if tol == tols[-1]:
raise
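# Illustrative usage (assumed integrand): wrap a quad() call so that the
# tolerance is retried, starting strict and relaxing until the integration
# succeeds without an IntegrationWarning.
def _demo_try_quad_tolerances():
    import numpy as np
    from scipy.integrate import quad
    def _integrate(tol):
        value, _err = quad(lambda x: np.exp(-x**2), 0.0, 10.0,
                           epsabs=tol, epsrel=tol)
        return value
    return try_quad_tolerances(_integrate, tol_min=1e-12, tol_max=1e-6)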
class IntegrationResults():
r"""Represents a sequence of multiple integration results.
This class presents convenience methods to sum individual results and
errors and check whether all results were computed without any warnings.
@b Examples
```
#res = ... # obtained via some means
res[0].value # value of first result
res.sum() # sum of all values
res.sum(0, 2) # sum first and third result
res.sum(full_output=True).error # access errors, infos and warnings
print("\n".join(str(r.error) for r in res)) # print individual errors
```
"""
def __init__(self, *results, info=None, sum_makes_sense=True):
r"""Create a results object from given results.
Positional arguments can be either the ``full_output=True`` results of
`scipy.integrate.quad()` calls or IntegrationResult objects.
@param *results
Results to collect.
@param info
Arbitrary info object to be stored with the results.
@param sum_makes_sense
Whether the sum of all results is a meaningful number. This
controls if the total is printed in case of string conversion.
"""
def _to_result(res):
if not isinstance(res, IntegrationResult):
res = IntegrationResult(*res)
return res
self._results = [_to_result(r) for r in results]
## Additional info supplied to the constructor.
self.info = info
## Whether the sum of all results is a meaningful number.
self.sum_makes_sense = sum_makes_sense
@property
def value(self):
r"""Total value (sum of all results)."""
return self.sum()
@property
def error(self):
r"""Total error (sum of all errors)."""
return self.sum(full_output=True).error
def __len__(self):
r"""Number of stored results."""
return len(self._results)
def __getitem__(self, key):
r"""Access individual results."""
return self._results[key]
def __iter__(self):
r"""Iterate through individual results."""
return iter(self._results)
def sum(self, *indices, full_output=False):
r"""Combine results (sum values and optionally errors)."""
if indices:
results = [self._results[i] for i in indices]
else:
results = self._results
result = sum([r.value for r in results])
if not full_output:
return result
err = sum([r.error for r in results])
infos = [r.info for r in results]
if all([r.is_ok() for r in results]):
warning = None
else:
warning = "\n".join([r.warning for r in results if r.warning])
return IntegrationResult(result, err, infos, warning)
def __repr__(self):
result = "\n".join("[%s] %s" % (i, r) for i, r in enumerate(self._results))
if self.sum_makes_sense and len(self._results) > 1:
total = self.sum(full_output=True)
result += "\nTotal: %s +- %s" % (total.value, total.error)
if self.info:
result += "\nInfo:\n%s" % (self.info,)
return result
def all_ok(self):
r"""Return whether none of the results produced a warning."""
return all([r.is_ok() for r in self._results])
class IntegrationResult():
r"""Wrapper of the `full_output` of a `quad()` call."""
def __init__(self, value, error, info, warning=None, mult=None):
r"""Create a result object from the output of `quad()`.
@param value
Main result, i.e. the computed value.
@param error
The estimate of the error of the value.
@param info
Integration info object.
@param warning
Any warnings produced during integration.
@param mult
Factor by which to multiply the result and error.
"""
if mult is not None:
value = mult * value
error = mult * error
## Computed value.
self.value = value
## Estimated error.
self.error = error
## Info object of the integration `quad()` call.
self.info = info
## Warnings produced while integrating (`None` in case of no warnings).
self.warning = warning
def is_ok(self):
r"""Return whether the result is OK and produced no warning."""
return self.warning is None
def __repr__(self):
txt = "%s +- %s" % (self.value, self.error)
if self.warning is not None:
w = str(self.warning).split("\n")
w = "\n ".join(w)
txt += "\nWarning: %s" % w
return txt
def inverse_2x2_matrix_derivative(A, dA=None, ddA=None, diff=1):
r"""Compute derivatives of the inverse of a 2x2 matrix.
Given an invertible 2x2 matrix `A` with elements \f$a_{ij}\f$ and any
needed derivatives w.r.t. two different variables, this returns the
derivatives of the inverse of `A`.
@param A
The original matrix to compute the inverse of.
@param dA
Nested list or NumPy array with three indices where `dA[i][j][k]`
contains the value of \f$\partial_i a_{jk}\f$.
@param ddA
Nested list or NumPy array with four indices where `dA[i][j][k][l]`
contains the value of \f$\partial_i \partial_j a_{kl}\f$.
@param diff
Derivative order of the inverse matrix. If ``diff==0``, the inverse of
`A` is returned and `dA` and `ddA` are not needed. `dA` is needed if
``diff > 0`` and `ddA` for ``diff > 1``. Default is `1`.
@return NumPy array with two, three, or four axes depending on `diff`. The
meaning of the indices such that `result[i1,i2,...,k,l]` contains the
value \f$\partial_{i_1} \partial_{i_2} \ldots (B)_{kl}\f$, where `B`
is the inverse \f$B = A^{-1}\f$.
@b Notes
Consider the matrix
\f[
A = \left(\begin{array}{@{}cc@{}}
a & b\\
c & d
\end{array}\right).
\f]
The inverse is then given by
\f[
B := A^{-1} = \frac{1}{\det A} \left(\begin{array}{@{}cc@{}}
b & -b\\
-c & a
\end{array}\right),
\f]
where \f$\det A = ad-bc\f$.
The derivatives are easily computed using the chain and Leibniz' rule,
which result in (using the shorthand notation \f$a_i := \partial_i a\f$
and \f$a_{ij} := \partial_i \partial_j a\f$)
\f[
\partial_i B =
- \frac{\partial_i \det A}{(\det A)^2}
\left(\begin{array}{@{}cc@{}}
d & -b\\
-c & a
\end{array}\right)
+ \frac{1}{\det A}
\left(\begin{array}{@{}cc@{}}
d_i & -b_i\\
-c_i & a_i
\end{array}\right)
\f]
and
\f{eqnarray*}{
\partial_i \partial_j B &=&
\left(
-\frac{\partial_i\partial_j\det A}{(\det A)^2}
+ 2 \frac{(\partial_i\det A)(\partial_j\det A)}{(\det A)^3}
\right)
\left(\begin{array}{@{}cc@{}}
d & -b\\
-c & a
\end{array}\right)
\\&&
- \frac{\partial_i\det A}{(\det A)^2}
\left(\begin{array}{@{}cc@{}}
d_j & -b_j\\
-c_j & a_j
\end{array}\right)
- \frac{\partial_j\det A}{(\det A)^2}
\left(\begin{array}{@{}cc@{}}
d_i & -b_i\\
-c_i & a_i
\end{array}\right)
\\&&
+ \frac{1}{\det A}
\left(\begin{array}{@{}cc@{}}
d_{ij} & -b_{ij}\\
-c_{ij} & a_{ij}
\end{array}\right),
\f}
where
\f{eqnarray*}{
\partial_i \det A &=&
a_i d + a d_i - b_i c - b c_i,
\\
\partial_i \partial_j \det A &=&
a_{ij} d + a_i d_j + a_j d_i + a d_{ij}
- b_{ij} c - b_i c_j - b_j c_i - b c_{ij}.
\f}
"""
if diff not in (0, 1, 2):
raise NotImplementedError
ra = range(2)
A = np.asarray(A)
dA = np.asarray(dA)
a, b, c, d = A.flatten()
det = a*d - b*c
B = np.array([[d, -b], [-c, a]])
if diff == 0:
return 1/det * B
# dA has axes [partial_i, row, col]
# we want e.g.: da = [partial_x a, partial_y a]
da, db, dc, dd = [dA[:,i,j] for i in ra for j in ra]
ddet = np.array([da[i]*d + a*dd[i] - db[i]*c - b*dc[i] for i in ra])
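# Illustrative check (separate from the routine above, whose body is truncated
# here): for an invertible matrix the first-derivative formula in the docstring
# is equivalent to the standard identity d(A^{-1}) = -A^{-1} (dA) A^{-1}. The
# snippet below verifies that identity with a central finite difference for an
# assumed smooth 2x2 matrix field; all values are made up for demonstration.
def _demo_inverse_derivative_check():
    import numpy as np
    def A_of(x, y):                                   # assumed 2x2 matrix field
        return np.array([[2.0 + x, y], [x * y, 3.0 + y]])
    x0, y0, h = 0.3, 0.7, 1e-6
    inv = np.linalg.inv(A_of(x0, y0))
    dA_dx = (A_of(x0 + h, y0) - A_of(x0 - h, y0)) / (2 * h)
    dinv_num = (np.linalg.inv(A_of(x0 + h, y0)) - np.linalg.inv(A_of(x0 - h, y0))) / (2 * h)
    assert np.allclose(dinv_num, -inv @ dA_dx @ inv, atol=1e-5)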
# Copyright 2018-2020 Xanadu Quantum Technologies Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for the measure module"""
import pytest
import contextlib
import numpy as np
import pennylane as qml
from pennylane.qnodes import QuantumFunctionError
# Beta imports
from pennylane.beta.queuing import AnnotatedQueue, QueuingContext
from pennylane.beta.queuing.operation import mock_operations
from pennylane.beta.queuing.measure import (
expval,
var,
sample,
probs,
Expectation,
Sample,
Variance,
Probability,
MeasurementProcess
)
@pytest.fixture(autouse=True)
def patch_operator():
with contextlib.ExitStack() as stack:
for mock in mock_operations():
stack.enter_context(mock)
yield
@pytest.mark.parametrize(
"stat_func,return_type", [(expval, Expectation), (var, Variance), (sample, Sample)]
)
class TestBetaStatistics:
"""Tests for annotating the return types of the statistics functions"""
@pytest.mark.parametrize(
"op", [qml.PauliX, qml.PauliY, qml.PauliZ, qml.Hadamard, qml.Identity],
)
def test_annotating_obs_return_type(self, stat_func, return_type, op):
"""Test that the return_type related info is updated for a
measurement"""
with AnnotatedQueue() as q:
A = op(0)
stat_func(A)
assert q.queue[:-1] == [A]
meas_proc = q.queue[-1]
assert isinstance(meas_proc, MeasurementProcess)
assert meas_proc.return_type == return_type
assert q._get_info(A) == {"owner": meas_proc}
assert q._get_info(meas_proc) == {"owns": (A)}
def test_annotating_tensor_hermitian(self, stat_func, return_type):
"""Test that the return_type related info is updated for a measurement
when called for an Hermitian observable"""
mx = np.array([[1, 0], [0, 1]])
with AnnotatedQueue() as q:
Herm = qml.Hermitian(mx, wires=[1])
stat_func(Herm)
assert q.queue[:-1] == [Herm]
meas_proc = q.queue[-1]
assert isinstance(meas_proc, MeasurementProcess)
assert meas_proc.return_type == return_type
assert q._get_info(Herm) == {"owner": meas_proc}
assert q._get_info(meas_proc) == {"owns": (Herm)}
@pytest.mark.parametrize(
"op1,op2",
[
(qml.PauliY, qml.PauliX),
(qml.Hadamard, qml.Hadamard),
(qml.PauliY, qml.Identity),
(qml.Identity, qml.Identity),
],
)
def test_annotating_tensor_return_type(self, op1, op2, stat_func, return_type):
"""Test that the return_type related info is updated for a measurement
when called for an Tensor observable"""
with AnnotatedQueue() as q:
A = op1(0)
B = op2(1)
tensor_op = A @ B
stat_func(tensor_op)
assert q.queue[:-1] == [A, B, tensor_op]
meas_proc = q.queue[-1]
assert isinstance(meas_proc, MeasurementProcess)
assert meas_proc.return_type == return_type
assert q._get_info(A) == {"owner": tensor_op}
assert q._get_info(B) == {"owner": tensor_op}
assert q._get_info(tensor_op) == {"owns": (A,B), "owner": meas_proc}
@pytest.mark.parametrize(
"stat_func", [expval, var, sample]
)
class TestBetaStatisticsError:
"""Tests for errors arising for the beta statistics functions"""
def test_not_an_observable(self, stat_func):
"""Test that a QuantumFunctionError is raised if the provided
argument is not an observable"""
dev = qml.device("default.qubit", wires=2)
@qml.qnode(dev)
def circuit():
qml.RX(0.52, wires=0)
return stat_func(qml.CNOT(wires=[0, 1]))
with pytest.raises(QuantumFunctionError, match="CNOT is not an observable"):
res = circuit()
class TestBetaProbs:
"""Tests for annotating the return types of the probs function"""
@pytest.mark.parametrize("wires", [[0], [0, 1], [1, 0, 2]])
def test_annotating_probs(self, wires):
with AnnotatedQueue() as q:
probs(wires)
assert len(q.queue) == 1
meas_proc = q.queue[0]
assert isinstance(meas_proc, MeasurementProcess)
assert meas_proc.return_type == Probability
class TestProperties:
"""Test for the properties"""
def test_wires_match_observable(self):
"""Test that the wires of the measurement process
match an internal observable"""
obs = qml.Hermitian(np.diag([1, 2, 3]), wires=['a', 'b', 'c'])
m = MeasurementProcess(Expectation, obs=obs)
assert np.all(m.wires == obs.wires)
def test_eigvals_match_observable(self):
"""Test that the eigenvalues of the measurement process
match an internal observable"""
obs = qml.Hermitian(np.diag([1, 2, 3]), wires=[0, 1, 2])
m = MeasurementProcess(Expectation, obs=obs)
assert np.all(m.eigvals == np.array([1, 2, 3]))
# changing the observable data should be reflected
obs.data = [np.diag([5, 6, 7])]
assert np.all(m.eigvals == np.array([5, 6, 7]))
def test_error_obs_and_eigvals(self):
"""Test that providing both eigenvalues and an observable
results in an error"""
obs = qml.Hermitian(np.diag([1, 2, 3]), wires=[0, 1, 2])
with pytest.raises(ValueError, match="Cannot set the eigenvalues"):
MeasurementProcess(Expectation, obs=obs, eigvals=[0, 1])
def test_error_obs_and_wires(self):
"""Test that providing both wires and an observable
results in an error"""
obs = qml.Hermitian(np.diag([1, 2, 3]), wires=[0, 1, 2])
import numpy as np
import numpy.testing as npt
import nitime.timeseries as ts
import pytest
def test_get_time_unit():
number = 4
npt.assert_equal(ts.get_time_unit(number), None)
list_of_numbers = [4, 5, 6]
npt.assert_equal(ts.get_time_unit(list_of_numbers), None)
for tu in ['ps', 's', 'D']:
time_point = ts.TimeArray([4], time_unit=tu)
npt.assert_equal(ts.get_time_unit(time_point), tu)
list_of_time = [ts.TimeArray(4, time_unit=tu), ts.TimeArray(5, time_unit=tu)]
npt.assert_equal(ts.get_time_unit(list_of_time), tu)
# Go crazy, we don't mind:
list_of_lists = [[ts.TimeArray(4, time_unit=tu),
ts.TimeArray(5, time_unit=tu)],
[ts.TimeArray(4, time_unit=tu),
ts.TimeArray(5, time_unit=tu)]]
npt.assert_equal(ts.get_time_unit(list_of_lists), tu)
time_arr = ts.TimeArray([4, 5], time_unit=tu)
npt.assert_equal(ts.get_time_unit(time_arr), tu)
def test_TimeArray():
time1 = ts.TimeArray(list(range(100)), time_unit='ms')
time2 = time1 + time1
npt.assert_equal(time2.time_unit, 'ms')
time1 = ts.TimeArray(10 ** 6)
npt.assert_equal(time1.__repr__(), '1000000.0 s')
#TimeArray can't be more than 1-d:
with pytest.raises(ValueError) as e_info:
ts.TimeArray(np.zeros((2, 2)))
dt = ts.TimeArray(0.001, time_unit='s')
tt = ts.TimeArray([dt])
npt.assert_equal(dt, tt)
t1 = ts.TimeArray([0, 1, 2, 3])
t2 = ts.TimeArray([ts.TimeArray(0),
ts.TimeArray(1),
ts.TimeArray(2),
ts.TimeArray(3)])
npt.assert_equal(t1, t2)
def test_TimeArray_math():
"Addition and subtraction should convert to TimeArray units"
time1 = ts.TimeArray(list(range(10)), time_unit='ms')
time2 = ts.TimeArray(list(range(1,11)), time_unit='ms')
# units should be converted to whatever units the array has
time3 = time1 + 1
npt.assert_equal(time2,time3)
time4 = time2 - 1
npt.assert_equal(time1,time4)
# floats should also work
time3 = time1 + 1.0
npt.assert_equal(time2,time3)
time4 = time2 - 1.0
npt.assert_equal(time1,time4)
# test the r* versions
time3 = 1 + time1
npt.assert_equal(time2,time3)
time4 = 1 - time2
npt.assert_equal(-time1,time4)
# floats should also work
time3 = 1.0 + time1
npt.assert_equal(time2,time3)
time4 = 1.0 - time2
npt.assert_equal(-time1,time4)
timeunits = ts.TimeArray(list(range(10)), time_unit='s')
timeunits.convert_unit('ms')
# now, math with non-TimeArrays should be based on the new time_unit
# here the range() list gets converted to a TimeArray with the same units
# as timeunits (which is now 'ms')
tnew = timeunits + list(range(10))
npt.assert_equal(tnew, timeunits+time1) # recall that time1 was 0-10ms
def test_TimeArray_comparison():
"Comparison with unitless quantities should convert to TimeArray units"
time = ts.TimeArray(list(range(10)), time_unit='ms')
npt.assert_equal(time < 5 , [True]*5+[False]*5)
npt.assert_equal(time > 5 , [False]*6+[True]*4)
npt.assert_equal(time <= 5, [True]*6+[False]*4)
npt.assert_equal(time >= 5, [False]*5+[True]*5)
npt.assert_equal(time == 5, [False]*5+[True] + [False]*4)
time.convert_unit('s')
# now all of time is < 1 in the new time_unit
npt.assert_equal(time < 5 , [True]*10)
npt.assert_equal(time > 5 , [False]*10)
npt.assert_equal(time <= 5, [True]*10)
npt.assert_equal(time >= 5, [False]*10)
npt.assert_equal(time == 5, [False]*10)
def test_TimeArray_init_int64():
"""Make sure that we can initialize TimeArray with an array of ints"""
time = ts.TimeArray(np.int64(1))
npt.assert_equal(time.__repr__(), '1.0 s')
pass
def test_TimeArray_init_list():
"""Initializing with a list that contains TimeArray should work.
"""
for t in [0.001, ts.TimeArray(0.001, time_unit='s')]:
tl = [t]
ta = ts.TimeArray(t, time_unit='s')
tla = ts.TimeArray(tl, time_unit='s')
npt.assert_(ta, tla)
def test_TimeArray_repr():
"""
>>> a = ts.TimeArray([1.1,2,3])
>>> a
TimeArray([ 1.1, 2. , 3. ], time_unit='s')
>>> t = ts.TimeArray(a,time_unit='ms')
>>> t
TimeArray([ 1100., 2000., 3000.], time_unit='ms')
>>> t[0]
1100.0 ms
"""
def test_TimeArray_copyflag():
"""Testing the setting of the copy-flag, where that makes sense"""
#These two should both generate a TimeArray, with one picosecond.
#This one holds time_unit='s'
t1 = ts.TimeArray(np.array([1], dtype=np.int64), copy=False)
#This one holds time_unit='ps':
t2 = ts.TimeArray(1, time_unit='ps')
t3 = ts.TimeArray(t2, copy=False)
npt.assert_equal(t1, t2)
npt.assert_equal(t2.ctypes.data, t3.ctypes.data)
def test_TimeArray_new():
for unit in ['ns', 'ms', 's', None]:
for flag in [True, False]:
#list -doesn't make sense to set copy=True
time2 = ts.TimeArray(list(range(5)), time_unit=unit, copy=True)
#numpy array (float) - doesn't make sense to set copy=True
time2f = ts.TimeArray(np.arange(5.), time_unit=unit, copy=True)
#TimeArray
time3 = ts.TimeArray(time2, time_unit=unit, copy=flag)
#integer
time4 = ts.TimeArray(5, time_unit=unit, copy=True)
#float
time5 = ts.TimeArray(5.0, time_unit=unit, copy=True)
npt.assert_equal(time2, time2f)
npt.assert_equal(time2, time3)
time3[0] += 100
if flag:
npt.assert_(time2[0] != time3[0])
else:
npt.assert_(time2[0] == time3[0])
npt.assert_equal(time2[1:], time3[1:])
npt.assert_equal(time4, time5)
def test_TimeArray_bool():
time1 = ts.TimeArray([1, 2, 3], time_unit='s')
time2 = ts.TimeArray([1000, 2000, 3000], time_unit='ms')
bool_arr = np.ones(time1.shape, dtype=bool)
npt.assert_equal(time1, time2)
npt.assert_equal(bool_arr, time1 == time2)
npt.assert_(type(time1 == time2) is not ts.TimeArray)
def test_TimeArray_convert_unit():
"""
>>> a = ts.TimeArray([1,2,3,4])
>>> a.convert_unit('ms')
>>> a
TimeArray([ 1000., 2000., 3000., 4000.], time_unit='ms')
>>> a.time_unit
'ms'
>>> b = ts.TimeArray([1,2,3,4],'s')
>>> a==b
array([ True, True, True, True], dtype=bool)
"""
def test_TimeArray_div():
#divide singleton by singleton:
a = 2.0
b = 6.0
time1 = ts.TimeArray(a, time_unit='s')
time2 = ts.TimeArray(b, time_unit='s')
div1 = a / b
#This should eliminate the units and return a float, not a TimeArray:
div2 = time1 / time2
npt.assert_equal(div1, div2)
#Divide a TimeArray by a singleton:
a = np.array([1, 2, 3])
b = 6.0
time1 = ts.TimeArray(a, time_unit='s')
time2 = ts.TimeArray(b, time_unit='s')
div1 = a / b
#This should eliminate the units and return a float array, not a TimeArray:
div2 = time1 / time2
npt.assert_equal(div1, div2)
#Divide a TimeArray by another TimeArray:
a = np.array([1, 2, 3])
b = np.array([2, 2, 2]).astype(float) # TimeArray division is float division!
time1 = ts.TimeArray(a, time_unit='s')
time2 = ts.TimeArray(b, time_unit='s')
div1 = a / b
#This should eliminate the units and return a float array, not a TimeArray:
div2 = time1 / time2
npt.assert_equal(div1, div2)
def test_TimeArray_index_at():
time1 = ts.TimeArray(list(range(10)), time_unit='ms')
for i in range(5):
# The return value is always an array, so we keep it for multiple tests
i_arr = np.array(i)
# Check 'closest' indexing mode first
idx = time1.index_at(i)
npt.assert_equal(idx, i_arr)
# If we index with seconds/1000, results shouldn't vary
idx_secs = time1.index_at(ts.TimeArray(i / 1000., time_unit='s'))
npt.assert_equal(idx_secs, i_arr)
# If we now change the tolerance
# In this case, it should still return
idx = time1.index_at(i + 0.1, tol=0.1)
npt.assert_equal(idx, i_arr)
# But with a smaller tolerance, we should get no indices
idx = time1.index_at(i + 0.1, tol=0.05)
npt.assert_equal(idx, np.array([]))
# Now, check before/after modes
idx = time1.index_at(i + 0.1, mode='before')
npt.assert_equal(idx, i_arr)
idx = time1.index_at(i + 0.1, mode='after')
npt.assert_equal(idx, i_arr + 1)
def test_TimeArray_at():
time1 = ts.TimeArray(list(range(10)), time_unit='ms')
for i in range(10):
this = time1.at(i)
i_ms = ts.TimeArray(i / 1000.)
npt.assert_equal(this, ts.TimeArray(i, time_unit='ms'))
this_secs = time1.at(i_ms)
npt.assert_equal(this_secs, ts.TimeArray(i, time_unit='ms'))
seconds_array = ts.TimeArray(time1, time_unit='s')
this_secs = seconds_array.at(i / 1000.)
npt.assert_equal(this_secs, ts.TimeArray(i, time_unit='ms'))
all = time1.at(i_ms, tol=10)
npt.assert_equal(all, time1)
if i > 0 and i < 9:
this_secs = time1.at(i_ms, tol=1)
npt.assert_equal(this_secs,
ts.TimeArray([i - 1, i, i + 1], time_unit='ms'))
def test_TimeArray_at2():
time1 = ts.TimeArray(list(range(10)), time_unit='ms')
for i in [1]:
i_ms = ts.TimeArray(i / 1000.)
this_secs = time1.at(i_ms, tol=1)
npt.assert_equal(this_secs,
ts.TimeArray([i - 1, i, i + 1], time_unit='ms'))
def test_UniformTime_index_at():
time1 = ts.UniformTime(t0=1000, length=10, sampling_rate=1000, time_unit='ms')
mask = [False] * 10
for i in range(10):
idx = time1.index_at(ts.TimeArray(1000 + i, time_unit='ms'))
npt.assert_equal(idx, np.array(i))
mask[i] = True
mask_idx = time1.index_at(ts.TimeArray(1000 + i, time_unit='ms'),
boolean=True)
npt.assert_equal(mask_idx, mask)
if i > 0 and i < 9:
mask[i - 1] = True
mask[i + 1] = True
mask_idx = time1.index_at(
ts.TimeArray([999 + i, 1000 + i, 1001 + i],
time_unit='ms'), boolean=True)
npt.assert_equal(mask_idx, mask)
mask[i - 1] = False
mask[i + 1] = False
mask[i] = False
#XXX Need to write these tests:
#Test the unit conversion:
#
#def test_TimeArray_unit_conversion():
#Test the overloaded __getitem__ and __setitem__:
#
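#A minimal sketch of the unit-conversion test flagged above, inferred from the
#convert_unit behaviour exercised earlier in this file (the expected values are
#assumptions based on those tests, not an authoritative specification):
#
#def test_TimeArray_unit_conversion():
#    time1 = ts.TimeArray(list(range(10)), time_unit='ms')
#    time1.convert_unit('s')
#    npt.assert_equal(time1.time_unit, 's')
#    npt.assert_equal(time1, ts.TimeArray(np.arange(10) / 1000., time_unit='s'))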
def test_TimeArray_getset():
t1 = ts.TimeSeries(data = np.random.rand(2, 3, 4), sampling_rate=1)
npt.assert_equal(t1[0],t1.data[...,0])
def test_UniformTime():
tuc = ts.time_unit_conversion
for unit, duration in zip(['ns', 'ms', 's', None],
[2 * 10 ** 9, 2 * 10 ** 6, 100, 20]):
t1 = ts.UniformTime(duration=duration, sampling_rate=1,
time_unit=unit)
t2 = ts.UniformTime(duration=duration, sampling_rate=20,
time_unit=unit)
#The following two tests verify that first-last are equal to the
#duration, but it is unclear whether that is really the behavior we
#want, because the t_i held by a TimeSeries is the left
#(smaller) side of the time-duration defined by the bin
#The difference between the first and last item is the duration:
#npt.assert_equal(t1[-1]-t1[0],
# ts.TimeArray(duration,time_unit=unit))
#Duration doesn't depend on the sampling rate:
#npt.assert_equal(t1[-1]-t2[0],
# ts.TimeArray(duration,time_unit=unit))
a = ts.UniformTime(duration=10, sampling_rate=1)
b = ts.UniformTime(a, time_unit=unit)
npt.assert_equal(a.sampling_interval, b.sampling_interval)
npt.assert_equal(a.sampling_rate, b.sampling_rate)
b = ts.UniformTime(a, duration=2 * duration, time_unit=unit)
npt.assert_equal(a.sampling_interval, b.sampling_interval)
npt.assert_equal(a.sampling_rate, b.sampling_rate)
b = ts.UniformTime(a, length=100, time_unit=unit)
npt.assert_equal(a.sampling_interval, b.sampling_interval)
npt.assert_equal(a.sampling_rate, b.sampling_rate)
b = ts.UniformTime(a, length=100, time_unit=unit)
npt.assert_equal(a.sampling_interval, b.sampling_interval)
npt.assert_equal(a.sampling_rate, b.sampling_rate)
b = ts.UniformTime(a, length=100, duration=duration, time_unit=unit)
c = ts.UniformTime(length=100, duration=duration, time_unit=unit)
npt.assert_equal(c, b)
b = ts.UniformTime(sampling_interval=1, duration=10, time_unit=unit)
c = ts.UniformTime(sampling_rate=tuc['s'] / tuc[unit],
length=10, time_unit=unit)
npt.assert_equal(c, b)
#This should raise a value error, because the duration is shorter than
#the sampling_interval:
with pytest.raises(ValueError) as e_info:
ts.UniformTime(dict(sampling_interval=10, duration=1))
#Time objects can be initialized with other time objects setting the
#duration, sampling_interval and sampling_rate:
a = ts.UniformTime(length=1, sampling_rate=1)
with pytest.raises(ValueError) as e_info:
ts.UniformTime(dict(data=a, sampling_rate=10, sampling_interval=.1))
b = ts.UniformTime(duration=2 * a.sampling_interval,
sampling_rate=2 * a.sampling_rate)
npt.assert_equal(ts.Frequency(b.sampling_rate),
ts.Frequency(2 * a.sampling_rate))
npt.assert_equal(b.sampling_interval,
ts.TimeArray(0.5 * a.sampling_rate))
b = ts.UniformTime(duration=10,
sampling_interval=a.sampling_interval)
npt.assert_equal(b.sampling_rate, a.sampling_rate)
b = ts.UniformTime(duration=10,
sampling_rate=a.sampling_rate)
npt.assert_equal(b.sampling_interval, a.sampling_interval)
# make sure that t0 and other attributes are copied
a = ts.UniformTime(length=1, sampling_rate=1)
b = a.copy()
npt.assert_equal(b.duration, a.duration)
npt.assert_equal(b.sampling_rate, a.sampling_rate)
npt.assert_equal(b.sampling_interval, a.sampling_interval)
npt.assert_equal(b.t0, a.t0)
def test_UniformTime_repr():
"""
>>> time1 = ts.UniformTime(sampling_rate=1000,time_unit='ms',length=3)
>>> time1.sampling_rate
1000.0 Hz
>>> time1
UniformTime([ 0., 1., 2.], time_unit='ms')
>>> time2= ts.UniformTime(sampling_rate=1000,time_unit='s',length=3)
>>> time2.sampling_rate
1000.0 Hz
>>> time2
UniformTime([ 0. , 0.001, 0.002], time_unit='s')
>>> a = ts.UniformTime(length=5,sampling_rate=1,time_unit='ms')
>>> b = ts.UniformTime(a)
>>> b
UniformTime([ 0., 1000., 2000., 3000., 4000.], time_unit='ms')
>>> a
UniformTime([ 0., 1000., 2000., 3000., 4000.], time_unit='ms')
>>> b = ts.UniformTime(a,time_unit='s')
>>> b
UniformTime([ 0., 1., 2., 3., 4.], time_unit='s')
>>> a = ts.UniformTime(length=1,sampling_rate=2)
>>> b = ts.UniformTime(length=10,sampling_interval=a.sampling_interval)
>>> b.sampling_rate
2.0 Hz
"""
def test_Frequency():
"""Test frequency representation object"""
tuc = ts.time_unit_conversion
for unit in ['ns', 'ms', 's', None]:
f = ts.Frequency(1, time_unit=unit)
npt.assert_equal(f.to_period(), tuc[unit])
f = ts.Frequency(1000, time_unit=unit)
npt.assert_equal(f.to_period(), tuc[unit] / 1000)
f = ts.Frequency(0.001, time_unit=unit)
npt.assert_equal(f.to_period(), tuc[unit] * 1000)
def test_TimeSeries():
"""Testing the initialization of the uniform time series object """
#Test initialization with duration:
tseries1 = ts.TimeSeries([1, 2, 3, 4, 5, 6, 7, 8, 9, 10], duration=10)
tseries2 = ts.TimeSeries([1, 2, 3, 4, 5, 6, 7, 8, 9, 10], sampling_interval=1)
npt.assert_equal(tseries1.time, tseries2.time)
#downsampling:
t1 = ts.UniformTime(length=8, sampling_rate=2)
#duration is the same, but we're downsampling to 1Hz
tseries1 = ts.TimeSeries(data=[1, 2, 3, 4], time=t1, sampling_rate=1)
#If you didn't explicitly provide the rate you want to downsample to, that
#is an error:
with pytest.raises(ValueError) as e_info:
ts.TimeSeries(dict(data=[1, 2, 3, 4], time=t1))
tseries2 = ts.TimeSeries(data=[1, 2, 3, 4], sampling_rate=1)
tseries3 = ts.TimeSeries(data=[1, 2, 3, 4], sampling_rate=1000,
time_unit='ms')
#you can specify the sampling_rate or the sampling_interval, to the same
#effect, where specifying the sampling_interval is in the units of that
#time-series:
tseries4 = ts.TimeSeries(data=[1, 2, 3, 4], sampling_interval=1,
time_unit='ms')
npt.assert_equal(tseries4.time, tseries3.time)
#The units you use shouldn't matter - time is time:
tseries6 = ts.TimeSeries(data=[1, 2, 3, 4],
sampling_interval=0.001,
time_unit='s')
npt.assert_equal(tseries6.time, tseries3.time)
#And this too - perverse, but should be possible:
tseries5 = ts.TimeSeries(data=[1, 2, 3, 4],
sampling_interval=ts.TimeArray(0.001,
time_unit='s'),
time_unit='ms')
npt.assert_equal(tseries5.time, tseries3.time)
#initializing with a UniformTime object:
t = ts.UniformTime(length=3, sampling_rate=3)
data = [1, 2, 3]
tseries7 = ts.TimeSeries(data=data, time=t)
npt.assert_equal(tseries7.data, data)
data = [1, 2, 3, 4]
#If the data is not the right length, that should throw an error:
with pytest.raises(ValueError) as e_info:
ts.TimeSeries(dict(data=data, time=t))
# test basic arithmetic with TimeSeries
tseries1 = ts.TimeSeries([1, 2, 3, 4, 5, 6, 7, 8, 9, 10], sampling_rate=1)
tseries2 = tseries1 + 1
npt.assert_equal(tseries1.data + 1, tseries2.data)
npt.assert_equal(tseries1.time, tseries2.time)
tseries2 -= 1
npt.assert_equal(tseries1.data, tseries2.data)
npt.assert_equal(tseries1.time, tseries2.time)
tseries2 = tseries1 * 2
npt.assert_equal(tseries1.data * 2, tseries2.data)
npt.assert_equal(tseries1.time, tseries2.time)
tseries2 = tseries2 / 2
npt.assert_equal(tseries1.data, tseries2.data)
npt.assert_equal(tseries1.time, tseries2.time)
tseries_nd1 = ts.TimeSeries(np.random.randn(3, 100), sampling_rate=1)
tseries_nd2 = ts.TimeSeries(np.random.randn(3, 100), sampling_rate=1)
npt.assert_equal((tseries_nd1 + tseries_nd2).data,
tseries_nd1.data + tseries_nd2.data)
npt.assert_equal((tseries_nd1 - tseries_nd2).data,
tseries_nd1.data - tseries_nd2.data)
npt.assert_equal((tseries_nd1 * tseries_nd2).data,
tseries_nd1.data * tseries_nd2.data)
npt.assert_equal((tseries_nd1 / tseries_nd2).data,
tseries_nd1.data / tseries_nd2.data)
def test_TimeSeries_repr():
"""
>>> t=ts.UniformTime(length=3,sampling_rate=3)
>>> tseries1 = ts.TimeSeries(data=[3,5,8],time=t)
>>> t.sampling_rate
3.0 Hz
>>> tseries1.sampling_rate
3.0 Hz
>>> tseries1 = ts.TimeSeries(data=[3,5,8],sampling_rate=3)
>>> tseries1.time
UniformTime([ 0. , 0.3333, 0.6667], time_unit='s')
>>> tseries1.sampling_rate
3.0 Hz
>>> tseries1.sampling_interval
0.333333333333 s
>>> a = ts.UniformTime(length=1,sampling_rate=2)
>>> b = ts.TimeSeries(data=[1,2,3],sampling_interval=a.sampling_interval)
>>> b.sampling_rate
2.0 Hz
>>> a = ts.UniformTime(length=1,sampling_rate=1)
>>> b = ts.TimeSeries(data=[1,2,3],sampling_interval=a.sampling_interval)
>>> b.sampling_rate
1.0 Hz
"""
def test_Epochs():
tms = ts.TimeArray(data=list(range(100)), time_unit='ms')
tmin = ts.TimeArray(data=list(range(100)), time_unit='m')
tsec = ts.TimeArray(data=list(range(100)), time_unit='s')
utms = ts.UniformTime(length=100, sampling_interval=1, time_unit='ms')
utmin = ts.UniformTime(length=100, sampling_interval=1, time_unit='m')
utsec = ts.UniformTime(length=100, sampling_interval=1, time_unit='s')
tsms = ts.TimeSeries(data=list(range(100)), sampling_interval=1, time_unit='ms')
tsmin = ts.TimeSeries(data=list(range(100)), sampling_interval=1, time_unit='m')
tssec = ts.TimeSeries(data=list(range(100)), sampling_interval=1, time_unit='s')
# one millisecond epoch
e1ms = ts.Epochs(0, 1, time_unit='ms')
e09ms = ts.Epochs(0.1, 1, time_unit='ms')
msg = "Seems like a problem with copy=False in TimeArray constructor."
npt.assert_equal(e1ms.duration, ts.TimeArray(1, time_unit='ms'), msg)
# one day
e1d = ts.Epochs(0, 1, time_unit='D')
npt.assert_equal(e1d.duration, ts.TimeArray(1, time_unit='D'), msg)
e1ms_ar = ts.Epochs([0, 0], [1, 1], time_unit='ms')
for t in [tms, tmin, tsec, utms, utmin, utsec]:
# the sample time arrays are all at least 1ms long, so this should
# return a timearray that has exactly one time point in it
npt.assert_equal(len(t.during(e1ms)), 1)
# this time epoch should not contain any point
npt.assert_equal(len(t.during(e09ms)), 0)
# make sure, slicing doesn't change the class
npt.assert_equal(type(t), type(t.during(e1ms)))
for t in [tsms, tsmin, tssec]:
# the sample time series are all at least 1ms long, so this should
# return a timeseries that has exactly one time point in it
npt.assert_equal(len(t.during(e1ms)), 1)
# make sure, slicing doesn't change the class
npt.assert_equal(type(t), type(t.during(e1ms)))
# same thing but now there's an array of epochs
e2 = ts.Epochs([0, 10], [10, 20], time_unit=t.time_unit)
# make sure, slicing doesn't change the class for array of epochs
npt.assert_equal(type(t), type(t.during(e2)))
# Indexing with an array of epochs (all of which are the same length)
npt.assert_equal(t[e2].data.shape, (2, 10))
npt.assert_equal(len(t.during(e2)), 10)
npt.assert_equal(t[e2].data.ndim, 2)
# check the data at some timepoints (a dimension was added)
npt.assert_equal(t[e2][0], (0, 10))
npt.assert_equal(t[e2][1], (1, 11))
# check the data for each epoch
npt.assert_equal(t[e2].data[0], list(range(10)))
npt.assert_equal(t[e2].data[1], list(range(10, 20)))
npt.assert_equal(t[e2].duration, e2[0].duration)
# slice with Epochs of different length (not supported for timeseries,
# raise error, though future jagged array implementation could go here)
ejag = ts.Epochs([0, 10], [10, 40], time_unit=t.time_unit)
# next line is the same as t[ejag]
with pytest.raises(ValueError) as e_info:
t.__getitem__(ejag)
# if an epoch lies entirely between samples in the timeseries,
# return an empty array
eshort = ts.Epochs(2.5, 2.7, time_unit=t.time_unit)
npt.assert_equal(len(t[eshort].data), 0)
e1ms_outofrange = ts.Epochs(200, 300, time_unit=t.time_unit)
# assert that with the epoch moved outside of the time range of our
# data, slicing with the epoch raises an error
with pytest.raises(ValueError) as e_info:
t.during(dict(e=e1ms_outofrange))
# the sample timeseries are all shorter than a day, so this should
# raise an error (instead of padding, or returning a shorter than
# expected array).
with pytest.raises(ValueError) as e_info:
t.during(dict(e=e1d))
def test_basic_slicing():
t = ts.TimeArray(list(range(4)))
for x in range(3):
ep = ts.Epochs(.5,x+.5)
npt.assert_equal(len(t[ep]), x)
# epoch starts before timeseries
npt.assert_equal(len(t[ts.Epochs(-1,3)]), len(t)-1)
# epoch ends after timeseries
npt.assert_equal(len(t[ts.Epochs(.5,5)]), len(t)-1)
# epoch starts before and ends after timeseries
npt.assert_equal(len(t[ts.Epochs(-1,100)]), len(t))
ep = ts.Epochs(20,100)
npt.assert_equal(len(t[ep]), 0)
def test_Events():
# time has to be one-dimensional
with pytest.raises(ValueError) as e_info:
ts.Events(np.zeros((2, 2)))
t = ts.TimeArray([1, 2, 3], time_unit='ms')
x = [1, 2, 3]
y = [2, 4, 6]
z = [10., 20., 30.]
i0 = [0, 0, 1]
i1 = [0, 1, 2]
for unit in [None, 's', 'ns', 'D']:
# events with data
ev1 = ts.Events(t, time_unit=unit, i=x, j=y, k=z)
# events with indices
ev2 = ts.Events(t, time_unit=unit, indices=[i0, i1])
# events with indices and labels
ev3 = ts.Events(t, time_unit=unit, labels=['trial', 'other'],
indices=[i0, i1])
# Note that the length of indices and labels has to be identical:
with pytest.raises(ValueError) as e_info:
ts.Events(t, time_unit=unit,
labels=['trial', 'other'], indices=[i0])  # only one of the indices!
# make sure the time is retained
npt.assert_equal(ev1.time, t)
npt.assert_equal(ev2.time, t)
# make sure time unit is correct
if unit is not None:
npt.assert_equal(ev1.time_unit, unit)
npt.assert_equal(ev2.time_unit, unit)
else:
npt.assert_equal(ev1.time_unit, t.time_unit)
npt.assert_equal(ev2.time_unit, t.time_unit)
# make sure we can extract data
npt.assert_equal(ev1.data['i'], x)
# RAiSERHD module
# <NAME>, 23 Feb 2022
# import packages
import h5py
import numpy as np
import pandas as pd
import time as ti
import os, warnings
from astropy import constants as const
from astropy import units as u
from astropy.convolution import convolve, Gaussian2DKernel
from astropy.cosmology import FlatLambdaCDM
from astropy.io import fits
from astropy import wcs
from copy import copy
from matplotlib import pyplot as plt
from matplotlib import cm, rc
from matplotlib.colors import LogNorm
from matplotlib.ticker import FormatStrFormatter, NullFormatter, LogLocator
from numba import jit
from scipy.optimize import least_squares
from scipy.special import gamma, zeta
## Define global variables that can be adjusted to customise model output
# basic constants
year = 365.2422*24*3600 # average year in seconds
maverage = (0.6*const.m_p.value) # kg average particle mass
hubble = 0.7 # dimensionless Hubble parameter
OmegaM = 0.27 # fraction of matter in the flat universe
OmegaD = 0.73 # fraction of dark energy in the flat universe
freq_cmb = 5.879e10 # frequency of cosmic microwave background at z = 0
temp_cmb = 2.725 # temperature of cosmic microwave background at z = 0
c_speed = const.c.value # speed of light
e_charge = const.e.value # electron charge
k_B = const.k_B.value # Boltzmann constant
m_e = const.m_e.value # electron mass
mu0 = const.mu0.value # vacuum permeability
sigma_T = const.sigma_T.value # electron scattering cross-section
# model parameters that can be optimised for efficiency
nangles = 16 # number of angles to calculate expansion rate along (must be greater than 1)
betaRegions = 64 # set maximum number of beta regions
limTime = (year) # the FR-II limit must be used before this time
stepRatio = 1.01 # ratio to increase time/radius
crit_age = 0.95 # fraction of source age for lower end of power law approximations
lambda_min = 1e-256 # minimum value of Lambda for computational efficiency
# shocked gas and lobe parameters
chi = 2*np.pi/3.0 # lobe geometry parameter
shockAxisRatio = 0.5875 # exponent relating the cocoon axis ratio to the shocked gas axis ratio
shockRadius = 1.072 # fraction of the radius the shocked gas is greater than the lobe
gammaX = (5./3) # lorentz factor of external gas
gammaJ = (4./3) # lorentz factor of jet plasma
# set electron energy distribution constants
Lorentzmin = 780. # minimum Lorentz factor of injected electrons AT HOTSPOT for Cygnus A
Lorentzmax = 1e6 # effectively infinity
# density and temperature profiles
rCutoff = 0.01 # minimum radius to match profiles as a fraction of r200
betaMax = 2 # set critical value above which the cocoon expands ballistically
# average and standard deviation of Vikhlinin model parameters
alphaAvg = 1.64 # corrected for removal of second core term
alphaStdev = 0.30
betaPrimeAvg = 0.56
betaPrimeStdev = 0.10
gammaPrimeAvg = 3
gammaPrimeStdev = 0
epsilonAvg = 3.23
epsilonStdev = 0 # 1.93; this parameter has little effect on profile
rCoreAvg = 0.087 # this is ratio of rc to r200
rCoreStdev = 0.028
rSlopeAvg = 0.73 # this is ratio of rs to r200
rSlopeStdev = 0 # 0.39; this parameter has little effect on profile
# temperature parameters
TmgConst = (-2.099)
TmgSlope = 0.6678
TmgError = 0.0727
# new temperature parameters assuming heating from AGN during expansion
TmgAvg = 7.00
TmgStdev = 0.28
# approximate halo to gas fraction conversion
# for halo masses between 10^12 and 10^15 and redshifts 0 < z < 5
halogasfracCONST1z0 = (-0.881768418)
halogasfracCONST1z1 = (-0.02832004)
halogasfracCONST2z0 = (-0.921393448)
halogasfracCONST2z1 = 0.00064515
halogasfracSLOPE = 0.053302276
# uncertainties, in dex
dhalogasfracz0 = 0.05172769
dhalogasfracz1 = (-0.00177947)
# correction to SAGE densities
SAGEdensitycorr = (-0.1)
## Define functions for run-time user output
def __join(*values):
return ";".join(str(v) for v in values)
def __color_text(s, c, base=30):
template = '\x1b[{0}m{1}\x1b[0m'
t = __join(base+8, 2, __join(*c))
return template.format(t, s)
class Colors:
DogderBlue = (30, 144, 255)
Green = (0,200,0)
Orange = (255, 165, 0)
## Define main function to run RAiSE HD
def RAiSE_run(frequency, redshift, axis_ratio, jet_power, source_age, halo_mass=None, rand_profile=False, betas=None, regions=None, rho0Value=None, temperature=None, active_age=10.14, jet_lorentz=5, equipartition=-1.5, spectral_index=0.7, gammaCValue=5./3, lorentz_min=Lorentzmin, brightness=True, angle=0., resolution='standard', seed=None, aj_star=0.231, jet_angle=0.686, axis_exponent=0.343, fill_factor=0.549):
# record start time of code
start_time = ti.time()
# function to test type of inputs and convert type where appropriate
if nangles <= 1:
raise Exception('Private variable nangles must be greater than 1.')
frequency, redshift, axis_ratio, jet_power, source_age, halo_mass, betas, regions, rho0Value, temperature, active_age, equipartition, jet_lorentz, nenvirons = __test_inputs(frequency, redshift, axis_ratio, jet_power, source_age, halo_mass, betas, regions, rho0Value, temperature, active_age, equipartition, jet_lorentz)
# download and pre-process particles from hydrodynamical simulation
if not resolution == None:
print(__color_text('Reading particle data from file.', Colors.Green))
time, shock_time, major, minor, x1, x2, x3, tracer, vx3, volume, pressure, press_minor, alphaP_hyd, alphaP_henv, hotspot_ratio = __PLUTO_particles('RAiSE_particles.hdf5')
# set seed for quasi-random profiles
if not seed == None:
__set_seed(seed)
# create folder for output files if not present
if not os.path.exists('LDtracks'):
os.mkdir('LDtracks')
if not resolution == None:
print(__color_text('Running RAiSE dynamics and emissivity.', Colors.Green))
else:
print(__color_text('Running RAiSE dynamics.', Colors.Green))
for i in range(0, len(redshift)):
for j in range(0, len(axis_ratio)):
for k in range(0, len(jet_power)):
for l in range(0, nenvirons):
for m in range(0, len(active_age)):
for n in range(0, len(equipartition)):
for o in range(0, len(jet_lorentz)):
# set correct data types for halo mass and core density
if isinstance(halo_mass, (list, np.ndarray)):
new_halo_mass = halo_mass[l]
else:
new_halo_mass = halo_mass
if isinstance(rho0Value, (list, np.ndarray)):
new_rho0Value = rho0Value[l]
new_temperature = temperature[l]
new_betas = betas[l]
new_regions = regions[l]
else:
new_rho0Value = rho0Value
new_temperature = temperature
new_betas = betas
new_regions = regions
# calculate dynamical evolution of lobe and shocked shell using RAiSE dynamics
lobe_lengths, lobe_minor, shock_lengths, shock_pressures, lambda_crit, alphaP_denv, alpha_lambda = __RAiSE_environment(redshift[i], axis_ratio[j], jet_power[k], source_age, halo_mass=new_halo_mass, rand_profile=rand_profile, rho0Value=new_rho0Value, regions=new_regions, betas=new_betas, temperature=new_temperature, active_age=active_age[m], jet_lorentz=jet_lorentz[o], gammaCValue=gammaCValue, aj_star=aj_star, jet_angle=jet_angle, axis_exponent=axis_exponent, fill_factor=fill_factor)
# calculate synchrotron emission from lobe using particles and RAiSE model
if not resolution == None:
location, luminosity, magnetic_field = __RAiSE_emissivity(frequency, redshift[i], time, shock_time, major, minor, x1, x2, x3, tracer, vx3, volume, pressure, press_minor, alphaP_hyd, alphaP_henv, hotspot_ratio, source_age, lobe_lengths, lobe_minor, shock_pressures, lambda_crit, alphaP_denv, alpha_lambda, active_age[m], equipartition[n], spectral_index, gammaCValue=gammaCValue, lorentz_min=lorentz_min, resolution=resolution)
# create pandas dataframe for integrated emission
df = pd.DataFrame()
df['Time (yrs)'] = 10**np.asarray(source_age).astype(np.float_)
df['Size (kpc)'] = 2*lobe_lengths[0,:]/const.kpc.value
df['Pressure (Pa)'] = shock_pressures[0,:]
df['Axis Ratio'] = lobe_lengths[0,:]/lobe_lengths[-1,:]
if not resolution == None:
for q in range(0, len(frequency)):
if frequency[q] > 0:
df['B{:.2f} (T)'.format(frequency[q])] = magnetic_field[:,q]
df['L{:.2f} (W/Hz)'.format(frequency[q])] = np.nansum(luminosity[:,:,q], axis=1)
# write data to file
if isinstance(rho0Value, (list, np.ndarray)):
df.to_csv('LDtracks/LD_A={:.2f}_eq={:.2f}_p={:.2f}_Q={:.2f}_s={:.2f}_T={:.2f}_y={:.2f}_z={:.2f}.csv'.format(axis_ratio[j], np.abs(equipartition[n]), np.abs(np.log10(rho0Value[l])), jet_power[k], 2*np.abs(spectral_index) + 1, active_age[m], jet_lorentz[o], redshift[i]), index=False)
elif isinstance(halo_mass, (list, np.ndarray)):
df.to_csv('LDtracks/LD_A={:.2f}_eq={:.2f}_H={:.2f}_Q={:.2f}_s={:.2f}_T={:.2f}_y={:.2f}_z={:.2f}.csv'.format(axis_ratio[j], np.abs(equipartition[n]), halo_mass[l], jet_power[k], 2*np.abs(spectral_index) + 1, active_age[m], jet_lorentz[o], redshift[i]), index=False)
else:
raise Exception('Either the halo mass or full density profile must be provided as model inputs.')
# calculate brightness per pixel across the source
if brightness == True and not resolution == None:
x_values, y_values, brightness_list = __RAiSE_brightness_map(frequency, redshift[i], source_age, lobe_lengths, location, luminosity, angle, resolution=resolution)
for p in range(0, len(source_age)):
for q in range(0, len(frequency)):
# create pandas dataframe for spatially resolved emission
if isinstance(x_values[p][q], (list, np.ndarray)):
df = pd.DataFrame(index=x_values[p][q]/const.kpc.value, columns=y_values[p][q]/const.kpc.value, data=brightness_list[p][q])
# write surface brightness map to file
if isinstance(rho0Value, (list, np.ndarray)):
if frequency[q] > 0:
df.to_csv('LDtracks/LD_A={:.2f}_eq={:.2f}_p={:.2f}_Q={:.2f}_s={:.2f}_T={:.2f}_y={:.2f}_z={:.2f}_nu={:.2f}_t={:.2f}_{:s}.csv'.format(axis_ratio[j], np.abs(equipartition[n]), np.abs(np.log10(rho0Value[l])), jet_power[k], 2*np.abs(spectral_index) + 1, active_age[m], jet_lorentz[o], redshift[i], frequency[q], source_age[p], resolution), header=True, index=True)
else:
df.to_csv('LDtracks/LD_A={:.2f}_eq={:.2f}_p={:.2f}_Q={:.2f}_s={:.2f}_T={:.2f}_y={:.2f}_z={:.2f}_t={:.2f}_{:s}.csv'.format(axis_ratio[j], np.abs(equipartition[n]), np.abs(np.log10(rho0Value[l])), jet_power[k], 2*np.abs(spectral_index) + 1, active_age[m], jet_lorentz[o], redshift[i], source_age[p], resolution), header=True, index=True)
elif isinstance(halo_mass, (list, np.ndarray)):
if frequency[q] > 0:
df.to_csv('LDtracks/LD_A={:.2f}_eq={:.2f}_H={:.2f}_Q={:.2f}_s={:.2f}_T={:.2f}_y={:.2f}_z={:.2f}_nu={:.2f}_t={:.2f}_{:s}.csv'.format(axis_ratio[j], np.abs(equipartition[n]), halo_mass[l], jet_power[k], 2*np.abs(spectral_index) + 1, active_age[m], jet_lorentz[o], redshift[i], frequency[q], source_age[p], resolution), header=True, index=True)
else:
df.to_csv('LDtracks/LD_A={:.2f}_eq={:.2f}_H={:.2f}_Q={:.2f}_s={:.2f}_T={:.2f}_y={:.2f}_z={:.2f}_t={:.2f}_{:s}.csv'.format(axis_ratio[j], np.abs(equipartition[n]), halo_mass[l], jet_power[k], 2*np.abs(spectral_index) + 1, active_age[m], jet_lorentz[o], redshift[i], source_age[p], resolution), header=True, index=True)
else:
raise Exception('Either the halo mass or full density profile must be provided as model inputs.')
else:
if isinstance(rho0Value, (list, np.ndarray)):
warnings.warn('The following file was not created as no emission is present: LDtracks/LD_A={:.2f}_eq={:.2f}_p={:.2f}_Q={:.2f}_s={:.2f}_T={:.2f}_y={:.2f}_z={:.2f}_nu={:.2f}_t={:.2f}_{:s}.csv'.format(axis_ratio[j], np.abs(equipartition[n]), np.abs(np.log10(rho0Value[l])), jet_power[k], 2*np.abs(spectral_index) + 1, active_age[m], jet_lorentz[o], redshift[i], frequency[q], source_age[p], resolution), category=UserWarning)
elif isinstance(halo_mass, (list, np.ndarray)):
warnings.warn('The following file was not created as no emission is present: LDtracks/LD_A={:.2f}_eq={:.2f}_H={:.2f}_Q={:.2f}_s={:.2f}_T={:.2f}_y={:.2f}_z={:.2f}_nu={:.2f}_t={:.2f}_{:s}.csv'.format(axis_ratio[j], np.abs(equipartition[n]), halo_mass[l], jet_power[k], 2*np.abs(spectral_index) + 1, active_age[m], jet_lorentz[o], redshift[i], frequency[q], source_age[p], resolution), category=UserWarning)
else:
raise Exception('Either the halo mass or full density profile must be provided as model inputs.')
# print total run time to screen
print(__color_text('RAiSE completed running after {:.2f} seconds.'.format(ti.time() - start_time), Colors.Green))
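# Example usage (a minimal sketch; the parameter values below are illustrative
# assumptions rather than defaults of this module):
#   RAiSE_run(frequency=9.18, redshift=0.05, axis_ratio=4., jet_power=38.5,
#             source_age=[6, 7, 8], halo_mass=14., resolution=None)
# With resolution=None only the dynamical model is evaluated and a CSV of size,
# pressure and axis ratio as a function of source age is written to LDtracks/.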
# Define function to test type of inputs and convert type where appropriate
def __test_inputs(frequency, redshift, axis_ratio, jet_power, source_age, halo_mass, betas, regions, rho0Value, temperature, active_age, equipartition, jet_lorentz):
# convert redshift, axis ratio and jet power to correct data types
if not isinstance(frequency, (list, np.ndarray)):
frequency = [frequency]
for i in range(0, len(frequency)):
if not isinstance(frequency[i], (int, float)):
raise Exception('Frequency must be provided as a float or list/array of floats in units of log10 Hertz.')
else:
if frequency[i] <= 0:
frequency[i] = -1.
warnings.warn('Pressure map will be produced instead of surface brightness image.', category=UserWarning)
elif not (5 < frequency[i] and frequency[i] < 20):
raise Exception('Frequency must be provided as a float or list/array of floats in units of log10 Hertz.')
if not isinstance(redshift, (list, np.ndarray)):
redshift = [redshift]
for i in range(0, len(redshift)):
if not isinstance(redshift[i], (int, float)) or not (0 < redshift[i] and redshift[i] < 20):
raise Exception('Redshift must be provided as a float or list/array of floats.')
if not isinstance(axis_ratio, (list, np.ndarray)):
axis_ratio = [axis_ratio]
for i in range(0, len(axis_ratio)):
if not isinstance(axis_ratio[i], (int, float)) or not (1 <= axis_ratio[i] and axis_ratio[i] < 20):
raise Exception('Axis ratio must be provided as a float or list/array of floats and be greater than 1.')
if not isinstance(jet_power, (list, np.ndarray)):
jet_power = [jet_power]
for i in range(0, len(jet_power)):
if not isinstance(jet_power[i], (int, float)) or not (33 < jet_power[i] and jet_power[i] < 46):
raise Exception('Jet power must be provided as a float or list/array of floats in units of log10 Watts.')
if not isinstance(source_age, (list, np.ndarray)):
source_age = [source_age]
for i in range(0, len(source_age)):
if not isinstance(source_age[i], (int, float)) or not (0 <= source_age[i] and source_age[i] <= 10.14):
raise Exception('Source age must be provided as a float or list/array of floats in units of log10 years.')
else:
source_age[i] = float(source_age[i])
if not isinstance(active_age, (list, np.ndarray)):
active_age = [active_age]
for i in range(0, len(active_age)):
if not isinstance(active_age[i], (int, float)) or not (0 <= active_age[i] and active_age[i] <= 10.14):
raise Exception('Active age must be provided as a float or list/array of floats in units of log10 years.')
if not isinstance(equipartition, (list, np.ndarray)):
equipartition = [equipartition]
for i in range(0, len(equipartition)):
if not isinstance(equipartition[i], (int, float)) or not (-6 < equipartition[i] and equipartition[i] < 6):
raise Exception('Equipartition factor must be provided as a float or list/array of floats in units of log10.')
if not isinstance(jet_lorentz, (list, np.ndarray)):
jet_lorentz = [jet_lorentz]
for i in range(0, len(jet_lorentz)):
if not isinstance(jet_lorentz[i], (int, float)) or not (-100 <= jet_lorentz[i] and jet_lorentz[i] < 20):
raise Exception('Jet bulk Lorentz factor must be provided as a float or list/array of floats.')
elif (-100 <= jet_lorentz[i] and jet_lorentz[i] <= 1):
jet_lorentz[i] = 0
warnings.warn('Jet phase will not be included in this simulation.', category=UserWarning)
# convert environment to correct data types
if not isinstance(halo_mass, (list, np.ndarray)) and not halo_mass == None:
halo_mass = [halo_mass]
nenvirons_halo = len(halo_mass)
elif not halo_mass == None:
nenvirons_halo = len(halo_mass)
if isinstance(halo_mass, (list, np.ndarray)):
for i in range(0, len(halo_mass)):
if not isinstance(halo_mass[i], (int, float)) or not (9 < halo_mass[i] and halo_mass[i] < 17):
raise Exception('Dark matter halo mass must be provided as a float or list/array of floats in units of log10 solar masses.')
if not isinstance(rho0Value, (list, np.ndarray)) and not rho0Value == None:
rho0Value = [rho0Value]
nenvirons_rho = len(rho0Value)
elif not rho0Value == None:
nenvirons_rho = len(rho0Value)
if isinstance(rho0Value, (list, np.ndarray)):
if not isinstance(temperature, (list, np.ndarray)) and not temperature == None:
temperature = [temperature]*nenvirons_rho
elif temperature == None or not len(temperature) == nenvirons_rho:
rho0Value = None # full density profile not provided
if isinstance(betas, (list, np.ndarray)) and not isinstance(betas[0], (list, np.ndarray)):
betas = [betas]*nenvirons_rho
elif not isinstance(betas, (list, np.ndarray)) and not betas == None:
betas = [[betas]]*nenvirons_rho
elif betas == None or not len(betas) == nenvirons_rho:
rho0Value = None # full density profile not provided
if isinstance(regions, (list, np.ndarray)) and not isinstance(regions[0], (list, np.ndarray)):
regions = [regions]*nenvirons_rho
elif not isinstance(regions, (list, np.ndarray)) and not regions == None:
regions = [[regions]]*nenvirons_rho
elif regions == None or not len(regions) == nenvirons_rho:
rho0Value = None # full density profile not provided
if isinstance(rho0Value, (list, np.ndarray)):
nenvirons = nenvirons_rho
for i in range(0, len(rho0Value)):
if not isinstance(rho0Value[i], (int, float)) or not (1e-30 < rho0Value[i] and rho0Value[i] < 1e-15):
raise Exception('Core gas density must be provided as a float or list/array of floats in units of kg/m^3.')
for i in range(0, len(temperature)):
if not isinstance(temperature[i], (int, float)) or not (0 < temperature[i] and temperature[i] < 1e12):
raise Exception('Gas temperature must be provided as a float or list/array of floats in units of Kelvin.')
else:
nenvirons = nenvirons_halo
return frequency, redshift, axis_ratio, jet_power, source_age, halo_mass, betas, regions, rho0Value, temperature, active_age, equipartition, jet_lorentz, nenvirons
# Define random seed function
@jit(nopython=True) # Set "nopython" mode for best performance, equivalent to @njit
def __set_seed(value):
np.random.seed(value)
## Define functions for analytic modelling of the environment
# function to calculate properties of the environment and call RAiSE_evolution
def __RAiSE_environment(redshift, axis_ratio, jet_power, source_age, halo_mass=None, rand_profile=False, betas=None, regions=None, rho0Value=None, temperature=None, active_age=10.14, jet_lorentz=5., gammaCValue=5./3, aj_star=0.231, jet_angle=0.686, axis_exponent=0.343, fill_factor=0.549):
# check minimal inputs
if halo_mass == None and (not isinstance(betas, (list, np.ndarray)) or not isinstance(regions, (list, np.ndarray))):
raise Exception('Either the halo mass or full density profile must be provided as model inputs.')
# calculate gas mass and virial radius of halo unless density and temperature profile fully specified
gasfraction = 0
if not halo_mass == None:
rVir = (10**halo_mass*const.M_sun.value/(100./const.G.value*(100.*hubble*np.sqrt(OmegaM*(1 + redshift)**3 + OmegaD)/const.kpc.value)**2))**(1./3)
if rand_profile == False:
gasfraction = __HalogasfracFunction(halo_mass, redshift)
else:
gasfraction = __rand_norm(__HalogasfracFunction(halo_mass, redshift), __dHalogasfracFunction(halo_mass, redshift))
gasMass = 10**(halo_mass + gasfraction)*const.M_sun.value
# approximate the gas density profile of Vikhlinin 2006 by multiple density profiles with a simple beta dependence
if not isinstance(betas, (list, np.ndarray)) or not isinstance(regions, (list, np.ndarray)):
# set maximum number of regions
nregions = betaRegions
nregions, new_betas, new_regions = __DensityProfiler(rVir, nregions, rand_profile)
elif len(betas) == len(regions):
# set maximum number of regions
nregions = len(betas)
new_betas = np.asarray(betas.copy())
new_regions = np.asarray(regions.copy())
else:
raise Exception('Variables betas and regions must be arrays of the same length.')
# calculate the average temperature of the external medium
if temperature == None:
if not halo_mass == None:
if rand_profile == False:
tempFlat = 10**TmgAvg
tempCluster = 10**(TmgConst + TmgSlope*halo_mass)
else:
tempFlat = 10**(__rand_norm(TmgAvg, TmgStdev))
tempCluster = 10**(__rand_norm(TmgConst + TmgSlope*halo_mass, TmgError))
temperature = max(tempFlat, tempCluster) # take the highest temperature out of the flat profile and cluster model
else:
raise Exception('Either the halo mass or temperature must be provided as model inputs.')
# determine initial value of density parameter given gas mass and density profile
if not rho0Value == None:
# determine density parameter in the core
k0Value = rho0Value*new_regions[0]**new_betas[0]
# extend first beta region to a radius of zero
new_regions[0] = 0
elif not halo_mass == None:
# extend first beta region to a radius of zero
new_regions[0] = 0
# find relative values (i.e. to 1) of density parameter in each beta region
kValues = __DensityParameter(nregions, 1.0, new_betas, new_regions)
# determine density parameter in the core
k0Value = __k0ValueFinder(rVir, gasMass, nregions, new_betas, new_regions, kValues)
else:
raise Exception('Either the halo mass or core density must be provided as model inputs.')
# find values of density parameter in each beta region
kValues = __DensityParameter(nregions, k0Value, new_betas, new_regions)
# call RadioSourceEvolution function to calculate Dt tracks
return __RAiSE_evolution(redshift, axis_ratio, jet_power, source_age, active_age, gammaCValue, nregions, new_betas, new_regions, kValues, temperature, jet_lorentz, aj_star, jet_angle, axis_exponent, fill_factor)
# approximate the gas density profile of Vikhlinin 2006 by multiple density profiles with a simple beta dependence
@jit(nopython=True) # Set "nopython" mode for best performance, equivalent to @njit
def __DensityProfiler(rVir, nregions, rand_profile):
# instantiate variables
betas, regions = np.zeros(nregions), np.zeros(nregions)
# set values of Vikhlinin model parameters
if rand_profile == False:
alpha = alphaAvg
betaPrime = betaPrimeAvg
gammaPrime = gammaPrimeAvg # this value has no uncertainty
epsilon = epsilonAvg
rCore = rCoreAvg
rSlope = rSlopeAvg
else:
alpha = __rand_norm(alphaAvg, alphaStdev)
betaPrime = __rand_norm(betaPrimeAvg, betaPrimeStdev)
gammaPrime = __rand_norm(gammaPrimeAvg, gammaPrimeStdev) # this value has no uncertainty
epsilon = __rand_norm(epsilonAvg, epsilonStdev)
rCore = __rand_norm(rCoreAvg, rCoreStdev)
rSlope = __rand_norm(rSlopeAvg, rSlopeStdev)
# set minimum and maximum radius for density profile to be matched
rmin = rCutoff*rVir
rmax = rVir
# use logarithmic radius scale
r = rmin
ratio = (rmax/rmin)**(1./(nregions)) - 1
for count in range(0, nregions):
# set radius at low end of region
rlow = r
# calculate relative density at rlow, i.e. ignoring rho_0 factor
rhoLow = np.sqrt((rlow/(rCore*rVir))**(-alpha)/((1 + rlow**2/(rCore*rVir)**2)**(3*betaPrime - alpha/2.)*(1 + rlow**gammaPrime/(rSlope*rVir)**gammaPrime)**(epsilon/gammaPrime)))
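# i.e. the relative density (ignoring the rho_0 normalisation) follows
#   rho(r) ~ sqrt( (r/rc)**(-alpha) / ( (1 + (r/rc)**2)**(3*betaPrime - alpha/2)
#                                       * (1 + (r/rs)**gammaPrime)**(epsilon/gammaPrime) ) )
# with rc = rCore*rVir and rs = rSlope*rVir; this is the Vikhlinin 2006 form
# referenced above with the second core term removed (cf. the alphaAvg comment).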
# increment radius
dr = r*ratio
r = r + dr
# set radius at high end of region
rhigh = r
# calculate relative density at rhigh, i.e. ignoring rho_0 factor
rhoHigh = np.sqrt((rhigh/(rCore*rVir))**(-alpha)/((1 + rhigh**2/(rCore*rVir)**2)**(3*betaPrime - alpha/2.)*(1 + rhigh**gammaPrime/(rSlope*rVir)**gammaPrime)**(epsilon/gammaPrime)))
# set value of innermost radius of each beta region
if count == 0:
# extend first beta region to a radius of zero
regions[count] = 0
else:
regions[count] = rlow
# calculate exponent beta for each region to match density profile, ensuring beta is less than 2
if (-np.log(rhoLow/rhoHigh)/np.log(rlow/rhigh) < betaMax):
betas[count] = -np.log(rhoLow/rhoHigh)/np.log(rlow/rhigh)
else:
# ensure beta is less than (or equal to) 2
betas[count] = betaMax
# set this count to be the number of distinct regions
nregions = count + 1
break
return nregions, betas, regions
# find values of density parameter in each beta region
@jit(nopython=True) # Set "nopython" mode for best performance, equivalent to @njit
def __DensityParameter(nregions, k0Value, betas, regions):
# instantiate variables
kValues = np.zeros(nregions)
# calculate density parameters in each region
for count in range(0, nregions):
# match tracks between regions `a' and `b'
if count > 0:
# find replicating core density in region `b' required to match pressures and times
kValues[count] = kValues[count - 1]*regions[count]**(betas[count] - betas[count - 1])
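# i.e. continuity of the density profile at the boundary r_b = regions[count]:
# k_b*r_b**(-betas[count]) = k_a*r_b**(-betas[count - 1])  =>  k_b = k_a*r_b**(betas[count] - betas[count - 1])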
# if first region, set initial value of replicating core density as actual core density
else:
kValues[count] = k0Value
return kValues
# determine value of the density parameter at the core given gas mass and density profile
@jit(nopython=True) # Set "nopython" mode for best performance, equivalent to @njit
def __k0ValueFinder(rVir, gasMass, nregions, betas, regions, kValues):
# set volume to zero initially
volume = 0
# calculate the weighted volume integral by analytically integrating the volume in each beta region
for count in range(0, nregions):
# set lower bound of analytic integral
rlow = regions[count]
# set upper bound of analytic integral
if (count + 1 == nregions):
rhigh = rVir
else:
rhigh = regions[count + 1]
# increment total weighted volume by the weighted volume of this region
volume = volume + 4*np.pi*(kValues[count]/kValues[0])/(3 - betas[count])*(rhigh**(3 - betas[count]) - rlow**(3 - betas[count]))
# calculate density parameter at the core from the gas mass and weighted volume
k0Value = gasMass/volume
return k0Value
# random normal with values truncated to avoid sign changes
@jit(nopython=True) # Set "nopython" mode for best performance, equivalent to @njit
def __rand_norm(mean, stdev):
rand_number = np.random.normal(mean, stdev)
while (mean*rand_number < 0 or np.abs(rand_number - mean) > 2*stdev):
rand_number = np.random.normal(mean, stdev)
return rand_number
# gas fraction-halo mass relationship
@jit(nopython=True) # Set "nopython" mode for best performance, equivalent to @njit
def __HalogasfracFunction(halo_mass, redshift):
return max(halogasfracCONST1z0 + halogasfracCONST1z1*redshift, halogasfracCONST2z0 + halogasfracCONST2z1*redshift) + halogasfracSLOPE*(halo_mass - 14) + SAGEdensitycorr # in log space
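# e.g. for halo_mass = 14 at redshift = 0 this evaluates to
# max(-0.882, -0.921) + 0.053*(14 - 14) + (-0.1) ~ -0.98 dex,
# i.e. a gas mass of roughly 10 per cent of the halo mass.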
# gas fraction-halo mass relationship error
@jit(nopython=True) # Set "nopython" mode for best performance, equivalent to @njit
def __dHalogasfracFunction(halo_mass, redshift):
return dhalogasfracz0 + dhalogasfracz1*redshift # in log space
## Define functions required for RAiSE dynamical evolution
# function to calculate dynamical evolution of lobe and shocked shell
def __RAiSE_evolution(redshift, axis_ratio, jet_power, source_age, active_age, gammaCValue, nregions, betas, regions, kValues, temperature, jet_lorentz, aj_star=0.231, jet_angle=0.686, axis_exponent=0.343, fill_factor=0.549):
# convert jet power and source age to correct units
QavgValue = 10**jet_power/2. # set the power of *each* jet; convert from log space
if isinstance(source_age, (list, np.ndarray)):
tFinal = np.zeros_like(source_age)
for i in range(0, len(source_age)):
tFinal[i] = 10**source_age[i]*year # convert from log space years to seconds
else:
tFinal = np.array([10**source_age*year])
tActive = 10**active_age*year
# calculate angle of current radial line
angles = np.arange(0, nangles, 1).astype(np.int_)
dtheta = (np.pi/2)/nangles
theta = dtheta*(angles + 0.5)
# calculate opening angle of jet
open_angle = (jet_angle*np.pi/180)/(axis_ratio/2.83)
# evaluate the translation coefficients eta_c and eta_s
eta_c = 1./np.sqrt(axis_ratio**2*(np.sin(theta))**2 + (np.cos(theta))**2)
eta_s = 1./np.sqrt(axis_ratio**(2*shockAxisRatio)*(np.sin(theta))**2 + (np.cos(theta))**2)
# evaluate the translation coefficient zeta_s/eta_s at t -> infinity
zetaeta = np.sqrt(axis_ratio**(2*shockAxisRatio)*(np.sin(theta))**2 + (np.cos(theta))**2)/np.sqrt(axis_ratio**(4*shockAxisRatio)*(np.sin(theta))**2 + (np.cos(theta))**2)
eta_c[0], eta_s[0], zetaeta[0] = 1., 1., 1.
# calculate the differential volume element coefficient chi
dchi = 4*np.pi/3.*np.sin(theta)*np.sin(dtheta/2.)
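# note: summed over the nangles bins these coefficients total approximately
# chi = 2*np.pi/3 (since sin(dtheta/2) ~ dtheta/2 and the sin(theta) terms
# integrate to unity over 0..pi/2), so sum(dchi*R(theta)**3) recovers the
# volume enclosed within radius R(theta) on one side of the source.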
# solve RAiSE dynamics iteratively to find thermal component of lobe pressure
if jet_lorentz > 1:
# run code in strong-shock limit to calibrate initial velocity
x_time = 10**10.14*year
_, _, _, _, _, _, _, critical_point_1 = __RAiSE_runge_kutta(QavgValue, np.array([x_time]), x_time, axis_ratio, aj_star, axis_exponent, fill_factor, jet_lorentz, open_angle, angles, eta_c, eta_s, zetaeta, dchi, nregions, betas, regions, kValues, temperature, gammaCValue, critical_velocity=c_speed, strong_shock=True)
# run code for full RAiSE HD dynamical model
lobe_lengths, lobe_minor, shock_lengths, shock_pressures, lambda_crit, alphaP_denv, alpha_lambda, critical_point_3 = __RAiSE_runge_kutta(QavgValue, tFinal, tActive, axis_ratio, aj_star, axis_exponent, fill_factor, jet_lorentz, open_angle, angles, eta_c, eta_s, zetaeta, dchi, nregions, betas, regions, kValues, temperature, gammaCValue, critical_velocity=c_speed*critical_point_1[2]/critical_point_1[3])
else:
# run code for RAiSE X dynamical model
lobe_lengths, lobe_minor, shock_lengths, shock_pressures, lambda_crit, alphaP_denv, alpha_lambda, _ = __RAiSE_runge_kutta(QavgValue, tFinal, tActive, axis_ratio, aj_star, axis_exponent, fill_factor, jet_lorentz, open_angle, angles, eta_c, eta_s, zetaeta, dchi, nregions, betas, regions, kValues, temperature, gammaCValue)
return lobe_lengths, lobe_minor, shock_lengths, shock_pressures, lambda_crit, alphaP_denv, alpha_lambda
# function to apply Runge-Kutta method and extract values at requested time steps
@jit(nopython=True) # Set "nopython" mode for best performance, equivalent to @njit
def __RAiSE_runge_kutta(QavgValue, source_age, active_age, axis_ratio, aj_star, axis_exponent, fill_factor, jet_lorentz, open_angle, angles, eta_c, eta_s, zetaeta, dchi, nregions, betas, regions, kValues, temperature, gammaCValue, critical_velocity=0., strong_shock=False):
# instantiate variables
X, P = np.zeros((nangles, 5)), np.zeros((nangles, 4))
critical_point = np.zeros(4)
regionPointer = np.zeros(nangles).astype(np.int_)
lobe_minor, lambda_crit, alphaP_denv, alpha_lambda = np.zeros(len(source_age)), np.zeros(len(source_age)), np.zeros(len(source_age)), np.zeros(len(source_age))
lobe_lengths, shock_lengths, shock_pressures = np.zeros((nangles, len(source_age))), np.zeros((nangles, len(source_age))), np.zeros((nangles, len(source_age)))
# calculate injection ages to derive time-average power-law indices for external pressure and filling factor
inject_age = np.zeros(2*len(source_age))
inject_axis_ratios, inject_pressures, inject_lambdas = np.zeros(2*len(source_age)), np.zeros(2*len(source_age)), np.zeros(2*len(source_age))
for timePointer in range(0, len(source_age)):
inject_age[2*timePointer:2*(timePointer + 1)] = np.asarray([crit_age*source_age[timePointer], source_age[timePointer]])
inject_index = np.argsort(inject_age) # sort ages in ascending order
# calculate the spatially-averaged jet velocity and Lorentz factor
if jet_lorentz > 1:
bulk_lorentz = np.sqrt(jet_lorentz**2*aj_star**4 - aj_star**4 + 1)
bulk_velocity = np.sqrt((jet_lorentz**2*aj_star**4 - aj_star**4)/(jet_lorentz**2*aj_star**4 - aj_star**4 + 1))*c_speed
else:
bulk_lorentz, bulk_velocity = -1, -1
i = 0
for timePointer in range(0, len(source_age)):
# set initial conditions for each volume element
if timePointer == 0:
# calculate initial time and radius for ODE
FR2time = limTime
if jet_lorentz > 1:
FR2radius = bulk_velocity*limTime
FR2velocity = bulk_velocity # eta_R is very large
else:
FR2radius = np.sqrt(1 - 1./100**2)*c_speed*limTime
FR2velocity = np.sqrt(1 - 1./100**2)*c_speed
# test if this radius is above start of second region boundary
if (regions[1] < FR2radius):
FR2radius = regions[1]
if jet_lorentz > 1:
FR2time = regions[1]/bulk_velocity
FR2velocity = bulk_velocity
else:
FR2time = regions[1]/(np.sqrt(1 - 1./100**2)*c_speed)
FR2velocity = np.sqrt(1 - 1./100**2)*c_speed
# calculate the initial jet/shock shell radius and velocity for each angle theta
X[angles,0] = FR2time
X[angles,1] = FR2radius*eta_s
X[angles,2] = FR2velocity*eta_s
if jet_lorentz > 1:
X[0,3], X[angles[1:],3] = bulk_lorentz, 1./np.sqrt(1 - (FR2velocity*eta_s[angles[1:]]/c_speed)**2)
else:
X[0,3], X[angles[1:],3] = 100, 100*eta_s[angles[1:]]
X[angles,4] = -1 # null value
# set region pointer to first (non-zero) region if smaller than FR2 radius
index = regions[1] < X[angles,1]
regionPointer[index] = 1
regionPointer[np.logical_not(index)] = 0
# calculate fraction of jet power injected into each volume element
injectFrac = dchi*eta_s**(3 - betas[regionPointer[0]])*zetaeta**2
injectFrac = injectFrac/np.sum(injectFrac) # sum should be equal to unity
# solve ODE to find radius and pressure at each time step
while (X[0,0] < source_age[timePointer]):
while (X[0,0] < inject_age[inject_index[i]]):
# calculate the appropriate density profile for each angle theta
for anglePointer in range(0, nangles):
while (regionPointer[anglePointer] + 1 < nregions and X[anglePointer,1] > regions[regionPointer[anglePointer] + 1]):
regionPointer[anglePointer] = regionPointer[anglePointer] + 1
# check if next step passes time point of interest
if (X[0,0]*stepRatio > inject_age[inject_index[i]]):
step = inject_age[inject_index[i]] - X[0,0]
else:
step = X[0,0]*(stepRatio - 1)
# update estimates of time, radius and velocity
__rk4sys(step, X, P, QavgValue, active_age, aj_star, axis_exponent, fill_factor, jet_lorentz, open_angle, angles, injectFrac, eta_c, eta_s, zetaeta, dchi, regionPointer, betas, kValues, temperature, gammaCValue, critical_velocity, strong_shock)
X[:,3] = np.maximum(1, X[:,3])
# find location of jet--lobe transition
critical_point[0], critical_point[1], critical_point[2], critical_point[3] = X[0,0], X[0,1], X[0,2]*X[0,3], X[0,4]
# record axis ratio, external pressure and filling factor at the injection times
if P[-1,0] > 0:
inject_axis_ratios[inject_index[i]] = 1./(P[0,0]/P[-1,0])**2 # inverted to match alpha_lambda definition
else:
inject_axis_ratios[inject_index[i]] = 1
inject_pressures[inject_index[i]] = P[0,2]
inject_lambdas[inject_index[i]] = P[0,3]
# update injection age if not a requested source age
if inject_age[inject_index[i]] < source_age[timePointer]:
i = i + 1
# calculate the lobe and shocked shell length, shock pressure and total pressure as a function of angle
lobe_lengths[angles,timePointer] = P[angles,0]
shock_lengths[angles,timePointer] = X[angles,1]
shock_pressures[angles,timePointer] = P[angles,1]
lambda_crit[timePointer] = P[0,3]
# calculate lobe minor axis (associated with dimensions of shocked shell) at this time step
lobe_minor[timePointer] = X[-1,1]*eta_c[-1]/(shockRadius*eta_s[-1])
# calculate the slope of external pressure profile at this time step
if inject_pressures[inject_index[2*timePointer]] <= 0:
alphaP_denv[timePointer] = 0
else:
alphaP_denv[timePointer] = np.log(inject_pressures[2*timePointer + 1]/inject_pressures[2*timePointer])/np.log(inject_age[2*timePointer + 1]/inject_age[2*timePointer])
if inject_lambdas[2*timePointer] <= 0:
alpha_lambda[timePointer] = 1e9 # no emission from this injection time
else:
alpha_lambda[timePointer] = np.log(inject_lambdas[2*timePointer + 1]/inject_lambdas[2*timePointer])/np.log(inject_age[2*timePointer + 1]/inject_age[2*timePointer]) + np.log(inject_axis_ratios[2*timePointer + 1]/inject_axis_ratios[2*timePointer])/np.log(inject_age[2*timePointer + 1]/inject_age[2*timePointer]) # filling factor and changing volume/axis ratio
return lobe_lengths, lobe_minor, shock_lengths, shock_pressures, lambda_crit, alphaP_denv, alpha_lambda, critical_point
# Runge-Kutta method to solve ODE in dynamical model
@jit(nopython=True) # Set "nopython" mode for best performance, equivalent to @njit
def __rk4sys(step, X, P, QavgValue, active_age, aj_star, axis_exponent, fill_factor, jet_lorentz, open_angle, angles, injectFrac, eta_c, eta_s, zetaeta, dchi, regionPointer, betas, kValues, temperature, gammaCValue, critical_velocity, strong_shock):
# instantiate variables
Y, K1, K2, K3, K4 = np.zeros((len(angles), 5)), np.zeros((len(angles), 5)), np.zeros((len(angles), 5)), np.zeros((len(angles), 5)), np.zeros((len(angles), 5))
# fourth-order Runge-Kutta method
__xpsys(X, K1, P, QavgValue, active_age, aj_star, axis_exponent, fill_factor, jet_lorentz, open_angle, angles, injectFrac, eta_c, eta_s, zetaeta, dchi, regionPointer, betas, kValues, temperature, gammaCValue, critical_velocity, strong_shock)
Y[:,:] = X[:,:] + 0.5*step*K1[:,:]
__xpsys(Y, K2, P, QavgValue, active_age, aj_star, axis_exponent, fill_factor, jet_lorentz, open_angle, angles, injectFrac, eta_c, eta_s, zetaeta, dchi, regionPointer, betas, kValues, temperature, gammaCValue, critical_velocity, strong_shock)
Y[:,:] = X[:,:] + 0.5*step*K2[:,:]
__xpsys(Y, K3, P, QavgValue, active_age, aj_star, axis_exponent, fill_factor, jet_lorentz, open_angle, angles, injectFrac, eta_c, eta_s, zetaeta, dchi, regionPointer, betas, kValues, temperature, gammaCValue, critical_velocity, strong_shock)
Y[:,:] = X[:,:] + 0.5*step*K3[:,:]
__xpsys(Y, K4, P, QavgValue, active_age, aj_star, axis_exponent, fill_factor, jet_lorentz, open_angle, angles, injectFrac, eta_c, eta_s, zetaeta, dchi, regionPointer, betas, kValues, temperature, gammaCValue, critical_velocity, strong_shock)
X[:,:] = X[:,:] + (step/6.)*(K1[:,:] + 2*K2[:,:] + 2*K3[:,:] + K4[:,:])
# coupled second order differential equations for lobe evolution
@jit(nopython=True) # Set "nopython" mode for best performance, equivalent to @njit
def __xpsys(X, f, P, QavgValue, active_age, aj_star, axis_exponent, fill_factor, jet_lorentz, open_angle, angles, injectFrac, eta_c, eta_s, zetaeta, dchi, regionPointer, betas, kValues, temperature, gammaCValue, critical_velocity, strong_shock):
# Differential equations for X[0,1,2,3,4] = (time, radius, velocity, lorentz_factor, thermal_velocity)
# Additional variable for P[0,1,2,3] = (lobe_length, lobe_pressure, external_pressure, lambda_crit)
f[angles,0] = 1.
f[angles,1] = X[angles,2]
# test if the AGN is active at this time-step
if (X[0,0] <= active_age):
active_jet = 1
else:
active_jet = 0
# calculate the spatially-averaged jet velocity and Lorentz factor
if jet_lorentz > 1:
bulk_lorentz = np.sqrt(jet_lorentz**2*aj_star**4 - aj_star**4 + 1)
bulk_velocity = np.sqrt((jet_lorentz**2*aj_star**4 - aj_star**4)/(jet_lorentz**2*aj_star**4 - aj_star**4 + 1))*c_speed
else:
bulk_lorentz, bulk_velocity = -1, -1
# TWO-PHASE FLUID
if jet_lorentz > 1:
# calculate the lobe formation scale
eta_R = QavgValue*bulk_lorentz**2/(2*np.pi*kValues[regionPointer[0]]*(bulk_lorentz*bulk_velocity)*(bulk_lorentz - 1)*c_speed**2*(1 - np.cos(open_angle))*X[0,1]**(2 - betas[regionPointer[0]]))
# calculate lambda_crit
#if (eta_R/bulk_lorentz**2) > 1:
# lambda_crit = 0
#else:
# lambda_crit = 1
lambda_crit = np.exp(-(eta_R/bulk_lorentz**2)/(2*np.log(2)))
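# this smooth form replaces the step function commented out above: it gives
# lambda_crit ~ 0.5 when eta_R ~ bulk_lorentz**2, and tends to 1 (lobe term
# dominates) for eta_R << bulk_lorentz**2 and to 0 (jet term dominates) for
# eta_R >> bulk_lorentz**2.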
P[0,3] = lambda_crit
else:
P[0,3] = 1
# ACCELERATION
# update fraction of jet power injected into each volume element
injectFrac_new = dchi*eta_s**(3 - betas[regionPointer[0]])*zetaeta**2
injectFrac_new = injectFrac_new/np.sum(injectFrac_new) # sum should be equal to unity
if jet_lorentz > 1:
injectFrac[angles] = (1 - lambda_crit)*injectFrac_new + lambda_crit*injectFrac # keep static at late times
else:
injectFrac[angles] = injectFrac_new[angles]
# acceleration of jet-head
if jet_lorentz > 1:
jet_acceleration = (betas[regionPointer[0]] - 2)*bulk_velocity*X[0,2]/(2*X[0,1]*(1 + eta_R**(-1./2))**2*eta_R**(1./2))
# acceleration of lobe (supersonic/subsonic)
if jet_lorentz > 1 and strong_shock == True:
f[angles,2] = np.minimum((gammaCValue - 1)*injectFrac[angles]*(QavgValue*active_jet)*X[angles,1]**(betas[regionPointer[angles]] - 3)/(X[angles,2]*(1 + (X[angles,3]*X[angles,2]/c_speed)**2)*dchi[angles]*(X[angles,3]*zetaeta[angles])**2*kValues[regionPointer[angles]]) + (betas[regionPointer[angles]] - 3*gammaCValue)*(X[angles,2])**2/(2*X[angles,1]*(1 + (X[angles,3]*X[angles,2]/c_speed)**2)), (betas[regionPointer[angles]] - 2)/(5 - betas[regionPointer[angles]]) * X[angles,2]*X[angles,3]/(X[0,0] + year)) # ensure model doesn't run slower than limit due to numerics
elif jet_lorentz > 1:
f[angles,2] = (gammaCValue - 1)*injectFrac[angles]*(QavgValue*active_jet)*X[angles,1]**(betas[regionPointer[angles]] - 3)/(X[angles,2]*(1 + (X[angles,3]*X[angles,2]/c_speed)**2)*dchi[angles]*(X[angles,3]*zetaeta[angles])**2*kValues[regionPointer[angles]]) + (betas[regionPointer[angles]] - 3*gammaCValue)*(X[angles,2])**2/(2*X[angles,1]*(1 + (X[angles,3]*X[angles,2]/c_speed)**2)) - (3*gammaCValue - betas[regionPointer[angles]])*(k_B*temperature/maverage)/(2*X[angles,1]*(1 + (X[angles,3]*X[angles,2]/c_speed)**2)*(X[angles,3]*zetaeta[angles])**2)
else:
sub_angles = (X[angles,2]*X[angles,3]*zetaeta)**2/(gammaX*(k_B*temperature/maverage)) <= 1
super_angles = np.logical_not(sub_angles)
f[super_angles,2] = (gammaX + 1)*(gammaCValue - 1)*injectFrac[super_angles]*(QavgValue*active_jet)*X[super_angles,1]**(betas[regionPointer[super_angles]] - 3)/(2*X[super_angles,2]*(1 + (X[super_angles,3]*X[super_angles,2]/c_speed)**2)*dchi[super_angles]*(X[super_angles,3]*zetaeta[super_angles])**2*kValues[regionPointer[super_angles]]) + (betas[regionPointer[super_angles]] - 3*gammaCValue)*(X[super_angles,2])**2/(2*X[super_angles,1]*(1 + (X[super_angles,3]*X[super_angles,2]/c_speed)**2)) + (gammaX - 1)*(3*gammaCValue - betas[regionPointer[super_angles]])*(k_B*temperature/maverage)/(4*X[super_angles,1]*(1 + (X[super_angles,3]*X[super_angles,2]/c_speed)**2)*(X[super_angles,3]*zetaeta[super_angles])**2)
f[sub_angles,2] = (betas[regionPointer[sub_angles]] - 2)*(X[sub_angles,2])**2/X[sub_angles,1]
# combine acceleration from jet-head and lobe as two-phase fluid
if jet_lorentz > 1:
if (lambda_crit < lambda_min or X[0,0] < 10*limTime): # improve stability
f[0,2], f[angles[1:],2] = jet_acceleration, jet_acceleration*eta_s[angles[1:]]
X[angles[1:],2] = X[0,2]*eta_s[angles[1:]]
else:
f[0,2], f[angles[1:],2] = (1 - lambda_crit)*jet_acceleration + lambda_crit*f[0,2], (1 - lambda_crit)*jet_acceleration*eta_s[angles[1:]] + lambda_crit*f[angles[1:],2]
# calculate Lorentz factor of two-phase fluid
f[angles,3] = X[angles,3]**3*X[angles,2]*f[angles,2]/c_speed**2
# PRESSURES
# external pressure at each volume element
P[angles,2] = kValues[regionPointer[angles]]*(k_B*temperature/maverage)*X[angles,1]**(-betas[regionPointer[angles]])
# set velocity associated with thermal component of lobe pressure
if jet_lorentz > 1 and critical_velocity > 0:
if (lambda_crit < lambda_min or X[0,0] < 10*limTime): # improve stability
f[0,4], f[angles[1:],4] = jet_acceleration, jet_acceleration*eta_s[angles[1:]]
X[angles[1:],4] = X[0,4]*eta_s[angles[1:]]
else:
f[angles,4] = (betas[regionPointer[angles]] - 2)/(5 - betas[regionPointer[angles]]) * X[angles,4]/(X[0,0] + year)
else:
X[angles,4], f[angles,4] = X[angles,2]*X[angles,3], f[angles,2]
# jet/lobe pressure at each volume element
volume = X[angles,1]**3*dchi[angles]
if jet_lorentz > 1:
# calculate lobe pressure
P[angles,1] = zetaeta[angles]**2*kValues[regionPointer[angles]]*X[angles,1]**(-betas[regionPointer[angles]])*(np.minimum(X[angles,2], X[angles,4]))**2 + kValues[regionPointer[angles]]*(k_B*temperature/maverage)*X[angles,1]**(-betas[regionPointer[angles]])
# calculate average pressure across jet/lobe
pressure = np.sum(P[angles,1]*volume)/np.sum(volume)
# set average pressure in all of lobe other than hotspot
P[angles[1:],1] = pressure
else:
# calculate lobe pressure
P[super_angles,1] = 2./(gammaX + 1)*zetaeta[super_angles]**2*kValues[regionPointer[super_angles]]*X[super_angles,1]**(-betas[regionPointer[super_angles]])*(X[super_angles,2]*X[super_angles,3])**2 - (gammaX - 1)/(gammaX + 1)*kValues[regionPointer[super_angles]]*(k_B*temperature/maverage)*X[super_angles,1]**(-betas[regionPointer[super_angles]])
P[sub_angles,1] = P[sub_angles,2]
# calculate average pressure across jet/lobe
pressure = np.sum(P[angles,1]*volume)/np.sum(volume)
# set average pressure in all of lobe other than hotspot
P[angles[1:],1] = pressure
# AXIS RATIO
if jet_lorentz > 1:
# calculate total mass of particles from the jet
particle_mass = QavgValue*np.minimum(active_age, X[0,0])/((bulk_lorentz - 1)*c_speed**2)
# calculate volume occupied by particles expanding at sound speed and maximum fillable volume within shocked shell
jet_sound = c_speed*np.sqrt(gammaJ - 1)
#%%
import pandas as pd
import numpy as np
from datetime import datetime
import os
import pickle
# List down file paths
#dir_data = "../smoking-lvm-cleaned-data/final"
exec(open('../env_vars.py').read())
dir_data = os.environ['dir_data']
dir_picklejar = os.environ['dir_picklejar']
# Read in data
data_dates = pd.read_csv(os.path.join(os.path.realpath(dir_data), 'participant-dates.csv'))
data_selfreport = pd.read_csv(os.path.join(os.path.realpath(dir_data), 'self-report-smoking-final.csv'))
data_hq_episodes = pd.read_csv(os.path.join(os.path.realpath(dir_data), 'hq-episodes-final.csv'))
#%%
###############################################################################
# Data preparation: data_dates data frame
###############################################################################
# Create unix timestamps corresponding to 12AM of a given human-readable date
data_dates["start_date_unixts"] = (
data_dates["start_date"]
.apply(lambda x: datetime.strptime(x, "%m/%d/%Y"))
.apply(lambda x: datetime.timestamp(x))
)
data_dates["quit_date_unixts"] = (
data_dates["quit_date"]
.apply(lambda x: datetime.strptime(x, "%m/%d/%Y"))
.apply(lambda x: datetime.timestamp(x))
)
data_dates["expected_end_date_unixts"] = (
data_dates["expected_end_date"]
.apply(lambda x: datetime.strptime(x, "%m/%d/%Y"))
.apply(lambda x: datetime.timestamp(x))
)
data_dates["actual_end_date_unixts"] = (
data_dates["actual_end_date"]
.apply(lambda x: datetime.strptime(x, "%m/%d/%Y"))
.apply(lambda x: datetime.timestamp(x))
)
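# Illustrative note (hypothetical date): datetime.timestamp() on the naive datetime
# returned by strptime is interpreted in the machine's local timezone, e.g.
# datetime.timestamp(datetime.strptime("01/15/2018", "%m/%d/%Y")) gives the POSIX
# seconds corresponding to local midnight on that date.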
# More tidying up
data_dates = (
data_dates
.rename(columns={"participant": "participant_id",
"quit_date": "quit_date_hrts",
"start_date": "start_date_hrts",
"actual_end_date": "actual_end_date_hrts",
"expected_end_date": "expected_end_date_hrts"})
.loc[:, ["participant_id",
"start_date_hrts","quit_date_hrts",
"expected_end_date_hrts", "actual_end_date_hrts",
"start_date_unixts", "quit_date_unixts",
"expected_end_date_unixts","actual_end_date_unixts"]]
)
#%%
###############################################################################
# Merge data_selfreport with data_dates
###############################################################################
data_selfreport = data_dates.merge(data_selfreport,
how = 'left',
on = 'participant_id')
#%%
###############################################################################
# Data preparation: data_selfreport data frame
###############################################################################
# Drop the participants labelled 10X as they are pilot individuals
data_selfreport = data_selfreport.dropna(how = 'any', subset=['hour'])
def calculate_delta(message):
sr_accptresponse = ['Smoking Event(less than 5 minutes ago)',
'Smoking Event(5 - 15 minutes ago)',
'Smoking Event(15 - 30 minutes ago)',
'Smoking Event(more than 30 minutes ago)']
sr_dictionary = {'Smoking Event(less than 5 minutes ago)': 1,
'Smoking Event(5 - 15 minutes ago)': 2,
'Smoking Event(15 - 30 minutes ago)': 3,
'Smoking Event(more than 30 minutes ago)': 4}
if message in sr_accptresponse:
# Map the reported recency of smoking to an ordinal code (1-4)
use_delta = sr_dictionary[message]
else:
# If the message is not one of the accepted smoking-event responses,
# then we treat the time the participant smoked as missing
use_delta = pd.NA
return use_delta
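# Illustrative usage of calculate_delta (follows the dictionary above):
# calculate_delta('Smoking Event(5 - 15 minutes ago)') -> 2
# calculate_delta('Smoking Event(more than 30 minutes ago)') -> 4
# any other message -> pd.NA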
def round_day(raw_day):
if pd.isna(raw_day):
# Missing values for raw_day can occur
# if participant reported smoking more than 30 minutes ago
out_day = pd.NA
else:
# This takes care of the instances when participant reported to smoke
# less than 30 minutes ago
if raw_day >= 0:
# If on or after Quit Date, round down to the nearest integer
# e.g., floor(2.7)=2
out_day = np.floor(raw_day)
else:
# If before Quit Date, round up to the nearest integer
# e.g., ceil(-2.7)=-2
out_day = np.ceil(raw_day)
return out_day
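# Illustrative usage of round_day:
# round_day(2.7) -> 2.0 (on/after Quit Date: np.floor)
# round_day(-2.7) -> -2.0 (before Quit Date: np.ceil)
# round_day(pd.NA) -> pd.NA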
#%%
data_selfreport['date'] = pd.to_datetime(data_selfreport.date)
data_selfreport['start_date'] = pd.to_datetime(data_selfreport.start_date_hrts)
data_selfreport['quit_date'] = pd.to_datetime(data_selfreport.quit_date_hrts)
data_selfreport["delta"] = data_selfreport["message"].apply(lambda x: calculate_delta(x))
# Create a new variable, study_day: number of days since participant entered
# the study
data_selfreport['study_day'] = (data_selfreport['date'] - data_selfreport['start_date']).dt.days
# Create a new variable, day_since_quit: number of days before or after
# 12AM on Quit Date
data_selfreport['day_since_quit'] = (data_selfreport['date'] - data_selfreport['quit_date']).dt.days
# Create a new variable, is_post_quit: whether a given day falls before or on/after 12AM on Quit Date
data_selfreport["is_post_quit"] = data_selfreport["day_since_quit"].apply(lambda x: 0 if x < 0 else 1)
# Create a new variable, day_within_period:
# if is_post_quit==0, number of days after 12AM on start of study
# if is_post_quit==1, number of days after 12AM on Quit Date
# hence day_within_period is a count variable with ZERO as minimum value
data_selfreport["day_within_period"] = np.where(data_selfreport["is_post_quit"]==0,
data_selfreport["study_day"],
data_selfreport["day_since_quit"])
# Number of hours elapsed since the beginning of the study
data_selfreport['hours_since_start_of_study'] = (data_selfreport['date'] - data_selfreport['start_date'])/np.timedelta64(1,'h')
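# Note: dividing a Timedelta column by np.timedelta64(1, 'h') yields float hours,
# e.g. pd.Timedelta('90min') / np.timedelta64(1, 'h') == 1.5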
import h5py
import numpy as np
import sys
np.set_printoptions(threshold=sys.maxsize) # print arrays in full; np.nan is not accepted as a threshold by recent NumPy
from shutil import copyfile
copyfile("dummy_lutnet.h5", "pretrained_bin.h5") # create pretrained_bin.h5 using the data structure from dummy_lutnet.h5
bl = h5py.File("baseline_pruned.h5", 'r')
#dummy = h5py.File("dummy.h5", 'r')
pretrained = h5py.File("pretrained_bin.h5", 'r+')
# dense layer 1
bl_w1 = bl["model_weights"]["binary_dense_1"]["binary_dense_1"]["Variable_1:0"]
bl_pruning_mask = bl["model_weights"]["binary_dense_1"]["binary_dense_1"]["pruning_mask:0"]
bl_gamma = bl["model_weights"]["binary_dense_1"]["binary_dense_1"]["Variable:0"]
zero_fill = np.zeros(np.shape(np.array(bl_w1)))
pret_w1 = pretrained["model_weights"]["binary_dense_1"]["binary_dense_1"]["Variable_1:0"]
pret_pruning_mask = pretrained["model_weights"]["binary_dense_1"]["binary_dense_1"]["pruning_mask:0"]
p_gamma = pretrained["model_weights"]["binary_dense_1"]["binary_dense_1"]["Variable:0"]
pret_w1[...] = np.array(bl_w1)
p_gamma[...] = np.array(bl_gamma)
pret_pruning_mask[...] = np.array(bl_pruning_mask)
print(np.sum(np.array(bl_pruning_mask)), np.prod(np.shape(np.array(bl_pruning_mask))))
# dense layer 2
bl_w1 = bl["model_weights"]["binary_dense_2"]["binary_dense_2"]["Variable_1:0"]
bl_rand_map_0 = bl["model_weights"]["binary_dense_2"]["binary_dense_2"]["rand_map_0:0"]
bl_pruning_mask = bl["model_weights"]["binary_dense_2"]["binary_dense_2"]["pruning_mask:0"]
bl_gamma = bl["model_weights"]["binary_dense_2"]["binary_dense_2"]["Variable:0"]
bl_means = bl["model_weights"]["residual_sign_1"]["residual_sign_1"]["means:0"]
pret_rand_map_0 = pretrained["model_weights"]["binary_dense_2"]["binary_dense_2"]["rand_map_0:0"]
pret_rand_map_1 = pretrained["model_weights"]["binary_dense_2"]["binary_dense_2"]["rand_map_1:0"]
pret_rand_map_2 = pretrained["model_weights"]["binary_dense_2"]["binary_dense_2"]["rand_map_2:0"]
pret_pruning_mask = pretrained["model_weights"]["binary_dense_2"]["binary_dense_2"]["pruning_mask:0"]
p_gamma = pretrained["model_weights"]["binary_dense_2"]["binary_dense_2"]["Variable:0"]
pret_means = pretrained["model_weights"]["residual_sign_1"]["residual_sign_1"]["means:0"]
pret_c1 = pretrained["model_weights"]["binary_dense_2"]["binary_dense_2"]["Variable_1:0"]
pret_c2 = pretrained["model_weights"]["binary_dense_2"]["binary_dense_2"]["Variable_2:0"]
pret_c3 = pretrained["model_weights"]["binary_dense_2"]["binary_dense_2"]["Variable_3:0"]
pret_c4 = pretrained["model_weights"]["binary_dense_2"]["binary_dense_2"]["Variable_4:0"]
pret_c5 = pretrained["model_weights"]["binary_dense_2"]["binary_dense_2"]["Variable_5:0"]
pret_c6 = pretrained["model_weights"]["binary_dense_2"]["binary_dense_2"]["Variable_6:0"]
pret_c7 = pretrained["model_weights"]["binary_dense_2"]["binary_dense_2"]["Variable_7:0"]
pret_c8 = pretrained["model_weights"]["binary_dense_2"]["binary_dense_2"]["Variable_8:0"]
pret_c9 = pretrained["model_weights"]["binary_dense_2"]["binary_dense_2"]["Variable_9:0"]
pret_c10= pretrained["model_weights"]["binary_dense_2"]["binary_dense_2"]["Variable_10:0"]
pret_c11= pretrained["model_weights"]["binary_dense_2"]["binary_dense_2"]["Variable_11:0"]
pret_c12= pretrained["model_weights"]["binary_dense_2"]["binary_dense_2"]["Variable_12:0"]
pret_c13= pretrained["model_weights"]["binary_dense_2"]["binary_dense_2"]["Variable_13:0"]
pret_c14= pretrained["model_weights"]["binary_dense_2"]["binary_dense_2"]["Variable_14:0"]
pret_c15= pretrained["model_weights"]["binary_dense_2"]["binary_dense_2"]["Variable_15:0"]
pret_c16= pretrained["model_weights"]["binary_dense_2"]["binary_dense_2"]["Variable_16:0"]
pret_c17= pretrained["model_weights"]["binary_dense_2"]["binary_dense_2"]["Variable_17:0"]
pret_c18= pretrained["model_weights"]["binary_dense_2"]["binary_dense_2"]["Variable_18:0"]
pret_c19= pretrained["model_weights"]["binary_dense_2"]["binary_dense_2"]["Variable_19:0"]
pret_c20= pretrained["model_weights"]["binary_dense_2"]["binary_dense_2"]["Variable_20:0"]
pret_c21= pretrained["model_weights"]["binary_dense_2"]["binary_dense_2"]["Variable_21:0"]
pret_c22= pretrained["model_weights"]["binary_dense_2"]["binary_dense_2"]["Variable_22:0"]
pret_c23= pretrained["model_weights"]["binary_dense_2"]["binary_dense_2"]["Variable_23:0"]
pret_c24= pretrained["model_weights"]["binary_dense_2"]["binary_dense_2"]["Variable_24:0"]
pret_c25= pretrained["model_weights"]["binary_dense_2"]["binary_dense_2"]["Variable_25:0"]
pret_c26= pretrained["model_weights"]["binary_dense_2"]["binary_dense_2"]["Variable_26:0"]
pret_c27= pretrained["model_weights"]["binary_dense_2"]["binary_dense_2"]["Variable_27:0"]
pret_c28= pretrained["model_weights"]["binary_dense_2"]["binary_dense_2"]["Variable_28:0"]
pret_c29= pretrained["model_weights"]["binary_dense_2"]["binary_dense_2"]["Variable_29:0"]
pret_c30= pretrained["model_weights"]["binary_dense_2"]["binary_dense_2"]["Variable_30:0"]
pret_c31= pretrained["model_weights"]["binary_dense_2"]["binary_dense_2"]["Variable_31:0"]
pret_c32= pretrained["model_weights"]["binary_dense_2"]["binary_dense_2"]["Variable_32:0"]
pret_w1 = pretrained["model_weights"]["binary_dense_2"]["binary_dense_2"]["Variable_33:0"]
pret_rand_map_exp_0 = pretrained["model_weights"]["binary_dense_2"]["binary_dense_2"]["rand_map_exp_0:0"]
pret_rand_map_exp_1 = pretrained["model_weights"]["binary_dense_2"]["binary_dense_2"]["rand_map_exp_1:0"]
pret_rand_map_exp_2 = pretrained["model_weights"]["binary_dense_2"]["binary_dense_2"]["rand_map_exp_2:0"]
weight_shape = np.shape(bl_w1)
tile_shape = np.shape(pret_c1)
zero_fill = np.zeros(tile_shape)
one_fill = np.ones(tile_shape)
neg_one_fill = -np.ones(tile_shape)
# randomisation and pruning recovery
bl_w1_unroll = np.array(bl_w1)
bl_w1 = np.array(bl_w1)
rand_map_0 = np.arange(tile_shape[0])
np.random.shuffle(rand_map_0)
rand_map_1 = np.arange(tile_shape[0])
np.random.shuffle(rand_map_1)
rand_map_2 = np.arange(tile_shape[0])
np.random.shuffle(rand_map_2)
pruning_mask = np.array(bl_pruning_mask).astype(bool)
init_mask = np.logical_not(pruning_mask[rand_map_0])
pruning_mask_recover = np.logical_and(pruning_mask, init_mask)[np.argsort(rand_map_0)]
pruning_mask = np.logical_or(pruning_mask, pruning_mask_recover)
init_mask = np.reshape(init_mask, tile_shape)
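# Note on the recovery step above (illustrative): for a permutation p, np.argsort(p) is
# its inverse, so indexing with [np.argsort(rand_map_0)] maps the shuffled entries back
# to their original positions, e.g. with p = np.array([2, 0, 1]) and a = np.array([10, 20, 30]),
# (a[p][np.argsort(p)] == a).all() is True.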
# expand randomisation map across tiles
rand_map_0_expand = np.tile(rand_map_0,[weight_shape[0]/tile_shape[0]])
rand_map_1_expand = np.tile(rand_map_1,[weight_shape[0]/tile_shape[0]])
rand_map_2_expand = np.tile(rand_map_2,[weight_shape[0]/tile_shape[0]])
for i in range(weight_shape[0]):
rand_map_0_expand[i] = rand_map_0_expand[i] + (tile_shape[0]*(weight_shape[0]/tile_shape[0]-1)) * (rand_map_0_expand[i]/tile_shape[0]) + tile_shape[0]*(i%weight_shape[0]/tile_shape[0])
rand_map_1_expand[i] = rand_map_1_expand[i] + (tile_shape[0]*(weight_shape[0]/tile_shape[0]-1)) * (rand_map_1_expand[i]/tile_shape[0]) + tile_shape[0]*(i%weight_shape[0]/tile_shape[0])
rand_map_2_expand[i] = rand_map_2_expand[i] + (tile_shape[0]*(weight_shape[0]/tile_shape[0]-1)) * (rand_map_2_expand[i]/tile_shape[0]) + tile_shape[0]*(i%weight_shape[0]/tile_shape[0])
bl_w1_rand_0 = bl_w1_unroll[rand_map_0_expand]
bl_w1_rand_0 = np.reshape(bl_w1_rand_0, weight_shape)
w1 = bl_w1
# connect1 only
c1 = one_fill
c2 = neg_one_fill
c3 = one_fill
c4 = neg_one_fill
c5 = one_fill
c6 = neg_one_fill
c7 = one_fill
c8 = neg_one_fill
c9 = one_fill
c10 = neg_one_fill
c11 = one_fill
c12 = neg_one_fill
c13 = one_fill
c14 = neg_one_fill
c15 = one_fill
c16 = neg_one_fill
c17 = neg_one_fill
c18 = one_fill
c19 = neg_one_fill
c20 = one_fill
c21 = neg_one_fill
c22 = one_fill
c23 = neg_one_fill
c24 = one_fill
c25 = neg_one_fill
c26 = one_fill
c27 = neg_one_fill
c28 = one_fill
c29 = neg_one_fill
c30 = one_fill
c31 = neg_one_fill
c32 = one_fill
pret_w1 [...] = w1
pret_c1 [...] = c1
pret_c2 [...] = c2
pret_c3 [...] = c3
pret_c4 [...] = c4
pret_c5 [...] = c5
pret_c6 [...] = c6
pret_c7 [...] = c7
pret_c8 [...] = c8
pret_c9 [...] = c9
pret_c10[...] = c10
pret_c11[...] = c11
pret_c12[...] = c12
pret_c13[...] = c13
pret_c14[...] = c14
pret_c15[...] = c15
pret_c16[...] = c16
pret_c17[...] = c17
pret_c18[...] = c18
pret_c19[...] = c19
pret_c20[...] = c20
pret_c21[...] = c21
pret_c22[...] = c22
pret_c23[...] = c23
pret_c24[...] = c24
pret_c25[...] = c25
pret_c26[...] = c26
pret_c27[...] = c27
pret_c28[...] = c28
pret_c29[...] = c29
pret_c30[...] = c30
pret_c31[...] = c31
pret_c32[...] = c32
pret_rand_map_0[...] = np.reshape(rand_map_0, (-1,1)).astype(float)
pret_rand_map_1[...] = np.reshape(rand_map_1, (-1,1)).astype(float)
pret_rand_map_2[...] = np.reshape(rand_map_2, (-1,1)).astype(float)
p_gamma[...] = np.array(bl_gamma)
pret_means[...] = np.array(bl_means)
pret_pruning_mask[...] = np.array(bl_pruning_mask)
rand_map_0_expand = np.reshape(rand_map_0_expand, [-1,1]).astype(float)
pret_rand_map_exp_0[...] = rand_map_0_expand
rand_map_1_expand = np.reshape(rand_map_1_expand, [-1,1]).astype(float)
pret_rand_map_exp_1[...] = rand_map_1_expand
rand_map_2_expand = np.reshape(rand_map_2_expand, [-1,1]).astype(float)
pret_rand_map_exp_2[...] = rand_map_2_expand
print(np.sum(np.array(bl_pruning_mask)), np.prod(np.shape(np.array(bl_pruning_mask))))
# dense layer 3
bl_w1 = bl["model_weights"]["binary_dense_3"]["binary_dense_3"]["Variable_1:0"]
bl_rand_map_0 = bl["model_weights"]["binary_dense_3"]["binary_dense_3"]["rand_map_0:0"]
bl_pruning_mask = bl["model_weights"]["binary_dense_3"]["binary_dense_3"]["pruning_mask:0"]
bl_gamma = bl["model_weights"]["binary_dense_3"]["binary_dense_3"]["Variable:0"]
bl_means = bl["model_weights"]["residual_sign_2"]["residual_sign_2"]["means:0"]
pret_rand_map_0 = pretrained["model_weights"]["binary_dense_3"]["binary_dense_3"]["rand_map_0:0"]
pret_rand_map_1 = pretrained["model_weights"]["binary_dense_3"]["binary_dense_3"]["rand_map_1:0"]
pret_rand_map_2 = pretrained["model_weights"]["binary_dense_3"]["binary_dense_3"]["rand_map_2:0"]
pret_pruning_mask = pretrained["model_weights"]["binary_dense_3"]["binary_dense_3"]["pruning_mask:0"]
p_gamma = pretrained["model_weights"]["binary_dense_3"]["binary_dense_3"]["Variable:0"]
pret_means = pretrained["model_weights"]["residual_sign_2"]["residual_sign_2"]["means:0"]
pret_c1 = pretrained["model_weights"]["binary_dense_3"]["binary_dense_3"]["Variable_1:0"]
pret_c2 = pretrained["model_weights"]["binary_dense_3"]["binary_dense_3"]["Variable_2:0"]
pret_c3 = pretrained["model_weights"]["binary_dense_3"]["binary_dense_3"]["Variable_3:0"]
pret_c4 = pretrained["model_weights"]["binary_dense_3"]["binary_dense_3"]["Variable_4:0"]
pret_c5 = pretrained["model_weights"]["binary_dense_3"]["binary_dense_3"]["Variable_5:0"]
pret_c6 = pretrained["model_weights"]["binary_dense_3"]["binary_dense_3"]["Variable_6:0"]
pret_c7 = pretrained["model_weights"]["binary_dense_3"]["binary_dense_3"]["Variable_7:0"]
pret_c8 = pretrained["model_weights"]["binary_dense_3"]["binary_dense_3"]["Variable_8:0"]
pret_c9 = pretrained["model_weights"]["binary_dense_3"]["binary_dense_3"]["Variable_9:0"]
pret_c10= pretrained["model_weights"]["binary_dense_3"]["binary_dense_3"]["Variable_10:0"]
pret_c11= pretrained["model_weights"]["binary_dense_3"]["binary_dense_3"]["Variable_11:0"]
pret_c12= pretrained["model_weights"]["binary_dense_3"]["binary_dense_3"]["Variable_12:0"]
pret_c13= pretrained["model_weights"]["binary_dense_3"]["binary_dense_3"]["Variable_13:0"]
pret_c14= pretrained["model_weights"]["binary_dense_3"]["binary_dense_3"]["Variable_14:0"]
pret_c15= pretrained["model_weights"]["binary_dense_3"]["binary_dense_3"]["Variable_15:0"]
pret_c16= pretrained["model_weights"]["binary_dense_3"]["binary_dense_3"]["Variable_16:0"]
pret_c17= pretrained["model_weights"]["binary_dense_3"]["binary_dense_3"]["Variable_17:0"]
pret_c18= pretrained["model_weights"]["binary_dense_3"]["binary_dense_3"]["Variable_18:0"]
pret_c19= pretrained["model_weights"]["binary_dense_3"]["binary_dense_3"]["Variable_19:0"]
pret_c20= pretrained["model_weights"]["binary_dense_3"]["binary_dense_3"]["Variable_20:0"]
pret_c21= pretrained["model_weights"]["binary_dense_3"]["binary_dense_3"]["Variable_21:0"]
pret_c22= pretrained["model_weights"]["binary_dense_3"]["binary_dense_3"]["Variable_22:0"]
pret_c23= pretrained["model_weights"]["binary_dense_3"]["binary_dense_3"]["Variable_23:0"]
pret_c24= pretrained["model_weights"]["binary_dense_3"]["binary_dense_3"]["Variable_24:0"]
pret_c25= pretrained["model_weights"]["binary_dense_3"]["binary_dense_3"]["Variable_25:0"]
pret_c26= pretrained["model_weights"]["binary_dense_3"]["binary_dense_3"]["Variable_26:0"]
pret_c27= pretrained["model_weights"]["binary_dense_3"]["binary_dense_3"]["Variable_27:0"]
pret_c28= pretrained["model_weights"]["binary_dense_3"]["binary_dense_3"]["Variable_28:0"]
pret_c29= pretrained["model_weights"]["binary_dense_3"]["binary_dense_3"]["Variable_29:0"]
pret_c30= pretrained["model_weights"]["binary_dense_3"]["binary_dense_3"]["Variable_30:0"]
pret_c31= pretrained["model_weights"]["binary_dense_3"]["binary_dense_3"]["Variable_31:0"]
pret_c32= pretrained["model_weights"]["binary_dense_3"]["binary_dense_3"]["Variable_32:0"]
pret_w1 = pretrained["model_weights"]["binary_dense_3"]["binary_dense_3"]["Variable_33:0"]
pret_rand_map_exp_0 = pretrained["model_weights"]["binary_dense_3"]["binary_dense_3"]["rand_map_exp_0:0"]
pret_rand_map_exp_1 = pretrained["model_weights"]["binary_dense_3"]["binary_dense_3"]["rand_map_exp_1:0"]
pret_rand_map_exp_2 = pretrained["model_weights"]["binary_dense_3"]["binary_dense_3"]["rand_map_exp_2:0"]
weight_shape = np.shape(bl_w1)
tile_shape = np.shape(pret_c1)
zero_fill = np.zeros(tile_shape)
one_fill = np.ones(tile_shape)
neg_one_fill = -np.ones(tile_shape)
# randomisation and pruning recovery
bl_w1_unroll = np.array(bl_w1)
bl_w1 = np.array(bl_w1)
rand_map_0 = np.arange(tile_shape[0])
np.random.shuffle(rand_map_0)
rand_map_1 = np.arange(tile_shape[0])
np.random.shuffle(rand_map_1)
rand_map_2 = np.arange(tile_shape[0])
np.random.shuffle(rand_map_2)
pruning_mask = np.array(bl_pruning_mask).astype(bool)
init_mask = np.logical_not(pruning_mask[rand_map_0])
pruning_mask_recover = np.logical_and(pruning_mask, init_mask)[np.argsort(rand_map_0)]
pruning_mask = np.logical_or(pruning_mask, pruning_mask_recover)
init_mask = np.reshape(init_mask, tile_shape)
# expand randomisation map across tiles
rand_map_0_expand = np.tile(rand_map_0,[weight_shape[0]/tile_shape[0]])
rand_map_1_expand = np.tile(rand_map_1,[weight_shape[0]/tile_shape[0]])
rand_map_2_expand = np.tile(rand_map_2,[weight_shape[0]/tile_shape[0]])
for i in range(weight_shape[0]):
rand_map_0_expand[i] = rand_map_0_expand[i] + (tile_shape[0]*(weight_shape[0]/tile_shape[0]-1)) * (rand_map_0_expand[i]/tile_shape[0]) + tile_shape[0]*(i%weight_shape[0]/tile_shape[0])
rand_map_1_expand[i] = rand_map_1_expand[i] + (tile_shape[0]*(weight_shape[0]/tile_shape[0]-1)) * (rand_map_1_expand[i]/tile_shape[0]) + tile_shape[0]*(i%weight_shape[0]/tile_shape[0])
rand_map_2_expand[i] = rand_map_2_expand[i] + (tile_shape[0]*(weight_shape[0]/tile_shape[0]-1)) * (rand_map_2_expand[i]/tile_shape[0]) + tile_shape[0]*(i%weight_shape[0]/tile_shape[0])
bl_w1_rand_0 = bl_w1_unroll[rand_map_0_expand]
bl_w1_rand_0 = np.reshape(bl_w1_rand_0, weight_shape)
w1 = bl_w1
# connect1 only
c1 = one_fill
c2 = neg_one_fill
c3 = one_fill
c4 = neg_one_fill
c5 = one_fill
c6 = neg_one_fill
c7 = one_fill
c8 = neg_one_fill
c9 = one_fill
c10 = neg_one_fill
c11 = one_fill
c12 = neg_one_fill
c13 = one_fill
c14 = neg_one_fill
c15 = one_fill
c16 = neg_one_fill
c17 = neg_one_fill
c18 = one_fill
c19 = neg_one_fill
c20 = one_fill
c21 = neg_one_fill
c22 = one_fill
c23 = neg_one_fill
c24 = one_fill
c25 = neg_one_fill
c26 = one_fill
c27 = neg_one_fill
c28 = one_fill
c29 = neg_one_fill
c30 = one_fill
c31 = neg_one_fill
c32 = one_fill
pret_w1 [...] = w1
pret_c1 [...] = c1
pret_c2 [...] = c2
pret_c3 [...] = c3
pret_c4 [...] = c4
pret_c5 [...] = c5
pret_c6 [...] = c6
pret_c7 [...] = c7
pret_c8 [...] = c8
pret_c9 [...] = c9
pret_c10[...] = c10
pret_c11[...] = c11
pret_c12[...] = c12
pret_c13[...] = c13
pret_c14[...] = c14
pret_c15[...] = c15
pret_c16[...] = c16
pret_c17[...] = c17
pret_c18[...] = c18
pret_c19[...] = c19
pret_c20[...] = c20
pret_c21[...] = c21
pret_c22[...] = c22
pret_c23[...] = c23
pret_c24[...] = c24
pret_c25[...] = c25
pret_c26[...] = c26
pret_c27[...] = c27
pret_c28[...] = c28
pret_c29[...] = c29
pret_c30[...] = c30
pret_c31[...] = c31
pret_c32[...] = c32
pret_rand_map_0[...] = np.reshape(rand_map_0, (-1,1)).astype(float)
pret_rand_map_1[...] = np.reshape(rand_map_1, (-1,1)).astype(float)
pret_rand_map_2[...] = np.reshape(rand_map_2, (-1,1)).astype(float)
p_gamma[...] = np.array(bl_gamma)
pret_means[...] = np.array(bl_means)
pret_pruning_mask[...] = np.array(bl_pruning_mask)
rand_map_0_expand = np.reshape(rand_map_0_expand, [-1,1]).astype(float)
pret_rand_map_exp_0[...] = rand_map_0_expand
rand_map_1_expand = np.reshape(rand_map_1_expand, [-1,1]).astype(float)
pret_rand_map_exp_1[...] = rand_map_1_expand
rand_map_2_expand = np.reshape(rand_map_2_expand, [-1,1]).astype(float)
pret_rand_map_exp_2[...] = rand_map_2_expand
print(np.sum(np.array(bl_pruning_mask)), np.prod(np.shape(np.array(bl_pruning_mask))))
# dense layer 4
bl_w1 = bl["model_weights"]["binary_dense_4"]["binary_dense_4"]["Variable_1:0"]
bl_rand_map_0 = bl["model_weights"]["binary_dense_4"]["binary_dense_4"]["rand_map_0:0"]
bl_pruning_mask = bl["model_weights"]["binary_dense_4"]["binary_dense_4"]["pruning_mask:0"]
bl_gamma = bl["model_weights"]["binary_dense_4"]["binary_dense_4"]["Variable:0"]
bl_means = bl["model_weights"]["residual_sign_3"]["residual_sign_3"]["means:0"]
pret_rand_map_0 = pretrained["model_weights"]["binary_dense_4"]["binary_dense_4"]["rand_map_0:0"]
pret_rand_map_1 = pretrained["model_weights"]["binary_dense_4"]["binary_dense_4"]["rand_map_1:0"]
pret_rand_map_2 = pretrained["model_weights"]["binary_dense_4"]["binary_dense_4"]["rand_map_2:0"]
pret_pruning_mask = pretrained["model_weights"]["binary_dense_4"]["binary_dense_4"]["pruning_mask:0"]
p_gamma = pretrained["model_weights"]["binary_dense_4"]["binary_dense_4"]["Variable:0"]
pret_means = pretrained["model_weights"]["residual_sign_3"]["residual_sign_3"]["means:0"]
pret_c1 = pretrained["model_weights"]["binary_dense_4"]["binary_dense_4"]["Variable_1:0"]
pret_c2 = pretrained["model_weights"]["binary_dense_4"]["binary_dense_4"]["Variable_2:0"]
pret_c3 = pretrained["model_weights"]["binary_dense_4"]["binary_dense_4"]["Variable_3:0"]
pret_c4 = pretrained["model_weights"]["binary_dense_4"]["binary_dense_4"]["Variable_4:0"]
pret_c5 = pretrained["model_weights"]["binary_dense_4"]["binary_dense_4"]["Variable_5:0"]
pret_c6 = pretrained["model_weights"]["binary_dense_4"]["binary_dense_4"]["Variable_6:0"]
pret_c7 = pretrained["model_weights"]["binary_dense_4"]["binary_dense_4"]["Variable_7:0"]
pret_c8 = pretrained["model_weights"]["binary_dense_4"]["binary_dense_4"]["Variable_8:0"]
pret_c9 = pretrained["model_weights"]["binary_dense_4"]["binary_dense_4"]["Variable_9:0"]
pret_c10= pretrained["model_weights"]["binary_dense_4"]["binary_dense_4"]["Variable_10:0"]
pret_c11= pretrained["model_weights"]["binary_dense_4"]["binary_dense_4"]["Variable_11:0"]
pret_c12= pretrained["model_weights"]["binary_dense_4"]["binary_dense_4"]["Variable_12:0"]
pret_c13= pretrained["model_weights"]["binary_dense_4"]["binary_dense_4"]["Variable_13:0"]
pret_c14= pretrained["model_weights"]["binary_dense_4"]["binary_dense_4"]["Variable_14:0"]
pret_c15= pretrained["model_weights"]["binary_dense_4"]["binary_dense_4"]["Variable_15:0"]
pret_c16= pretrained["model_weights"]["binary_dense_4"]["binary_dense_4"]["Variable_16:0"]
pret_c17= pretrained["model_weights"]["binary_dense_4"]["binary_dense_4"]["Variable_17:0"]
pret_c18= pretrained["model_weights"]["binary_dense_4"]["binary_dense_4"]["Variable_18:0"]
pret_c19= pretrained["model_weights"]["binary_dense_4"]["binary_dense_4"]["Variable_19:0"]
pret_c20= pretrained["model_weights"]["binary_dense_4"]["binary_dense_4"]["Variable_20:0"]
pret_c21= pretrained["model_weights"]["binary_dense_4"]["binary_dense_4"]["Variable_21:0"]
pret_c22= pretrained["model_weights"]["binary_dense_4"]["binary_dense_4"]["Variable_22:0"]
pret_c23= pretrained["model_weights"]["binary_dense_4"]["binary_dense_4"]["Variable_23:0"]
pret_c24= pretrained["model_weights"]["binary_dense_4"]["binary_dense_4"]["Variable_24:0"]
pret_c25= pretrained["model_weights"]["binary_dense_4"]["binary_dense_4"]["Variable_25:0"]
pret_c26= pretrained["model_weights"]["binary_dense_4"]["binary_dense_4"]["Variable_26:0"]
pret_c27= pretrained["model_weights"]["binary_dense_4"]["binary_dense_4"]["Variable_27:0"]
pret_c28= pretrained["model_weights"]["binary_dense_4"]["binary_dense_4"]["Variable_28:0"]
pret_c29= pretrained["model_weights"]["binary_dense_4"]["binary_dense_4"]["Variable_29:0"]
pret_c30= pretrained["model_weights"]["binary_dense_4"]["binary_dense_4"]["Variable_30:0"]
pret_c31= pretrained["model_weights"]["binary_dense_4"]["binary_dense_4"]["Variable_31:0"]
pret_c32= pretrained["model_weights"]["binary_dense_4"]["binary_dense_4"]["Variable_32:0"]
pret_w1 = pretrained["model_weights"]["binary_dense_4"]["binary_dense_4"]["Variable_33:0"]
pret_rand_map_exp_0 = pretrained["model_weights"]["binary_dense_4"]["binary_dense_4"]["rand_map_exp_0:0"]
pret_rand_map_exp_1 = pretrained["model_weights"]["binary_dense_4"]["binary_dense_4"]["rand_map_exp_1:0"]
pret_rand_map_exp_2 = pretrained["model_weights"]["binary_dense_4"]["binary_dense_4"]["rand_map_exp_2:0"]
weight_shape = np.shape(bl_w1)
tile_shape = np.shape(pret_c1)
zero_fill = np.zeros(tile_shape)
one_fill = np.ones(tile_shape)
neg_one_fill = -np.ones(tile_shape)
# randomisation and pruning recovery
bl_w1_unroll = np.array(bl_w1)
bl_w1 = np.array(bl_w1)
rand_map_0 = np.arange(tile_shape[0])
np.random.shuffle(rand_map_0)
rand_map_1 = np.arange(tile_shape[0])
np.random.shuffle(rand_map_1)
rand_map_2 = np.arange(tile_shape[0])
np.random.shuffle(rand_map_2)
pruning_mask = np.array(bl_pruning_mask).astype(bool)
init_mask = np.logical_not(pruning_mask[rand_map_0])
pruning_mask_recover = np.logical_and(pruning_mask, init_mask)[np.argsort(rand_map_0)]
pruning_mask = np.logical_or(pruning_mask, pruning_mask_recover)
init_mask = np.reshape(init_mask, tile_shape)
# expand randomisation map across tiles
rand_map_0_expand = np.tile(rand_map_0,[weight_shape[0]/tile_shape[0]])
rand_map_1_expand = np.tile(rand_map_1,[weight_shape[0]/tile_shape[0]])
rand_map_2_expand = np.tile(rand_map_2,[weight_shape[0]/tile_shape[0]])
for i in range(weight_shape[0]):
rand_map_0_expand[i] = rand_map_0_expand[i] + (tile_shape[0]*(weight_shape[0]/tile_shape[0]-1)) * (rand_map_0_expand[i]/tile_shape[0]) + tile_shape[0]*(i%weight_shape[0]/tile_shape[0])
rand_map_1_expand[i] = rand_map_1_expand[i] + (tile_shape[0]*(weight_shape[0]/tile_shape[0]-1)) * (rand_map_1_expand[i]/tile_shape[0]) + tile_shape[0]*(i%weight_shape[0]/tile_shape[0])
rand_map_2_expand[i] = rand_map_2_expand[i] + (tile_shape[0]*(weight_shape[0]/tile_shape[0]-1)) * (rand_map_2_expand[i]/tile_shape[0]) + tile_shape[0]*(i%weight_shape[0]/tile_shape[0])
bl_w1_rand_0 = bl_w1_unroll[rand_map_0_expand]
bl_w1_rand_0 = np.reshape(bl_w1_rand_0, weight_shape)
w1 = bl_w1
# connect1 only
c1 = one_fill
c2 = neg_one_fill
c3 = one_fill
c4 = neg_one_fill
c5 = one_fill
c6 = neg_one_fill
c7 = one_fill
c8 = neg_one_fill
c9 = one_fill
c10 = neg_one_fill
c11 = one_fill
c12 = neg_one_fill
c13 = one_fill
c14 = neg_one_fill
c15 = one_fill
c16 = neg_one_fill
c17 = neg_one_fill
c18 = one_fill
c19 = neg_one_fill
c20 = one_fill
c21 = neg_one_fill
c22 = one_fill
c23 = neg_one_fill
c24 = one_fill
c25 = neg_one_fill
c26 = one_fill
c27 = neg_one_fill
c28 = one_fill
c29 = neg_one_fill
c30 = one_fill
c31 = neg_one_fill
c32 = one_fill
pret_w1 [...] = w1
pret_c1 [...] = c1
pret_c2 [...] = c2
pret_c3 [...] = c3
pret_c4 [...] = c4
pret_c5 [...] = c5
pret_c6 [...] = c6
pret_c7 [...] = c7
pret_c8 [...] = c8
pret_c9 [...] = c9
pret_c10[...] = c10
pret_c11[...] = c11
pret_c12[...] = c12
pret_c13[...] = c13
pret_c14[...] = c14
pret_c15[...] = c15
pret_c16[...] = c16
pret_c17[...] = c17
pret_c18[...] = c18
pret_c19[...] = c19
pret_c20[...] = c20
pret_c21[...] = c21
pret_c22[...] = c22
pret_c23[...] = c23
pret_c24[...] = c24
pret_c25[...] = c25
pret_c26[...] = c26
pret_c27[...] = c27
pret_c28[...] = c28
pret_c29[...] = c29
pret_c30[...] = c30
pret_c31[...] = c31
pret_c32[...] = c32
pret_rand_map_0[...] = np.reshape(rand_map_0, (-1,1)).astype(float)
pret_rand_map_1[...] = np.reshape(rand_map_1, (-1,1)).astype(float)
pret_rand_map_2[...] = np.reshape(rand_map_2, (-1,1)).astype(float)
p_gamma[...] = np.array(bl_gamma)
pret_means[...] = np.array(bl_means)
pret_pruning_mask[...] = np.array(bl_pruning_mask)
rand_map_0_expand = np.reshape(rand_map_0_expand, [-1,1]).astype(float)
pret_rand_map_exp_0[...] = rand_map_0_expand
rand_map_1_expand = np.reshape(rand_map_1_expand, [-1,1]).astype(float)
pret_rand_map_exp_1[...] = rand_map_1_expand
rand_map_2_expand = np.reshape(rand_map_2_expand, [-1,1]).astype(float)
pret_rand_map_exp_2[...] = rand_map_2_expand
print(np.sum(np.array(bl_pruning_mask)), np.prod(np.shape(np.array(bl_pruning_mask))))
# dense layer 5
bl_w1 = bl["model_weights"]["binary_dense_5"]["binary_dense_5"]["Variable_1:0"]
bl_rand_map_0 = bl["model_weights"]["binary_dense_5"]["binary_dense_5"]["rand_map_0:0"]
bl_pruning_mask = bl["model_weights"]["binary_dense_5"]["binary_dense_5"]["pruning_mask:0"]
bl_gamma = bl["model_weights"]["binary_dense_5"]["binary_dense_5"]["Variable:0"]
bl_means = bl["model_weights"]["residual_sign_4"]["residual_sign_4"]["means:0"]
pret_rand_map_0 = pretrained["model_weights"]["binary_dense_5"]["binary_dense_5"]["rand_map_0:0"]
pret_rand_map_1 = pretrained["model_weights"]["binary_dense_5"]["binary_dense_5"]["rand_map_1:0"]
pret_rand_map_2 = pretrained["model_weights"]["binary_dense_5"]["binary_dense_5"]["rand_map_2:0"]
pret_pruning_mask = pretrained["model_weights"]["binary_dense_5"]["binary_dense_5"]["pruning_mask:0"]
p_gamma = pretrained["model_weights"]["binary_dense_5"]["binary_dense_5"]["Variable:0"]
pret_means = pretrained["model_weights"]["residual_sign_4"]["residual_sign_4"]["means:0"]
pret_c1 = pretrained["model_weights"]["binary_dense_5"]["binary_dense_5"]["Variable_1:0"]
pret_c2 = pretrained["model_weights"]["binary_dense_5"]["binary_dense_5"]["Variable_2:0"]
pret_c3 = pretrained["model_weights"]["binary_dense_5"]["binary_dense_5"]["Variable_3:0"]
pret_c4 = pretrained["model_weights"]["binary_dense_5"]["binary_dense_5"]["Variable_4:0"]
pret_c5 = pretrained["model_weights"]["binary_dense_5"]["binary_dense_5"]["Variable_5:0"]
pret_c6 = pretrained["model_weights"]["binary_dense_5"]["binary_dense_5"]["Variable_6:0"]
pret_c7 = pretrained["model_weights"]["binary_dense_5"]["binary_dense_5"]["Variable_7:0"]
pret_c8 = pretrained["model_weights"]["binary_dense_5"]["binary_dense_5"]["Variable_8:0"]
pret_c9 = pretrained["model_weights"]["binary_dense_5"]["binary_dense_5"]["Variable_9:0"]
pret_c10= pretrained["model_weights"]["binary_dense_5"]["binary_dense_5"]["Variable_10:0"]
pret_c11= pretrained["model_weights"]["binary_dense_5"]["binary_dense_5"]["Variable_11:0"]
pret_c12= pretrained["model_weights"]["binary_dense_5"]["binary_dense_5"]["Variable_12:0"]
pret_c13= pretrained["model_weights"]["binary_dense_5"]["binary_dense_5"]["Variable_13:0"]
pret_c14= pretrained["model_weights"]["binary_dense_5"]["binary_dense_5"]["Variable_14:0"]
pret_c15= pretrained["model_weights"]["binary_dense_5"]["binary_dense_5"]["Variable_15:0"]
pret_c16= pretrained["model_weights"]["binary_dense_5"]["binary_dense_5"]["Variable_16:0"]
pret_c17= pretrained["model_weights"]["binary_dense_5"]["binary_dense_5"]["Variable_17:0"]
pret_c18= pretrained["model_weights"]["binary_dense_5"]["binary_dense_5"]["Variable_18:0"]
pret_c19= pretrained["model_weights"]["binary_dense_5"]["binary_dense_5"]["Variable_19:0"]
pret_c20= pretrained["model_weights"]["binary_dense_5"]["binary_dense_5"]["Variable_20:0"]
pret_c21= pretrained["model_weights"]["binary_dense_5"]["binary_dense_5"]["Variable_21:0"]
pret_c22= pretrained["model_weights"]["binary_dense_5"]["binary_dense_5"]["Variable_22:0"]
pret_c23= pretrained["model_weights"]["binary_dense_5"]["binary_dense_5"]["Variable_23:0"]
pret_c24= pretrained["model_weights"]["binary_dense_5"]["binary_dense_5"]["Variable_24:0"]
pret_c25= pretrained["model_weights"]["binary_dense_5"]["binary_dense_5"]["Variable_25:0"]
pret_c26= pretrained["model_weights"]["binary_dense_5"]["binary_dense_5"]["Variable_26:0"]
pret_c27= pretrained["model_weights"]["binary_dense_5"]["binary_dense_5"]["Variable_27:0"]
pret_c28= pretrained["model_weights"]["binary_dense_5"]["binary_dense_5"]["Variable_28:0"]
pret_c29= pretrained["model_weights"]["binary_dense_5"]["binary_dense_5"]["Variable_29:0"]
pret_c30= pretrained["model_weights"]["binary_dense_5"]["binary_dense_5"]["Variable_30:0"]
pret_c31= pretrained["model_weights"]["binary_dense_5"]["binary_dense_5"]["Variable_31:0"]
pret_c32= pretrained["model_weights"]["binary_dense_5"]["binary_dense_5"]["Variable_32:0"]
pret_w1 = pretrained["model_weights"]["binary_dense_5"]["binary_dense_5"]["Variable_33:0"]
pret_rand_map_exp_0 = pretrained["model_weights"]["binary_dense_5"]["binary_dense_5"]["rand_map_exp_0:0"]
pret_rand_map_exp_1 = pretrained["model_weights"]["binary_dense_5"]["binary_dense_5"]["rand_map_exp_1:0"]
pret_rand_map_exp_2 = pretrained["model_weights"]["binary_dense_5"]["binary_dense_5"]["rand_map_exp_2:0"]
weight_shape = np.shape(bl_w1)
tile_shape = np.shape(pret_c1)
zero_fill = np.zeros(tile_shape)
one_fill = np.ones(tile_shape)
neg_one_fill = -np.ones(tile_shape)
# randomisation and pruning recovery
bl_w1_unroll = np.array(bl_w1)
bl_w1 = np.array(bl_w1)
rand_map_0 = np.arange(tile_shape[0])
np.random.shuffle(rand_map_0)
rand_map_1 = np.arange(tile_shape[0])
np.random.shuffle(rand_map_1)
rand_map_2 = np.arange(tile_shape[0])
np.random.shuffle(rand_map_2)
pruning_mask = np.array(bl_pruning_mask).astype(bool)
init_mask = np.logical_not(pruning_mask[rand_map_0])
pruning_mask_recover = np.logical_and(pruning_mask, init_mask)[np.argsort(rand_map_0)]
pruning_mask = np.logical_or(pruning_mask, pruning_mask_recover)
init_mask = np.reshape(init_mask, tile_shape)
# expand randomisation map across tiles
rand_map_0_expand = np.tile(rand_map_0,[weight_shape[0]/tile_shape[0]])
"""Spike sorting classes and window"""
from __future__ import division
from __future__ import print_function
__authors__ = ['<NAME>', '<NAME>']
import os
import sys
import time
import datetime
from copy import copy
import operator
import random
import shutil
import hashlib
import multiprocessing as mp
from PyQt4 import QtCore, QtGui
from PyQt4.QtCore import Qt
from PyQt4.QtGui import QAction, QIcon, QApplication
import numpy as np
import scipy
import scipy.signal
#from scipy.cluster.hierarchy import fclusterdata
import pylab as pl
import pyximport
pyximport.install(build_in_temp=False, inplace=True)
from . import util # .pyx file
from . import core
from .core import (WaveForm, Gaussian, MAXLONGLONG, R, toiter, intround, printflush, lstrip,
rstrip, lrstrip, pad, td2days, SpykeToolWindow, NList, NSList, dist,
USList, ClusterChange, SpikeSelectionSlider, lrrep2Darrstripis, rollwin2D)
from .detect import DEBUG
from .surf import EPOCH
from .plot import SpikeSortPanel, CLUSTERCOLOURDICT, WHITE
from .__version__ import __version__
#MAXCHANTOLERANCE = 100 # um
NSLISTWIDTH = 70 # minimize nslist width, enough for 7 digit spike IDs
PANELWIDTHPERCOLUMN = 120 # sort panel width per column of channels
PANELHEIGHTPERROW = 50 # sort panel height per row of channels
VSCROLLBARWIDTH = 14 # hack
SORTWINDOWHEIGHT = 1035 # TODO: this should be set programmatically
MINSORTWINDOWWIDTH = 566
MEANWAVEMAXSAMPLES = 2000
NPCSPERCHAN = 7
PCALIB = 'mdp'
ICALIB = 'sklearn'
DEFMINISI = 50 # default minimum ISI to check for on export, us
MAXGROUPISI = 100000 # us (100 ms)
MAXGROUPDT = 100000000 # us (100 s)
class Sort(object):
"""A spike sorting session, in which you can detect spikes and sort them into Neurons.
A .sort file is a single Python2-pickled Sort object. A .json file is a
jsonpickle-pickled Sort object"""
def __init__(self, detector=None, stream=None, tw=None):
self.__version__ = __version__
self.fname = ''
self.user = ''
self.notes = ''
self.detector = detector # this Sort's current Detector object
self.tw = tw # time window (us) relative to spike time
self.stream = stream
self.probe = stream.probe # only one probe design per sort allowed
self.converter = stream.converter
self.neurons = {}
self.clusters = {} # neurons with multidm params scaled for plotting
self.norder = [] # stores order of neuron ids display in nlist
self.npcsperchan = NPCSPERCHAN
def get_nextnid(self):
"""nextnid is used to retrieve the next unique single unit ID"""
nids = list(self.neurons)
if len(nids) == 0:
return 1 # single unit nids start at 1
else:
return max(max(nids) + 1, 1) # at least 1
nextnid = property(get_nextnid)
def get_nextmuid(self):
"""nextmuid is used to retrieve the next unique multiunit ID"""
nids = list(self.neurons)
if len(nids) == 0:
return -1 # multiunit ids start at -1
else:
return min(min(nids) - 1, -1) # at most -1
nextmuid = property(get_nextmuid)
def get_good(self):
"""Return array of nids marked by user as 'good'"""
good = []
for neuron in self.neurons.values():
try:
if neuron.good:
good.append(neuron.id)
except AttributeError: # neuron is from older sort, no .good attrib
neuron.good = False
return np.asarray(good)
def set_good(self, good):
"""Set good flag to True for nids in good, False otherwise"""
nids = list(self.neurons)
assert np.all([ nid in nids for nid in good ]) # make sure all nids in good exist
notgood = np.setdiff1d(nids, good)
for nid in notgood:
neuron = self.neurons[nid]
neuron.good = False
for nid in good:
neuron = self.neurons[nid]
neuron.good = True
good = property(get_good, set_good)
def get_stream(self):
try:
return self._stream
except AttributeError:
# this is likely a brand new sort, has yet to be assigned a Stream
return None
def set_stream(self, stream=None):
"""Check stream type and name and probe type, and restore filtmeth, car, sampfreq and
shcorrect to stream when binding/modifying stream to self"""
oldstream = self.stream
if stream != None and oldstream != None:
# do stream types match?
if type(stream) != type(oldstream):
raise ValueError("Stream types don't match: %s, %s"
% (type(oldstream), type(stream)))
# do stream probe types match?
if type(stream.probe) != type(oldstream.probe):
raise ValueError("Stream probe types don't match: %s, %s"
% (type(oldstream.probe), type(stream.probe)))
# is one stream fname a superset of the other?
if (stream.fname not in oldstream.fname) and (oldstream.fname not in stream.fname):
raise ValueError("Stream file names are not supersets of each other: %s, %s"
% (oldstream.fname, stream.fname))
else:
print('Stream file names are similar enough to proceed: %s, %s'
% (stream.fname, oldstream.fname))
try:
stream.filtmeth = self.filtmeth
stream.car = self.car
stream.sampfreq = self.sampfreq
stream.shcorrect = self.shcorrect
except AttributeError:
pass # one of the above aren't bound
self._stream = stream # set it
print('Bound stream %r to sort %r' % (stream.fname, self.fname))
# now that tres is known, calculate window timepoints wrt spike time:
self.calc_twts_twi()
stream = property(get_stream, set_stream)
def calc_twts_twi(self):
"""Calculate temporal window timepoints wrt spike time, and the indices of these
timepoints wrt spike time"""
tres = self.tres
tw = self.tw
twts = np.arange(tw[0], tw[1], tres)
twts += twts[0] % tres # get rid of mod, so twts go through zero
self.twts = twts
self.twi = intround(twts[0] / tres), intround(twts[-1] / tres)
#info('twi = %s' % (self.twi,))
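# Illustrative example (assumed values): with tw = (-500, 500) us and tres = 20 us,
# twts = -500, -480, ..., 480 and twi = (-25, 24), i.e. sample indices -25..24 relative
# to the spike-alignment timepoint.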
def update_tw(self, tw):
"""Update tw and everything that depends on it. Note that this shouldn't
be called directly by the user. Call SpykeWindow.update_spiketw() instead"""
oldtw = self.tw
self.tw = tw
self.calc_twts_twi()
dtw = np.asarray(tw) - np.asarray(oldtw) # new minus old
self.spikes['t0'] += dtw[0]
self.spikes['t1'] += dtw[1]
self.spikes['tis'] = self.spikes['tis'] - intround(dtw[0] / self.tres)
# recalculate any existing templates:
for neuron in self.neurons.values():
if neuron.wave.data is not None:
neuron.update_wave()
print('WARNING: all spike waveforms need to be reloaded!')
def get_tres(self):
return self.stream.tres
tres = property(get_tres)
def __getstate__(self):
"""Get object state for pickling"""
# copy it cuz we'll be making changes, this is fast because it's just a shallow copy
d = self.__dict__.copy()
# Spikes and wavedata arrays are (potentially) saved separately.
# usids and PCs/ICs can be regenerated from the spikes array.
for attr in ['spikes', 'wavedata', 'usids', 'X', 'Xhash']:
# keep _stream during normal pickling for multiprocessing, but remove it
# manually when pickling to sort file
try: del d[attr]
except KeyError: pass
return d
def get_nspikes(self):
try: return len(self.spikes)
except AttributeError: return 0
nspikes = property(get_nspikes)
def update_usids(self):
"""Update usids, which is an array of indices of unsorted spikes"""
nids = self.spikes['nid']
self.usids, = np.where(nids == 0) # 0 means unclustered
def get_spikes_sortedby(self, attr='id'):
"""Return array of all spikes, sorted by attribute 'attr'"""
vals = self.spikes[attr]
spikes = self.spikes[vals.argsort()]
return spikes
def get_wave(self, sid):
"""Return WaveForm corresponding to spike sid"""
spikes = self.spikes
nchans = spikes['nchans'][sid]
chans = spikes['chans'][sid, :nchans]
t0 = spikes['t0'][sid]
t1 = spikes['t1'][sid]
wavedata = self.wavedata[sid, 0:nchans]
ts = np.arange(t0, t1, self.tres) # build them up
return WaveForm(data=wavedata, ts=ts, chans=chans, tres=self.tres)
def get_maxchan_wavedata(self, sid=None, nid=None):
"""Return wavedata of maxchan of spike sid or neuron nid"""
if sid != None:
assert nid == None
chani = self.spikes['chani'][sid]
return self.wavedata[sid, chani]
elif nid != None:
assert sid == None
neuron = self.neurons[nid]
chani, = np.where(neuron.chans == neuron.chan)
assert len(chani) == 1
chani = chani[0] # pull out of length 1 array
return neuron.wave.data[chani]
def get_mean_wave(self, sids, nid=None):
"""Return the mean and std waveform of spike waveforms in sids"""
spikes = self.spikes
nsids = len(sids)
if nsids > MEANWAVEMAXSAMPLES:
step = nsids // MEANWAVEMAXSAMPLES + 1
s = ("get_mean_wave() sampling every %d spikes instead of all %d"
% (step, nsids))
if nid != None:
s = "neuron %d: " % nid + s
print(s)
sids = sids[::step]
nsids = len(sids) # update
chanss = spikes['chans'][sids]
nchanss = spikes['nchans'][sids]
chanslist = [ chans[:nchans] for chans, nchans in zip(chanss, nchanss) ] # list of arrays
chanpopulation = np.concatenate(chanslist)
groupchans = np.unique(chanpopulation) # comes out sorted
wavedata = self.wavedata[sids]
if wavedata.ndim == 2: # should be 3, get only 2 if nsids == 1
wavedata.shape = 1, wavedata.shape[0], wavedata.shape[1] # give it a singleton 3rd dim
nt = wavedata.shape[-1]
maxnchans = len(groupchans)
data = np.zeros((maxnchans, nt))
# all spikes have same nt, but not necessarily same nchans, keep track of
# how many spikes contributed to each of the group's chans
nspikes = np.zeros((maxnchans, 1), dtype=int)
for chans, wd in zip(chanslist, wavedata):
chanis = groupchans.searchsorted(chans) # each spike's chans is a subset of groupchans
data[chanis] += wd[:len(chans)] # accumulate
nspikes[chanis] += 1 # inc spike count for this spike's chans
#t0 = time.time()
data /= nspikes # normalize all data points appropriately, this is now the mean
var = np.zeros((maxnchans, nt))
for chans, wd in zip(chanslist, wavedata):
chanis = groupchans.searchsorted(chans) # each spike's chans is a subset of groupchans
var[chanis] += (wd[:len(chans)] - data[chanis]) ** 2 # accumulate 2nd moment
var /= nspikes # normalize all data points appropriately, this is now the variance
std = np.sqrt(var)
# keep only those chans that at least 1/2 the spikes contributed to
bins = list(groupchans) + [np.inf] # concatenate rightmost bin edge
hist, bins = np.histogram(chanpopulation, bins=bins)
chans = groupchans[hist >= nsids/2]
chanis = groupchans.searchsorted(chans)
data = data[chanis]
std = std[chanis]
return WaveForm(data=data, std=std, chans=chans)
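# Note on the accumulation above (illustrative): searchsorted() locates each spike's
# (sorted) channel list within the sorted union of channels, e.g.
# np.array([1, 3, 5, 7]).searchsorted([3, 7]) -> array([1, 3]), so each spike's waveform
# rows are added to the correct rows of the group arrays.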
def check_ISIs(self, nids='good'):
"""Check that interspike intervals of spikes in each nid never fall below DEFMINISI"""
print('Checking inter-spike intervals')
if nids == 'good':
nids = self.good
elif nids == 'all':
nids = sorted(self.neurons)
for nid in nids:
neuron = self.neurons[nid]
spikets = self.spikes['t'][neuron.sids] # should be a sorted copy
assert spikets.flags['OWNDATA'] # safe to modify in place
spikets.sort() # just in case it isn't perfectly sorted
ndupl = (np.diff(spikets) < DEFMINISI).sum()
if ndupl > 0:
msg = ('n%d has %d duplicate spikes (given DEFMINISI=%d us).\n'
'Remove duplicate spikes with the ISI tool in the Verify tab'
% (nid, ndupl, DEFMINISI))
raise RuntimeError(msg)
def check_wavealign(self, nids='good', maxdti=1):
"""Check that each neurons's primary peak on the max chan is no more than +/- maxdti
timepoints away from the t=0 alignment timepoint"""
print('Checking neuron mean waveform alignment')
if nids == 'good':
nids = self.good
elif nids == 'all':
nids = sorted(self.neurons)
nt = self.twi[1] - self.twi[0] + 1 # expected number of points of each chan's wavedata
for nid in nids:
neuron = self.neurons[nid]
wd = self.get_maxchan_wavedata(nid=nid)
assert len(wd) == nt
# find biggest positive and negative peaks, check which comes first, ensure
# the primary peak is within maxdti of t=0 alignment timepoint:
ppeakis, _ = scipy.signal.find_peaks(wd) # positive peak indices
npeakis, _ = scipy.signal.find_peaks(-wd) # negative peak indices
pmaxi = ppeakis[wd[ppeakis].argmax()] # max positive peak index
nmaxi = npeakis[wd[npeakis].argmin()] # max negative peak index
if nmaxi < pmaxi: # usual case: -ve then +ve peak
peak1i = nmaxi
else: # less common: +ve then -ve peak, make sure +ve peak is worthy of alignment
pmax, nmax = wd[pmaxi], wd[nmaxi]
if pmax > abs(nmax): # +ve peak is bigger than -ve peak, align to +ve peak
peak1i = pmaxi
else:
peak1i = nmaxi # default to -ve peak
alignti = 0 - self.twi[0] # +ve
dti = peak1i - alignti
#print("n%d: dti=%d" % (nid, dti))
if abs(dti) > maxdti:
peak1uV = self.converter.AD2uV(wd[peak1i])
peak1us = intround(self.tres*(peak1i-alignti))
msg = ('Primary peak (%+d uV @ t=%d us) of n%d is %+d timepoints away from '
'the t=0 us alignment point. Shift it closer and try again'
% (peak1uV, peak1us, nid, dti))
raise RuntimeError(msg)
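# Illustrative: scipy.signal.find_peaks returns the indices of local maxima (plus a
# properties dict), e.g. scipy.signal.find_peaks(np.array([0., 2., 0., 3., 0.]))[0]
# -> array([1, 3]); the negated trace is used above to find negative peaks.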
def check_wavepadding(self, nids='good', npad=2):
"""Check if any spikes are edge padded, presumably due to being shifted but not
reloaded. For robustness, check for consistent signs of padding across all channels.
An edge is considered padded if it does not change over npad datapoints"""
print('Checking spike waveform padding')
assert npad >= 2 # need at least 2 points to do a diff
if nids == 'good':
nids = self.good
elif nids == 'all':
nids = sorted(self.neurons)
for nid in nids:
neuron = self.neurons[nid]
for sid in neuron.sids:
wd = self.wavedata[sid] # multichannel waveform data
# are left and right edges of wavedata identical for npad number of points?
l, r = wd[:, :npad], wd[:, -npad:] # shape (nchans, npad)
leftpadded = (np.diff(l, axis=1) == 0).all()
rightpadded = (np.diff(r, axis=1) == 0).all()
# handle case where spike is right after or right before a 0-padded
# region of data due to gaps between experiments:
if leftpadded:
if (wd[:, 0] == 0).all():
leftpadded = False
if rightpadded:
if (wd[:, -1] == 0).all():
rightpadded = False
if leftpadded or rightpadded:
msg = ('n%d has s%d that looks like it has been padded.\n'
'leftpadded, rightpadded = %r, %r\n'
'Reload s%d or n%d or all spikes and try again'
% (nid, sid, leftpadded, rightpadded, sid, nid))
raise RuntimeError(msg)
def check_contiguous_nids(self):
"""Check that neuron IDs are contiguous (no gaps)"""
print('Checking that neuron IDs are contiguous')
nids = np.array(list(self.neurons))
nids = nids[nids > 0] # only consider +ve nids
nids.sort()
if (np.diff(nids) != 1).any():
raise RuntimeError('Neuron IDs are not contiguous, renumber all and try again')
def exportptcsfiles(self, basepath, sortpath, user='', notes=''):
"""Export spike data to binary .ptcs files under basepath, one file per recording"""
# First check to make sure various things are OK before exporting:
self.check_ISIs()
self.check_wavealign()
self.check_wavepadding()
self.check_contiguous_nids()
spikes = self.spikes
exportdt = str(datetime.datetime.now()) # get an export datetime stamp
exportdt = exportdt.split('.')[0] # ditch the us
if self.stream.is_multi(): # self.stream is a MultiStream
streams = self.stream.streams
else: # self.stream is a single Stream
streams = [self.stream]
print('Exporting "good" clusters to:')
# do a separate export for each recording:
# absolute start and stop times of all streams, rounded to nearest raw timepoint:
tranges = self.stream.tranges
t0 = tranges[0, 0] # absolute start time of first stream
for stream, trange in zip(streams, tranges):
abst0 = trange[0] # absolute start time of this stream relative to t0
# time delta between this stream and first stream, to nearest raw timepoint, us:
dt = abst0 - t0
dt = intround(dt) # to nearest int us
self.exportptcsfile(stream, basepath, dt, exportdt, sortpath,
user=user, notes=notes)
def exportptcsfile(self, stream, basepath, dt, exportdt, sortpath, user='', notes=''):
"""Export spike data of all "good" spikes to binary .ptcs file in basepath.
Constrain to spikes in stream, and undo any time delta in spike times.
dt is the integer time difference between start of stream and start of first stream in
the track, rounded to the nearest us (spike times are stored as int64 us in .ptcs)"""
# build up list of PTCSNeuronRecords that have spikes in this stream,
# and tally their spikes
nsamplebytes = 4 # float32
nrecs = []
nspikes = 0
# only export neurons marked as "good", could be single or multi unit:
for nid in sorted(self.good):
neuron = self.neurons[nid]
spikets = self.spikes['t'][neuron.sids] # should be a sorted copy
assert spikets.flags['OWNDATA'] # safe to modify in place
spikets.sort() # just in case it isn't perfectly sorted
spikets -= dt # export spike times relative to t=0 of this recording
# only include spikes that occurred during this recording
lo, hi = spikets.searchsorted([stream.t0, stream.t1])
spikets = spikets[lo:hi]
if len(spikets) == 0:
continue # don't save empty neurons
nrec = PTCSNeuronRecord(neuron, spikets, nsamplebytes, descr='')
nrecs.append(nrec)
nspikes += len(spikets)
nneurons = len(nrecs)
# create the header and write everything to file:
path = os.path.join(basepath, stream.srcfnameroot)
try: os.mkdir(path)
except OSError: pass # path already exists?
fname = stream.srcfnameroot + '.ptcs'
fullfname = os.path.join(path, fname)
header = PTCSHeader(self, sortpath, stream, nneurons, nspikes, nsamplebytes,
fullfname, exportdt, user=user, notes=notes)
with open(fullfname, 'wb') as f:
header.write(f)
for nrec in nrecs:
nrec.write(f)
print(fullfname)
def exportcsv(self, fname):
"""Export all "good" spikes to a .csv file with time (s), nid, and maxchan as the
columns"""
sids = []
#chans = []
for nid in sorted(self.good):
neuron = self.neurons[nid]
sids.append(neuron.sids)
# the alternative is to export each spike's unit's channel:
#chans.append(np.tile(neuron.chan, neuron.nspikes))
sids = np.hstack(sids)
spikes = self.spikes[sids]
tsecs = spikes['t'] / 1e6 # convert from us to s
nids = spikes['nid']
chans = spikes['chan']
#chans = np.hstack(chans)
data = np.column_stack([tsecs, nids, chans])
print('Exporting (tsec, nid, chan) of all spikes marked as "good" to %s' % fname)
np.savetxt(fname, data, fmt='%.6f, %d, %d')
def exporttschid(self, basepath):
"""Export int64 (timestamp, channel, neuron id) 3 tuples to binary file"""
raise NotImplementedError('Needs to be redone to work with multiple streams')
spikes = self.spikes[self.spikes['nid'] > 0] # don't export unsorted/multiunit spikes
dt = str(datetime.datetime.now()) # get an export timestamp
dt = dt.split('.')[0] # ditch the us
dt = dt.replace(' ', '_')
dt = dt.replace(':', '.')
srffnameroot = srffnameroot.replace(' ', '_')
tschidfname = dt + '_' + srffnameroot + '.tschid'
tschid = np.empty((len(spikes), 3), dtype=np.int64)
tschid[:, 0] = spikes['t']
tschid[:, 1] = spikes['chan']
tschid[:, 2] = spikes['nid']
tschid.tofile(os.path.join(path, tschidfname)) # save it
print(tschidfname)
def exportdin(self, basepath):
"""Export stimulus din(s) to binary .din file(s) in basepath"""
if self.stream.is_multi(): # self.stream is a MultiStream
streams = self.stream.streams
else: # self.stream is a single Stream
streams = [self.stream]
dinfiledtype=[('TimeStamp', '<i8'), ('SVal', '<i8')] # pairs of int64s
print('Exporting DIN(s) to:')
for stream in streams:
try: # neither of these attribs should exist for recordings with no stimuli:
svrecs = stream.srff.digitalsvalrecords
dsprecs = stream.srff.displayrecords
except AttributeError:
continue # no din to export for this stream
if len(svrecs) == 0 or stream.srff.ndigitalsvalrecords == 0:
raise ValueError("digitalsvalrecords are empty for stream %r. Attribute "
"shouldn't exist" % stream.fname)
path = os.path.join(basepath, stream.srcfnameroot)
try: os.mkdir(path)
except OSError: pass # path already exists?
# upcast SVal field from uint16 to int64, creates a copy,
# but it's not too expensive:
svrecs = svrecs.astype(dinfiledtype)
# convert to normal n x 2 int64 array
svrecs = svrecs.view(np.int64).reshape(-1, 2)
# Some old recordings (<= ptc15) contain multiple experiments.
# To deal with this, iterate over stream.srff.displayrecords, export one .din
# per displayrecord. Append experiment ID to each .din filename, if necessary.
svrects = svrecs[:, 0]
dsprects = [ dsprec.TimeStamp for dsprec in dsprecs ]
svalrecis = svrects.searchsorted(dsprects)
assert svalrecis[0] == 0
svalrecis = svalrecis[1:] # exclude the trivial 0 index
# split sval records according to displayrecord timestamps:
dins = np.split(svrecs, svalrecis)
assert len(dins) == len(dsprecs)
for eid, din in enumerate(dins):
if eid == 0 and len(dins) == 1:
eidstr = ''
elif len(dins) < 10:
eidstr = '.%d' % eid
else: # include leading zero to maintain alphabetical fname order
eidstr = '.%02d' % eid
dinfname = stream.srcfnameroot + eidstr + '.din'
fullfname = os.path.join(path, dinfname)
din.tofile(fullfname) # save it
print(fullfname)
def exporttextheader(self, basepath):
"""Export stimulus text header(s) to .textheader file(s) in basepath"""
if self.stream.is_multi(): # self.stream is a MultiStream
streams = self.stream.streams
else: # self.stream is a single Stream
streams = [self.stream]
print('Exporting text header(s) to:')
for stream in streams:
try:
dsprecs = stream.srff.displayrecords
except AttributeError: # no textheader to export for this stream
continue
if len(dsprecs) == 0:
raise ValueError("displayrecords are empty for stream %r. Attribute "
"shouldn't exist" % stream.fname)
path = os.path.join(basepath, stream.srcfnameroot)
try: os.mkdir(path)
except OSError: pass # path already exists?
# Some old recordings (<= ptc15) contain multiple experiments.
# To deal with this, iterate over stream.srff.displayrecords, export one
# .textheader per displayrecord. Append experiment ID to each .textheader
# filename, if necessary.
for eid, dsprec in enumerate(dsprecs):
textheader = dsprec.Header.python_tbl
if eid == 0 and len(dsprecs) == 1:
eidstr = ''
elif len(dsprecs) < 10:
eidstr = '.%d' % eid
else: # include leading zero to maintain alphabetical fname order
eidstr = '.%02d' % eid
textheaderfname = stream.srcfnameroot + eidstr + '.textheader'
fullfname = os.path.join(path, textheaderfname)
with open(fullfname, 'w') as f:
f.write(textheader) # save it
print(fullfname)
def exportall(self, basepath, sortpath):
"""Export spike data, stimulus din and textheader to basepath"""
self.exportptcsfiles(basepath, sortpath)
self.exportdin(basepath)
self.exporttextheader(basepath)
def exportspikewaves(self, sids, selchans, tis, fname, format):
"""Export spike waveform data of selected sids, selchans and tis to binary
.spikes.zip file or text .spikes.csv file"""
nspikes = len(sids)
chans, chanslist = self.get_common_chans(sids, selchans)
nchans = len(chans)
ti0, ti1 = tis
nt = ti1 - ti0
# fill in 3D data array:
dtype = self.wavedata.dtype
data = np.zeros((nspikes, nchans, nt), dtype=dtype)
for sii, sid in enumerate(sids):
spikechans = chanslist[sii]
spikechanis = spikechans.searchsorted(chans)
data[sii] = self.wavedata[sid][spikechanis, ti0:ti1]
if format == 'text': # flatten timepoints of all chans into columns
data.shape = nspikes, nchans*nt
stream = self.stream
assert stream.kind == 'highpass' # should be the only type ever saved to self
if format == 'binary':
nids = self.spikes['nid'][sids]
spiketimes = self.spikes['t'][sids]
chanpos = stream.probe.siteloc_arr()
uVperAD = stream.converter.AD2uV(1) # convert 1 AD unit to uV
with open(fname, 'wb') as f:
np.savez_compressed(f, data=data, sids=sids, nids=nids,
spiketimes=spiketimes, chans=chans, tis=tis,
chanpos=chanpos, uVperAD=uVperAD)
elif format == 'text':
np.savetxt(fname, data, fmt='%d', delimiter=',') # data should be int
else:
raise ValueError('Unknown format: %r' % format)
print('Exported %d spikes on chans=%r and tis=%r to %s'
% (nspikes, list(chans), list(tis), fname))
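    # Illustrative note (added, not part of the original module): the 'binary' branch
    # above writes a compressed NumPy archive via np.savez_compressed, so a
    # hypothetical reader script could recover the arrays with np.load, e.g.:
    #     arch = np.load('example.spikes.zip')   # assumed filename
    #     waveforms = arch['data']               # (nspikes, nchans, nt) array
    #     uV = waveforms * arch['uVperAD']       # convert AD units to microvolts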
def get_param_matrix(self, kind=None, sids=None, tis=None, selchans=None, norm=False,
dims=None, scale=True):
"""Organize dims parameters from sids into a data matrix, each column
        corresponding to a dim. To do PCA/ICA clustering on all spikes one maxchan at
        a time, the caller needs to call this multiple times, once for each set of
        spikes sharing a maxchan."""
spikes = self.spikes
dtypefields = list(spikes.dtype.fields)
if sids is None:
sids = spikes['id'] # default to all spikes
comps = [ dim for dim in dims if dim.startswith('c') and dim[-1].isdigit() ]
rmserror = np.any([ dim == 'RMSerror' for dim in dims ])
ncomp = len(comps)
hascomps = ncomp > 0
if hascomps:
X = self.get_component_matrix(kind, sids, tis=tis, chans=selchans,
minncomp=ncomp, norm=norm)
if rmserror:
rms = self.get_rms_error(sids, tis=tis, chans=selchans)
data = []
for dim in dims:
if dim in dtypefields:
data.append( np.float32(spikes[dim][sids]) )
elif dim.startswith('c') and dim[-1].isdigit():
compid = int(lstrip(dim, 'c'))
data.append( np.float32(X[:, compid]) )
elif dim == 'RMSerror':
data.append( np.float32(rms) )
else:
raise RuntimeError('Unknown dim %r' % dim)
# np.column_stack returns a copy, not modifying the original array
data = np.column_stack(data)
if scale:
# ensure 0 mean, and unit variance/stdev
for dim, d in zip(dims, data.T): # d iterates over columns
d -= d.mean()
if dim in ['x0', 'y0'] and self.probe.ncols > 1:
try: x0std # normalize spatial params by x0 std
except NameError: x0std = spikes['x0'].std()
if x0std != 0.0:
d /= x0std
#elif dim == 't': # the longer the recording in hours, the greater the
# # scaling in time
# trange = d.max() - d.min()
# tscale = trange / (60*60*1e6)
# d *= tscale / d.std()
else: # normalize all other dims by their std
dstd = d.std()
if dstd != 0.0:
d /= dstd
return data
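    # Hypothetical usage sketch (assumes a Sort instance `sort` and that 'x0'/'y0'
    # are fields of the spikes record array; illustrative only):
    #     X = sort.get_param_matrix(kind='PCA', sids=sids, tis=(0, 50),
    #                               dims=['x0', 'y0', 'c0', 'c1'], scale=True)
    # returns an (nspikes, len(dims)) float32 matrix, with 'c0'/'c1' pulled from the
    # PCA/ICA component matrix and the spatial params normalized by the std of x0.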
def get_component_matrix(self, kind, sids, tis=None, chans=None, minncomp=None,
norm=False):
"""Find set of chans common to all sids, and do PCA/ICA on those waveforms. Or,
if chans are specified, limit PCA/ICA to them. Return component matrix with at
least minncomp dimensions"""
spikes = self.spikes
nt = self.wavedata.shape[2]
if tis is None: # use full waveform
tis = np.asarray([0, nt])
#print('tis: %r' % (tis,))
ti0, ti1 = tis
assert ti0 < ti1 <= nt
nt = ti1 - ti0
chans, chanslist = self.get_common_chans(sids, chans)
nchans = len(chans)
nspikes = len(sids)
if nspikes < 2:
raise RuntimeError("Need at least 2 spikes for %s" % kind)
if nchans == 0:
raise RuntimeError("Spikes have no common chans for %s" % kind)
# check if desired components have already been calculated (cache hit):
Xhash = self.get_Xhash(kind, sids, tis, chans, self.npcsperchan, norm)
self.Xhash = Xhash # save as key to most recent component matrix in self.X
try: self.X
except AttributeError: self.X = {} # init the dimension reduction cache attrib
if Xhash in self.X:
print('Cache hit, using cached %ss from tis=%r, chans=%r of %d spikes' %
(kind[:-1], list(tis), list(chans), nspikes))
return self.X[Xhash] # no need to recalculate
print('Cache miss, (re)calculating %ss' % kind[:-1])
# collect data between tis from chans from all spikes:
print('Doing %s on tis=%r, chans=%r of %d spikes' %
(kind, list(tis), list(chans), nspikes))
# MDP complains of roundoff errors with float32 for large covariance matrices
data = np.zeros((nspikes, nchans, nt), dtype=np.float64)
for sii, sid in enumerate(sids):
spikechans = chanslist[sii]
spikechanis = spikechans.searchsorted(chans)
spikedata = self.wavedata[sid][spikechanis, ti0:ti1]
if norm:
# normalize by Vpp of chan with max Vpp:
maxptp = spikedata.ptp(axis=1).max()
if maxptp != 0: # prevent div by 0
spikedata = spikedata / maxptp
data[sii] = spikedata
print('Input shape for %s: %r' % (kind, data.shape))
t0 = time.time()
data.shape = nspikes, nchans*nt # flatten timepoints of all chans into columns
print('Reshaped input for %s: %r' % (kind, data.shape))
if kind == 'PCA': # principal components analysis
if PCALIB == 'mdp':
import mdp # delay as late as possible
X = mdp.pca(data, output_dim=5, svd=False) # svd=False is default
elif PCALIB == 'sklearn':
# sklearn's PCA is about 8x slower than mdp.pca, I think because it
# doesn't tap into scipy.linalg.eig compiled code. RandomizedPCA is faster
# than PCA, but isn't deterministic, and is still 2-3x slower than mdp.pca
from sklearn.decomposition import PCA
pca = PCA(n_components=5)
X = pca.fit_transform(data) # do both the fit and the transform
else:
raise ValueError('Invalid PCALIB %r' % PCALIB)
if X.shape[1] < minncomp:
raise RuntimeError("Can't satisfy minncomp=%d request" % minncomp)
elif kind == 'sPCA': # sparse principal components analysis
from sklearn.decomposition import SparsePCA
n_components = 5
alpha = 1 # sparseness parameter
n_jobs = mp.cpu_count()
spca = SparsePCA(n_components=n_components, alpha=alpha, n_jobs=n_jobs)
X = spca.fit_transform(data) # do both the fit and the transform
elif kind == 'mbsPCA': # mini batch sparse principal components analysis
from sklearn.decomposition import MiniBatchSparsePCA
n_components = 5
alpha = 1 # sparseness parameter
n_jobs = mp.cpu_count()
mbspca = MiniBatchSparsePCA(n_components=n_components, alpha=alpha, n_jobs=n_jobs)
X = mbspca.fit_transform(data) # do both the fit and the transform
elif kind == 'NMF': # non-negative matrix factorization
from sklearn.decomposition import NMF
n_components = 5
init = None # 'random', 'nndsvd', 'nndsvda', 'nndsvdar', 'custom'
nmf = NMF(n_components=n_components, init=init)
X = nmf.fit_transform(data) # do both the fit and the transform
elif kind == 'tSNE': # t-distributed stochastic neighbor embedding
# limit number of PCs to feed into ICA, keep up to npcsperchan components per
# chan on average:
ncomp = min((self.npcsperchan*nchans, data.shape[1]))
print('ncomp: %d' % ncomp)
import mdp # delay as late as possible
# do PCA first, to reduce dimensionality and speed up ICA:
data = mdp.pca(data, output_dim=ncomp)
from sklearn.manifold import TSNE
n_components = 3 # not suited for any more than 3, according to the paper
#init = 'random', 'pca'
tsne = TSNE(n_components=n_components)
X = tsne.fit_transform(data) # do both the fit and the transform
elif kind == 'ICA': # independent components analysis
# ensure nspikes >= ndims**2 for good ICA convergence
maxncomp = intround(np.sqrt(nspikes))
if maxncomp < minncomp:
raise RuntimeError("Can't satisfy minncomp=%d request" % minncomp)
if data.shape[0] <= data.shape[1]:
raise RuntimeError('Need more observations than dimensions for ICA')
# limit number of PCs to feed into ICA, keep up to npcsperchan components per
# chan on average:
ncomp = min((self.npcsperchan*nchans, maxncomp, data.shape[1]))
if ICALIB == 'mdp':
import mdp # delay as late as possible
# do PCA first, to reduce dimensionality and speed up ICA:
print('ncomp: %d' % ncomp)
data = mdp.pca(data, output_dim=ncomp)
# nonlinearity g='pow3', ie x**3. tanh seems to separate better,
# but is a bit slower. gaus seems to be slower still, and no better
# than tanh, but these are just vague impressions.
# defaults to whitened=False, ie assumes data isn't whitened
node = mdp.nodes.FastICANode(g='pow3')
X = node(data)
pm = node.get_projmatrix()
X = X[:, np.any(pm, axis=0)] # keep only the non zero columns
elif ICALIB == 'sklearn':
from sklearn.decomposition import FastICA
# when whiten=True (default), FastICA preprocesses the data using PCA, and
# n_components is the number of PCs that are kept before doing ICA.
alg = 'parallel' # parallel or deflation, default is parallel
fun = 'logcosh' # logcosh, exp, or cube, default is logcosh
maxiter = 100 # default is 200
tol = 0.5 # default is 0.0001, seems need >~ 0.1 to exit faster
## TODO: make FastICA algorithm (parallel, deflation), nonlinearity (logcosh,
## exp, cube) and IC sort method (abs(kurtosis) vs. negentropy) GUI options
print('ncomp=%d, alg=%r, fun=%r, maxiter=%d, tol=%g'
% (ncomp, alg, fun, maxiter, tol))
fastica = FastICA(n_components=ncomp, algorithm=alg,
whiten=True, fun=fun, fun_args=None,
max_iter=maxiter, tol=tol, w_init=None,
random_state=None)
X = fastica.fit_transform(data) # do both the fit and the transform
#pm = fastica.components_
print('fastica niters: %d' % (fastica.n_iter_))
else:
raise ValueError('Invalid ICALIB %r' % ICALIB)
if X.shape[1] < 3:
raise RuntimeError('Need at least 3 columns')
# Sort ICs by decreasing kurtosis or negentropy. For kurtosis, see Scholz2004 (or
# rather, opposite to their approach, which picked ICs with most negative
# kurtosis). For methods of estimating negentropy, see Hyvarinen1997.
'''
# sort by abs(kurtosis) of each IC (column)
k = scipy.stats.kurtosis(X, axis=0)
ki = abs(k).argsort()[::-1] # decreasing order of abs(kurtosis)
print('Sort by abs(kurtosis):')
print(k[ki])
X = X[:, ki] # sort the ICs
'''
# sort by negentropy of each IC (column), this seems to work better than kurtosis
# at separating clusters of similar size:
ne = core.negentropy(X, axis=0)
assert (ne > 0).all()
nei = ne.argsort()[::-1] # decreasing order of negentropy
print('Sort by negentropy:')
print(ne[nei])
X = X[:, nei] # sort the ICs
'''
import pylab as pl
pl.figure()
pl.imshow(pm)
pl.colorbar()
pl.title('original projmatrix')
pl.figure()
pl.imshow(pm[:, ki])
pl.colorbar()
pl.title('decreasing abs(kurtosis) projmatrix')
pl.figure()
pl.imshow(pm[:, nei])
pl.colorbar()
pl.title('decreasing negentropy projmatrix')
'''
else:
raise ValueError('Unknown kind %r' % kind)
print('Output shape for %s: %r' % (kind, X.shape))
self.X[Xhash] = X # cache for fast future retrieval
print('%s took %.3f sec' % (kind, time.time()-t0))
unids = list(np.unique(spikes['nid'][sids])) # set of all nids that sids span
for nid in unids:
# don't update pos of junk cluster, if any, since it might not have any chans
# common to all its spikes, and therefore can't have PCA/ICA done on it
if nid != 0:
self.clusters[nid].update_comppos(X, sids)
return X
def get_rms_error(self, sids, tis=None, chans=None):
"""Calculate RMS error of spike waveforms (all from the same cluster) relative to
their cluster's mean waveform. Consider only selected tis and chans"""
spikes = self.spikes
nids = np.unique(spikes['nid'][sids])
nid = nids[0]
if len(nids) > 1 or nid == 0:
raise RuntimeError("Spikes must all belong to the same (non-junk) cluster for "
"RMS error calculation")
nt = self.wavedata.shape[2]
if tis is None: # use full waveform
tis = np.asarray([0, nt])
#print('tis: %r' % (tis,))
ti0, ti1 = tis
assert ti0 < ti1 <= nt
nt = ti1 - ti0
chans, chanslist = self.get_common_chans(sids, chans)
nchans = len(chans)
nspikes = len(sids)
if nchans == 0:
raise RuntimeError("Spikes have no common chans for RMS error")
# collect data between tis from chans from all spikes:
print('Getting RMS error on tis=%r, chans=%r of %d spikes' %
(list(tis), list(chans), nspikes))
        data = np.zeros((nspikes, nchans, nt), dtype=np.float64)
import numpy as np
import math
from homie_particle import homie_particle
import copy
class homie_filter:
def __init__(self, num_particles, start_x, start_y, start_theta):
# create a list of homie particles
self._num_homies = num_particles
self._homies = np.array([homie_particle(start_x, start_y, start_theta) for i in range(num_particles)])
self.give_particle_id()
self._xs = []
self._ys = []
self._thetas = []
# physical properties of all homie robots
self._wheel_circ = 21.2 # cm circumference of each wheel
        self._clics_per_rev = 20 # number of "clicks" per revolution from rotary encoder
self._wheel_err_mult = 0.1 # multiplier to guesstimate wheel movement error
self._homie_weights = None
def move_particles(self, l_cnt, r_cnt, del_t):
# compute distance moved by each wheel
l_dist_mu = self._wheel_circ*l_cnt/self._clics_per_rev
r_dist_mu = self._wheel_circ*r_cnt/self._clics_per_rev
# compute standard deviation of movement for each wheel
l_dist_sig = self._wheel_err_mult*np.sqrt(np.abs(l_dist_mu))
r_dist_sig = self._wheel_err_mult*np.sqrt(np.abs(r_dist_mu))
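        # e.g. with the defaults above (21.2 cm circumference, 20 clicks/rev),
        # l_cnt = 10 gives l_dist_mu = 21.2*10/20 = 10.6 cm and
        # l_dist_sig ~ 0.1*sqrt(10.6) ~ 0.33 cm (illustrative numbers only)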
# randomly sample left and right wheel distances and move particles
for i in range(len(self._homies)):
# print('Moving particle: '+str(self._homies[i]._id))
this_l_dist = np.random.normal(l_dist_mu, l_dist_sig)
            this_r_dist = np.random.normal(r_dist_mu, r_dist_sig)
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import uuid
import numpy as np
import tempfile
from io import BytesIO
from base64 import b64encode
from contextlib import contextmanager
import warnings
try:
from matplotlib.colors import ColorConverter
import matplotlib as mpl
import matplotlib.pyplot as plt
except ImportError:
ColorConverter = None
mpl = None
plt = None
try:
import av
except ImportError:
av = None
try:
with warnings.catch_warnings():
warnings.simplefilter("ignore")
from moviepy.editor import VideoClip
except ImportError:
VideoClip = None
def export_pyav(sequence, filename, rate=30, bitrate=None,
width=None, height=None, codec='mpeg4', format='yuv420p',
autoscale=True):
"""Export a sequence of images as a standard video file using PyAv.
N.B. If the quality and detail are insufficient, increase the
bitrate.
Parameters
----------
sequence : any iterator or array of array-like images
The images should have two dimensions plus an
optional third dimensions representing color.
filename : string
name of output file
rate : integer
frame rate of output file, 30 by default
bitrate : integer
Video bitrate is crudely guessed if None is given.
width : integer
        By default, the width of the input images is used.
    height : integer
        By default, the height of the input images is used. If width is specified
        and height is not, the height is autoscaled to maintain the aspect
        ratio.
codec : string
a valid video encoding, 'mpeg4' by default
format: string
Video stream format, 'yuv420p' by default.
autoscale : boolean
Linearly rescale the brightness to use the full gamut of black to
white values. If the datatype of the images is not 'uint8', this must
be set to True, as it is by default.
"""
if av is None:
        raise ImportError("This feature requires PyAV with FFmpeg or libav installed.")
output = av.open(filename, 'w')
stream = output.add_stream(bytes(codec), rate)
stream.pix_fmt = bytes(format)
ndim = None
for frame_no, img in enumerate(sequence):
if not frame_no:
# Inspect first frame to set up stream.
if bitrate is None:
bitrate = _estimate_bitrate(img.shape, rate)
stream.bit_rate = int(bitrate)
if width is None:
stream.height = img.shape[0]
stream.width = img.shape[1]
else:
stream.width = width
stream.height = (height or
width * img.shape[0] // img.shape[1])
ndim = img.ndim
if ndim == 3:
if img.shape.count(3) != 1:
raise ValueError("Images have the wrong shape.")
# This is a color image. Ensure that the color axis is axis 2.
color_axis = img.shape.index(3)
img = np.rollaxis(img, color_axis, 3)
elif ndim == 2:
# Expand into color to satisfy PyAV's expectation that images
# be in color. (Without this, an assert is tripped.)
img = np.repeat(np.expand_dims(img, 2), 3, axis=2)
else:
raise ValueError("Images have the wrong shape.")
# PyAV requires uint8.
        if img.dtype != np.uint8 and (not autoscale):
raise ValueError("Autoscaling must be turned on if the image "
"data type is not uint8. Convert the datatype "
"manually if you want to turn off autoscale.")
if autoscale:
normed = (img - img.min()) / (img.max() - img.min())
img = (255 * normed).astype('uint8')
frame = av.VideoFrame.from_ndarray(np.asarray(img), format=b'bgr24')
packet = stream.encode(frame)
output.mux(packet)
output.close()
def play(sequence, rate=30, bitrate=None,
width=None, height=None, autoscale=True):
"""In an IPython notebook, display a sequence of images as
an embedded video.
N.B. If the quality and detail are insufficient, increase the
bit rate.
Parameters
----------
sequence : any iterator or array of array-like images
The images should have two dimensions plus an
optional third dimensions representing color.
rate : integer
frame rate of output file, 30 by default
bitrate : integer
Video bitrate is crudely guessed if None is given.
width : integer
        By default, the width of the input images is used.
    height : integer
        By default, the height of the input images is used. If width is specified
        and height is not, the height is autoscaled to maintain the aspect
        ratio.
autoscale : boolean
Linearly rescale the brightness to use the full gamut of black to
white values. If the datatype of the images is not 'uint8', this must
be set to True, as it is by default.
"""
try:
from IPython.display import display
except ImportError:
raise ImportError("This feature requires IPython.")
with tempfile.NamedTemporaryFile(suffix='.webm') as temp:
export_pyav(sequence, bytes(temp.name), codec='libvpx', rate=rate,
width=width, height=height, bitrate=bitrate,
format='yuv420p', autoscale=True)
temp.flush()
display(repr_video(temp.name, 'x-webm'))
class CachedFrameGenerator(object):
def __init__(self, sequence, rate, autoscale=True):
self.sequence = sequence
self._cached_frame_no = None
self._cache = None
self.autoscale = autoscale
self.rate = rate
def __call__(self, t):
frame_no = int(t * self.rate)
if self._cached_frame_no != frame_no:
self._cached_frame_no = frame_no
self._cache = _to_rgb_uint8(self.sequence[frame_no], self.autoscale)
return self._cache
def export_moviepy(sequence, filename, rate=30, bitrate=None, width=None,
height=None, codec='libx264', format='yuv420p',
autoscale=True, quality=None, verbose=True,
ffmpeg_params=None, rate_range=(16, 32)):
"""Export a sequence of images as a standard video file using MoviePy.
Parameters
----------
sequence : any iterator or array of array-like images
The images should have two dimensions plus an
optional third dimensions representing color.
filename : string
name of output file
rate : integer, optional
frame rate of output file, 30 by default
NB: The output frame rate will be limited between `rate_range`
bitrate : integer or string, optional
Preferably use the parameter `quality` for controlling the bitrate.
width : integer, optional
        By default, the width of the input images is used.
    height : integer, optional
        By default, the height of the input images is used. If width is specified
        and height is not, the height is autoscaled to maintain the aspect
        ratio.
codec : string, optional
a valid video encoding, 'libx264' by default
format: string, optional
Video stream format, 'yuv420p' by default.
quality: number or string, optional
For 'mpeg4' codec: sets qscale:v. 1 = high quality, 5 = default.
For 'libx264' codec: sets crf. 0 = lossless, 23 = default.
For 'wmv2' codec: sets fraction of lossless bitrate, 0.01 = default
autoscale : boolean, optional
Linearly rescale the brightness to use the full gamut of black to
white values. True by default.
verbose : boolean, optional
Determines whether MoviePy will print progress. True by default.
ffmpeg_params : dictionary, optional
List of parameters that will be passed to ffmpeg. Avoid using
['-qscale:v', '-crf', '-pixel_format'].
rate_range : tuple of two numbers
As extreme frame rates have playback issues on many players, by default
the frame rate is limited between 16 and 32. When the desired frame rate
is too low, frames will be multiplied an integer number of times. When
the desired frame rate is too high, frames will be skipped at constant
intervals.
See Also
--------
http://zulko.github.io/moviepy/ref/VideoClip/VideoClip.html#moviepy.video.VideoClip.VideoClip.write_videofile
"""
if VideoClip is None:
raise ImportError('The MoviePy exporter requires moviepy to work.')
if ffmpeg_params is None:
ffmpeg_params = []
export_rate = _normalize_framerate(rate, *rate_range)
if codec == 'wmv2' and bitrate is None and quality is None:
quality = 0.01
if quality is not None:
if codec == 'mpeg4':
ffmpeg_params.extend(['-qscale:v', str(quality)])
elif codec == 'libx264':
ffmpeg_params.extend(['-crf', str(quality)])
elif codec == 'wmv2':
if bitrate is not None:
warnings.warn("(wmv) quality is ignored when bitrate is set.")
else:
bitrate = quality * _estimate_bitrate(sequence.frame_shape,
export_rate)
if format is not None:
ffmpeg_params.extend(['-pixel_format', str(format)])
if bitrate is not None:
bitrate = str(bitrate)
if rate <= 0:
        raise ValueError("frame rate must be a positive number")
clip = VideoClip(CachedFrameGenerator(sequence, rate, autoscale))
clip.duration = (len(sequence) - 1) / rate
if not (height is None and width is None):
clip = clip.resize(height=height, width=width)
clip.write_videofile(filename, export_rate, codec, bitrate, audio=False,
verbose=verbose, ffmpeg_params=ffmpeg_params)
if av is not None:
export = export_pyav
elif VideoClip is not None:
export = export_moviepy
else:
export = None
def repr_video(fname, mimetype):
"""Load the video in the file `fname`, with given mimetype,
and display as HTML5 video.
"""
try:
from IPython.display import HTML
except ImportError:
raise ImportError("This feature requires IPython.")
    with open(fname, "rb") as f:
        video_encoded = b64encode(f.read()).decode('utf-8')
video_tag = """<video controls>
<source alt="test" src="data:video/{0};base64,{1}" type="video/webm">
Use Google Chrome browser.</video>""".format(mimetype, video_encoded)
return HTML(data=video_tag)
def _scrollable_stack(sequence, width, normed=True):
# See the public function, scrollable_stack, below.
# This does all the work, and it returns a string of HTML and JS code,
# as expected by Frame._repr_html_(). The public function wraps this
# in IPython.display.HTML for the user.
from IPython.display import Javascript, HTML, display_png
from jinja2 import Template
SCROLL_STACK_JS = Template("""
require(['jquery'], function() {
if (!(window.PIMS)) {
var stack_cursors = {};
window.PIMS = {stack_cursors: {}};
}
$('#stack-{{stack_id}}-slice-0').css('display', 'block');
window.PIMS.stack_cursors['{{stack_id}}'] = 0;
});
require(['jquery'],
$('#image-stack-{{stack_id}}').bind('mousewheel DOMMouseScroll', function(e) {
var direction;
var cursor = window.PIMS.stack_cursors['{{stack_id}}'];
e.preventDefault();
if (e.type == 'mousewheel') {
direction = e.originalEvent.wheelDelta < 0;
}
else if (e.type == 'DOMMouseScroll') {
direction = e.originalEvent.detail < 0;
}
var delta = direction * 2 - 1;
if (cursor + delta < 0) {
return;
}
else if (cursor + delta > {{length}} - 1) {
return;
}
$('#stack-{{stack_id}}-slice-' + cursor).css('display', 'none');
$('#stack-{{stack_id}}-slice-' + (cursor + delta)).css('display', 'block');
window.PIMS.stack_cursors['{{stack_id}}'] = cursor + delta;
}));""")
TAG = Template('<img src="data:image/png;base64,{{data}}" '
'style="display: none;" '
'id="stack-{{stack_id}}-slice-{{i}}" />')
WRAPPER = Template('<div id="image-stack-{{stack_id}}", style='
'"width: {{width}}; float: left; display: inline;">')
stack_id = uuid.uuid4() # random unique identifier
js = SCROLL_STACK_JS.render(length=len(sequence), stack_id=stack_id)
output = '<script>{0}</script>'.format(js)
output += WRAPPER.render(width=width, stack_id=stack_id)
if normed:
sequence = normalize(np.asarray(sequence))
for i, s in enumerate(sequence):
output += TAG.render(
data=b64encode(_as_png(s, width, normed=False)).decode('utf-8'),
stack_id=stack_id, i=i)
output += "</div>"
return output
def scrollable_stack(sequence, width=512, normed=True):
"""Display a sequence or 3D stack of frames as an interactive image
that responds to scrolling.
Parameters
----------
sequence: a 3D Frame (or any array) or an iterable of 2D Frames (or arrays)
width: integer
Optional, defaults to 512. The height is auto-scaled.
normed : Rescale the brightness to fill the gamut. All pixels in the
stack rescaled uniformly.
Returns
-------
an interactive image, contained in a IPython.display.HTML object
"""
from IPython.display import HTML
return HTML(_scrollable_stack(sequence, width=width, normed=normed))
def _as_png(arr, width, normed=True):
"Create a PNG image buffer from an array."
try:
from PIL import Image
except ImportError:
raise ImportError("This feature requires PIL/Pillow.")
w = width # for brevity
h = arr.shape[0] * w // arr.shape[1]
if normed:
arr = normalize(arr)
img = Image.fromarray((arr * 255).astype('uint8')).resize((w, h))
img_buffer = BytesIO()
img.save(img_buffer, format='png')
return img_buffer.getvalue()
def normalize(arr):
"""This normalizes an array to values between 0 and 1.
Parameters
----------
arr : ndarray
Returns
-------
ndarray of float
normalized array
"""
ptp = arr.max() - arr.min()
# Handle edge case of a flat image.
if ptp == 0:
ptp = 1
scaled_arr = (arr - arr.min()) / ptp
return scaled_arr
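# For example (illustrative, not part of the original module):
#     normalize(np.array([2.0, 4.0, 6.0]))  ->  array([0. , 0.5, 1. ])
# and a constant array maps to all zeros, since ptp is clamped to 1 above.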
def _to_rgb_uint8(image, autoscale):
if autoscale:
image = (normalize(image) * 255).astype(np.uint8)
    elif image.dtype != np.uint8:
if np.issubdtype(image.dtype, np.integer):
max_value = np.iinfo(image.dtype).max
# sometimes 12-bit images are stored as unsigned 16-bit
if max_value == 2**16 - 1 and image.max() < 2**12:
max_value = 2**12 - 1
image = (image / max_value * 255).astype(np.uint8)
else:
image = (image * 255).astype(np.uint8)
ndim = image.ndim
shape = image.shape
if ndim == 3 and shape.count(3) == 1:
# This is a color image. Ensure that the color axis is axis 2.
color_axis = shape.index(3)
image = np.rollaxis(image, color_axis, 3)
elif image.ndim == 3 and shape.count(4) == 1:
# This is an RGBA image. Drop the A values.
color_axis = shape.index(4)
        image = np.rollaxis(image, color_axis, 4)
#!/usr/bin/env python
from __future__ import (print_function, division)
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.gridspec import GridSpec
from scipy.interpolate import griddata
from scipy.stats import norm
from scipy.optimize import curve_fit
from astropy.io import fits
import os
import matplotlib.colors as colors
import matplotlib.cm as cm
import matplotlib.ticker as plticker
def lazy_mode():
raise NotImplementedError
def estimate_ccf_err(ccf, mask=None):
if mask is None:
m = np.ones_like(ccf, dtype=bool)
else:
m = mask
# if return 2d array
# ccf_err = np.ones_like(ccf)
# error = np.sqrt(ccf)
error = np.array(
[np.std(ccf[i][~m[i]]) for i in range(ccf.shape[0])]
)
# error is 1d
return error
# ccf_err *= error[:, None]
# if return 2d array
# return ccf_err
def get_ccf_residuals(master_out, ccf, mask=None):
if mask is None:
mask = np.ones(ccf.shape[0], dtype=bool)
return master_out - ccf[mask,:]
def create_master_out(rv, ccf, ccf_err, velocity_mask=None, bad=None):
# stack the out of transit CCFs
# tm = transit_mask
vm = velocity_mask
if vm is None:
vm = np.ones_like(rv, dtype=bool)
if bad is not None:
keep = np.arange(ccf.shape[0])
bad = np.atleast_1d(bad)
keep = np.setdiff1d(keep, bad)
ccf = ccf[keep]
ccf_err = ccf_err[keep]
# tm = tm[keep]
# rv_out = np.median(rv[~m,:], axis=0)
# assert _all_equal(rv), "input RV arrays are not equal, master RV array is ambiguous"
# ccf_out = np.sum(ccf[~m], axis=0)
ccf_out = np.average(ccf, weights=1/ccf_err**2, axis=0)
# ccf_out = np.mean(ccf[~tm], axis=0)
ccf_err_out = np.sqrt(np.sum(ccf_err**2, axis=0))
# fit to shift to stellar rest frame and normalise
popt, _ = fit_ccf(rv[vm], ccf_out[vm], yerr=ccf_err_out[vm])
c, mu = popt[[0,2]]
print('mu = {:.4f} +- {:.4f}'.format(mu, _[2]))#, mu)
# print(what)
# rv_out = rv - mu
# ccf_out /= c
# ccf_err_out /= c
return (rv-mu, ccf_out, ccf_err_out,
inverted_normal_distribution(rv, *popt))# / c)
def inverted_normal_distribution(x, c, A, mu, sigma):
# model function for the inverted Gaussian distribution
# return c + A * norm.pdf(x, loc=mu, scale=sigma)
return (c -
A * np.exp(
# -0.5*(x - mu)**2 / (2*sigma**2)
-0.5 * ( (x - mu) / sigma )**2
)#/
# (sigma * np.sqrt(2*np.pi))
)
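# Quick sanity check (illustrative values, not from the original code): at the line
# centre the model returns the continuum minus the full line depth, e.g.
#     inverted_normal_distribution(0.0, c=1.0, A=0.25, mu=0.0, sigma=3.0) == 0.75
# while far from mu it tends back to the continuum level c.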
def fit_ccf_residuals_pymc3(rv, ccf, ccf_err=None, step=1000, tune=1000,
chains=2, cores=2,
target_accept=0.8):
try:
import pymc3 as pm
import theano.tensor as tt
    except ImportError:
        raise ImportError("pymc3 or theano not installed; either install or use "
                          "utils.fit_ccf_residuals()")
# varnames = ["c", "A", "mu", "sigma", "jitter", "mod"]
with pm.Model() as model:
# logA = pm.Normal('logA', mu=0, sd=10)
# logc = pm.Normal('logc', mu=0, sd=10)
# c = pm.Normal('c', mu=0, sd=10)
# A = pm.Bound(pm.Normal, upper=0)('A', mu=-0.1, sd=5)
# jitter = pm.Bound(pm.Normal, lower=0)('jitter', mu=0.01, sd=5)
# A = pm.Normal('A', mu=
# c = pm.Normal('c', mu=0, sd=10)
# c = pm.Normal('c', mu=0, sd=1)
med = np.median(ccf)
# c = pm.Normal('c', mu=med, sd=np.std(ccf))
c_ppt = pm.Normal('c_ppt', mu=0, sd=np.std(ccf)*1e3)
c = pm.Deterministic('c', c_ppt/1e3)
# c = pm.Normal('c', mu=0, sd=np.std(ccf)*1e3)
# c = 0.0
# c = pm.Uniform('c', -1, 1)
rv_range = rv[-1] - rv[0]
# rv_range = 20
# mu = pm.Bound(
# pm.Normal, lower=rv[0], upper=rv[-1]
## pm.Normal, lower=-15, upper=15,
# )(
# 'mu', mu=0, sd=5.0
## testval=rv[np.argmin(ccf)]
# )
# mu = -5.
# standard use
# mu = pm.Normal('mu', mu=0, sd=rv_range/2)
mu = pm.Bound(pm.Normal, lower=-5, upper=5)(
'mu', mu=0, sd=rv_range/2
)
# sigma = pm.Bound(pm.Normal, lower=0, upper=rv_range/2)(
# 'sigma', mu=5, sd=rv_range)
# sigma = pm.Bound(pm.HalfNormal, lower=0)('sigma', sd=10)
# sigma = pm.Bound(pm.HalfNormal, lower=0)('sigma', sd=5)
# sigma = pm.Bound(pm.HalfNormal, lower=0.5)('sigma', sd=2)
sigma = pm.HalfNormal('sigma', sd=10)
# sigma = 3
# log_sigma = pm.Normal('log_sigma', mu=1.1, sd=0.5)
# sigma = pm.Deterministic('sigma', tt.exp(log_sigma))
fwhm = pm.Deterministic('fwhm', sigma * 2 * tt.sqrt(2*tt.log(2)))
# sigma = pm.Uniform('sigma', 0, rv_range*3)
# log_sigma = pm.Normal('log_sigma', mu=0, sd=np.log((rv[-1]-rv[0])*3))
# log_sigma = pm.Normal('log_sigma', mu=0, sd=np.log(rv_range))
# log_sigma = pm.Bound(pm.Normal, upper=rv_range*3)(
# 'log_sigma', mu=0, sd=10)
# logjitter = pm.Normal('logs', mu=0, sd=10)
# logjitter = pm.Normal('logs', mu=0, sd=10)
# sigma = pm.Deterministic('sigma', tt.exp(log_sigma))
# c = pm.Deterministic('c', tt.exp(logc))
# A = pm.Deterministic('A', tt.exp(logA))
# A = pm.Uniform('A', lower=0, upper=1)
# A = pm.HalfNormal('A', sd=np.abs(ccf.max() - ccf.min()))
A_ppt = pm.HalfNormal('A_ppt', sd=np.abs(ccf.max() - ccf.min()) * 1e3)
A = pm.Deterministic('A', A_ppt/1e3)
# A = 0.0002
# A = pm.Bound(pm.Normal, lower=1e-6, upper=0.1)('A', mu=0.002, sd=0.01)
# print(np.abs(
# np.min(ccf) - np.median(ccf)
# ))
# logA = pm.Normal('logA',
# mu=np.log(
# np.abs(
# np.min(ccf) - np.median(ccf)
# )
# ),
# sd=5)
# A = pm.Deterministic('A', tt.exp(logA))
# A = pm.Bound(pm.Normal, lower=0, upper=1)
# 'A', mu
# jitter = pm.Uniform('jitter', 0, 1)
# jitter = pm.Deterministic('jitter', tt.exp(logjitter))
# model = c - A * pm.Normal('')
mod = (c -
# A * tt.exp(-0.5*(rv - mu)**2 / sigma**2)#/
A * tt.exp(-0.5 * tt.sqr((rv - mu) / sigma))#/
# (sigma * np.sqrt(2*np.pi))
)
models = pm.Deterministic('models', mod)
if ccf_err is None:
# jitter = pm.Bound(pm.HalfNormal, lower=0)('jitter', sd=1)
jitter_ppt = pm.HalfNormal('jitter_ppt', sd=np.std(ccf)*1e3)
jitter = pm.Deterministic('jitter', jitter_ppt/1e3)
# log_jitter = pm.Normal('log_jitter', mu=np.log(np.std(ccf)*10), sd=1)
# jitter = pm.Deterministic('jitter', tt.exp(log_jitter))
# jitter = pm.Bound(pm.HalfNormal, lower=0)('jitter', sd=np.std(ccf))
obs = pm.Normal('obs', mu=mod, sd=jitter, observed=ccf)
else:
obs = pm.Normal('obs', mu=mod, sd=ccf_err, observed=ccf)
# obs = pm.Normal('obs', mu=mod, sd=np.median(ccf_err), observed=ccf)
trace = pm.sample(step, tune=tune, chains=chains, cores=cores,
target_accept=target_accept)
return trace
def fit_ccf_residuals(rv, ccf, err=None, p0=None, **kwargs):
n = ccf.shape[0]
# mu = np.zeros(n)
# mu_err = np.zeros(n)
# models = np.zeros_like(ccf)
# (level, contrast, centre, width,
# level_err, contrast_err, centre_err, width_err) = np.zeros((8,n))
popts, perrs = [], []
if err is None:
err = np.ones_like(ccf)
for i in range(n):
popt, perr = fit_ccf(rv, ccf[i], yerr=err[i], p0=p0,
**kwargs)
# for j,x in enumerate([level, contrast, centre, width]):
# x[i] = popt[j]
# level[i] =
# level[i] = popt[0]
popts.append(popt)
perrs.append(perr)
# mu[i] = popt[2]
# mu_err[i] = perr[2]
# if len(popt) > 4:
# s = slice(0,-1)
# else:
# s = slice(0,None)
# models[i] = inverted_normal_distribution(rv, *popt[s])
# return mu, mu_err, models
return np.atleast_2d(popts), np.atleast_2d(perrs)
def _log_probability(theta, x, y, yerr):
def _log_likelihood(data, model, error):
inv_sigma2 = 1/error**2
return -0.5 * np.sum((data - model)**2 * inv_sigma2 - np.log(inv_sigma2))
c, A, mu, sigma, jitter = theta
if A > 0 or A < -10:
return -np.inf
elif jitter < 0:
return -np.inf
elif sigma < 0 or sigma > (x[-1]-x[0])/4:
return -np.inf
elif mu < x[0] or mu > x[-1]:
return -np.inf
elif c < -1 or c > 1:
return -np.inf
model = inverted_normal_distribution(x, c, A, mu, sigma)
error = np.sqrt(yerr**2 + (model * jitter)**2)
return _log_likelihood(y, model, error)
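# Note (added for clarity): the hard rejections above act as flat priors,
# restricting A to [-10, 0], jitter to >= 0, sigma to [0, (x[-1]-x[0])/4],
# mu to the velocity grid and c to [-1, 1]; any step outside these bounds
# returns -inf and is never accepted by the sampler.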
def fit_ccf(rv, ccf, yerr=None, p0=None, method='lsq', mcmc_steps=2000,
mcmc_threads=2):
x = rv
y = ccf
if yerr is None:
        yerr = np.ones_like(y)  # fall back to unit errors so curve_fit still runs
        abs_sig = False
    else:
        abs_sig = True
# initial guesses
# if p0 is None:
c0 = np.median(y)
# A0 = np.abs(c0 / np.min(y))
A0 = np.abs(c0 - np.min(y))
mu0 = x[np.argmin(y)]
sigma0 = 3
default = [c0, A0, mu0, sigma0]
# alpha0 = 0.0
_p0 = []
if isinstance(p0, list):
for i in range(len(p0)):
if p0[i] is None:
_p0.append(default[i])
else:
_p0.append(p0[i])
p0 = _p0
elif p0 is None:
p0 = default
f = inverted_normal_distribution
# try:
if method == 'lsq':
popt, pcov = curve_fit(f,
x, y, p0=p0, method='lm',
sigma=np.ones_like(y)*yerr,
absolute_sigma=True,
# sigma=yerr,
# absolute_sigma=abs_sig
)
perr = np.sqrt(np.diag(pcov))
elif method == 'mcmc':
try:
import emcee
from multiprocessing import Pool
        except ImportError:
            raise ImportError("emcee not installed; either install or use method='lsq'")
walkers = 400
ndim = 5
p0.append(np.median(yerr)*0.01)
p0_err = np.array([p0[0]*0.02, p0[1]*0.02, 0.5, p0[3]*0.02, p0[4]*0.1])
p0 = np.array(p0)
print(p0)
print(p0_err)
start = [p0 + np.random.randn(ndim) * p0_err for _ in range(walkers)]
# print(start)
if mcmc_threads > 1:
os.environ["OMP_NUM_THREADS"] = "1"
with Pool(processes=2) as pool:
sampler = emcee.EnsembleSampler(walkers, ndim,
_log_probability,
pool=pool,
args=(x, y, yerr))
sampler.run_mcmc(start, mcmc_steps, progress=True)
else:
sampler = emcee.EnsembleSampler(walkers, ndim,
_log_probability,
args=(x, y, yerr))
sampler.run_mcmc(start, mcmc_steps, progress=True)
discard = int(0.75*mcmc_steps)
fc = sampler.get_chain(flat=True, discard=discard)
fcm = sampler.get_chain()
print('fcm shape', fcm.shape)
print('fc shape', fc.shape)
steps = np.arange(mcmc_steps)
fig, axes = plt.subplots(ndim+1,1, figsize=(10,2*ndim),
gridspec_kw={"hspace":0.01})
labels = ['logp', 'c', 'A', 'mu', 'sigma', 'jitter']
for i in range(ndim+1):
for j in range(walkers):
if i == 0:
# print(sampler.get_log_prob().shape)
axes[i].plot(steps, sampler.get_log_prob()[:,j], lw=0.5)
else:
axes[i].plot(steps, fcm[:,j,i-1], lw=0.5)
axes[i].set_ylabel(labels[i])
if i == 4:
axes[i].set_xlabel('steps')
        popt = np.median(fc, axis=0)
import json
import os
import numpy as np
from tqdm import tqdm
from mmhuman3d.core.conventions.keypoints_mapping import convert_kps
from mmhuman3d.data.data_converters.base_converter import BaseModeConverter
from mmhuman3d.data.data_converters.builder import DATA_CONVERTERS
from mmhuman3d.data.data_structures.human_data import HumanData
from mmhuman3d.data.datasets.pipelines.hybrik_transforms import (
get_bbox,
get_intrinsic_matrix,
)
@DATA_CONVERTERS.register_module()
class MpiInf3dhpHybrIKConverter(BaseModeConverter):
"""MPI-INF-3DHP dataset for HybrIK `Monocular 3D Human Pose Estimation In
The Wild Using Improved CNN Supervision' 3DC`2017 More details can be found
in the `paper.
<https://arxiv.org/pdf/1611.09813.pdf>`__.
Args:
modes (list): 'test' or 'train' for accepted modes
"""
ACCEPTED_MODES = ['test', 'train']
def __init__(self, modes=[]):
super(MpiInf3dhpHybrIKConverter, self).__init__(modes)
@staticmethod
def cam2pixel_matrix(cam_coord: np.ndarray,
intrinsic_param: np.ndarray) -> np.ndarray:
"""Convert coordinates from camera to image frame given intrinsic
matrix
Args:
cam_coord (np.ndarray): Coordinates in camera frame
intrinsic_param (np.ndarray): 3x3 Intrinsic matrix
Returns:
img_coord (np.ndarray): Coordinates in image frame
"""
cam_coord = cam_coord.transpose(1, 0)
cam_homogeneous_coord = np.concatenate(
(cam_coord, np.ones((1, cam_coord.shape[1]), dtype=np.float32)),
axis=0)
img_coord = np.dot(intrinsic_param, cam_homogeneous_coord) / (
cam_coord[2, :] + 1e-8)
img_coord = np.concatenate((img_coord[:2, :], cam_coord[2:3, :]),
axis=0)
return img_coord.transpose(1, 0)
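    # Worked example (hypothetical numbers; assumes intrinsic_param is the 3x4
    # camera matrix stored in the HybrIK annotations): with
    #     K = np.array([[1000., 0., 320., 0.],
    #                   [0., 1000., 240., 0.],
    #                   [0., 0., 1., 0.]])
    # a camera-frame point (0.1, 0.05, 2.0) projects to pixel
    # (1000*0.1/2 + 320, 1000*0.05/2 + 240) = (370, 265), and the returned third
    # column keeps the camera-frame depth 2.0.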
def convert_by_mode(self, dataset_path: str, out_path: str,
mode: str) -> dict:
"""
Args:
dataset_path (str): Path to directory where hybrik preprocessed
json files are stored
out_path (str): Path to directory to save preprocessed npz file
mode (str): Mode in accepted modes
Returns:
dict:
A dict containing keys image_path, image_height, image_width,
bbox_xywh, cam_param, root_cam, depth_factor, keypoints3d,
keypoints3d_mask, keypoints3d_cam, keypoints3d_cam_mask
stored in HumanData() format
"""
if mode == 'train':
ann_file = os.path.join(dataset_path,
'annotation_mpi_inf_3dhp_train_v2.json')
elif mode == 'test':
ann_file = os.path.join(dataset_path,
'annotation_mpi_inf_3dhp_test.json')
with open(ann_file, 'r') as fid:
database = json.load(fid)
# use HumanData to store all data
human_data = HumanData()
# structs we use
image_path_, bbox_xywh_, root_cam_, image_width_, image_height_, \
joint_cam_, joint_img_, depth_factor_ = \
[], [], [], [], [], [], [], []
smpl = {}
smpl['thetas'] = []
smpl['betas'] = []
cam_param = {}
cam_param['f'] = []
cam_param['c'] = []
cam_param['intrinsic'] = []
num_datapoints = len(database['images'])
for ann_image, ann_annotations in tqdm(
zip(database['images'], database['annotations']),
total=num_datapoints):
ann = dict()
for k, v in ann_image.items():
assert k not in ann.keys()
ann[k] = v
for k, v in ann_annotations.items():
ann[k] = v
width, height = ann['width'], ann['height']
bbox = ann['bbox']
bbox = get_bbox(np.array(bbox), width, height)
K = np.array(ann['cam_param']['intrinsic_param'])
f = np.array([K[0, 0], K[1, 1]])
c = np.array([K[0, 2], K[1, 2]])
intrinsic = get_intrinsic_matrix(f, c, inv=True)
joint_cam = np.array(ann['keypoints_cam'])
num_joints = joint_cam.shape[0]
# if train
if mode == 'train':
root_idx = 4
_, sub, seq, vid, im = ann['file_name'].split('/')[-1].split(
'_')
fname = '{}/{}/{}/{}'.format(sub, seq,
vid.replace('V', 'video_'), im)
# fname = '{}/{}/imageFrames/{}/frame_{}'.format(
# sub, seq, vid.replace('V', 'video_'), im)
elif mode == 'test':
root_idx = 14
fname = 'mpi_inf_3dhp_test_set/' + ann['file_name']
# fname = 'mpi_inf_3dhp_test_set/mpi_inf_3dhp_test_set/' + ann[
# 'file_name']
joint_img = self.cam2pixel_matrix(joint_cam, K)
joint_img[:, 2] = joint_img[:, 2] - joint_cam[root_idx, 2]
root_cam = joint_cam[root_idx]
joint_img = np.hstack([joint_img, np.ones([num_joints, 1])])
joint_cam = np.hstack([joint_cam, np.ones([num_joints, 1])])
image_path_.append(fname)
image_height_.append(height)
image_width_.append(width)
bbox_xywh_.append(bbox)
depth_factor_.append(2000.)
cam_param['f'].append(f.reshape((-1, 2)))
cam_param['c'].append(c.reshape((-1, 2)))
cam_param['intrinsic'].append(intrinsic)
joint_cam_.append(joint_cam)
joint_img_.append(joint_img)
root_cam_.append(root_cam)
cam_param['f'] = np.array(cam_param['f']).reshape((-1, 2))
cam_param['c'] = np.array(cam_param['c']).reshape((-1, 2))
cam_param['intrinsic'] = np.array(cam_param['intrinsic']).reshape(
(-1, 3, 3))
if mode == 'train':
keypoints3d_ = np.array(joint_img_).reshape((-1, 28, 4))
keypoints3d_cam_ = np.array(joint_cam_).reshape((-1, 28, 4))
keypoints3d_, keypoints3d_mask = convert_kps(
keypoints3d_, 'hybrik_hp3d', 'human_data')
keypoints3d_cam_, keypoints3d_cam_mask = convert_kps(
keypoints3d_cam_, 'hybrik_hp3d', 'human_data')
elif mode == 'test':
keypoints3d_ = np.array(joint_img_).reshape((-1, 17, 4))
            keypoints3d_cam_ = np.array(joint_cam_).reshape((-1, 17, 4))
"""Primary tests."""
import copy
import functools
import pickle
from typing import Any, Callable, Dict, List, Optional, Tuple
import warnings
import numpy as np
import pytest
import scipy.optimize
from pyblp import (
Agents, CustomMoment, DemographicCovarianceMoment, Formulation, Integration, Iteration, Optimization, Problem,
Products, Simulation, build_ownership, data_to_dict, parallel
)
from pyblp.utilities.basics import Array, Options, update_matrices, compute_finite_differences
from .conftest import SimulatedProblemFixture
@pytest.mark.usefixtures('simulated_problem')
@pytest.mark.parametrize('solve_options_update', [
pytest.param({'method': '2s'}, id="two-step"),
pytest.param({'scale_objective': True}, id="scaled objective"),
pytest.param({'center_moments': False, 'W_type': 'unadjusted', 'se_type': 'clustered'}, id="complex covariances"),
pytest.param({'delta_behavior': 'last'}, id="faster starting delta values"),
pytest.param({'fp_type': 'linear'}, id="non-safe linear fixed point"),
pytest.param({'fp_type': 'safe_nonlinear'}, id="nonlinear fixed point"),
pytest.param({'fp_type': 'nonlinear'}, id="non-safe nonlinear fixed point"),
pytest.param(
{'iteration': Iteration('hybr', {'xtol': 1e-12}, compute_jacobian=True)},
id="linear Newton fixed point"
),
pytest.param(
{'fp_type': 'safe_nonlinear', 'iteration': Iteration('hybr', {'xtol': 1e-12}, compute_jacobian=True)},
id="nonlinear Newton fixed point"
)
])
def test_accuracy(simulated_problem: SimulatedProblemFixture, solve_options_update: Options) -> None:
"""Test that starting parameters that are half their true values give rise to errors of less than 10%."""
simulation, _, problem, solve_options, _ = simulated_problem
# skip different iteration configurations when they won't matter
if simulation.K2 == 0 and {'delta_behavior', 'fp_type', 'iteration'} & set(solve_options_update):
return pytest.skip("A different iteration configuration has no impact when there is no heterogeneity.")
if simulation.epsilon_scale != 1 and 'nonlinear' in solve_options_update.get('fp_type', 'safe_linear'):
return pytest.skip("Nonlinear fixed point configurations are not supported when epsilon is scaled.")
# update the default options and solve the problem
updated_solve_options = copy.deepcopy(solve_options)
updated_solve_options.update(solve_options_update)
updated_solve_options.update({k: 0.5 * solve_options[k] for k in ['sigma', 'pi', 'rho', 'beta']})
results = problem.solve(**updated_solve_options)
# test the accuracy of the estimated parameters
keys = ['sigma', 'pi', 'rho', 'beta']
if problem.K3 > 0:
keys.append('gamma')
for key in keys:
np.testing.assert_allclose(getattr(simulation, key), getattr(results, key), atol=0, rtol=0.1, err_msg=key)
@pytest.mark.usefixtures('simulated_problem')
@pytest.mark.parametrize('compute_options', [
pytest.param({'method': 'approximate'}, id="approximation"),
pytest.param({'method': 'normal'}, id="normal distribution"),
pytest.param({'method': 'empirical'}, id="empirical distribution")
])
def test_optimal_instruments(simulated_problem: SimulatedProblemFixture, compute_options: Options) -> None:
"""Test that starting parameters that are half their true values also give rise to errors of less than 10% under
optimal instruments.
"""
simulation, _, problem, solve_options, problem_results = simulated_problem
# compute optimal instruments and update the problem (only use a few draws to speed up the test)
compute_options = copy.deepcopy(compute_options)
compute_options.update({
'draws': 5,
'seed': 0
})
new_problem = problem_results.compute_optimal_instruments(**compute_options).to_problem()
# update the default options and solve the problem
updated_solve_options = copy.deepcopy(solve_options)
updated_solve_options.update({k: 0.5 * solve_options[k] for k in ['sigma', 'pi', 'rho', 'beta']})
new_results = new_problem.solve(**updated_solve_options)
# test the accuracy of the estimated parameters
keys = ['beta', 'sigma', 'pi', 'rho']
if problem.K3 > 0:
keys.append('gamma')
for key in keys:
np.testing.assert_allclose(getattr(simulation, key), getattr(new_results, key), atol=0, rtol=0.1, err_msg=key)
@pytest.mark.usefixtures('simulated_problem')
def test_importance_sampling(simulated_problem: SimulatedProblemFixture) -> None:
"""Test that starting parameters that are half their true values also give rise to errors of less than 20% under
importance sampling.
"""
simulation, _, problem, solve_options, problem_results = simulated_problem
# importance sampling is only relevant when there are agent data
if problem.K2 == 0:
return pytest.skip("There are no agent data.")
# it suffices to test importance sampling for problems without demographics
if problem.D > 0:
return pytest.skip("Testing importance sampling is hard with demographics.")
# compute a more precise delta
delta = problem_results.compute_delta(integration=simulation.integration)
# do importance sampling and verify that the mean utility didn't change if precise integration isn't used
sampling_results = problem_results.importance_sampling(
draws=500,
ar_constant=2,
seed=0,
delta=delta,
integration=Integration('mlhs', 50000, {'seed': 0}),
)
# solve the new problem
new_problem = sampling_results.to_problem()
updated_solve_options = copy.deepcopy(solve_options)
updated_solve_options.update({k: 0.5 * solve_options[k] for k in ['sigma', 'pi', 'rho', 'beta']})
new_results = new_problem.solve(**updated_solve_options)
# test the accuracy of the estimated parameters
keys = ['beta', 'sigma', 'pi', 'rho']
if problem.K3 > 0:
keys.append('gamma')
for key in keys:
np.testing.assert_allclose(getattr(simulation, key), getattr(new_results, key), atol=0, rtol=0.2, err_msg=key)
@pytest.mark.usefixtures('simulated_problem')
def test_bootstrap(simulated_problem: SimulatedProblemFixture) -> None:
"""Test that post-estimation output medians are within 5% parametric bootstrap confidence intervals."""
_, _, problem, solve_options, problem_results = simulated_problem
# create bootstrapped results (use only a few draws and don't iterate for speed)
bootstrapped_results = problem_results.bootstrap(draws=100, seed=0, iteration=Iteration('return'))
# test that post-estimation outputs are within 95% confidence intervals
t = problem.products.market_ids[0]
merger_ids = np.where(problem.products.firm_ids == 1, 0, problem.products.firm_ids)
merger_ids_t = merger_ids[problem.products.market_ids == t]
method_mapping = {
"aggregate elasticities": lambda r: r.compute_aggregate_elasticities(),
"consumer surpluses": lambda r: r.compute_consumer_surpluses(),
"approximate prices": lambda r: r.compute_approximate_prices(merger_ids),
"own elasticities": lambda r: r.extract_diagonals(r.compute_elasticities()),
"aggregate elasticity in t": lambda r: r.compute_aggregate_elasticities(market_id=t),
"consumer surplus in t": lambda r: r.compute_consumer_surpluses(market_id=t),
"approximate prices in t": lambda r: r.compute_approximate_prices(merger_ids_t, market_id=t)
}
for name, method in method_mapping.items():
values = method(problem_results)
bootstrapped_values = method(bootstrapped_results)
median = np.median(values)
bootstrapped_medians = np.nanmedian(bootstrapped_values, axis=range(1, bootstrapped_values.ndim))
lb, ub = np.percentile(bootstrapped_medians, [2.5, 97.5])
np.testing.assert_array_less(np.squeeze(lb), np.squeeze(median) + 1e-14, err_msg=name)
np.testing.assert_array_less(np.squeeze(median), np.squeeze(ub) + 1e-14, err_msg=name)
@pytest.mark.usefixtures('simulated_problem')
def test_bootstrap_se(simulated_problem: SimulatedProblemFixture) -> None:
"""Test that bootstrapped SEs are close to analytic ones. Or at least the same order of magnitude -- especially for
large numbers of RCs they may not necessarily be very close to each other.
"""
_, _, _, _, problem_results = simulated_problem
# compute bootstrapped results (ignore supply side iteration because we will only use the parameter draws)
bootstrapped_results = problem_results.bootstrap(draws=1000, seed=0, iteration=Iteration('return'))
# compare SEs
for key in ['sigma', 'pi', 'rho', 'beta', 'gamma']:
analytic_se = np.nan_to_num(getattr(problem_results, f'{key}_se'))
bootstrapped_se = getattr(bootstrapped_results, f'bootstrapped_{key}').std(axis=0)
np.testing.assert_allclose(analytic_se, bootstrapped_se, atol=0.001, rtol=0.5, err_msg=key)
@pytest.mark.usefixtures('simulated_problem')
def test_result_serialization(simulated_problem: SimulatedProblemFixture) -> None:
"""Test that result objects can be serialized and that their string representations are the same when they are
unpickled.
"""
simulation, simulation_results, problem, solve_options, problem_results = simulated_problem
originals = [
Formulation('x + y', absorb='C(z)', absorb_method='lsmr', absorb_options={'tol': 1e-10}),
Integration('halton', size=10, specification_options={'seed': 0, 'scramble': True}),
Iteration('lm', method_options={'max_evaluations': 100}, compute_jacobian=True),
Optimization('nelder-mead', method_options={'xatol': 1e-5}, compute_gradient=False, universal_display=False),
problem,
simulation,
simulation_results,
problem_results,
problem_results.compute_optimal_instruments(),
problem_results.bootstrap(draws=1, seed=0),
data_to_dict(simulation_results.product_data),
solve_options['micro_moments'],
]
for original in originals:
unpickled = pickle.loads(pickle.dumps(original))
assert str(original) == str(unpickled), str(original)
@pytest.mark.usefixtures('simulated_problem')
@pytest.mark.parametrize('solve_options_update', [
pytest.param({'costs_bounds': (-1e10, 1e10)}, id="non-binding costs bounds"),
pytest.param({'check_optimality': 'both'}, id="Hessian computation")
])
def test_trivial_changes(simulated_problem: SimulatedProblemFixture, solve_options_update: Dict) -> None:
"""Test that solving a problem with arguments that shouldn't give rise to meaningful differences doesn't give rise
to any differences.
"""
simulation, _, problem, solve_options, results = simulated_problem
# solve the problem with the updated options
updated_solve_options = copy.deepcopy(solve_options)
updated_solve_options.update(solve_options_update)
updated_results = problem.solve(**updated_solve_options)
# test that all arrays in the results are essentially identical
for key, result in results.__dict__.items():
if isinstance(result, np.ndarray) and result.dtype != np.object:
if 'hessian' not in key:
np.testing.assert_allclose(result, getattr(updated_results, key), atol=1e-14, rtol=0, err_msg=key)
@pytest.mark.usefixtures('simulated_problem')
def test_parallel(simulated_problem: SimulatedProblemFixture) -> None:
"""Test that solving problems and computing results in parallel gives rise to the same results as when using serial
processing.
"""
_, _, problem, solve_options, results = simulated_problem
# compute marginal costs as a test of results (everything else has already been computed without parallelization)
costs = results.compute_costs()
# solve the problem and compute costs in parallel
with parallel(2):
parallel_results = problem.solve(**solve_options)
parallel_costs = parallel_results.compute_costs()
# test that all arrays in the results are essentially identical
for key, result in results.__dict__.items():
if isinstance(result, np.ndarray) and result.dtype != np.object:
np.testing.assert_allclose(result, getattr(parallel_results, key), atol=1e-14, rtol=0, err_msg=key)
# test that marginal costs are essentially equal
np.testing.assert_allclose(costs, parallel_costs, atol=1e-14, rtol=0)
@pytest.mark.usefixtures('simulated_problem')
@pytest.mark.parametrize(['ED', 'ES', 'absorb_method', 'absorb_options'], [
pytest.param(1, 0, None, None, id="1 demand FE, default method"),
pytest.param(0, 1, None, None, id="1 supply FE, default method"),
pytest.param(1, 1, None, None, id="1 demand- and 1 supply FE, default method"),
pytest.param(2, 0, None, None, id="2 demand FEs, default method"),
pytest.param(0, 2, 'sw', None, id="2 supply FEs, SW"),
pytest.param(3, 1, 'lsmr', None, id="3 demand- and 1 supply FEs, LSMR"),
pytest.param(1, 3, 'map', {'transform': 'cimmino', 'acceleration': 'cg'}, id="1 demand- and 3 supply FEs, MAP-CG"),
])
def test_fixed_effects(
simulated_problem: SimulatedProblemFixture, ED: int, ES: int, absorb_method: Optional[str],
absorb_options: Optional[dict]) -> None:
"""Test that absorbing different numbers of demand- and supply-side fixed effects gives rise to essentially
identical first-stage results as does including indicator variables. Also test that optimal instruments results,
marginal costs, and test statistics remain unchanged.
"""
simulation, simulation_results, problem, solve_options, problem_results = simulated_problem
# there cannot be supply-side fixed effects if there isn't a supply side
if problem.K3 == 0:
ES = 0
if ED == ES == 0:
return pytest.skip("There are no fixed effects to test.")
# configure the optimization routine to only do a few iterations to save time and never get to the point where small
# numerical differences between methods build up into noticeable differences
solve_options = copy.deepcopy(solve_options)
solve_options['optimization'] = Optimization('l-bfgs-b', {'maxfun': 3})
# make product data mutable and add instruments
product_data = {k: simulation_results.product_data[k] for k in simulation_results.product_data.dtype.names}
product_data.update({
'demand_instruments': problem.products.ZD[:, :-problem.K1],
'supply_instruments': problem.products.ZS[:, :-problem.K3]
})
# remove constants and delete associated elements in the initial beta
product_formulations = list(problem.product_formulations).copy()
if ED > 0:
assert product_formulations[0] is not None
constant_indices = [i for i, e in enumerate(product_formulations[0]._expressions) if not e.free_symbols]
solve_options['beta'] = np.delete(solve_options['beta'], constant_indices, axis=0)
product_formulations[0] = Formulation(f'{product_formulations[0]._formula} - 1')
if ES > 0:
assert product_formulations[2] is not None
product_formulations[2] = Formulation(f'{product_formulations[2]._formula} - 1')
# add fixed effect IDs to the data
demand_id_names: List[str] = []
supply_id_names: List[str] = []
state = np.random.RandomState(seed=0)
for side, count, names in [('demand', ED, demand_id_names), ('supply', ES, supply_id_names)]:
for index in range(count):
name = f'{side}_ids{index}'
ids = state.choice(['a', 'b', 'c'], problem.N)
product_data[name] = ids
names.append(name)
# split apart excluded demand-side instruments so they can be included in formulations
instrument_names: List[str] = []
for index, instrument in enumerate(product_data['demand_instruments'].T):
name = f'demand_instrument{index}'
product_data[name] = instrument
instrument_names.append(name)
# build formulas for the IDs
demand_id_formula = ' + '.join(demand_id_names)
supply_id_formula = ' + '.join(supply_id_names)
# solve the first stage of a problem in which the fixed effects are absorbed
solve_options1 = copy.deepcopy(solve_options)
product_formulations1 = product_formulations.copy()
if ED > 0:
assert product_formulations[0] is not None
product_formulations1[0] = Formulation(
product_formulations[0]._formula, demand_id_formula, absorb_method, absorb_options
)
if ES > 0:
assert product_formulations[2] is not None
product_formulations1[2] = Formulation(
product_formulations[2]._formula, supply_id_formula, absorb_method, absorb_options
)
problem1 = Problem(
product_formulations1, product_data, problem.agent_formulation, simulation.agent_data,
distributions=simulation.distributions, epsilon_scale=simulation.epsilon_scale,
costs_type=simulation.costs_type
)
if solve_options1['micro_moments']:
solve_options1['W'] = scipy.linalg.pinv(scipy.linalg.block_diag(
problem1.products.ZD.T @ problem1.products.ZD,
problem1.products.ZS.T @ problem1.products.ZS,
np.eye(len(solve_options1['micro_moments'])),
))
problem_results1 = problem1.solve(**solve_options1)
# solve the first stage of a problem in which fixed effects are included as indicator variables
solve_options2 = copy.deepcopy(solve_options)
product_formulations2 = product_formulations.copy()
if ED > 0:
assert product_formulations[0] is not None
product_formulations2[0] = Formulation(f'{product_formulations[0]._formula} + {demand_id_formula}')
if ES > 0:
assert product_formulations[2] is not None
product_formulations2[2] = Formulation(f'{product_formulations[2]._formula} + {supply_id_formula}')
problem2 = Problem(
product_formulations2, product_data, problem.agent_formulation, simulation.agent_data,
distributions=simulation.distributions, epsilon_scale=simulation.epsilon_scale,
costs_type=simulation.costs_type
)
solve_options2['beta'] = np.r_[
solve_options2['beta'],
np.full((problem2.K1 - solve_options2['beta'].size, 1), np.nan)
]
if solve_options2['micro_moments']:
solve_options2['W'] = scipy.linalg.pinv(scipy.linalg.block_diag(
problem2.products.ZD.T @ problem2.products.ZD,
problem2.products.ZS.T @ problem2.products.ZS,
np.eye(len(solve_options2['micro_moments'])),
))
problem_results2 = problem2.solve(**solve_options2)
# solve the first stage of a problem in which some fixed effects are absorbed and some are included as indicators
if ED == ES == 0:
problem_results3 = problem_results2
else:
solve_options3 = copy.deepcopy(solve_options)
product_formulations3 = product_formulations.copy()
if ED > 0:
assert product_formulations[0] is not None
product_formulations3[0] = Formulation(
f'{product_formulations[0]._formula} + {demand_id_names[0]}', ' + '.join(demand_id_names[1:]) or None
)
if ES > 0:
assert product_formulations[2] is not None
product_formulations3[2] = Formulation(
f'{product_formulations[2]._formula} + {supply_id_names[0]}', ' + '.join(supply_id_names[1:]) or None
)
problem3 = Problem(
product_formulations3, product_data, problem.agent_formulation, simulation.agent_data,
distributions=simulation.distributions, epsilon_scale=simulation.epsilon_scale,
costs_type=simulation.costs_type
)
solve_options3['beta'] = np.r_[
solve_options3['beta'],
np.full((problem3.K1 - solve_options3['beta'].size, 1), np.nan)
]
if solve_options3['micro_moments']:
solve_options3['W'] = scipy.linalg.pinv(scipy.linalg.block_diag(
problem3.products.ZD.T @ problem3.products.ZD,
problem3.products.ZS.T @ problem3.products.ZS,
np.eye(len(solve_options3['micro_moments'])),
))
problem_results3 = problem3.solve(**solve_options3)
# compute optimal instruments (use only two draws for speed; accuracy is not a concern here)
Z_results1 = problem_results1.compute_optimal_instruments(draws=2, seed=0)
Z_results2 = problem_results2.compute_optimal_instruments(draws=2, seed=0)
Z_results3 = problem_results3.compute_optimal_instruments(draws=2, seed=0)
# compute marginal costs
costs1 = problem_results1.compute_costs()
costs2 = problem_results2.compute_costs()
costs3 = problem_results3.compute_costs()
J1 = problem_results1.run_hansen_test()
J2 = problem_results2.run_hansen_test()
J3 = problem_results3.run_hansen_test()
LR1 = problem_results1.run_distance_test(problem_results)
LR2 = problem_results2.run_distance_test(problem_results)
LR3 = problem_results3.run_distance_test(problem_results)
LM1 = problem_results1.run_lm_test()
LM2 = problem_results2.run_lm_test()
LM3 = problem_results3.run_lm_test()
wald1 = problem_results1.run_wald_test(
problem_results1.parameters[:2], np.eye(problem_results1.parameters.size)[:2]
)
wald2 = problem_results2.run_wald_test(
problem_results2.parameters[:2], np.eye(problem_results2.parameters.size)[:2]
)
wald3 = problem_results3.run_wald_test(
problem_results3.parameters[:2], np.eye(problem_results3.parameters.size)[:2]
)
# choose tolerances
atol = 1e-8
rtol = 1e-5
# test that all problem results expected to be identical are essentially identical, except for standard errors under
# micro moments, which are expected to be slightly different
problem_results_keys = [
'theta', 'sigma', 'pi', 'rho', 'beta', 'gamma', 'sigma_se', 'pi_se', 'rho_se', 'beta_se', 'gamma_se',
'delta', 'tilde_costs', 'xi', 'omega', 'xi_by_theta_jacobian', 'omega_by_theta_jacobian', 'objective',
'gradient', 'projected_gradient'
]
for key in problem_results_keys:
if key.endswith('_se') and solve_options['micro_moments']:
continue
result1 = getattr(problem_results1, key)
result2 = getattr(problem_results2, key)
result3 = getattr(problem_results3, key)
if key in {'beta', 'gamma', 'beta_se', 'gamma_se'}:
result2 = result2[:result1.size]
result3 = result3[:result1.size]
np.testing.assert_allclose(result1, result2, atol=atol, rtol=rtol, err_msg=key, equal_nan=True)
np.testing.assert_allclose(result1, result3, atol=atol, rtol=rtol, err_msg=key, equal_nan=True)
# test that all optimal instrument results expected to be identical are essentially identical
Z_results_keys = [
'demand_instruments', 'supply_instruments', 'inverse_covariance_matrix', 'expected_xi_by_theta_jacobian',
'expected_omega_by_theta_jacobian'
]
for key in Z_results_keys:
result1 = getattr(Z_results1, key)
result2 = getattr(Z_results2, key)
result3 = getattr(Z_results3, key)
np.testing.assert_allclose(result1, result2, atol=atol, rtol=rtol, err_msg=key)
np.testing.assert_allclose(result1, result3, atol=atol, rtol=rtol, err_msg=key)
# test that marginal costs and test statistics are essentially identical
np.testing.assert_allclose(costs1, costs2, atol=atol, rtol=rtol)
np.testing.assert_allclose(costs1, costs3, atol=atol, rtol=rtol)
np.testing.assert_allclose(J1, J2, atol=atol, rtol=rtol)
np.testing.assert_allclose(J1, J3, atol=atol, rtol=rtol)
np.testing.assert_allclose(LR1, LR2, atol=atol, rtol=rtol)
np.testing.assert_allclose(LR1, LR3, atol=atol, rtol=rtol)
np.testing.assert_allclose(LM1, LM2, atol=atol, rtol=rtol)
np.testing.assert_allclose(LM1, LM3, atol=atol, rtol=rtol)
np.testing.assert_allclose(wald1, wald2, atol=atol, rtol=rtol)
    np.testing.assert_allclose(wald1, wald3, atol=atol, rtol=rtol)
import warnings
import numpy as np
import palpy
from rubin_sim.utils import Site, m5_flat_sed
from .baseStacker import BaseStacker
__all__ = ['NormAirmassStacker', 'ParallaxFactorStacker', 'HourAngleStacker',
'FilterColorStacker', 'ZenithDistStacker', 'ParallacticAngleStacker',
'DcrStacker', 'FiveSigmaStacker', 'SaturationStacker']
class SaturationStacker(BaseStacker):
"""Calculate the saturation limit of a point source. Assumes Guassian PSF.
Parameters
----------
pixscale : float, optional (0.2)
Arcsec per pixel
    gain : float, optional (1.0)
electrons per adu
saturation_e : float, optional (150e3)
The saturation level in electrons
zeropoints : dict-like, optional (None)
The zeropoints for the telescope. Keys should be str with filter names, values in mags.
If None, will use Rubin-like zeropoints.
km : dict-like, optional (None)
Atmospheric extinction values. Keys should be str with filter names.
        If None, will use Rubin-like extinction values.
"""
colsAdded = ['saturation_mag']
def __init__(self, seeingCol='seeingFwhmEff', skybrightnessCol='skyBrightness',
exptimeCol='visitExposureTime', nexpCol='numExposures',
filterCol='filter', airmassCol='airmass',
saturation_e=150e3, zeropoints=None, km=None, pixscale=0.2, gain=1.0):
self.units = ['mag']
self.colsReq = [seeingCol, skybrightnessCol, exptimeCol, nexpCol, filterCol, airmassCol]
self.seeingCol = seeingCol
self.skybrightnessCol = skybrightnessCol
self.exptimeCol = exptimeCol
self.nexpCol = nexpCol
self.filterCol = filterCol
self.airmassCol = airmassCol
self.saturation_adu = saturation_e/gain
        self.pixscale = pixscale
names = ['u', 'g', 'r', 'i', 'z', 'y']
types = [float]*6
if zeropoints is None:
# Note these zeropoints are calculating the number of *electrons* per second (thus gain=1)
# https://github.com/lsst-pst/syseng_throughputs/blob/master/notebooks/Syseng%20Throughputs%20Repo%20Demo.ipynb
self.zeropoints = np.array([27.03, 28.38, 28.15, 27.86, 27.46, 26.68]).view(list(zip(names, types)))
self.saturation_adu = saturation_e
else:
self.zeropoints = zeropoints
if km is None:
# Also from notebook above
self.km = np.array([0.491, 0.213, 0.126, 0.096, 0.069, 0.170]).view(list(zip(names, types)))
else:
self.km = km
def _run(self, simData, cols_present=False):
for filtername in np.unique(simData[self.filterCol]):
in_filt = np.where(simData[self.filterCol] == filtername)[0]
# Calculate the length of the on-sky time per EXPOSURE
exptime = simData[self.exptimeCol][in_filt] / simData[self.nexpCol][in_filt]
# Calculate sky counts per pixel per second from skybrightness + zeropoint (e/1s)
sky_counts = 10.**(0.4*(self.zeropoints[filtername]
- simData[self.skybrightnessCol][in_filt])) * self.pixscale**2
# Total sky counts in each exposure
sky_counts = sky_counts * exptime
# The counts available to the source (at peak) in each exposure is the
# difference between saturation and sky
remaining_counts_peak = (self.saturation_adu - sky_counts)
# Now to figure out how many counts there would be total, if there are that many in the peak
sigma = simData[self.seeingCol][in_filt]/2.354
source_counts = remaining_counts_peak * 2.*np.pi*(sigma/self.pixscale)**2
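            # Worked example of the Gaussian-PSF conversion above (hedged sketch; the numbers
            # are illustrative only, not Rubin specifications): for a seeing FWHM of 0.8 arcsec,
            # sigma ~= 0.8/2.354 ~= 0.34 arcsec, and with pixscale = 0.2 arcsec/pixel the
            # peak-to-total factor is 2*pi*(0.34/0.2)**2 ~= 18, i.e. a source holds roughly
            # 18x more counts in its whole profile than in its brightest pixel.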
# source counts = counts per exposure (expTimeCol / nexp)
# Translate to counts per second, to apply zeropoint
count_rate = source_counts / exptime
simData['saturation_mag'][in_filt] = -2.5*np.log10(count_rate) + self.zeropoints[filtername]
# Airmass correction
simData['saturation_mag'][in_filt] -= self.km[filtername]*(simData[self.airmassCol][in_filt] - 1.)
return simData
class FiveSigmaStacker(BaseStacker):
"""
Calculate the 5-sigma limiting depth for a point source in the given conditions.
This is generally not needed, unless the m5 parameters have been updated
or m5 was not previously calculated.
"""
colsAdded = ['m5_simsUtils']
def __init__(self, airmassCol='airmass', seeingCol='seeingFwhmEff', skybrightnessCol='skyBrightness',
filterCol='filter', exptimeCol='visitExposureTime'):
self.units = ['mag']
self.colsReq = [airmassCol, seeingCol, skybrightnessCol, filterCol, exptimeCol]
self.airmassCol = airmassCol
self.seeingCol = seeingCol
self.skybrightnessCol = skybrightnessCol
self.filterCol = filterCol
self.exptimeCol = exptimeCol
def _run(self, simData, cols_present=False):
if cols_present:
            # Column already present in data; assume it is correct and does not need recalculating.
return simData
filts = np.unique(simData[self.filterCol])
for filtername in filts:
infilt = np.where(simData[self.filterCol] == filtername)
simData['m5_simsUtils'][infilt] = m5_flat_sed(filtername,
simData[infilt][self.skybrightnessCol],
simData[infilt][self.seeingCol],
simData[infilt][self.exptimeCol],
simData[infilt][self.airmassCol])
return simData
class NormAirmassStacker(BaseStacker):
"""Calculate the normalized airmass for each opsim pointing.
"""
colsAdded = ['normairmass']
def __init__(self, airmassCol='airmass', decCol='fieldDec',
degrees=True, telescope_lat = -30.2446388):
self.units = ['X / Xmin']
self.colsReq = [airmassCol, decCol]
self.airmassCol = airmassCol
self.decCol = decCol
self.telescope_lat = telescope_lat
self.degrees = degrees
def _run(self, simData, cols_present=False):
"""Calculate new column for normalized airmass."""
# Run method is required to calculate column.
# Driver runs getColInfo to know what columns are needed from db & which are calculated,
# then gets data from db and then calculates additional columns (via run methods here).
if cols_present:
# Column already present in data; assume it is correct and does not need recalculating.
return simData
dec = simData[self.decCol]
if self.degrees:
dec = np.radians(dec)
min_z_possible = np.abs(dec - np.radians(self.telescope_lat))
min_airmass_possible = 1./np.cos(min_z_possible)
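        # Worked example (hedged, purely illustrative): a field that can transit the zenith
        # (dec equal to the telescope latitude) has min_airmass_possible = 1, so its
        # normairmass equals the raw airmass; a field whose best zenith distance is 60 deg
        # has min_airmass_possible = 1/cos(60 deg) = 2, halving its normalized airmass.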
simData['normairmass'] = simData[self.airmassCol] / min_airmass_possible
return simData
class ZenithDistStacker(BaseStacker):
"""Calculate the zenith distance for each pointing.
If 'degrees' is True, then assumes altCol is in degrees and returns degrees.
If 'degrees' is False, assumes altCol is in radians and returns radians.
"""
colsAdded = ['zenithDistance']
def __init__(self, altCol='altitude', degrees=True):
self.altCol = altCol
self.degrees = degrees
if self.degrees:
self.units = ['degrees']
else:
            self.units = ['radians']
self.colsReq = [self.altCol]
def _run(self, simData, cols_present=False):
"""Calculate new column for zenith distance."""
if cols_present:
# Column already present in data; assume it is correct and does not need recalculating.
return simData
if self.degrees:
simData['zenithDistance'] = 90.0 - simData[self.altCol]
else:
simData['zenithDistance'] = np.pi/2.0 - simData[self.altCol]
return simData
class ParallaxFactorStacker(BaseStacker):
"""Calculate the parallax factors for each opsim pointing. Output parallax factor in arcseconds.
"""
colsAdded = ['ra_pi_amp', 'dec_pi_amp']
def __init__(self, raCol='fieldRA', decCol='fieldDec', dateCol='observationStartMJD', degrees=True):
self.raCol = raCol
self.decCol = decCol
self.dateCol = dateCol
self.units = ['arcsec', 'arcsec']
self.colsReq = [raCol, decCol, dateCol]
self.degrees = degrees
def _gnomonic_project_toxy(self, RA1, Dec1, RAcen, Deccen):
"""Calculate x/y projection of RA1/Dec1 in system with center at RAcen, Deccenp.
Input radians.
"""
# also used in Global Telescope Network website
cosc = np.sin(Deccen) * np.sin(Dec1) + np.cos(Deccen) * np.cos(Dec1) * np.cos(RA1-RAcen)
x = np.cos(Dec1) * np.sin(RA1-RAcen) / cosc
y = (np.cos(Deccen)*np.sin(Dec1) - np.sin(Deccen)*np.cos(Dec1)*np.cos(RA1-RAcen)) / cosc
return x, y
def _run(self, simData, cols_present=False):
if cols_present:
# Column already present in data; assume it is correct and does not need recalculating.
return simData
ra_pi_amp = np.zeros(np.size(simData), dtype=[('ra_pi_amp', 'float')])
dec_pi_amp = np.zeros(np.size(simData), dtype=[('dec_pi_amp', 'float')])
ra_geo1 = np.zeros(np.size(simData), dtype='float')
dec_geo1 = np.zeros(np.size(simData), dtype='float')
ra_geo = np.zeros(np.size(simData), dtype='float')
dec_geo = np.zeros(np.size(simData), dtype='float')
ra = simData[self.raCol]
dec = simData[self.decCol]
if self.degrees:
ra = np.radians(ra)
            dec = np.radians(dec)
import argparse
import json
import sys
import numpy as np
import pyqsp
from pyqsp import angle_sequence, response
from pyqsp.phases import phase_generators
from pyqsp.poly import (StringPolynomial, TargetPolynomial,
polynomial_generators)
# -----------------------------------------------------------------------------
class VAction(argparse.Action):
def __call__(self, parser, args, values, option_string=None):
curval = getattr(args, self.dest, 0) or 0
values = values.count('v') + 1
setattr(args, self.dest, values + curval)
# -----------------------------------------------------------------------------
def CommandLine(args=None, arglist=None):
'''
Main command line. Accepts args, to allow for simple unit testing.
'''
import pkg_resources # part of setuptools
version = pkg_resources.require("pyqsp")[0].version
help_text = """usage: pyqsp [options] cmd
Version: {}
Commands:
poly2angles - compute QSP phase angles for the specified polynomial (use --poly)
hamsim - compute QSP phase angles for Hamiltonian simulation using the Jacobi-Anger expansion of exp(-i tau sin(2 theta))
invert - compute QSP phase angles for matrix inversion, i.e. a polynomial approximation to 1/a, for given delta and epsilon parameter values
angles - generate QSP phase angles for the specified --seqname and --seqargs
poly - generate QSP phase angles for the specified --polyname and --polyargs, e.g. sign and threshold polynomials
polyfunc - generate QSP phase angles for the specified --func and --polydeg using tensorflow + keras optimization method (--tf)
response - generate QSP polynomial response functions for the QSP phase angles specified by --phiset
Examples:
pyqsp --poly=-1,0,2 poly2angles
pyqsp --poly=-1,0,2 --plot poly2angles
pyqsp --signal_operator=Wz --poly=0,0,0,1 --plot poly2angles
pyqsp --plot --tau 10 hamsim
pyqsp --plot --tolerance=0.01 --seqargs 3 invert
pyqsp --plot-npts=4000 --plot-positive-only --plot-magnitude --plot --seqargs=1000,1.0e-20 --seqname fpsearch angles
pyqsp --plot-npts=100 --plot-magnitude --plot --seqargs=23 --seqname erf_step angles
pyqsp --plot-npts=100 --plot-positive-only --plot --seqargs=23 --seqname erf_step angles
pyqsp --plot-real-only --plot --polyargs=20,20 --polyname poly_thresh poly
pyqsp --plot-positive-only --plot --polyargs=19,10 --plot-real-only --polyname poly_sign poly
pyqsp --plot-positive-only --plot-real-only --plot --polyargs 20,3.5 --polyname gibbs poly
pyqsp --plot-positive-only --plot-real-only --plot --polyargs 20,0.2,0.9 --polyname efilter poly
pyqsp --plot-positive-only --plot --polyargs=19,10 --plot-real-only --polyname poly_sign --method tf poly
pyqsp --plot --func "np.cos(3*x)" --polydeg 6 polyfunc
pyqsp --plot --func "np.cos(3*x)" --polydeg 6 --plot-qsp-model polyfunc
pyqsp --plot-positive-only --plot-real-only --plot --polyargs 20,3.5 --polyname gibbs --plot-qsp-model poly
pyqsp --polydeg 16 --measurement="z" --func="-1+np.sign(1/np.sqrt(2)-x)+ np.sign(1/np.sqrt(2)+x)" --plot polyfunc
""".format(version)
parser = argparse.ArgumentParser(
description=help_text,
formatter_class=argparse.RawTextHelpFormatter)
def float_list(value):
try:
if not ',' in value and value.startswith("[") and value.endswith("]"):
fstrset = value[1:-1].split(" ")
fstrset = [x for x in fstrset if x]
flist = list(map(float, fstrset))
return flist
return list(map(float, value.split(",")))
except Exception as err:
print(
f"[pyqsp.float_list] failed to parse float list, err={err} from {value}")
raise
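    # Note on float_list (grounded in the parser above; example values are illustrative):
    # it accepts either comma-separated or bracketed space-separated floats, e.g.
    #     "0,0,0,1"  -> [0.0, 0.0, 0.0, 1.0]
    #     "[10 0.5]" -> [10.0, 0.5]
    # matching the --poly/--seqargs/--polyargs/--phiset examples in the help text above.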
parser.add_argument("cmd", help="command")
parser.add_argument(
'-v',
"--verbose",
nargs=0,
help="increase output verbosity (add more -v to increase versbosity)",
action=VAction,
dest='verbose')
parser.add_argument("-o", "--output", help="output filename", default="")
parser.add_argument(
"--signal_operator",
help="QSP sequence signal_operator, either Wx (signal is X rotations) or Wz (signal is Z rotations)",
type=str,
default="Wx")
parser.add_argument(
"--plot",
help="generate QSP response plot",
action="store_true")
parser.add_argument(
"--hide-plot",
help="do not show plot (but it may be saved to a file if --output is specified)",
action="store_true")
parser.add_argument(
"--return-angles",
help="return QSP phase angles to caller",
action="store_true")
parser.add_argument(
"--poly",
help="comma delimited list of floating-point coeficients for polynomial, as const, a, a^2, ...",
action="store",
type=float_list)
parser.add_argument(
"--func",
help="for tf method, numpy expression specifying ideal function (of x) to be approximated by a polynomial, e.g. 'np.cos(3 * x)'",
type=str)
parser.add_argument(
"--polydeg",
help="for tf method, degree of polynomial to use in generating approximation of specified function (see --func)",
type=int)
parser.add_argument(
"--tau",
help="time value for Hamiltonian simulation (hamsim command)",
type=float,
default=100)
parser.add_argument(
"--epsilon",
help="parameter for polynomial approximation to 1/a, giving bound on error",
type=float,
default=0.3)
parser.add_argument(
"--seqname",
help="name of QSP phase angle sequence to generate using the 'angles' command, e.g. fpsearch",
type=str,
default=None)
parser.add_argument(
"--seqargs",
help="arguments to the phase angles generated by seqname (e.g. length,delta,gamma for fpsearch)",
action="store",
type=float_list)
parser.add_argument(
"--polyname",
help="name of polynomial generate using the 'poly' command, e.g. 'sign'",
type=str,
default=None)
parser.add_argument(
"--polyargs",
help="arguments to the polynomial generated by poly (e.g. degree,kappa for 'sign')",
action="store",
type=float_list)
parser.add_argument(
"--plot-magnitude",
help="when plotting only show magnitude, instead of separate imaginary and real components",
action="store_true")
parser.add_argument(
"--plot-probability",
help="when plotting only show squared magnitude, instead of separate imaginary and real components",
action="store_true")
parser.add_argument(
"--plot-real-only",
help="when plotting only real component, and not imaginary",
action="store_true")
parser.add_argument(
"--title",
help="plot title",
type=str,
default=None)
parser.add_argument(
"--measurement",
help="measurement basis if using the polyfunc argument",
type=str,
default=None)
parser.add_argument(
"--output-json",
help="output QSP phase angles in JSON format",
action="store_true")
parser.add_argument(
"--plot-positive-only",
help="when plotting only a-values (x-axis) from 0 to +1, instead of from -1 to +1 ",
action="store_true")
parser.add_argument(
"--plot-tight-y",
help="when plotting scale y-axis tightly to real part of data",
action="store_true")
parser.add_argument(
"--plot-npts",
help="number of points to use in plotting",
type=int,
default=400)
parser.add_argument(
"--tolerance",
help="error tolerance for phase angle optimizer",
type=float,
default=0.1)
parser.add_argument(
"--method",
help="method to use for qsp phase angle generation, either 'laurent' (default) or 'tf' (for tensorflow + keras)",
type=str,
default='laurent')
parser.add_argument(
"--plot-qsp-model",
help="show qsp_model version of response plot instead of the default plot",
action="store_true")
parser.add_argument(
"--phiset",
help="comma delimited list of QSP phase angles, to be used in the 'response' command",
action="store",
type=float_list)
parser.add_argument(
"--nepochs",
help="number of epochs to use in tensorflow optimization",
type=int,
default=5000)
parser.add_argument(
"--npts-theta",
help="number of discretized values of theta to use in tensorflow optimization",
type=int,
default=30)
if not args:
args = parser.parse_args(arglist)
phiset = None
plot_args = dict(plot_magnitude=args.plot_magnitude,
plot_probability=args.plot_probability,
plot_positive_only=args.plot_positive_only,
plot_real_only=args.plot_real_only,
plot_tight_y=args.plot_tight_y,
npts=args.plot_npts,
show=(not args.hide_plot),
show_qsp_model_plot=args.plot_qsp_model,
)
qspp_args = dict(signal_operator=args.signal_operator,
method=args.method,
tolerance=args.tolerance,
nepochs=args.nepochs,
npts_theta=args.npts_theta,
)
if args.cmd == "poly2angles":
coefs = args.poly
if not coefs:
print(
f"[pyqsp.main] must specify polynomial coeffients using --poly, e.g. --poly -1,0,2")
sys.exit(0)
if isinstance(coefs, str):
coefs = list(map(float, coefs.split(",")))
print(f"[pyqsp] polynomial coefficients={coefs}")
phiset = angle_sequence.QuantumSignalProcessingPhases(
coefs, **qspp_args)
if args.plot:
response.PlotQSPResponse(
phiset,
pcoefs=coefs,
signal_operator=args.signal_operator,
**plot_args)
elif args.cmd == "hamsim":
pg = pyqsp.poly.PolyCosineTX()
pcoefs, scale = pg.generate(
*args.seqargs,
return_coef=True,
ensure_bounded=True,
return_scale=True)
phiset = angle_sequence.QuantumSignalProcessingPhases(
pcoefs, **qspp_args)
if args.plot:
response.PlotQSPResponse(
phiset,
target=lambda x: scale * np.cos(args.seqargs[0] * x),
signal_operator="Wx",
title="Hamiltonian Simultation (Cosine)",
**plot_args)
pg = pyqsp.poly.PolySineTX()
pcoefs, scale = pg.generate(
*args.seqargs,
return_coef=True,
ensure_bounded=True,
return_scale=True)
phiset = angle_sequence.QuantumSignalProcessingPhases(
pcoefs, **qspp_args)
if args.plot:
response.PlotQSPResponse(
phiset,
target=lambda x: scale * np.sin(args.seqargs[0] * x),
signal_operator="Wx",
title="Hamiltonian Simultation (Sine)",
**plot_args)
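        # For reference (hedged note; these are standard identities, not taken from this
        # file): the Jacobi-Anger expansions behind PolyCosineTX/PolySineTX, with T_k the
        # Chebyshev polynomials and J_k Bessel functions of the first kind, are
        #     cos(t*x) = J_0(t) + 2*sum_{k>=1} (-1)**k * J_{2k}(t)   * T_{2k}(x)
        #     sin(t*x) =          2*sum_{k>=0} (-1)**k * J_{2k+1}(t) * T_{2k+1}(x)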
elif args.cmd == "fpsearch":
pg = pyqsp.phases.FPSearch()
phiset = pg.generate(*args.seqargs)
if args.plot:
response.PlotQSPResponse(
phiset,
signal_operator="Wx",
measurement="z",
title="Oblivious amplification",
**plot_args)
elif args.cmd == "invert":
pg = pyqsp.poly.PolyOneOverX()
pcoefs, scale = pg.generate(
*args.seqargs,
return_coef=True,
ensure_bounded=True,
return_scale=True)
phiset = angle_sequence.QuantumSignalProcessingPhases(
pcoefs, **qspp_args)
if args.plot:
response.PlotQSPResponse(
phiset,
target=lambda x: scale * 1 / x,
signal_operator="Wx",
measurement="z",
title="Inversion",
**plot_args)
elif args.cmd == "gibbs":
pg = pyqsp.poly.PolyGibbs()
pcoefs, scale = pg.generate(
*args.seqargs,
ensure_bounded=True,
return_scale=True)
phiset = angle_sequence.QuantumSignalProcessingPhases(
pcoefs, **qspp_args)
if args.plot:
response.PlotQSPResponse(
phiset,
target=lambda x: scale * np.exp(-args.seqargs[1] * x),
signal_operator="Wx",
title="Gibbs distribution",
**plot_args)
elif args.cmd == "efilter":
pg = pyqsp.poly.PolyEigenstateFiltering()
pcoefs, scale = pg.generate(
*args.seqargs,
ensure_bounded=True,
return_scale=True)
phiset = angle_sequence.QuantumSignalProcessingPhases(
pcoefs, **qspp_args)
if args.plot:
delta = args.seqargs[1] / 2.
response.PlotQSPResponse(
phiset,
target=lambda x: scale *
(np.sign(x + delta) - np.sign(x - delta)) / 2,
signal_operator="Wx",
title="Eigenstate filtering",
**plot_args)
elif args.cmd == "relu":
pg = pyqsp.poly.PolySoftPlus()
pcoefs, scale = pg.generate(
*args.seqargs,
ensure_bounded=True,
return_scale=True)
phiset = angle_sequence.QuantumSignalProcessingPhases(
pcoefs, **qspp_args)
if args.plot:
response.PlotQSPResponse(
phiset,
target=lambda x: scale * np.maximum(x - args.seqargs[1], 0.),
signal_operator="Wx",
title="ReLU Function",
**plot_args)
elif args.cmd == "poly_sign":
pg = pyqsp.poly.PolySign()
pcoefs, scale = pg.generate(
*args.seqargs,
ensure_bounded=True,
return_scale=True)
phiset = angle_sequence.QuantumSignalProcessingPhases(
pcoefs, **qspp_args)
if args.plot:
response.PlotQSPResponse(
phiset,
target=lambda x: scale * np.sign(x),
signal_operator="Wx",
title="Sign Function",
**plot_args)
elif args.cmd == "poly_thresh":
pg = pyqsp.poly.PolyThreshold()
pcoefs, scale = pg.generate(
*args.seqargs,
ensure_bounded=True,
return_scale=True)
phiset = angle_sequence.QuantumSignalProcessingPhases(
pcoefs, **qspp_args)
if args.plot:
response.PlotQSPResponse(
phiset,
target=lambda x: scale *
                (np.sign(x + 0.5) - np.sign(x - 0.5)) / 2,
"""
Optimal power flow models for hybrid AC/DC microgrids
@author: <NAME>
@email: <EMAIL>
Some modelling assumptions should be noted.
1) The energy losses on the bi-directional converters are modelled simply, as in
[1]Concerted action on computer modeling and simulation
[2]Energy management and operation modelling of hybrid AC–DC microgrid
There are more complex modelling methods for different types of converters; see the following references for details.
[1]Mathematical Efficiency Modeling of Static Power Converters
[2]Power Loss Modeling of Isolated AC/DC Converter
These variations in the mathematical modelling result in significant differences in the mathematical properties of the resulting problems.
2) Even renewable energy sources are assigned an operational cost, e.g., linear in this case.
3) The power losses are ignored in the real-time operation.
@Reference:
[1]
"""
from pypower import runopf
from gurobipy import *
from numpy import zeros, c_, shape, ix_, ones, r_, arange, sum, diag, concatenate, power
from scipy.sparse import csr_matrix as sparse
from scipy.sparse import hstack, vstack, diags
from distribution_system_optimization.test_cases import case33
from distribution_system_optimization.data_format import case_converters
# The following cases, data formats are imported from the Pypower package.
from pypower import case6ww, case9, case30, case118, case300
from pypower.idx_brch import F_BUS, T_BUS, BR_R, BR_X, TAP, SHIFT, BR_STATUS, RATE_A
from pypower.idx_cost import MODEL, NCOST, PW_LINEAR, COST, POLYNOMIAL
from pypower.idx_bus import BUS_TYPE, REF, VA, VM, PD, GS, VMAX, VMIN, BUS_I, QD
from pypower.idx_gen import GEN_BUS, VG, PG, QG, PMAX, PMIN, QMAX, QMIN
from pypower.ext2int import ext2int
def main(Case_AC=None, Case_DC=None, Converters=None):
"""
:param Case_AC: AC case
:param Case_DC: DC case
:param Converters: Bi-directional converters
:return: Obtained solutions for hybrid AC DC networks
"""
# 1) Problem formulation
model_AC = AC_network_formulation(Case_AC)
model_DC = DC_network_formulation(Case_DC)
# 2) Solve the initial problems
sol_AC = AC_opf_solver(model_AC)
sol_DC = DC_opf_solver(model_DC)
# 3) Connect two systems via the BIC networks
model_converters = BIC_network_formulation(model_AC, model_DC, Converters)
# 4) Solve the merged functions
# 4.1) Solve the problem
return model_converters
def DC_network_formulation(case):
"""
:param case:
:return:
"""
case = ext2int(case)
baseMVA, bus, gen, branch, gencost = case["baseMVA"], case["bus"], case["gen"], case["branch"], case["gencost"]
nb = shape(case['bus'])[0] ## number of buses
nl = shape(case['branch'])[0] ## number of branches
ng = shape(case['gen'])[0] ## number of dispatchable injections
f = branch[:, F_BUS] ## list of "from" buses
t = branch[:, T_BUS] ## list of "to" buses
i = range(nl) ## double set of row indices
# Connection matrix
Cf = sparse((ones(nl), (i, f)), (nl, nb))
Ct = sparse((ones(nl), (i, t)), (nl, nb))
Cg = sparse((ones(ng), (gen[:, GEN_BUS], range(ng))), (nb, ng))
# Modify the branch resistance
Branch_R = branch[:, BR_X]
for i in range(nl):
if Branch_R[i] <= 0:
Branch_R[i] = max(Branch_R)
Cf = Cf.T
Ct = Ct.T
# Obtain the boundary information
Slmax = branch[:, RATE_A] / baseMVA
Pij_l = -Slmax
Iij_l = zeros(nl)
Vm_l = power(bus[:, VMIN], 2)
Pg_l = gen[:, PMIN] / baseMVA
Pij_u = Slmax
Iij_u = Slmax
# Vm_u = [max(turn_to_power(bus[:, VMAX], 2))] * nb
Vm_u = power(bus[:, VMAX], 2)
Pg_u = gen[:, PMAX] / baseMVA
# Pg_l = -Pg_u
lx = concatenate([Pij_l, Iij_l, Vm_l, Pg_l])
ux = concatenate([Pij_u, Iij_u, Vm_u, Pg_u])
# KCL equation
Aeq_p = hstack([Ct - Cf, -diag(Ct * Branch_R) * Ct, zeros((nb, nb)), Cg])
beq_p = bus[:, PD] / baseMVA
# KVL equation
Aeq_KVL = hstack([-2 * diags(Branch_R), diags(power(Branch_R, 2)), Cf.T - Ct.T, zeros((nl, ng))])
beq_KVL = zeros(nl)
Aeq = vstack([Aeq_p, Aeq_KVL])
Aeq = Aeq.todense()
beq = concatenate([beq_p, beq_KVL])
neq = len(beq)
nx = 2 * nl + nb + ng
Q = zeros(nx)
c = zeros(nx)
c0 = zeros(nx)
for i in range(ng):
Q[i + 2 * nl + nb] = gencost[i, 4] * baseMVA * baseMVA
c[i + 2 * nl + nb] = gencost[i, 5] * baseMVA
c0[i + 2 * nl + nb] = gencost[i, 6]
model = {"Q": Q,
"c": c,
"c0": c0,
"Aeq": Aeq,
"beq": beq,
"lx": lx,
"ux": ux,
"nx": nx,
"nb": nb,
"nl": nl,
"ng": ng,
"f": f,
"neq": neq}
return model
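# Hedged note on the dictionary returned above: it encodes a box-constrained QP over the
# stacked variable x = [Pij, Iij, Vm, Pg] (the concatenation order used for lx/ux),
#     minimise   0.5 * x' diag(Q) x + c' x + c0
#     subject to Aeq x = beq,  lx <= x <= ux,
# which is the form the DC_opf_solver referenced in main() is expected to consume (e.g. via
# the gurobipy import at the top of this file).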
def AC_network_formulation(case):
"""
:param case:
:return:
"""
case = ext2int(case)
baseMVA, bus, gen, branch, gencost = case["baseMVA"], case["bus"], case["gen"], case["branch"], case["gencost"]
nb = shape(case['bus'])[0] ## number of buses
nl = shape(case['branch'])[0] ## number of branches
ng = shape(case['gen'])[0] ## number of dispatchable injections
f = branch[:, F_BUS] ## list of "from" buses
t = branch[:, T_BUS] ## list of "to" buses
i = range(nl) ## double set of row indices
# Connection matrix
Cf = sparse((ones(nl), (i, f)), (nl, nb))
Ct = sparse((ones(nl), (i, t)), (nl, nb))
Cg = sparse((ones(ng), (gen[:, GEN_BUS], range(ng))), (nb, ng))
Branch_R = branch[:, BR_R]
Branch_X = branch[:, BR_X]
Cf = Cf.T
Ct = Ct.T
# Obtain the boundary information
Slmax = branch[:, RATE_A] / baseMVA
Pij_l = -Slmax
Qij_l = -Slmax
Iij_l = zeros(nl)
Vm_l = power(bus[:, VMIN], 2)
Pg_l = gen[:, PMIN] / baseMVA
Qg_l = gen[:, QMIN] / baseMVA
Pij_u = Slmax
Qij_u = Slmax
Iij_u = Slmax
Vm_u = 2 * power(bus[:, VMAX], 2)
Pg_u = 2 * gen[:, PMAX] / baseMVA
Qg_u = gen[:, QMAX] / baseMVA
# Problem formulation
lx = concatenate([Pij_l, Qij_l, Iij_l, Vm_l, Pg_l, Qg_l])
ux = concatenate([Pij_u, Qij_u, Iij_u, Vm_u, Pg_u, Qg_u])
# KCL equation, active power
    Aeq_p = hstack([Ct - Cf, zeros((nb, nl)), -diag(Ct * Branch_R) * Ct, zeros((nb, nb)), Cg, zeros((nb, ng))])
"""General ROM utilities
<NAME>, 14 Feb 2018
"""
import warnings
import numpy as np
import scipy.linalg as scalg
# from IPython import embed
import sharpy.linear.src.libsparse as libsp
import sharpy.linear.src.libss as libss
def balreal_direct_py(A, B, C, DLTI=True, Schur=False, full_outputs=False):
r"""
Find balanced realisation of continuous (``DLTI = False``) and discrete (``DLTI = True``)
time of LTI systems using scipy libraries.
The function proceeds to achieve balanced realisation of the state-space system by first solving
the Lyapunov equations. They are solved using Barlets-Stewart algorithm for
Sylvester equation, which is based on A matrix Schur decomposition.
.. math::
\mathbf{A\,W_c + W_c\,A^T + B\,B^T} &= 0 \\
\mathbf{A^T\,W_o + W_o\,A + C^T\,C} &= 0
to obtain the reachability and observability gramians, which are positive definite matrices.
Then, the gramians are decomposed into their Cholesky factors such that:
.. math::
\mathbf{W_c} &= \mathbf{Q_c\,Q_c^T} \\
\mathbf{W_o} &= \mathbf{Q_o\,Q_o^T}
A singular value decomposition (SVD) of the product of the Cholesky factors is performed
.. math:: (\mathbf{Q_o^T\,Q_c}) = \mathbf{U\,\Sigma\,V^*}
The singular values are then used to build the transformation matrix :math:`\mathbf{T}`
.. math::
\mathbf{T} &= \mathbf{Q_c\,V\,\Sigma}^{-1/2} \\
\mathbf{T}^{-1} &= \mathbf{\Sigma}^{-1/2}\,\mathbf{U^T\,Q_o^T}
The balanced system is therefore of the form:
.. math::
\mathbf{A_b} &= \mathbf{T\,A\,T^{-1}} \\
\mathbf{B_b} &= \mathbf{T\,B} \\
\mathbf{C_b} &= \mathbf{C\,T^{-1}} \\
\mathbf{D_b} &= \mathbf{D}
Warnings:
This function may be less computationally efficient than the ``balreal``
Matlab implementation and does not offer the option to bound the realisation
in frequency and time.
Notes:
Lyapunov equations are solved using Barlets-Stewart algorithm for
Sylvester equation, which is based on A matrix Schur decomposition.
Args:
A (np.ndarray): Plant Matrix
B (np.ndarray): Input Matrix
C (np.ndarray): Output Matrix
DLTI (bool): Discrete time state-space flag
Schur (bool): Use Schur decomposition to solve the Lyapunov equations
Returns:
tuple of np.ndarrays: Tuple of the form ``(S, T, Tinv)`` containing:
- Singular values in diagonal matrix (``S``)
- Transformation matrix (``T``).
- Inverse transformation matrix(``Tinv``).
References:
        Antoulas, A.C. Approximation of Large Scale Dynamical Systems. Chapter 7. Advances in Design and Control.
SIAM. 2005.
"""
### select solver for Lyapunov equation
# Notation reminder:
# scipy: A X A.T - X = -Q
# contr: A W A.T - W = - B B.T
# obser: A.T W A - W = - C.T C
if DLTI:
sollyap = scalg.solve_discrete_lyapunov
else:
sollyap = scalg.solve_lyapunov
# solve Lyapunov
if Schur:
# decompose A
Atri, U = scalg.schur(A)
# solve Lyapunov
BBtri = np.dot(U.T, np.dot(B, np.dot(B.T, U)))
CCtri = np.dot(U.T, np.dot(C.T, np.dot(C, U)))
Wctri = sollyap(Atri, BBtri)
Wotri = sollyap(Atri.T, CCtri)
# reconstruct Wo,Wc
Wc = np.dot(U, np.dot(Wctri, U.T))
Wo = np.dot(U, np.dot(Wotri, U.T))
else:
Wc = sollyap(A, np.dot(B, B.T))
Wo = sollyap(A.T, np.dot(C.T, C))
# Choleski factorisation: W=Q Q.T
# Qc = scalg.cholesky(Wc).T
# Qo = scalg.cholesky(Wo).T
# build M matrix and SVD
# M = np.dot(Qo.T, Qc)
# U, s, Vh = scalg.svd(M)
# S = np.diag(s)
# Sinv = np.diag(1. / s)
# V = Vh.T
# Build transformation matrices
# T = np.dot(Qc, np.dot(V, np.sqrt(Sinv)))
# Tinv = np.dot(np.sqrt(Sinv), np.dot(U.T, Qo.T))
# return S, T, Tinv
### Find transformation matrices
# avoid Cholevski - unstable
hsv_sq, Tinv = np.linalg.eig(np.dot(Wc, Wo))
T = np.linalg.inv(Tinv)
# sort
iisort = np.argsort(hsv_sq)[::-1]
hsv = np.sqrt(hsv_sq[iisort])
T = T[:, iisort]
Tinv = Tinv[iisort, :]
if full_outputs is False:
return hsv, T, Tinv
else:
# get square-root factors
UT, QoT = scalg.qr(np.dot(np.diag(np.sqrt(hsv)), Tinv), pivoting=False)
Vh, QcT = scalg.qr(np.dot(T, np.diag(np.sqrt(hsv))).T, pivoting=False)
return hsv, UT.T, Vh, QcT.T, QoT.T
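# Example (hedged sketch): balanced truncation using the outputs above, following the
# docstring convention A_b = T A T^{-1}, B_b = T B, C_b = C T^{-1}; the truncation order
# r is an assumption of this example, not something the function computes.
#     hsv, T, Tinv = balreal_direct_py(A, B, C, DLTI=True)
#     r = 10
#     Ab, Bb, Cb = T @ A @ Tinv, T @ B, C @ Tinv
#     Ar, Br, Cr = Ab[:r, :r], Bb[:r, :], Cb[:, :r]   # keep the r largest Hankel singular values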
def balreal_iter(A, B, C, lowrank=True, tolSmith=1e-10, tolSVD=1e-6, kmin=None,
tolAbs=False, Print=False, outFacts=False):
"""
Find balanced realisation of DLTI system.
Notes:
Lyapunov equations are solved using iterative squared Smith
algorithm, in its low or full rank version. These implementations are
as per the low_rank_smith and smith_iter functions respectively but,
for computational efficiency, the iterations are rewritten here so as to
solve for the observability and controllability Gramians contemporary.
- Exploiting sparsity:
This algorithm is not ideal to exploit sparsity. However, the following
strategies are implemented:
- if the A matrix is provided in sparse format, the powers of A will be
calculated exploiting sparsity UNTIL the number of non-zero elements
is below 15% the size of A. Upon this threshold, the cost of the matrix
multiplication rises dramatically, and A is hence converted to a dense
numpy array.
"""
### Solve Lyapunov equations
# Notation reminder:
# scipy: A X A.T - X = -Q
# contr: A W A.T - W = - B B.T
# obser: A.T W A - W = - C.T C
# low-rank smith: A.T X A - X = -Q Q.T
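    # Hedged summary of the iteration below: writing, e.g., the controllability equation as
    #     A X A.T - X = -Qc Qc.T,
    # a low-rank Smith iteration grows the factor Z_{k+1} = [Z_k, A_k Z_k] so that
    # Z_k Z_k.T approaches X, while the SVD step truncates Z_k to keep its rank bounded
    # (in the squared variant the propagator A_k is itself squared at each pass).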
# matrices size
N = A.shape[0]
rC = B.shape[1]
rO = C.shape[0]
if lowrank: # low-rank square-Smith iteration (with SVD)
# initialise smith iteration
DeltaNorm = 1e6 # error
DeltaNormNext = DeltaNorm ** 2 # error expected at next iter
kk = 0
Qck = B
Qok = C.T
if Print:
print('Iter\tMaxZ\t|\trank_c\trank_o\tA size')
while DeltaNorm > tolSmith and DeltaNormNext > 1e-3 * tolSmith:
###### controllability
### compute Ak^2 * Qck
# (future: use block Arnoldi)
Qcright = libsp.dot(A, Qck)
MaxZhere = np.max(np.abs(Qcright))
### enlarge Z matrices
Qck = np.concatenate((Qck, Qcright), axis=1)
Qcright = None
rC = Qck.shape[1]
if kmin == None or kmin < rC:
### "cheap" SVD truncation
Uc, svc = scalg.svd(Qck, full_matrices=False, overwrite_a=True,
lapack_driver='gesdd')[:2]
# import scipy.linalg.interpolative as sli
# Ucnew,svcnew,temp=sli.svd(Qck,tolSVD)
if tolAbs:
rcmax = np.sum(svc > tolSVD)
else:
                    rcmax = np.sum(svc > tolSVD * svc[0])
"""NAOS-Conica specific methods and variables.
"""
from __future__ import division, print_function
import astropy.io.fits as pyfits
import numpy as np
import scipy.ndimage as nd
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import time
import glob
import pdb
import time
from aoinstrument import AOInstrument
class CONICA(AOInstrument):
"""The CONICA Class, that enables processing of CONICA images.
"""
instrument = 'NAOS+CONICA'
def is_bad_surrounded(self,bad):
#Returns matrix of booleans showing which pixels are surrounded by bad pixels
#"Surrounded" means there is at least one bad pixel in at least two directions
numPixels = 3
sz = len(bad)
is_bad_to_left = np.zeros((sz,sz-numPixels))
is_bad_to_right = np.zeros((sz,sz-numPixels))
is_bad_above = np.zeros((sz-numPixels,sz))
is_bad_below = np.zeros((sz-numPixels,sz))
for ii in range(0,numPixels):
is_bad_to_left+=bad[0:sz,numPixels-ii-1:sz-ii-1]
is_bad_to_right+=bad[0:sz,ii+1:sz-numPixels+ii+1]
is_bad_above+=bad[numPixels-ii-1:sz-ii-1,0:sz]
is_bad_below+=bad[ii+1:sz-numPixels+ii+1,0:sz]
is_bad_to_left = is_bad_to_left>0
is_bad_to_right = is_bad_to_right>0
is_bad_above = is_bad_above>0
is_bad_below = is_bad_below>0
is_surrounded = np.zeros((sz,sz))
is_surrounded[0:sz,numPixels:sz]+=is_bad_to_left
is_surrounded[0:sz,0:sz-numPixels]+=is_bad_to_right
is_surrounded[numPixels:sz,0:sz]+=is_bad_above
is_surrounded[0:sz-numPixels,0:sz]+=is_bad_below
is_surrounded = is_surrounded>2
return is_surrounded
def saturated_pixels(self,image,header,threshold=7500):
"""Returns coordinates of all saturated pixels
Uses image and header from file
Parameters
----------
image: numpy array
The input image
header: pyfits header
The header from this image.
"""
if "COADDS" in header.keys():
pixels = np.where(image/header["COADDS"]>threshold)
else:
pixels = np.where(image>threshold)
return pixels
def make_all_darks(self, ddir='', rdir=''):
"""Make all darks in a current directory. This skeleton routine assumes that
keywords "SHRNAME", "NAXIS1" and "NAXIS2" exist.
"""
#Allow over-riding default reduction and data directories.
if (rdir == ''):
rdir = self.rdir
if (ddir == ''):
ddir = self.ddir
if len(self.csv_dict) == 0:
print("Error: Run read_summary_csv first. No darks made.")
return
darks = np.where(np.array(self.csv_dict['SHRNAME']) == 'closed')[0]
#Now we need to find unique values of the following:
#NAXIS1, NAXIS2 (plus for nirc2... ITIME, COADDS, MULTISAM)
codes = []
for d in darks:
codes.append(self.csv_dict['NAXIS1'][d] + self.csv_dict['NAXIS2'][d] +
self.csv_dict['EXPTIME'][d] + self.csv_dict['COADDS'][d] + self.csv_dict['MULTISAM'][d])
codes = np.array(codes)
#For each unique code, find all dark files and call make_dark.
for c in np.unique(codes):
w = np.where(codes == c)[0]
if (len(w) >= 3):
files = [ddir + self.csv_dict['FILENAME'][darks[ww]] for ww in w]
self.make_dark(files, rdir=rdir)
def make_all_flats(self, ddir='', rdir=''):
"""Search for sets of files that look like they are a series of flats. If "Lamp Off"
files exist within 100 files or so of the flats, call them the darks to go with the
flats. """
#Allow over-riding default reduction and data directories.
if (rdir == ''):
rdir = self.rdir
if (ddir == ''):
ddir = self.ddir
if len(self.csv_dict) == 0:
print("Error: Run read_summary_csv first. No flats made.")
return
#Fill in elevation with a default value (45, for dome flat position) if there are fits header errors.
els = self.csv_dict['NAXIS1']
for i in range(len(els)):
try:
this_el = float(els[i])
except:
els[i] = '45.0'
els = els.astype(float)
#If we're in the dome flat position with more than 1000 counts, this looks
#like it could be a dome flat!
codes = []
flats_maybe = np.where(self.csv_dict['OBJECT']=='flats')[0]
fluxes = self.csv_dict['MEDIAN_VALUE'][flats_maybe].astype(float)
for ix in range(len(els)):
codes.append(self.csv_dict['ESO INS OPTI6 ID'][ix] + self.csv_dict['NAXIS1'][ix] + self.csv_dict['NAXIS2'][ix] +
self.csv_dict['EXPTIME'][ix] + self.csv_dict['COADDS'][ix] + self.csv_dict['MULTISAM'][ix] +
self.csv_dict['SLITNAME'][ix])
codes = np.array(codes)
flat_codes = codes[flats_maybe]
#For each unique code, find the files with consistent flux
for c in np.unique(flat_codes):
#w indexes flats_maybe
w = np.where(flat_codes == c)[0]
#Flux has to be within 10% of the median to count.
this_flat_flux = np.median(fluxes[w])
good_flats = flats_maybe[w[np.where(np.abs( (fluxes[w] - this_flat_flux)/this_flat_flux < 0.1))[0]]]
#Less than 2 flats... don't bother.
if (len(good_flats) >= 2):
ffiles = [ddir + self.csv_dict['FILENAME'][ww] for ww in good_flats]
lamp_off = np.where( (codes == c) & (np.array(self.csv_dict['MEDIAN_VALUE'].astype(float) < 600) & \
(np.abs(els - 45) < 0.01) ) )[0]
if (len(lamp_off) >= 3):
#Use these lamp_off indexes to create a "special" dark.
dfiles = [ddir + self.csv_dict['FILENAME'][ww] for ww in lamp_off]
try:
hh = pyfits.open(dfiles[0], ignore_missing_end=True)[0].header
except:
hh = pyfits.open(dfiles[0]+'.gz', ignore_missing_end=True)[0].header
dfilename = str(lamp_off[0]) + '_' + self.get_dark_filename(hh)
self.make_dark(dfiles, out_file=dfilename)
self.make_flat(ffiles, dark_file=dfilename)
#Otherwise, just use default darks. This *will* give an error if they don't exist.
else:
self.make_flat(ffiles)
def csv_block_string(self, ix):
"""Find a string from the summary csv file that identifies a unique configuration
for a set of files to be processed as a block. It isn't *quite* correct
because the target name sometimes stays the same with a target change.
Parameters
----------
ix: int
The index of the file (in the csv dictionary) that we want to get a block string
for"""
if len(self.csv_dict) == 0:
print("Error: Run read_summary_csv first. No string returned.")
return
block_string = self.csv_dict['NAXIS1'][ix] + self.csv_dict['NAXIS2'][ix] + \
self.csv_dict['OBJECT'][ix] + self.csv_dict['ESO INS OPTI6 ID'][ix] + \
self.csv_dict['EXPTIME'][ix] + self.csv_dict['COADDS'][ix]
return block_string
def info_from_header(self, h, subarr=None):
"""Find important information from the fits header and store in a common format
Parameters
----------
h: The fits header
Returns
-------
(dark_file, flat_file, filter, wave, rad_pixel)
"""
#First, sanity check the header
try: inst=h['INSTRUME']
except: inst=''
if (len(inst)==0):
print("Error: could not find instrument in header...")
raise UserWarning
if ((self.instrument != inst) & (inst[0:3] != '###')):
print("Error: software expecting: ", self.instrument, " but instrument is: ", inst)
raise UserWarning
"""try: fwo = h['FWONAME']
except:
print("No FWONAME in NACO header")
raise UserWarning"""
try: fwi = h['ESO INS OPTI6 ID']
except:
print("No FWINAME in NACO header")
raise UserWarning
try: slit = h['SLITNAME']
except:
slit = 'none'
"""print("No SLITNAME in NACO header")
raise UserWarning"""
if (fwi=='J'):
wave = 1.265e-6
filter='J'
elif (fwi=='H'):
wave = 1.66e-6
filter='H'
elif (fwi=='Ks'):
wave = 2.18e-6
filter='Ks'
elif (fwi=='L_prime'):
wave = 3.8e-6
filter='L_prime'
elif (fwi=='M_prime'):
wave = 4.78e-6
filter='M_prime'
elif ('NB' in fwi or 'IB' in fwi):
wave = float(fwi[3:len(fwi)])*1e-6
filter = fwi
elif (fwi=='empty'):
wave = 5e-7
filter = 'empty'
else:
print("Unknown Filter!")
pdb.set_trace()
if (slit == 'none'):
flat_file = 'flat_' + filter + '.fits'
else:
flat_file = 'flat_' + filter + '_' + slit + '.fits'
try: camname = h['CAMNAME']
except:
camname = 'narrow_VLT'
print("No CAMNAME in header")
if (camname == 'narrow'):
#This comes from the Yelda (2010) paper.
rad_pixel = 0.009952*(np.pi/180.0/3600.0)
elif (camname == 'narrow_VLT'):
rad_pixel = 0.03*(np.pi/180.0/3600.0)
else:
print("Unknown Camera!")
raise UserWarning
#Estimate the expected readout noise directly from the header.
"""if h['SAMPMODE'] == 2:
multisam = 1
else:
multisam = h['MULTISAM']"""
multisam = 1
#The next line comes from the NACO manual.
if fwi=='L_prime':
gain = 9.8
elif fwi=='M_prime':
gain = 9.0
else:
gain = 11.0
rnoise = 4.4
#Find the appropriate dark file if needed.
dark_file = self.get_dark_filename(h)
targname = h['ESO OBS NAME']
#The pupil orientation...
try:
el = h['ESO TEL ALT']
except:
el = -1
if (el > 0):
vertang_pa = (h['ESO ADA ABSROT START']+h['ESO ADA ABSROT END'])/2
altstart = 90-(180/np.pi)*np.arccos(1./h['ESO TEL AIRM START'])
altend = 90-(180/np.pi)*np.arccos(1./h['ESO TEL AIRM END'])
vertang_pa += (altstart+altend)/2
pa = vertang_pa-(180-(h['ESO TEL PARANG START']+h['ESO TEL PARANG END'])/2)
else:
vertang_pa=np.NaN
pa = np.NaN
#Find the pupil type and parameters for the pupil...
pupil_params=dict()
pupil_type = 'annulus'
pupil_params['outer_diam'] = 8.2
#Secondary obstruction guesstimated form picture on ESO webpage.
pupil_params['inner_diam'] = 1.5
ftpix_file = 'ftpix_' + filter + '_fullpupil.fits'
if subarr:
subarr_string = '_' + str(subarr)
else:
subarr_string = ''
ftpix_file = 'ftpix_' + filter + '_fullpupil' + subarr_string + '.fits'
# else:
# print "Assuming full pupil..."
# pupil_type = 'annulus'
# pupil_params['inner_diam'] = 1.8
# pupil_params['outer_diam'] = 10.2 #Maximum diameter is really 10.5
# ftpix_file = 'ftpix_' + filter + '_fullpupil.fits'
return {'dark_file':dark_file, 'flat_file':flat_file, 'filter':filter,
'wave':wave, 'rad_pixel':rad_pixel,'targname':targname,
'pupil_type':pupil_type,'pupil_params':pupil_params,'ftpix_file':ftpix_file,
'gain':gain, 'rnoise':rnoise, 'vertang_pa':vertang_pa, 'pa':pa}
def get_dark_filename(self,h):
"""Create a dark fits filename based on a header
Parameters
----------
h: header from astropy.io.fits
Returns
-------
dark_file: string
"""
dark_file = 'dark_' + str(h['NAXIS1']) + '_' + str(int(h['EXPTIME']*100)) + '.fits'
return dark_file
def destripe_conica(self,im, subtract_edge=True, subtract_median=False, do_destripe=True):
"""Destripe an image from the NACO camera.
The algorithm is:
1) Subtract the mode from each quadrant.
2) For each pixel, find the 24 pixels in other quadrants corresponding to
reflections about the chip centre.
3) Subtract the median of these pixels.
Parameters
----------
im: array_like
The input image.
subtract_median: bool, optional
Whether or not to subtract the median from each quadrant.
subtract_edge: bool, optional
Whether or not to adjust the means of each quadrant by the edge pixels.
Returns
-------
im: array_like
The corrected image.
"""
s = im.shape
quads = [im[0:s[0]//2,0:s[1]//2],im[s[0]:s[0]//2-1:-1,0:s[1]//2],
im[0:s[0]//2,s[1]:s[1]//2-1:-1],im[s[0]:s[0]//2-1:-1,s[1]:s[1]//2-1:-1]]
#print(quads)
quads = np.array(quads, dtype='float')
#Work through the quadrants, modifying based on the edges.
if subtract_edge:
quads[1] += np.median(quads[3][:,s[1]//2-8:s[1]//2])- np.median(quads[1][:,s[1]//2-8:s[1]//2])
quads[2] += np.median(quads[3][s[0]//2-8:s[0]//2,:])- np.median(quads[2][s[0]//2-8:s[0]//2,:])
delta = 0.5*(np.median(quads[3][s[0]//2-8:s[0]//2,:]) + np.median(quads[3][:,s[1]//2-8:s[1]//2])
- np.median(quads[0][s[0]//2-8:s[0]//2,:]) - np.median(quads[0][:,s[1]//2-8:s[1]//2]))
quads[0] += delta
#Subtract the background
if subtract_median:
print("Subtracting Medians...")
MED_DIFF_MULTIPLIER = 4.0
for i in range(4):
quad = quads[i,:,:]
med = np.median(quad)
dispersion = np.median(np.abs(quad - med))
goodpix = np.where(np.abs(quad - med) < MED_DIFF_MULTIPLIER*dispersion)
med = np.median(quad[goodpix])
quads[i,:,:] -= med
if do_destripe:
quads = quads.reshape((4,s[0]//2,s[1]//16,8))
stripes = quads.copy()
for i in range(4):
for j in range(s[0]//2): #The -1 on line is because of artifacts
for k in range(s[0]//16):
pix = np.array([stripes[(i+1)%4,j,k,:],stripes[(i+2)%4,j,k,:],stripes[(i+3)%4,j,k,:]])
quads[i,j,k,:] -= np.median(pix)
quads = quads.reshape((4,s[0]//2,s[1]//2))
im[0:s[0]//2,0:s[1]//2] = quads[0]
im[s[0]:s[0]//2-1:-1,0:s[1]//2] = quads[1]
im[0:s[0]//2,s[1]:s[1]//2-1:-1] = quads[2]
im[s[0]:s[0]//2-1:-1,s[1]:s[1]//2-1:-1] = quads[3]
return im
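    # Example (hedged sketch): typical use on a raw CONICA frame; the filename and the
    # float conversion are assumptions of this example, not requirements of the class.
    #     conica = CONICA()
    #     im = pyfits.getdata('raw_frame.fits').astype(float)
    #     im = conica.destripe_conica(im, subtract_median=True)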
def make_dark(self,in_files, out_file='', subtract_median=True, destripe=True, med_threshold=15.0, rdir=''):
"""Create a dark frame and save to a fits file,
with an attached bad pixel map as the first fits extension.
Parameters
----------
in_files : array_like (dtype=string). A list of input filenames.
out_file: string
The file to write to.
subtract_median: bool, optional
Whether or not to subtract the median from each frame (or quadrants)
destripe: bool, optional
Whether or not to destripe the images.
med_threshold: float, optional
The threshold for pixels to be considered bad if their absolute
value differs by more than this multiple of the median difference
of pixel values from the median.
Returns
-------
(optional) out_file: If an empty string is given, it is filled with the default out
filename
"""
#Allow over-riding default reduction directory.
if (rdir == ''):
rdir = self.rdir
VAR_THRESHOLD = 10.0
nf = len(in_files)
if (nf < 3):
print("At least 3 dark files sre needed for reliable statistics")
raise UserWarning
# Read in the first dark to check the dimensions.
try:
in_fits = pyfits.open(in_files[0], ignore_missing_end=True)
except:
in_fits = pyfits.open(in_files[0]+'.gz', ignore_missing_end=True)
h = in_fits[0].header
instname = ''
try: instname=h['ESO INS ID']
except:
print("Unknown Header Type")
#Create the output filename if needed
if (out_file == ''):
out_file = self.get_dark_filename(h)
s = in_fits[0].data.shape
in_fits.close()
darks = np.zeros((nf,s[0],s[1]))
plt.clf()
for i in range(nf):
#Read in the data
adark = pyfits.getdata(in_files[i])
if ('CONICA' in instname):
adark = self.destripe_conica(adark, subtract_median=subtract_median, do_destripe=destripe)
if (subtract_median):
plt.imshow(np.minimum(adark,1e2))
else:
plt.imshow(adark)
print("Median: " + str(np.median(adark)))
plt.pause(0.001)
#plt.draw()
darks[i,:,:] = adark
#Now look for weird pixels.
med_dark = np.median(darks, axis=0)
max_dark = np.max(darks, axis=0)
var_dark = np.zeros((s[0],s[1]))
for i in range(nf):
var_dark += (darks[i,:,:] - med_dark)**2
var_dark -= (max_dark - med_dark)**2
var_dark /= nf-2
#We need to threshold the med_diff quantity in case of low-noise, many subread images
        med_diff = np.maximum(np.median(np.abs(med_dark - np.median(med_dark))), 1.0)
"""
Created on Sun Sep 13 15:13:33 2020
@author: iseabrook1
"""
#This script contains the code required to produce analyse the predictability of
#binary edge changes given the value of l_e for each edge.
#<NAME>, <EMAIL>
#MIT License. Please reference below publication if used for research purposes.
#Reference: Seabrook et al., Evaluating Structural Edge Importance in Financial Networks
#
################################################################################
# Instructions for use.
#
# User is required to provide paths to the files containing l_e/dA pairs.
# These are generated for the college messaging dataset and bilateral trade dataset
# in 'exploratory_data_analysis.py'.
# This script initially produces boxplots of the distribution of l_e for changing
# edges vs. unchanging edges, and uses kernel density estimation to estimate and plot
# distributions of P(Delta A=0|l_e). These are presented in the paper above, and related
# to the observed prediction capability.
# The script then uses the observed change labels to train a logistic regression
# classifier to predict which edges will change given the value of l_e. The code
# compares the predictions to a monte carlo average of a dummy classifier which randomly
# predicts edges to change with probability equal to the observed fraction of changing edges. The code then outputs
# results for balanced accuracy, Receiver Operating Characteristic Area Under Curve,
# Precision Recall Area Under Curve, and the plots associated with these.
#
###############################################################################
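# A minimal sketch of the random-baseline comparison described above, written with
# scikit-learn's DummyClassifier instead of the hand-rolled binomial relabelling
# used further down. It is illustrative only and never called by this script; X is
# the log(l_e) feature column and y the binary change labels.
def _dummy_baseline_sketch(X, y, n_runs=100):
    from sklearn.dummy import DummyClassifier
    from sklearn.metrics import roc_auc_score
    from sklearn.model_selection import train_test_split
    import numpy as np
    scores = []
    for seed in range(n_runs):
        # stratified random guessing preserves the observed class frequencies
        X_tr, X_te, y_tr, y_te = train_test_split(X, y, test_size=0.2,
                                                  random_state=seed, stratify=y)
        clf = DummyClassifier(strategy="stratified", random_state=seed).fit(X_tr, y_tr)
        scores.append(roc_auc_score(y_te, clf.predict_proba(X_te)[:, 1]))
    return np.mean(scores)  # a random baseline should sit near 0.5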
import sys
sys.path.append("N:/documents/packages")
sys.path.append("N:/documents/phdenv")
#import generate_data as gd
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.metrics import roc_curve, auc
from sklearn.metrics import precision_recall_curve
from sklearn.metrics import confusion_matrix
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from imblearn.over_sampling import RandomOverSampler
sys.path.append("..") # Adds higher directory to python modules path.
from model_evaluation import structural_importance_model as sim
import seaborn as sns
from scipy import stats
from imblearn.pipeline import Pipeline
from sklearn.naive_bayes import GaussianNB
from sklearn.metrics import balanced_accuracy_score
if __name__ == "__main__":
path_to_college="C:/Users/iseabrook1/OneDrive - Financial Conduct Authority/Network_analytics/PhD/Data"
path_to_bilat="C:/Users/iseabrook1/OneDrive - Financial Conduct Authority/Network_analytics/PhD/Data"
ds_le_bilat=pd.read_csv(path_to_bilat+"/ds_le_bilat.csv")
ds_le_college=pd.read_csv(path_to_college+"/ds_le_college.csv")
ds_le_bilat["change_bool"]=np.multiply(abs(ds_le_bilat.delta_A_act)>0,1)
ds_le_college["change_bool"]=np.multiply(abs(ds_le_college.delta_A_act)>0,1)
#step 1 - boxplots
datasets=[ds_le_bilat,ds_le_college]
ds_names = ["Bilateral Trade", "College Messaging"]
fig, axs=plt.subplots(1,2, figsize=(10,5))
axs=axs.flatten()
all_t=[]
all_p=[]
for i, ds in enumerate(datasets):
g1 = ds[(ds.log_l_e>-15)&(ds.change_bool==0)]["log_l_e"].values
g2 = ds[(ds.log_l_e>-15)&(ds.change_bool==1)]["log_l_e"].values
t,p=stats.ttest_ind(g1,g2)
print(t,p)
all_t.append(t)
all_p.append(p)
sns.boxplot(x="change_bool", y="log_l_e", data=ds[(ds.log_l_e>-15)], ax=axs[i])
axs[i].set_title(ds_names[i] + f': p-value = {p:.3e}')
axs[i].set_xlabel("Change label")
axs[i].set_ylabel(r"$\ln(l_e)$")
plt.show()
#step 2 - change distributions
change_pdf_vals_bilat=sim.change_dist(ds_le_bilat.set_index(["variable", "trade date time"]))
ds_le_change_bilat= ds_le_bilat.merge(change_pdf_vals_bilat.to_frame().reset_index(), on=["trade date time", "variable"])
ds_le_change_bilat.columns = [ 'index', 'variable', 'trade date time',
'A_init', 'A_fin', 'l_e',
'delta_A_act', 'delta_A_rel1',
'log_l_e', 'log_delta_A_rel1',
'change_bool', "change_pdf_vals"]
change_pdf_vals_college=sim.change_dist(ds_le_college.set_index(["variable", "trade date time"]))
ds_le_change_college= ds_le_college.merge(change_pdf_vals_college.to_frame().reset_index(), on=["trade date time", "variable"])
ds_le_change_college.columns = [ 'index', 'variable', 'trade date time',
'A_init', 'A_fin', 'l_e',
'delta_A_act', 'delta_A_rel1',
'log_l_e', 'log_delta_A_rel1',
'change_bool', "change_pdf_vals"]
plt.figure()
plt.scatter(y=ds_le_change_bilat[ds_le_change_bilat.change_bool==0]["change_pdf_vals"],x=ds_le_change_bilat[ds_le_change_bilat.change_bool==0]["l_e"], marker='+', label="Bilateral trade")
plt.scatter(y=ds_le_change_college[ds_le_change_college.change_bool==0]["change_pdf_vals"],x=ds_le_change_college[ds_le_change_college.change_bool==0]["l_e"], marker='+', label="College Msg")
plt.xlabel("l_e")
plt.ylabel(r"$P(\Delta A=0| l_e)$")
plt.legend()
plt.show()
# #step 3 - change prediction
classifiers_dict = {'rf':RandomForestClassifier(),
'lr':LogisticRegression(class_weight='balanced', random_state = 42),
'gb':GaussianNB()}
classifier_params_dict={'rf':
{'clf__bootstrap': [False, True],
'clf__n_estimators': [80,90, 100, 110, 130]},
'lr':
{'clf__C': [0.001,0.01,0.1,1,10, 100],
'clf__penalty': ('l2', 'l1'),
'clf__max_iter': [50, 100, 200]},
'gb':
{'clf__var_smoothing': [0.00000001, 0.000000001, 0.00000001]}}
datasets=[ds_le_bilat,ds_le_college]
ds_names = ["Bilateral Trade", "College Messaging"]
colors = ["navy","orange","g","r", "purple"]
classifiers = [LogisticRegression(class_weight='balanced',C=100, max_iter=50, penalty='l2'), #Bilat
LogisticRegression(class_weight='balanced',C=10, max_iter=200, penalty='l2'), #College
]
fig, ax=plt.subplots(3,1, figsize=(10,5))
fig1, ax1=plt.subplots(3,1, figsize=(10,5))
ax=ax.flatten()
ax1=ax1.flatten()
for i, ds in enumerate(datasets):
dataset_prec = ds.copy()
print("PR")
X, y = np.array(dataset_prec["log_l_e"]).reshape(-1,1), dataset_prec["change_bool"]
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42, stratify = y)
best_classifier = classifiers[i]
pipeline = Pipeline(steps = [
('sampler', RandomOverSampler()),
('gb_classifier', best_classifier)
])
pipeline.fit(X_train, y_train)
y_prediction = pipeline.predict(X_test)
y_score = pipeline.predict_proba(X_test)
balanced_accuracy = balanced_accuracy_score(y_test, y_prediction)
print('Balanced accuracy %.3f' % balanced_accuracy)
#print(confusion_matrix(y_test, y_prediction))
tn, fp, fn, tp = confusion_matrix(y_test, y_prediction).ravel()
#print("tn:",tn, "fp:", fp,"fn:", fn,"tp:",tp)
#look at PR AUC. Use this to justify choice of recall
#(if we were to consider e.g. F1 score)
fpr, tpr, _ = roc_curve(y_test, y_score[:,1])
roc_auc= auc(fpr, tpr)
print('ROC AUC: %.3f' % roc_auc)
prec, recall, _ = precision_recall_curve(y_test, y_score[:,1],
pos_label=pipeline.named_steps['gb_classifier'].classes_[1])
auc_score = auc(recall, prec)
print('PR AUC: %.3f' % auc_score)
#binomial monte carlo generation, attempt to use log reg to predict.
pr_auc_dumm_list = []
bal_acc_list = []
fpr_dumm_list=[]
tpr_dumm_list=[]
prec_dumm_list=[]
recall_dumm_list=[]
auc_dumm_list=[]
thresholds_list=[]
base_fpr = np.linspace(0, 1, 101)
for j in range(100):
ds_le_dumm = ds.copy()
p = len(ds_le_dumm[ds_le_dumm.change_bool==1])/len(ds_le_dumm)
ds_le_dumm.loc[:, "change_bool"] = np.random.binomial(1, p, len(ds_le_dumm))
X_dumm, y_dumm = np.array(ds_le_dumm["log_l_e"]).reshape(-1,1), ds_le_dumm["change_bool"]
X_train_dumm, X_test_dumm, y_train_dumm, y_test_dumm = train_test_split(X_dumm, y_dumm, test_size=0.2, random_state=42, stratify = y)
pipeline_dumm = Pipeline(steps = [
('sampler', RandomOverSampler()),
('gb_classifier', best_classifier)
])
pipeline_dumm.fit(X_train_dumm, y_train_dumm)
y_prediction_dumm = pipeline_dumm.predict(X_test_dumm)
y_score_dumm = pipeline_dumm.predict_proba(X_test_dumm)
fpr_dumm, tpr_dumm, _ = roc_curve(y_test_dumm, y_score_dumm[:,1], drop_intermediate=False)
roc_auc_dumm= auc(fpr_dumm, tpr_dumm)
prec_dumm, recall_dumm, thresholds_dumm = precision_recall_curve(y_test_dumm, y_score_dumm[:,1],
pos_label=pipeline_dumm.named_steps['gb_classifier'].classes_[1])
balanced_accuracy_dumm = balanced_accuracy_score(y_test_dumm, y_prediction_dumm)
bal_acc_list.append(balanced_accuracy_dumm)
tpr_dumm = np.interp(base_fpr, fpr_dumm, tpr_dumm)
prec_dumm[0] = 0.0
tpr_dumm[0] = 0.0
tpr_dumm_list.append(tpr_dumm)
prec_dumm_list.append(prec_dumm)
recall_dumm_list.append(recall_dumm)
auc_dumm_list.append(roc_auc_dumm)
thresholds_list.append(thresholds_dumm)
balanced_accuracy_dumm = np.mean(bal_acc_list)
tpr_dumm_list = np.array(tpr_dumm_list)
tpr_dumm_mean = tpr_dumm_list.mean(axis=0)
tpr_dumm_std = tpr_dumm_list.std(axis=0)
mean_auc_dumm = auc(base_fpr, tpr_dumm_mean)
std_auc_dumm = np.std(auc_dumm_list)
tprs_upper = np.minimum(tpr_dumm_mean + tpr_dumm_std, 1)
tprs_lower = tpr_dumm_mean - tpr_dumm_std
prec_dumm_list = [i[0: min(map(len,thresholds_list))] for i in prec_dumm_list]
prec_dumm_list = np.array(prec_dumm_list)
prec_dumm_mean = prec_dumm_list.mean(axis=0)
prec_dumm_std = prec_dumm_list.std(axis=0)
std_pr_auc_dumm = np.std(auc_dumm_list)
precs_upper = np.minimum(prec_dumm_mean + prec_dumm_std, 1)
precs_lower = prec_dumm_mean - prec_dumm_std
recall_dumm_list = [i[0:min(map(len,thresholds_list))] for i in recall_dumm_list]
recall_dumm_list = | np.array(recall_dumm_list) | numpy.array |
# Copyright 2021 <NAME>
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Filters and filter banks"""
import abc
from typing import Mapping, Optional, Tuple, Union
import numpy as np
from pydrobert.speech import AliasedFactory
from pydrobert.speech import config
from pydrobert.speech.scales import MelScaling
from pydrobert.speech.scales import ScalingFunction
from pydrobert.speech.util import alias_factory_subclass_from_arg
from pydrobert.speech.util import angular_to_hertz
from pydrobert.speech.util import hertz_to_angular
__all__ = [
"LinearFilterBank",
"TriangularOverlappingFilterBank",
"GaborFilterBank",
"ComplexGammatoneFilterBank",
"WindowFunction",
"BartlettWindow",
"BlackmanWindow",
"HammingWindow",
"HannWindow",
"GammaWindow",
]
# banks
class LinearFilterBank(AliasedFactory):
"""A collection of linear, time invariant filters
A :class:`LinearFilterBank` instance is expected to provide factory methods for
instantiating a fixed number of LTI filters in either the time or frequency domain.
Filters should be organized lowest frequency first.
Attributes
----------
is_real : bool
is_analytic : bool
is_zero_phase : bool
num_filts : int
sampling_rate : float
centers_hz : tuple
supports_hz : tuple
supports : tuple
supports_ms : tuple
"""
@abc.abstractproperty
def is_real(self) -> bool:
"""Whether the filters are real or complex"""
pass
@abc.abstractproperty
def is_analytic(self) -> bool:
"""Whether the filters are (approximately) analytic"""
pass
@abc.abstractproperty
def is_zero_phase(self) -> bool:
"""Whether the filters are zero phase or not
Zero phase filters are even functions with no imaginary part in the fourier
domain. Their impulse responses center around 0.
"""
pass
@abc.abstractproperty
def num_filts(self) -> int:
"""Number of filters in the bank"""
pass
@abc.abstractproperty
def sampling_rate(self) -> float:
"""Number of samples in a second of a target recording"""
pass
@abc.abstractproperty
def supports_hz(self) -> Tuple:
"""Boundaries of effective support of filter freq responses, in Hz.
Returns a tuple of length `num_filts` containing pairs of floats of the low and
high frequencies. Frequencies outside the span have a response of approximately
(with magnitude up to :obj:`pydrobert.speech.config.EFFECTIVE_SUPPORT_THRESHOLD`) zero.
The boundaries need not be tight, i.e. the region inside the boundaries could be
zero. It is more important to guarantee that the region outside the boundaries
is approximately zero.
The boundaries ignore the Hermitian symmetry of the filter if it is real. Bounds
of ``(10, 20)`` for a real filter imply that the region ``(-20, -10)`` could
also be nonzero.
The user is responsible for adjusting the for the periodicity induced by
sampling. For example, if the boundaries are ``(-5, 10)`` and the filter is
sampled at 15Hz, then all bins of an associated DFT could be nonzero.
"""
pass
@abc.abstractproperty
def supports(self) -> Tuple:
"""Boundaries of effective support of filter impulse resps, in samples
Returns a tuple of length `num_filts` containing pairs of integers of the first
and last (effectively) nonzero samples.
The boundaries need not be tight, i.e. the region inside the boundaries could be
zero. It is more important to guarantee that the region outside the boundaries
is approximately zero.
If a filter is instantiated using a buffer that is unable to fully contain the
supported region, samples will wrap around the boundaries of the buffer.
Noncausal filters will have start indices less than 0. These samples will wrap
to the end of the filter buffer when the filter is instantiated.
"""
pass
@property
def supports_ms(self) -> tuple:
"""Boundaries of effective support of filter impulse resps, in ms"""
return tuple(
(s[0] * 1000 / self.sampling_rate, s[1] * 1000 / self.sampling_rate,)
for s in self.supports
)
@abc.abstractmethod
def get_impulse_response(self, filt_idx: int, width: int) -> np.ndarray:
"""Construct filter impulse response in a fixed-width buffer
Construct the filter in the time domain.
Parameters
----------
filt_idx : int
The index of the filter to generate. Less than `num_filts`
width : int
The length of the buffer, in samples. If less than the support of the
filter, the filter will alias.
Returns
-------
array-like
1D float64 or complex128 numpy array of length `width`
"""
pass
@abc.abstractmethod
def get_frequency_response(
self, filt_idx: int, width: int, half: bool = False
) -> np.ndarray:
"""Construct filter frequency response in a fixed-width buffer
Construct the 2pi-periodized filter in the frequency domain. Zero-phase filters
`is_zero_phase` are returned as 8-byte float arrays. Otherwise, they will be
16-byte complex floats.
Parameters
----------
filt_idx : int
The index of the filter to generate. Less than `num_filts`
width : int
The length of the DFT to output
half : bool, optional
Whether to return only the DFT bins between [0,pi]
Returns
-------
array-like
If `half` is `False`, returns a 1D float64 or complex128
numpy array of length `width`. If `half` is `True` and
`width` is even, the returned array is of length
``width // 2 + 1``. If `width` is odd, the returned array
is of length ``(width + 1) // 2``.
"""
pass
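# Worked example of the `half` length rule documented above: for width == 8 (even)
# the half spectrum covers bins 0..4, i.e. 8 // 2 + 1 == 5 values; for width == 7
# (odd) it covers bins 0..3, i.e. (7 + 1) // 2 == 4 values.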
@abc.abstractmethod
def get_truncated_response(
self, filt_idx: int, width: int
) -> Tuple[int, np.ndarray]:
"""Get nonzero region of filter frequency response
Many filters will be compactly supported in frequency (or approximately so).
This method generates a tuple `(bin_idx, buf)` of the nonzero region.
In the case of a complex filter, ``bin_idx + len(buf)`` may be greater than
`width`; the filter wraps around in this case. The full frequency response can
be calculated from the truncated response by:
>>> bin_idx, trnc = bank.get_truncated_response(filt_idx, width)
>>> full = numpy.zeros(width, dtype=trnc.dtype)
>>> wrap = min(bin_idx + len(trnc), width) - bin_idx
>>> full[bin_idx:bin_idx + wrap] = trnc[:wrap]
>>> full[:len(trnc) - wrap] = trnc[wrap:]
In the case of a real filter, only the nonzero region between ``[0, pi]``
(half-spectrum) is returned. No wrapping can occur since it would inevitably
interfere with itself due to conjugate symmetry. The half-spectrum can easily be
recovered by:
>>> half_width = (width + width % 2) // 2 + 1 - width % 2
>>> half = numpy.zeros(half_width, dtype=trnc.dtype)
>>> half[bin_idx:bin_idx + len(trnc)] = trnc
And the full spectrum by:
>>> full[bin_idx:bin_idx + len(trnc)] = trnc
>>> full[width - bin_idx - len(trnc) + 1:width - bin_idx + 1] = \\
... trnc[:None if bin_idx else 0:-1].conj()
(the embedded if-statement is necessary when bin_idx is 0, as the full fft
excludes its symmetric bin)
Parameters
----------
filt_idx : int
The index of the filter to generate. Less than `num_filts`
width : int
The length of the DFT to output
Returns
-------
tuple of int, array
"""
pass
class TriangularOverlappingFilterBank(LinearFilterBank):
"""Triangular frequency response whose vertices are along the scale
The vertices of the filters are sampled uniformly along the passed scale. If the
scale is nonlinear, the triangles will be asymmetrical. This is closely related to,
but not identical to, the filters described in [povey2011]_ and [young]_.
Parameters
----------
scaling_function : pydrobert.speech.ScalingFunction, str, or dict
Dictates the layout of filters in the Fourier domain. Can be a
:class:`ScalingFunction` or something compatible with
:func:`pydrobert.speech.alias_factory_subclass_from_arg`
num_filts : int, optional
The number of filters in the bank
high_hz, low_hz : float, optional
The topmost and bottommost edge of the filters, respectively. The default for
`high_hz` is the Nyquist
sampling_rate : float, optional
The sampling rate (cycles/sec) of the target recordings
analytic : bool, optional
Whether to use an analytic form of the bank. The analytic form is easily derived
from the real form in [povey2011]_ and [young]_. Since the filter is compactly
supported in frequency, the analytic form is simply the suppression of the
``[-pi, 0)`` frequencies
Attributes
----------
centers_hz : tuple
is_real : bool
is_analytic : bool
num_filts : int
sampling_rate : float
supports_hz : tuple
supports : tuple
supports_ms : tuple
Raises
------
ValueError
If `high_hz` is above the Nyquist, or `low_hz` is below 0, or
``high_hz <= low_hz``
"""
aliases = {"tri", "triangular"}
def __init__(
self,
scaling_function: Union[ScalingFunction, Mapping, str],
num_filts: int = 40,
high_hz: Optional[float] = None,
low_hz: float = 20.0,
sampling_rate: float = 16000,
analytic: bool = False,
):
scaling_function = alias_factory_subclass_from_arg(
ScalingFunction, scaling_function
)
if low_hz < 0 or (
high_hz and (high_hz <= low_hz or high_hz > sampling_rate // 2)
):
raise ValueError(
"Invalid frequency range: ({:.2f},{:.2f}".format(low_hz, high_hz)
)
self._rate = sampling_rate
if high_hz is None:
high_hz = sampling_rate // 2
# compute vertices
scale_low = scaling_function.hertz_to_scale(low_hz)
scale_high = scaling_function.hertz_to_scale(high_hz)
scale_delta = (scale_high - scale_low) / (num_filts + 1)
self._vertices = tuple(
scaling_function.scale_to_hertz(scale_low + scale_delta * idx)
for idx in range(0, num_filts + 2)
)
self._analytic = analytic
@property
def is_real(self) -> bool:
return not self._analytic
@property
def is_analytic(self) -> bool:
return self._analytic
@property
def is_zero_phase(self) -> bool:
return True
@property
def num_filts(self) -> int:
return len(self._vertices) - 2
@property
def sampling_rate(self) -> float:
return self._rate
@property
def centers_hz(self) -> Tuple[float]:
"""The point of maximum gain in each filter's frequency response, in Hz
This property gives the so-called "center frequencies" - the
point of maximum gain - of each filter.
"""
return self._vertices[1:-1]
@property
def supports_hz(self) -> tuple:
return tuple(
(low, high) for low, high in zip(self._vertices[:-2], self._vertices[2:])
)
@property
def supports(self) -> tuple:
# A given filter is bound from above by
# 2(w_r - w_l) / ((w_c - w_l)(w_r - w_c)t^2pi)
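# Setting that bound equal to config.EFFECTIVE_SUPPORT_THRESHOLD and solving
# for t gives |t| <= K / 2 with
#   K = sqrt(8 (w_r - w_l) / (pi * threshold * (w_c - w_l) * (w_r - w_c))),
# which is the K accumulated below (split into three square-root factors for
# numerical stability).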
supports = []
for idx in range(len(self._vertices) - 2):
left = hertz_to_angular(self._vertices[idx], self._rate)
mid = hertz_to_angular(self._vertices[idx + 1], self._rate)
right = hertz_to_angular(self._vertices[idx + 2], self._rate)
K = np.sqrt(8 * (right - left) / np.pi)
K /= np.sqrt(config.EFFECTIVE_SUPPORT_THRESHOLD)
K /= np.sqrt(mid - left) * np.sqrt(right - mid)
K = int(np.ceil(K))
supports.append((-K // 2 - 1, K // 2 + 1))
return tuple(supports)
def get_impulse_response(self, filt_idx: int, width: int) -> np.ndarray:
left = hertz_to_angular(self._vertices[filt_idx], self._rate)
mid = hertz_to_angular(self._vertices[filt_idx + 1], self._rate)
right = hertz_to_angular(self._vertices[filt_idx + 2], self._rate)
res = np.zeros(width, dtype=np.complex128 if self._analytic else np.float64)
# for numerical stability (angles can get pretty small)
if right - mid > mid - left:
denom = right - mid
div_term = mid - left
else:
denom = mid - left
div_term = right - mid
denom *= (int(self._analytic) + 1) * np.pi
for t in range(1, width + 1):
if self._analytic:
numer = (right - left) / div_term * np.exp(1j * mid * t)
numer -= (right - mid) / div_term * np.exp(1j * left * t)
numer -= (mid - left) / div_term * np.exp(1j * right * t)
else:
numer = (right - left) / div_term * np.cos(mid * t)
numer -= (right - mid) / div_term * np.cos(left * t)
numer -= (mid - left) / div_term * np.cos(right * t)
val = numer / t ** 2
if t < width:
res[t] += val
res[-t] += val.conj()
else:
res[0] += val
numer = mid / div_term * (right ** 2 - left ** 2)
numer += right / div_term * (left ** 2 - mid ** 2)
numer += left / div_term * (mid ** 2 - right ** 2)
res[0] += numer / 2
res /= denom
return res
def get_frequency_response(
self, filt_idx: int, width: int, half: bool = False
) -> np.ndarray:
left = self._vertices[filt_idx]
mid = self._vertices[filt_idx + 1]
right = self._vertices[filt_idx + 2]
left_idx = int(np.ceil(width * left / self._rate))
right_idx = int(width * right / self._rate)
assert self._rate * (left_idx - 1) / width <= left
assert self._rate * (right_idx + 1) / width >= right, width
dft_size = width
if half:
if width % 2:
dft_size = (width + 1) // 2
else:
dft_size = width // 2 + 1
res = np.zeros(dft_size, dtype=np.float64)
for idx in range(left_idx, min(dft_size, right_idx + 1)):
hz = self._rate * idx / width
if hz <= mid:
val = (hz - left) / (mid - left)
else:
val = (right - hz) / (right - mid)
res[idx] = val
if not half and not self._analytic:
res[-idx] = val
return res
def get_truncated_response(
self, filt_idx: int, width: int
) -> Tuple[int, np.ndarray]:
left = self._vertices[filt_idx]
mid = self._vertices[filt_idx + 1]
right = self._vertices[filt_idx + 2]
left_idx = int(np.ceil(width * left / self._rate))
right_idx = int(width * right / self._rate)
assert self._rate * (left_idx - 1) / width <= left
assert self._rate * (right_idx + 1) / width >= right, width
res = np.zeros(1 + right_idx - left_idx, dtype=np.float64)
for idx in range(left_idx, min(width, right_idx + 1)):
hz = self._rate * idx / width
if hz <= mid:
res[idx - left_idx] = (hz - left) / (mid - left)
else:
res[idx - left_idx] = (right - hz) / (right - mid)
return left_idx, res
class Fbank(LinearFilterBank):
"""A mel-triangular filter bank that is square-rooted
An ``Fbank`` instance is intended to replicate the filters from Kaldi
[povey2011]_ and HTK [young]_. Its scale is fixed to Mel-scale. Like a
``TriangularOverlappingFilterBank``, ``Fbank`` places the vertices of
triangular filters uniformly along the target scale. However, an ``Fbank``
is triangular in the Mel-scale, whereas the triangular bank is triangular
in frequency.
Parameters
----------
num_filts : int, optional
The number of filters in the bank
high_hz, low_hz : float, optional
The topmost and bottommost edge of the filters, respectively.
The default for high_hz is the Nyquist
sampling_rate : float, optional
The sampling rate (cycles/sec) of the target recordings
analytic : bool, optional
Whether to use an analytic form of the bank. The analytic form
is easily derived from the real form in [povey2011]_ and [young]_. Since
the filter is compactly supported in frequency, the analytic
form is simply the suppression of the ``[-pi, 0)`` frequencies
Attributes
----------
centers_hz : tuple
is_real : bool
is_analytic : bool
num_filts : int
sampling_rate : float
supports_hz : tuple
supports : tuple
supports_ms : tuple
Notes
-----
In a standard mel-filterbank spectrogram, the power spectrum is calculated
before filtering. This module's spectrogram takes the power spectrum after
filtering. To recreate the frequency response of the alternate order, we
can take the pointwise square root of the frequency response.
"""
aliases = {"fbank"}
def __init__(
self,
num_filts: int = 40,
high_hz: Optional[float] = None,
low_hz: float = 20.0,
sampling_rate: float = 16000,
analytic: bool = False,
):
scaling_function = MelScaling()
if low_hz < 0 or (
high_hz and (high_hz <= low_hz or high_hz > sampling_rate // 2)
):
raise ValueError(
"Invalid frequency range: ({:.2f},{:.2f}".format(low_hz, high_hz)
)
self._rate = sampling_rate
if high_hz is None:
high_hz = sampling_rate // 2
# compute vertices
scale_low = scaling_function.hertz_to_scale(low_hz)
scale_high = scaling_function.hertz_to_scale(high_hz)
scale_delta = (scale_high - scale_low) / (num_filts + 1)
self._vertices = tuple(
scaling_function.scale_to_hertz(scale_low + scale_delta * idx)
for idx in range(0, num_filts + 2)
)
self._analytic = analytic
@property
def is_real(self) -> bool:
return not self._analytic
@property
def is_analytic(self) -> bool:
return self._analytic
@property
def is_zero_phase(self) -> bool:
return True
@property
def num_filts(self) -> int:
return len(self._vertices) - 2
@property
def sampling_rate(self) -> float:
return self._rate
@property
def centers_hz(self) -> Tuple[float]:
"""The point of maximum gain in each filter's frequency response, in Hz
This property gives the so-called "center frequencies" - the
point of maximum gain - of each filter.
"""
return self._vertices[1:-1]
@property
def supports_hz(self) -> tuple:
return tuple(
(low, high) for low, high in zip(self._vertices[:-2], self._vertices[2:])
)
@property
def supports(self) -> tuple:
# A given filter is bound above for t > 0 by
# ((w_r - w_c) ** .5 + (w_c - w_l) ** .5) /
# (2 ** 3 * t ** 3 * (w_c - w_l) * (w_r - w_c) * pi) ** .5
supports = []
for idx in range(len(self._vertices) - 2):
left = hertz_to_angular(self._vertices[idx], self._rate)
mid = hertz_to_angular(self._vertices[idx + 1], self._rate)
right = hertz_to_angular(self._vertices[idx + 2], self._rate)
K = right - left + 2 * ((right - mid) * (mid - left)) ** 2
K /= config.EFFECTIVE_SUPPORT_THRESHOLD ** 2 * np.pi
K /= (right - mid) * (mid - left)
K /= np.sqrt(config.EFFECTIVE_SUPPORT_THRESHOLD)
K /= np.sqrt(mid - left) * np.sqrt(right - mid)
K **= 0.3333
K = int(np.ceil(K))
supports.append((-K // 2 - 1, K // 2 + 1))
return tuple(supports)
def get_impulse_response(self, filt_idx: int, width: int) -> np.ndarray:
# For the time being, I'll just invert the frequency response
if self.is_analytic:
freq_response = self.get_frequency_response(filt_idx, width, half=False)
return np.fft.ifft(freq_response)
else:
freq_response = self.get_frequency_response(filt_idx, width, half=True)
return np.fft.irfft(freq_response, n=width)
def get_frequency_response(
self, filt_idx: int, width: int, half: bool = False
) -> np.ndarray:
scaling_function = MelScaling()
left_hz = self._vertices[filt_idx]
mid_hz = self._vertices[filt_idx + 1]
right_hz = self._vertices[filt_idx + 2]
left_mel = scaling_function.hertz_to_scale(left_hz)
mid_mel = scaling_function.hertz_to_scale(mid_hz)
right_mel = scaling_function.hertz_to_scale(right_hz)
left_idx = int(np.ceil(width * left_hz / self._rate))
right_idx = int(width * right_hz / self._rate)
assert self._rate * (left_idx - 1) / width <= left_hz
assert self._rate * (right_idx + 1) / width >= right_hz, width
dft_size = width
if half:
if width % 2:
dft_size = (width + 1) // 2
else:
dft_size = width // 2 + 1
res = np.zeros(dft_size, dtype=np.float64)
for idx in range(left_idx, min(dft_size, right_idx + 1)):
hz = self._rate * idx / width
mel = scaling_function.hertz_to_scale(hz)
if mel <= mid_mel:
val = (mel - left_mel) / (mid_mel - left_mel)
else:
val = (right_mel - mel) / (right_mel - mid_mel)
res[idx] = val ** 0.5
if not half and not self._analytic:
res[-idx] = val ** 0.5
return res
def get_truncated_response(self, filt_idx: int, width: int) -> Tuple[int, np.ndarray]:
scaling_function = MelScaling()
left_hz = self._vertices[filt_idx]
mid_hz = self._vertices[filt_idx + 1]
right_hz = self._vertices[filt_idx + 2]
left_mel = scaling_function.hertz_to_scale(left_hz)
mid_mel = scaling_function.hertz_to_scale(mid_hz)
right_mel = scaling_function.hertz_to_scale(right_hz)
left_idx = int(np.ceil(width * left_hz / self._rate))
right_idx = int(width * right_hz / self._rate)
assert self._rate * (left_idx - 1) / width <= left_hz
assert self._rate * (right_idx + 1) / width >= right_hz, width
res = np.zeros(min(width, right_idx + 1) - left_idx, dtype=np.float64)
for idx in range(left_idx, min(width, right_idx + 1)):
hz = self._rate * idx / width
mel = scaling_function.hertz_to_scale(hz)
if mel <= mid_mel:
res[idx - left_idx] = (mel - left_mel) / (mid_mel - left_mel)
else:
res[idx - left_idx] = (right_mel - mel) / (right_mel - mid_mel)
return left_idx, res ** 0.5
class GaborFilterBank(LinearFilterBank):
r"""Gabor filters with ERBs between points from a scale
Gabor filters are complex, mostly analytic filters that have a Gaussian envelope in
both the time and frequency domains. They are defined as
.. math::
f(t) = C \sigma^{-1/2} \pi^{-1/4}
e^{\frac{-t^2}{2\sigma^2} + i\xi t}
in the time domain and
.. math::
\widehat{f}(\omega) = C \sqrt{2\sigma} \pi^{1/4}
e^{\frac{-\sigma^2(\xi - \omega)^2}{2}}
in the frequency domain. Though Gaussians never truly reach 0, in either domain,
they are effectively compactly supported. Gabor filters are optimal with respect to
their time-bandwidth product.
`scaling_function` is used to split up the frequencies between `high_hz` and
`low_hz` into a series of filters. Every subsequent filter's width is scaled such
that, if the filters are all of the same height, the intersection with the precedent
filter's response matches the filter's Equivalent Rectangular Bandwidth (``erb ==
True``) or its 3dB bandwidths (``erb == False``). The ERB is the width of a
rectangular filter with the same height as the filter's maximum frequency response
that has the same :math:`L^2` norm.
Parameters
----------
scaling_function : pydrobert.speech.ScalingFunction, str, or dict
Dictates the layout of filters in the Fourier domain. Can be a
:class:`ScalingFunction` or something compatible with
:func:`pydrobert.speech.alias_factory_subclass_from_arg`
num_filts : int
The number of filters in the bank
high_hz, low_hz : float, optional
The topmost and bottommost edge of the filters, respectively. The default for
`high_hz` is the Nyquist
sampling_rate : float, optional
The sampling rate (cycles/sec) of the target recordings
scale_l2_norm : bool
Whether to scale the l2 norm of each filter to 1. Otherwise the frequency
response of each filter will max out at an absolute value of 1.
erb : bool
Whether filter widths are set so that neighbouring filters intersect at the
Equivalent Rectangular Bandwidth (True) or at the 3dB bandwidth (False); see above
Attributes
----------
centers_hz : tuple
is_real : bool
is_analytic : bool
num_filts : int
sampling_rate : float
supports_hz : tuple
supports : tuple
supports_ms : tuple
scaled_l2_norm : bool
erb : bool
See Also
--------
pydrobert.speech.config.EFFECTIVE_SUPPORT_THRESHOLD
The absolute value below which counts as zero
"""
aliases = {"gabor"}
def __init__(
self,
scaling_function: Union[ScalingFunction, Mapping, str],
num_filts: int = 40,
high_hz: Optional[float] = None,
low_hz: float = 20.0,
sampling_rate: float = 16000,
scale_l2_norm: bool = False,
erb: bool = False,
):
scaling_function = alias_factory_subclass_from_arg(
ScalingFunction, scaling_function
)
self._scale_l2_norm = scale_l2_norm
self._erb = erb
if low_hz < 0 or (
high_hz and (high_hz <= low_hz or high_hz > sampling_rate // 2)
):
raise ValueError(
"Invalid frequency range: ({:.2f},{:.2f}".format(low_hz, high_hz)
)
self._rate = sampling_rate
if high_hz is None:
high_hz = sampling_rate // 2
scale_low = scaling_function.hertz_to_scale(low_hz)
scale_high = scaling_function.hertz_to_scale(high_hz)
scale_delta = (scale_high - scale_low) / (num_filts + 1)
# edges dictate the points where filters should intersect. We
# make a pretend intersection halfway between low_hz and
# the first filter center in the scaled domain. Likewise with
# high_hz and the last filter center. Intersections are spaced
# uniformly in the scaled domain
edges = tuple(
scaling_function.scale_to_hertz(scale_low + scale_delta * (idx + 0.5))
for idx in range(0, num_filts + 1)
)
centers_hz = []
centers_ang = []
stds = []
supports_ang = []
supports = []
wrap_supports_ang = []
self._wrap_below = False
log_2 = np.log(2)
log_pi = np.log(np.pi)
t_support_const = -2 * np.log(config.EFFECTIVE_SUPPORT_THRESHOLD)
f_support_const = t_support_const
if scale_l2_norm:
f_support_const += log_2 + 0.5 * log_pi
t_support_const -= 0.5 * log_pi
else:
t_support_const -= log_2 + log_pi
if erb:
bandwidth_const = np.sqrt(np.pi) / 2
else:
bandwidth_const = np.sqrt(3 / 10 * np.log(10))
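# Where these constants come from (sketch): the squared magnitude response of a
# Gabor filter decays as exp(-sigma^2 (w - xi)^2), whose equivalent rectangular
# bandwidth is sqrt(pi) / sigma. Choosing sigma = (sqrt(pi) / 2) / (w_c - w_l)
# therefore makes the ERB equal to the full spacing between adjacent
# intersection points. Likewise 3/10 * ln(10) = ln(10^0.3) ~= ln(2), so the
# second constant makes the (approximate) 3 dB bandwidth match that spacing.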
for left_intersect, right_intersect in zip(edges[:-1], edges[1:]):
center_hz = (left_intersect + right_intersect) / 2
center_ang = hertz_to_angular(center_hz, self._rate)
std = bandwidth_const / hertz_to_angular(
center_hz - left_intersect, self._rate
)
log_std = np.log(std)
if scale_l2_norm:
diff_ang = np.sqrt(log_std + f_support_const) / std
wrap_diff_ang = np.sqrt(log_std + f_support_const + log_2) / std
diff_samps = int(np.ceil(std * np.sqrt(t_support_const - log_std)))
else:
diff_ang = np.sqrt(f_support_const) / std
wrap_diff_ang = np.sqrt(f_support_const + log_2) / std
diff_samps = int(np.ceil(std * np.sqrt(t_support_const - 2 * log_std)))
supp_ang_low = center_ang - diff_ang
if supp_ang_low < 0:
self._wrap_below = True
centers_hz.append(center_hz)
centers_ang.append(center_ang)
supports_ang.append((center_ang - diff_ang, center_ang + diff_ang))
wrap_supports_ang.append(2 * wrap_diff_ang)
supports.append((-diff_samps, diff_samps))
stds.append(std)
self._centers_ang = tuple(centers_ang)
self._centers_hz = tuple(centers_hz)
self._stds = tuple(stds)
self._supports_ang = tuple(supports_ang)
self._wrap_supports_ang = tuple(wrap_supports_ang)
self._supports_hz = tuple(
(angular_to_hertz(ang_l, self._rate), angular_to_hertz(ang_h, self._rate),)
for ang_l, ang_h in supports_ang
)
self._supports = tuple(supports)
@property
def is_real(self) -> bool:
return False
@property
def is_analytic(self) -> bool:
return not self._wrap_below
@property
def num_filts(self) -> int:
return len(self._centers_hz)
@property
def is_zero_phase(self) -> bool:
return True
@property
def sampling_rate(self) -> float:
return self._rate
@property
def centers_hz(self) -> Tuple[float]:
"""The point of maximum gain in each filter's frequency response, in Hz
This property gives the so-called "center frequencies" - the
point of maximum gain - of each filter.
"""
return self._centers_hz
@property
def supports_hz(self) -> tuple:
return self._supports_hz
@property
def supports(self) -> tuple:
return self._supports
@property
def scaled_l2_norm(self) -> bool:
return self._scale_l2_norm
@property
def erb(self) -> bool:
return self._erb
def get_impulse_response(self, filt_idx: int, width: int) -> np.ndarray:
center_ang = self._centers_ang[filt_idx]
std = self._stds[filt_idx]
res = np.zeros(width, dtype=np.complex128)
if self._scale_l2_norm:
const_term = -0.5 * np.log(std) - 0.25 * np.log(np.pi)
else:
const_term = -0.5 * | np.log(2 * np.pi) | numpy.log |
import glob
import math
import os
import random
import time
import re
import cv2
import numpy as np
import copy
from torchvision.datasets import VisionDataset
from lib.utils.image import gaussian_radius, draw_umich_gaussian, draw_msra_gaussian
from lib.utils.utils import xyxy2ctwh
from lib.datasets.jde import letterbox, random_affine
def csv_to_gt(root):
for seq_name in sorted(os.listdir(root)):
frames = []
for img in sorted(glob.glob(f'{root}/{seq_name}/images/*.jpg')):
frames.append(int(re.search(r'\d+', img.split('/')[-1].split('.')[0]).group()))
frames = np.array(frames)
gt = np.nan_to_num(np.genfromtxt(glob.glob(f"{root}/{seq_name}/*.csv")[0], delimiter=','))
gt[:, 0] = frames[gt[:, 0].astype(int)]
np.savetxt(f'{root}/{seq_name}/gt.txt', gt, delimiter=',', fmt='%.3f', )
class MOTDataset(VisionDataset):
def __init__(self, opt, root, img_size=(1024, 768), augment=False, transforms=None):
super().__init__(root, transforms)
self.opt = opt
self.num_classes = 1
self.imgs_seq, self.seq_gts = {}, {}
self.seq_num_id, self.seq_start_id = {}, {}
start_id = 0
csv_to_gt(root)
for seq_name in sorted(os.listdir(root)):
for img in sorted(glob.glob(f'{root}/{seq_name}/images/*.jpg')):
self.imgs_seq[img] = seq_name
self.seq_gts[seq_name] = np.genfromtxt(f'{root}/{seq_name}/gt.txt', delimiter=',')
self.seq_num_id[seq_name] = len(np.unique(self.seq_gts[seq_name][:, 1]))
self.seq_start_id[seq_name] = start_id
start_id += self.seq_num_id[seq_name]
self.nID = start_id
self.width = img_size[0]
self.height = img_size[1]
self.max_objs = opt.K
self.augment = augment
self.transforms = transforms
def __len__(self):
return len(self.imgs_seq)
def __getitem__(self, index):
img_fname = list(self.imgs_seq.keys())[index]
seq_name = self.imgs_seq[img_fname]
img, labels = self.get_data(img_fname, self.seq_gts[seq_name])
labels[labels[:, 1] > -1, 1] += self.seq_start_id[seq_name]
output_h, output_w = self.height // self.opt.down_ratio, self.width // self.opt.down_ratio
num_classes = 1
num_objs = labels.shape[0]
hm = np.zeros((num_classes, output_h, output_w), dtype=np.float32)
if self.opt.ltrb:
wh = np.zeros((self.max_objs, 4), dtype=np.float32)
else:
wh = np.zeros((self.max_objs, 2), dtype=np.float32)
reg = np.zeros((self.max_objs, 2), dtype=np.float32)
ind = np.zeros((self.max_objs,), dtype=np.int64)
reg_mask = np.zeros((self.max_objs,), dtype=np.uint8)
ids = np.zeros((self.max_objs,), dtype=np.int64)
bbox_xys = np.zeros((self.max_objs, 4), dtype=np.float32)
draw_gaussian = draw_msra_gaussian if self.opt.mse_loss else draw_umich_gaussian
# de-normalize
labels[:, 2] *= output_w
labels[:, 3] *= output_h
labels[:, 4] *= output_w
labels[:, 5] *= output_h
for k in range(num_objs):
bbox = copy.deepcopy(labels[k, 2:])
cls_id = 0
bbox[0] = | np.clip(bbox[0], 0, output_w - 1) | numpy.clip |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Mar 17 10:31:31 2020
@author: grat05
"""
import numpy as np
from numpy import exp
import pandas as pd
from functools import wraps
class ObjDict():
def __repr__(self):
return str(self.__dict__)
def isList(thing):
return isinstance(thing, (list, tuple, np.ndarray))
class SodiumChannelModel():
def __init__(self, TEMP = 310.0, naO = 140.0, naI = 7,
recArrayNames = [],
state_vals = [],
retOptions = {'G': True, 'INa': True, 'INaL': True,\
'Open': True, 'RevPot': True}):
self.TEMP = TEMP
self.naO = naO
self.naI = naI
self.recArrayNames = pd.Index(recArrayNames)
self.num_states = len(self.recArrayNames)
self._state_vals = np.array(state_vals, dtype='float64')
self._recArray = []
self.retOptions = retOptions
self.lastVal = None
self.memoize = True
self.RGAS = 8314.0;
self.FDAY = 96487.0;
@property
def recArray(self):
return pd.DataFrame(np.array(self._recArray), columns=self.recArrayNames)
@property
def state_vals(self):
return pd.Series(self._state_vals, index=self.recArrayNames, dtype='float64')
def calc_constants(self, vOld):
pass
def jac(self, vOld):
pass
def ddtcalc(self, vals, vOld):
pass
def getRevPot(self):
return (self.RGAS * self.TEMP / self.FDAY) * np.log(self.naO / self.naI)
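# Quick numerical check with the default constants (TEMP=310 K, naO=140, naI=7):
# RGAS * TEMP / FDAY = 8314 * 310 / 96487 ~= 26.7 (mV) and log(140 / 7) = log(20)
# ~= 3.0, so the sodium reversal potential comes out near +80 mV.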
def calcCurrent(self, vals, vOld, setRecArray=True):
pass
def update(self, vOld, dt, record=True):
vals = self._state_vals
ddt = self.ddtcalc(vals, vOld)
vals += ddt*dt
return self.calcCurrent(vals, vOld, setRecArray=record)
def memoize_calc_constants(calc_constants):
@wraps(calc_constants)
def memoized(self, vOld):
if self.memoize:
if self.lastVal is not None and np.array_equal(self.lastVal[0], vOld):
return self.lastVal[1]
ret = calc_constants(self, vOld)
if self.memoize:
self.lastVal = (vOld, ret)
return ret
return memoized
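# Illustrative use of the decorator above (names are hypothetical):
# >>> class MyChannel(SodiumChannelModel):
# ...     @memoize_calc_constants
# ...     def calc_constants(self, vOld):
# ...         return some_expensive_rate_constants(vOld)
# Repeated calls at an unchanged membrane potential then reuse self.lastVal
# instead of recomputing the rate constants.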
class OHaraRudy_INa(SodiumChannelModel):
num_params = 33
param_bounds = [(-3,3)]*2 + \
[(-0.1,3)] + [(-3,3)] + [(-0.1,3)] +\
[(-3,3)] + [(-0.1,3)] +\
[(-1,3), (-3,3), (-0.1,3)] + \
[(-3,3)] + [(-0.1,3)] +\
[(-3,3)] + [(-1,3)] +\
[(-3,3)] + [(-1,3)] +\
[(-20,20)] + \
[(-3,3)] + [(-3,3)] + [(-1,3)] +\
[(-3,3)] + [(-1,3)] +\
[(-1,1)]*3 + \
[(-1,1)]*2 + \
[(-1,1)]*2 + \
[(-15,15)]*2 + \
[(-15,15), (-1,3)]
KmCaMK = 0.15
CaMKa = 1e-5
def __init__(self, GNaFactor=0, GNaLFactor=0, \
mss_tauFactor=0, tm_maxFactor=0, tm_tau1Factor=0,\
tm_shiftFactor=0, tm_tau2Factor=0,\
hss_tauFactor=0, thf_maxFactor=0, thf_tau1Factor=0,\
thf_shiftFactor=0, thf_tau2Factor=0,\
ths_maxFactor=0, ths_tau1Factor=0,\
ths_shiftFactor=0, ths_tau2Factor=0,\
Ahf_multFactor=0,\
tj_baselineFactor=0, tj_maxFactor=0, tj_tau1Factor=0,\
tj_shiftFactor=0, tj_tau2Factor=0,\
hssp_tauFactor=0, tssp_multFactor=0, tjp_multFactor=0,\
mLss_tauFactor=0, hLss_tauFactor=0,\
thL_baselineFactor=0, thLp_multFactor=0,\
mss_shiftFactor=0, hss_shiftFactor=0,\
jss_shiftFactor=0, jss_tauFactor=0,
TEMP = 310.0, naO = 140.0, naI = 7):
super().__init__(TEMP=TEMP, naO=naO, naI=naI,
recArrayNames = ["m","hf","hs","j","hsp","jp","mL","hL","hLp"],
state_vals = [0,1,1,1,1,1,0,1,1])
# scaling currents 0
self.GNa = 75*np.exp(GNaFactor);
self.GNaL = 0.0075*np.exp(GNaLFactor);
#m gate 2
self.mss_tau = 9.871*np.exp(mss_tauFactor)
self.tm_max = 0.473824721*np.exp(tm_maxFactor)
self.tm_tau1 = 34.77*np.exp(tm_tau1Factor)
self.tm_shift = -57.6379999+tm_shiftFactor
self.tm_tau2 = 5.955*np.exp(tm_tau2Factor)
tm_cshift = np.log(self.tm_tau1/self.tm_tau2)/(1/self.tm_tau1+1/self.tm_tau2)
tm_cmax = np.exp(tm_cshift/self.tm_tau1) + np.exp(-tm_cshift/self.tm_tau2)
self.tm_shift -= tm_cshift #shift correction
self.tm_max *= tm_cmax #height correction
#h gate 7
self.hss_tau = 6.086*np.exp(hss_tauFactor)
self.thf_max = 3.594172982376325*np.exp(thf_maxFactor)
self.thf_tau1 = 6.285*np.exp(thf_tau1Factor)
self.thf_shift = -57.639999999606744+thf_shiftFactor
self.thf_tau2 = 20.27* | np.exp(thf_tau2Factor) | numpy.exp |
from copy import copy
from unittest import mock
import pytest
from hypothesis import given, strategies as st
import numpy as np
from tesliper.glassware import spectra as sp
import tesliper.glassware as gw
def test_single_units():
s = sp.SingleSpectrum("ir", [1, 2], [1, 2])
assert all(k in s.units for k in ("x", "y", "width", "start", "stop", "step"))
@given(
st.lists(
st.floats(allow_nan=False, allow_infinity=False), min_size=2, max_size=100
),
st.floats(allow_nan=False, allow_infinity=False),
)
def test_single_scaling(values, factor):
s = sp.SingleSpectrum("ir", values=values, abscissa=np.arange(len(values)))
s.scaling = factor
assert np.allclose(s.y, s.values * factor)
@given(
st.integers(min_value=2, max_value=100),
st.floats(allow_nan=False, allow_infinity=False),
)
def test_single_offset(size, factor):
np.random.seed(0)  # filler values only, but just to be sure, it shouldn't change
s = sp.SingleSpectrum("ir", values=np.random.random(size), abscissa=np.arange(size))
s.offset = factor
assert np.allclose(s.x, s.abscissa + factor)
@given(
st.lists(
st.floats(
max_value=1000, min_value=-1000, allow_nan=False, allow_infinity=False
),
min_size=2,
max_size=100,
),
st.floats(
min_value=0,
exclude_min=True,
max_value=1000,
allow_nan=False,
allow_infinity=False,
),
)
def test_single_scale_to(values, factor):
s1 = sp.SingleSpectrum("ir", values=values, abscissa=np.arange(len(values)))
s2 = copy(s1)
s2.scaling = factor
s1.scale_to(s2)
assert np.allclose(s1.y, s2.y)
@given(
st.integers(min_value=2, max_value=100),
st.floats(min_value=-1000, max_value=1000, allow_nan=False, allow_infinity=False),
)
def test_single_shift_to(size, factor):
np.random.seed(0)  # filler values only, but just to be sure, it shouldn't change
s1 = sp.SingleSpectrum(
"ir", values=np.random.random(size), abscissa=np.arange(size)
)
s2 = copy(s1)
s2.offset = factor
s1.shift_to(s2)
assert np.allclose(s1.x, s2.x)
@pytest.fixture(scope="module")
def spectra():
return sp.Spectra("ir", ["one", "two"], [[1, 2], [6, 7]], [0, 1])
@pytest.fixture(scope="module")
def en_mock():
return mock.Mock(genre="gib", populations=[0.2, 0.8])
def test_spectra_average_return(spectra, en_mock):
n = spectra.average(en_mock)
assert n.averaged_by == en_mock.genre
assert np.allclose(
n.values, np.average(spectra.values, weights=en_mock.populations, axis=0)
)
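# with the module fixtures this is concrete arithmetic: values [[1, 2], [6, 7]]
# averaged with weights [0.2, 0.8] give [1*0.2 + 6*0.8, 2*0.2 + 7*0.8] == [5.0, 6.0]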
@given(
factor=st.floats(
min_value=0,
exclude_min=True,
max_value=1000,
allow_nan=False,
allow_infinity=False,
)
)
def test_spectra_scale_to_with_en(spectra, en_mock, factor):
n = spectra.average(en_mock)
n.scaling = factor
spectra.scale_to(n, en_mock)
assert | np.isclose(spectra.scaling, factor) | numpy.isclose |
import numpy as np
from scipy import interpolate
class Animation:
def __init__(self, trajectory, time, method):
self.x = | np.array(time) | numpy.array |
"""
.. codeauthor:: <NAME> <<EMAIL>>
"""
import numpy as np
import pytest
from pde import CartesianGrid, ScalarField, Tensor2Field, UnitGrid, VectorField
from pde.fields.base import FieldBase
from pde.tools.misc import module_available, skipUnlessModule
def test_vectors_basic():
"""test some vector fields"""
grid = CartesianGrid([[0.1, 0.3], [-2, 3]], [3, 4])
v1 = VectorField(grid, np.full((2,) + grid.shape, 1))
v2 = VectorField(grid, np.full((2,) + grid.shape, 2))
np.testing.assert_allclose(v1.average, (1, 1))
assert np.allclose(v1.magnitude, np.sqrt(2))
assert v1[0] == v1["x"]
assert v1[1] == v1["y"]
v1[0] = v1[0]
with pytest.raises(IndexError):
v1["z"]
v3 = v1 + v2
assert v3.grid == grid
np.testing.assert_allclose(v3.data, 3)
v1 += v2
np.testing.assert_allclose(v1.data, 3)
# test projections
v1 = VectorField(grid)
v1[0] = 3
v1[1] = 4
for method, value in [
("min", 3),
("max", 4),
("norm", 5),
("squared_sum", 25),
("norm_squared", 25),
("auto", 5),
]:
p1 = v1.to_scalar(method)
assert p1.data.shape == grid.shape
np.testing.assert_allclose(p1.data, value)
v2 = FieldBase.from_state(v1.attributes, data=v1.data)
assert v1 == v2
assert v1.grid is v2.grid
attrs = VectorField.unserialize_attributes(v1.attributes_serialized)
v2 = FieldBase.from_state(attrs, data=v1.data)
assert v1 == v2
assert v1.grid is not v2.grid
# test dot product
v2._grid = v1.grid # make sure grids are identical
v1.data = 1
v2.data = 2
for backend in ["numpy", "numba"]:
dot_op = v1.make_dot_operator(backend)
res = ScalarField(grid, dot_op(v1.data, v2.data))
for s in (v1 @ v2, v2 @ v1, v1.dot(v2), res):
assert isinstance(s, ScalarField)
assert s.grid is grid
np.testing.assert_allclose(s.data, np.full(grid.shape, 4))
# test options for plotting images
if module_available("matplotlib"):
v1.plot(method="streamplot", transpose=True)
def test_divergence():
"""test the divergence operator"""
grid = CartesianGrid([[0, 2 * np.pi], [0, 2 * np.pi]], [16, 16], periodic=True)
x, y = grid.cell_coords[..., 0], grid.cell_coords[..., 1]
data = [np.cos(x) + y, np.sin(y) - x]
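# analytically, div v = d/dx (cos x + y) + d/dy (sin y - x) = -sin(x) + cos(y),
# which is the reference array `div` computed below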
v = VectorField(grid, data)
s1 = v.divergence("natural")
assert s1.data.shape == (16, 16)
div = np.cos(y) - np.sin(x)
np.testing.assert_allclose(s1.data, div, rtol=0.1, atol=0.1)
v.divergence("natural", out=s1)
assert s1.data.shape == (16, 16)
np.testing.assert_allclose(s1.data, div, rtol=0.1, atol=0.1)
def test_vector_gradient_field():
"""test the vector gradient operator"""
grid = CartesianGrid([[0, 2 * np.pi], [0, 2 * np.pi]], [16, 16], periodic=True)
x, y = grid.cell_coords[..., 0], grid.cell_coords[..., 1]
data = [np.cos(x) + y, np.sin(y) - x]
v = VectorField(grid, data)
t1 = v.gradient("periodic")
assert t1.data.shape == (2, 2, 16, 16)
d00 = -np.sin(x)
d10 = -np.ones(grid.shape)
d01 = np.ones(grid.shape)
d11 = np.cos(y)
t2 = Tensor2Field(grid, np.array([[d00, d01], [d10, d11]]))
np.testing.assert_allclose(
t1.data[1:-1, 1:-1], t2.data[1:-1, 1:-1], rtol=0.1, atol=0.1
)
v.gradient("natural", out=t1)
assert t1.data.shape == (2, 2, 16, 16)
np.testing.assert_allclose(
t1.data[1:-1, 1:-1], t2.data[1:-1, 1:-1], rtol=0.1, atol=0.1
)
def test_vector_laplace():
"""test the laplace operator"""
grid = CartesianGrid([[0, 2 * np.pi], [0, 2 * np.pi]], [16, 16], periodic=True)
x, y = grid.cell_coords[..., 0], grid.cell_coords[..., 1]
data = [np.cos(x) + np.sin(y), np.sin(y) - np.cos(x)]
v = VectorField(grid, data)
vl = v.laplace("natural")
assert vl.data.shape == (2, 16, 16)
np.testing.assert_allclose(
vl.data[0, ...], -np.cos(x) - np.sin(y), rtol=0.1, atol=0.1
)
np.testing.assert_allclose(
vl.data[1, ...], -np.sin(y) + np.cos(x), rtol=0.1, atol=0.1
)
def test_vector_boundary_conditions():
"""test some boundary conditions of operators of vector fields"""
grid = CartesianGrid([[0, 2 * np.pi], [0, 1]], 32, periodic=[False, True])
vf = VectorField.from_expression(grid, ["sin(x)", "0"])
bc_x = [{"derivative": [-1, 0]}, {"derivative": [1, 0]}]
tf = vf.gradient(bc=[bc_x, "periodic"])
res = ScalarField.from_expression(grid, "cos(x)")
np.testing.assert_allclose(tf[0, 0].data, res.data, atol=0.01, rtol=0.01)
np.testing.assert_allclose(tf[0, 1].data, 0)
np.testing.assert_allclose(tf[1, 0].data, 0)
np.testing.assert_allclose(tf[1, 1].data, 0)
def test_outer_product():
"""test outer product of vector fields"""
vf = VectorField(UnitGrid([1, 1]), [[[1]], [[2]]])
for backend in ["numpy", "numba"]:
outer = vf.make_outer_prod_operator(backend)
tf = vf.outer_product(vf)
res = np.array([1, 2, 2, 4]).reshape(2, 2, 1, 1)
np.testing.assert_equal(tf.data, res)
np.testing.assert_equal(outer(vf.data, vf.data), res)
tf.data = 0
res = np.array([1, 2, 2, 4]).reshape(2, 2, 1, 1)
vf.outer_product(vf, out=tf)
np.testing.assert_equal(tf.data, res)
outer(vf.data, vf.data, out=tf.data)
np.testing.assert_equal(tf.data, res)
def test_from_expressions():
"""test initializing vector fields with expressions"""
grid = UnitGrid([4, 4])
vf = VectorField.from_expression(grid, ["x**2", "x * y"])
xs = grid.cell_coords[..., 0]
ys = grid.cell_coords[..., 1]
np.testing.assert_allclose(vf.data[0], xs ** 2)
np.testing.assert_allclose(vf.data[1], xs * ys)
# corner case
vf = VectorField.from_expression(grid, ["1", "x * y"])
np.testing.assert_allclose(vf.data[0], 1)
vf = VectorField.from_expression(grid, [1, "x * y"])
np.testing.assert_allclose(vf.data[0], 1)
with pytest.raises(ValueError):
VectorField.from_expression(grid, "xy")
with pytest.raises(ValueError):
VectorField.from_expression(grid, ["xy"])
with pytest.raises(ValueError):
VectorField.from_expression(grid, ["x"] * 3)
def test_vector_plot_quiver_reduction():
"""test whether quiver plots reduce the resolution"""
grid = UnitGrid([6, 6])
field = VectorField.random_normal(grid)
ref = field.plot(method="quiver", max_points=4)
assert len(ref.element.U) == 16
def test_boundary_interpolation_vector():
"""test boundary interpolation"""
grid = CartesianGrid([[0.1, 0.3], [-2, 3]], [3, 3])
field = VectorField.random_normal(grid)
# test boundary interpolation
bndry_val = np.random.randn(2, 3)
for bndry in grid._iter_boundaries():
val = field.get_boundary_values(*bndry, bc={"value": bndry_val})
np.testing.assert_allclose(val, bndry_val)
# boundary conditions have already been enforced
ev = field.make_get_boundary_values(*bndry)
np.testing.assert_allclose(ev(), bndry_val)
@pytest.mark.parametrize("transpose", [True, False])
def test_vector_plotting_2d(transpose):
"""test plotting of 2d vector fields"""
grid = UnitGrid([3, 4])
field = VectorField.random_uniform(grid, 0.1, 0.9)
for method in ["quiver", "streamplot"]:
ref = field.plot(method=method, transpose=transpose)
field._update_plot(ref)
# test sub-sampling
grid = UnitGrid([32, 15])
field = VectorField.random_uniform(grid, 0.1, 0.9)
field.get_vector_data(transpose=transpose, max_points=7)
@skipUnlessModule("napari")
def test_interactive_vector_plotting():
"""test the interactive plotting"""
grid = UnitGrid([3, 3])
field = VectorField.random_uniform(grid, 0.1, 0.9)
field.plot_interactive(viewer_args={"show": False, "close": True})
def test_complex_vectors():
"""test some complex vector fields"""
grid = CartesianGrid([[0.1, 0.3], [-2, 3]], [3, 4])
shape = (2, 2) + grid.shape
numbers = np.random.random(shape) + np.random.random(shape) * 1j
v1 = VectorField(grid, numbers[0])
v2 = VectorField(grid, numbers[1])
assert v1.is_complex and v2.is_complex
for backend in ["numpy", "numba"]:
dot_op = v1.make_dot_operator(backend)
# test complex conjugate
expected = v1.to_scalar("norm_squared").data
np.testing.assert_allclose((v1 @ v1).data, expected)
np.testing.assert_allclose(dot_op(v1.data, v1.data), expected)
# test dot product
res = dot_op(v1.data, v2.data)
for s in (v1 @ v2, (v2 @ v1).conjugate(), v1.dot(v2)):
assert isinstance(s, ScalarField)
assert s.grid is grid
| np.testing.assert_allclose(s.data, res) | numpy.testing.assert_allclose |
# -*- coding: iso-8859-15 -*-
#
# This software was written by <NAME> (<NAME>)
# Copyright <NAME>
# All rights reserved
# This software is licenced under a 3-clause BSD style license
#
#Redistribution and use in source and binary forms, with or without
#modification, are permitted provided that the following conditions are met:
#
#Redistributions of source code must retain the above copyright notice,
#this list of conditions and the following disclaimer.
#
#Redistributions in binary form must reproduce the above copyright notice,
#this list of conditions and the following disclaimer in the documentation
#and/or other materials provided with the distribution.
#
#Neither the name of the University College London nor the names
#of the code contributors may be used to endorse or promote products
#derived from this software without specific prior written permission.
#
#THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
#AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
#THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
#PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
#CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
#EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
#PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
#OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
#WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
#OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
#ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
# Developed by <NAME> (MSSL/UCL)
# uvotpy
# (c) 2009-2017, see Licence
from future.builtins import str
from future.builtins import input
from future.builtins import range
__version__ = '2.9.0 20171209'
import sys
import optparse
import numpy as np
import matplotlib.pyplot as plt
try:
from astropy.io import fits as pyfits
from astropy import wcs
except:
import pyfits
import re
import warnings
try:
import imagestats
except:
import stsci.imagestats as imagestats
import scipy
from scipy import interpolate
from scipy.ndimage import convolve
from scipy.signal import boxcar
from scipy.optimize import leastsq
from scipy.special import erf
from numpy import polyfit, polyval
'''
try:
#from uvotpy import uvotplot,uvotmisc,uvotwcs,rationalfit,mpfit,uvotio
import uvotplot
import uvotmisc
import uvotwcs
import rationalfit
import mpfit
import uvotio
except:
pass
'''
from uvotmisc import interpgrid, uvotrotvec, rdTab, rdList
from generate_USNOB1_cat import get_usnob1_cat
import datetime
import os
if __name__ != '__main__':
anchor_preset = list([None,None])
bg_pix_limits = list([-100,-70,70,100])
bg_lower_ = list([None,None]) # (offset, width) in pix, e.g., [20,30], default [50,50]
bg_upper_ = list([None,None]) # (offset, width) in pix, e.g., [20,30], default [50,50]
offsetlimit = None
#set Global parameters
status = 0
do_coi_correction = True # if not set, disable coi_correction
tempnames = list()
tempntags = list()
cval = -1.0123456789
interactive = True
update_curve = True
contour_on_img = False
give_result = False # with this set, a call to getSpec returns all data
give_new_result = False
use_rectext = False
background_method = 'boxcar' # alternatives 'splinefit' 'boxcar'
background_smoothing = [50,7] # 'boxcar' default smoothing in dispersion and across dispersion in pix
background_interpolation = 'linear'
trackcentroiding = True # default (= False will disable track y-centroiding)
global trackwidth
trackwidth = 2.5 # width of extraction region in sigma (alternative default = 1.0) 2.5 was used for flux calibration.
bluetrackwidth = 1.3 # multiplier width of non-order-overlapped extraction region [not yet active]
write_RMF = False
background_source_mag = 18.0
zeroth_blim_offset = 1.0
coi_half_width = None
slit_width = 200
_PROFILE_BACKGROUND_ = False # start with severe sigma-clip f background, before going to smoothing
today_ = datetime.date.today()
datestring = today_.isoformat()[0:4]+today_.isoformat()[5:7]+today_.isoformat()[8:10]
fileversion=1
calmode=True
typeNone = type(None)
senscorr = True # do sensitivity correction
print(66*"=")
print("uvotpy module uvotgetspec version=",__version__)
print("<NAME> (c) 2009-2017, see uvotpy licence.")
print("please use reference provided at http://github.com/PaulKuin/uvotpy")
print(66*"=","\n")
def getSpec(RA,DEC,obsid, ext, indir='./', wr_outfile=True,
outfile=None, calfile=None, fluxcalfile=None,
use_lenticular_image=True,
offsetlimit=None, anchor_offset=None, anchor_position=[None,None],
background_lower=[None,None], background_upper=[None,None],
background_template=None,
fixed_angle=None, spextwidth=13, curved="update",
fit_second=False, predict2nd=True, skip_field_src=False,
optimal_extraction=False, catspec=None,write_RMF=write_RMF,
get_curve=None,fit_sigmas=True,get_sigma_poly=False,
lfilt1=None, lfilt1_ext=None, lfilt2=None, lfilt2_ext=None,
wheelpos=None, interactive=interactive, sumimage=None, set_maglimit=None,
plot_img=True, plot_raw=True, plot_spec=True, zoom=True, highlight=False,
uvotgraspcorr_on=True, ank_c_0offset = False,
update_pnt=True, ifmotion=False, motion_file=None, anchor_x_offset=False,
replace=None,ifextended=False, singleside_bkg = False, fixwidth = False,
clobber=False, chatter=1):
'''Makes all the necessary calls to reduce the data.
Parameters
----------
    RA, DEC : float
       The sky position (J2000) in **decimal degrees**
obsid : str
The observation ID number as a **String**. Typically that is
something like "00032331001" and should be part of your
grism filename which is something like "sw00032331001ugu_dt.img"
ext : int
number of the extension to process
kwargs : dict
optional keyword arguments, possible values are:
      - **fit_second** : bool
        fit the second order. Off by default since it sometimes causes problems when
        the orders overlap completely. Useful for spectra in the top part of the detector
      - **background_lower** : list
        override the default lower background region with a list of two numbers,
        like [20, 40], giving the limits of the region in pixels measured from the spectrum
      - **background_upper** : list
        override the default upper background region with a list of two numbers,
        like [20, 40], giving the limits of the region in pixels measured from the spectrum
- **offsetlimit** : None,int,[center,range]
Default behaviour is to determine automatically any required offset from
the predicted anchor position to the spectrum, and correct for that.
The automated method may fail in the case of a weak spectrum and strong zeroth
or first order next to the spectrum. Two methods are provided:
(1) provide a number which will be used to limit the allowed offset. If
within that limit no peak is identified, the program will stop and require
        you to provide a manual offset value. Try small numbers like 1, -1, 3, etc.
(2) if you already know the approximate y-location of the spectrum at the
anchor x-position in the rotated small image strip around the spectrum, you
can give this with a small allowed range for fine tuning as a list of two
parameter values. The first value in the list must be the y-coordinate
(by default the spectrum falls close to y=100 pixels), the second parameter
the allowed adjustment to a peak value in pixels. For example, [105,2].
This will require no further interactive input, and the spectrum will be
        extracted using that offset (see the illustrative calls at the end of this parameter list).
- **wheelpos**: {160,200,955,1000}
filter wheel position for the grism filter mode used. Helpful for
forcing Vgrism or UVgrism input when both are present in the directory.
160:UV Clocked, 200:UV Nominal, 955:V clocked, 1000:V nominal
- **zoom** : bool
when False, the whole extracted region is displayed, including zeroth
order when present.
- **clobber** : bool
When True, overwrite earlier output (see also outfile)
- **write_RMF** : bool
When True, write the rmf file (will take extra time due to large matrix operations)
- **use_lenticular_image** : bool
When True and a lenticular image is present, it is used. If False,
the grism image header WCS-S system will be used for the astrometry,
with an automatic call to uvotgraspcorr for refinement.
      - **sumimage** : str
        Name of a summed image generated using ``sum_Extimage()``; the spectrum will
        be extracted from the summed image.
- **wr_outfile** : bool
If False, no output file is written
- **outfile** : path, str
Name of output file, other than automatically generated.
- **calfile** : path, str
calibration file name
- **fluxcalfile** : path, str
flux calibration file name or "CALDB" or None
- **predict2nd** : bool
        predict the second order flux from the first order. The prediction overestimates the flux considerably in the centre.
      - **skip_field_src** : bool
        if True, do not locate the zeroth order positions of field sources. Can be
        used when the absence of an internet connection or problems with the
        USNO-B1 server prevent the lookup.
      - **optimal_extraction** : bool, obsolete
        Do not use. Better results are obtained with the other (default) implementation.
- **catspec** : path
optional full path to the catalog specification file for uvotgraspcorr.
- **get_curve** : bool or path
True: activate option to supply the curvature coefficients of all
orders by hand.
path: filename with coefficients of curvature
- **uvotgraspcorr_on** : bool
enable/disable rerun of uvotgraspcorr to update the WCS keywords
- **update_pnt** : bool
enable/disable update of the WCS keywords from the attitude file
        (this is done prior to running uvotgraspcorr if that is enabled)
- **fit_sigmas** : bool
fit the sigma of trackwidths if True (not implemented, always on)
- **get_sigma_poly** : bool
option to supply the polynomial for the sigma (not implemented)
- **lfilt1**, **lfilt2** : str
        name of the lenticular filter before and after the grism exposure
(now supplied by fileinfo())
- **lfilt1_ext**, **lfilt2_ext** : int
extension of the lenticular filter (now supplied by fileinfo())
- **plot_img** : bool
plot the first figure with the det image
- **plot_raw** : bool
plot the raw spectrum data
- **plot_spec** : bool
plot the flux spectrum
- **highlight** : bool
add contours to the plots to highlight contrasts
- **chatter** : int
verbosity of program
- **set_maglimit** : int
        specify a magnitude limit to search for background sources in the USNO-B1 catalog
- **background_template** : numpy 2D array
        A user-provided background template that will be used instead of
        determining the background. Must be in counts. Size and alignment
must exactly match detector image.
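        Illustrative calls (a minimal sketch; the coordinates, obsid and region
        values below are placeholders, not values from a real observation):
          # limit the automated offset search to 2 pixels
          getSpec(ra, dec, '00032331001', 1, offsetlimit=2)
          # take the spectrum near y=105 with 2 pixels of fine tuning, and set
          # explicit background regions on both sides of the spectrum
          getSpec(ra, dec, '00032331001', 1, offsetlimit=[105,2],
                  background_lower=[20,40], background_upper=[20,40])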
Returns
-------
    None, or with give_result=True the compounded data (Y0, Y1, Y2, Y3, Y4) which
    are explained in the code, or with give_new_result=True a data dictionary.
Notes
-----
**Quick Start**
`getSpec(ra,dec,obsid, ext,)`
should produce plots and output files
**Which directory?**
    The program needs to be started from the CORRECT data directory.
    The attitude file [e.g., "sw<OBSID>pat.fits"] is needed!
    A link or copy of the attitude file needs to be present in the working
    directory or in the "../../auxil/" directory.
**Global parameters**
These parameters can be reset, e.g., during a (i)python session, before calling getSpec.
- **trackwidth** : float
      width of the spectral extraction region in units of sigma. The default is trackwidth = 2.5
      The alternative default is trackwidth = 1.0, which gives better results for
      weak sources or for spectra with nearby contamination. However, the flux
      calibration and coincidence-loss correction currently give inconsistent
      results for that width. When using trackwidth=1.0, rescale the flux to match
      trackwidth=2.5, the value that was used for the flux calibration and
      coincidence-loss correction (see the sketch following these parameters).
    - **give_result** : bool
      False by default; when set to True, a call to getSpec returns all the
      intermediate results. See Returns.
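    A minimal sketch of resetting these globals before extraction (the values shown
    are examples only, not recommendations):
      from uvotpy import uvotgetspec
      uvotgetspec.trackwidth = 1.0    # narrower slit for a weak or contaminated source
      uvotgetspec.give_result = True  # make getSpec return the intermediate results
      Y0, Y1, Y2, Y3, Y4 = uvotgetspec.getSpec(ra, dec, obsid, ext)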
When the extraction slit is set to be straight ``curved="straight"`` it cuts off the UV part of the
spectrum for spectra located in the top left and bottom right of the image.
History
-------
Version 2011-09-22 NPMK(MSSL) : handle case with no lenticular filter observation
Version 2012-01-15 NPMK(MSSL) : optimal extraction is no longer actively supported until further notice
Version 2013-10-23 NPMK(MSSL) : fixed bug so uvotgraspcorr gives same accuracy as lenticular filter
Version 2014-01-01 NPMK(MSSL) : aperture correction for background added; output dictionary
    Version 2014-07-23 NPMK(MSSL) : coi-correction using new calibrated coi-box and factor
Version 2014-08-04 NPMK(MSSL/UCL): expanded offsetlimit parameter with list option to specify y-range.
Version 2015-12-03 NPMK(MSSL/UCL): change input parameter 'get_curve' to accept a file name with coefficients
Version 2016-01-16 NPMK(MSSL/UCL): added options for background; disable automated centroiding of spectrum
Example
-------
from uvotpy.uvotgetspec import getSpec
from uvotpy import uvotgetspec
import os, shutil
indir1 = os.getenv('UVOTPY') +'/test'
indir2 = os.getcwd()+'/test/UVGRISM/00055900056/uvot/image'
shutil.copytree(indir1, os.getcwd()+'/test' )
getSpec( 254.7129625, 34.3148667, '00055900056', 1, offsetlimit=1,indir=indir2, clobber=True )
'''
# (specfile, lfilt1_, lfilt1_ext_, lfilt2_, lfilt2_ext_, attfile), (method), \
# (Xphi, Yphi, date1), (dist12, ankerimg, ZOpos), expmap, bgimg, bg_limits_used, bgextra = Y0
#
#( (dis,spnet,angle,anker,anker2,anker_field,ank_c), (bg,bg1,bg2,extimg,spimg,spnetimg,offset),
# (C_1,C_2,img), hdr,m1,m2,aa,wav1 ) = Y1
#
#fit,(coef0,coef1,coef2,coef3),(bg_zeroth,bg_first,bg_second,bg_third),(borderup,borderdown),apercorr,expospec=Y2
#
#counts, variance, borderup, borderdown, (fractions,cnts,vars,newsigmas) = Y3
#
#wav2p, dis2p, flux2p, qual2p, dist12p = Y4[0]
#
# where,
#
#(present0,present1,present2,present3),(q0,q1,q2,q3), \
# (y0,dlim0L,dlim0U,sig0coef,sp_zeroth,co_zeroth),(y1,dlim1L,dlim1U,sig1coef,sp_first,co_first),\
# (y2,dlim2L,dlim2U,sig2coef,sp_second,co_second),(y3,dlim3L,dlim3U,sig3coef,sp_third,co_third),\
# (x,xstart,xend,sp_all,quality,co_back) = fit
#
    # dis = dispersion with zero at ~260nm[UV]/420nm[V] ; spnet = background-subtracted spectrum from 'spnetimg'
# angle = rotation-angle used to extract 'extimg' ; anker = first order anchor position in DET coordinates
    # anker2 = second order anker X,Y position ; anker_field = Xphi,Yphi input angles with respect to reference
# ank_c = X,Y position of axis of rotation (anker) in 'extimg'
# bg = mean background, smoothed, with sources removed
# bg1 = one-sided background, sources removed, smoothed ; bg2 = same for background opposite side
# extimg = image extracted of source and background, 201 pixels wide, all orders.
# spimg = image centered on first order position ; spnetimg = background-subtracted 'spimg'
# offset = offset of spectrum from expected position based on 'anchor' at 260nm[UVG]/420nm[VG], first order
# C_1 = dispersion coefficients [python] first order; C_2 = same for second order
# img = original image ;
# WC_lines positions for selected WC star lines ; hdr = header for image
# m1,m2 = index limits spectrum ; aa = indices spectrum (e.g., dis[aa])
# wav1 = wavelengths for dis[aa] first order (combine with spnet[aa])
#
# when wr_outfile=True the program produces a flux calibrated output file by calling uvotio.
# [fails if output file is already present and clobber=False]
#
# The background must be consistent with the width of the spectrum summed.
from uvotio import fileinfo, rate2flux, readFluxCalFile
from uvotplot import plot_ellipsoid_regions
    if (type(RA) == np.ndarray) | (type(DEC) == np.ndarray):
        raise IOError("RA and DEC arguments must be of float type ")
if type(offsetlimit) == list:
if len(offsetlimit) != 2:
raise IOError("offsetlimit list must be [center, distance from center] in pixels")
get_curve_filename = None
a_str_type = type(curved)
if chatter > 4 :
print ("\n*****\na_str_type = ",a_str_type)
print ("value of get_curve = ",get_curve)
print ("type of parameter get_curve is %s\n"%(type(get_curve)) )
print ("type curved = ",type(curved))
if type(get_curve) == a_str_type:
# file name: check this file is present
if os.access(get_curve,os.F_OK):
get_curve_filename = get_curve
get_curve = True
else:
raise IOError(
"ERROR: get_curve *%s* is not a boolean value nor the name of a file that is on the disk."
%(get_curve) )
elif type(get_curve) == bool:
if get_curve:
get_curve_filename = None
print("requires input of curvature coefficients")
elif type(get_curve) == type(None):
get_curve = False
else:
raise IOError("parameter get_curve should by type str or bool, but is %s"%(type(get_curve)))
# check environment
CALDB = os.getenv('CALDB')
    if (CALDB == '') or (CALDB is None):
print('WARNING: The CALDB environment variable has not been set')
HEADAS = os.getenv('HEADAS')
    if (HEADAS == '') or (HEADAS is None):
print('WARNING: The HEADAS environment variable has not been set')
print('That is needed for the calls to uvot Ftools ')
#SCAT_PRESENT = os.system('which scat > /dev/null')
#if SCAT_PRESENT != 0:
# print('WARNING: cannot locate the scat program \nDid you install WCSTOOLS ?\n')
SESAME_PRESENT = os.system('which sesame > /dev/null')
#if SESAME_PRESENT != 0:
# print 'WARNING: cannot locate the sesame program \nDid you install the cdsclient tools?\n'
# fix some parameters
framtime = 0.0110329 # all grism images are taken in unbinned mode
splineorder=3
getzmxmode='spline'
smooth=50
testparam=None
msg = "" ; msg2 = "" ; msg4 = ""
attime = datetime.datetime.now()
logfile = 'uvotgrism_'+obsid+'_'+str(ext)+'_'+'_'+attime.isoformat()[0:19]+'.log'
if type(fluxcalfile) == bool: fluxcalfile = None
tempnames.append(logfile)
tempntags.append('logfile')
tempnames.append('rectext_spectrum.img')
tempntags.append('rectext')
lfiltnames=np.array(['uvw2','uvm2','uvw1','u','b','v','wh'])
ext_names =np.array(['uw2','um2','uw1','uuu','ubb','uvv','uwh'])
filestub = 'sw'+obsid
histry = ""
for x in sys.argv: histry += x + " "
Y0 = None
Y2 = None
Y3 = None
Y4 = None
Yfit = {}
Yout = {"coi_level":None} # output dictionary (2014-01-01; replace Y0,Y1,Y2,Y3)
lfilt1_aspcorr = "not initialized"
lfilt2_aspcorr = "not initialized"
qflag = quality_flags()
ZOpos = None
# parameters getSpec()
Yout.update({'indir':indir,'obsid':obsid,'ext':ext})
Yout.update({'ra':RA,'dec':DEC,'wheelpos':wheelpos})
if type(sumimage) == typeNone:
if background_template is not None:
# convert background_template to a dictionary
background_template = {'template':np.asarray(background_template),
'sumimg':False}
try:
ext = int(ext)
except:
print("fatal error in extension number: must be an integer value")
# locate related lenticular images
specfile, lfilt1_, lfilt1_ext_, lfilt2_, lfilt2_ext_, attfile = \
fileinfo(filestub,ext,directory=indir,wheelpos=wheelpos,chatter=chatter)
# set some flags and variables
lfiltinput = (lfilt1 != None) ^ (lfilt2 != None)
lfiltpresent = lfiltinput | (lfilt1_ != None) | (lfilt2_ != None)
if (type(lfilt1_) == typeNone) & (type(lfilt2_) == typeNone):
# ensure the output is consistent with no lenticular filter solution
use_lenticular_image = False
# translate
filt_id = {"wh":"wh","v":"vv","b":"bb","u":"uu","uvw1":"w1","uvm2":"m2","uvw2":"w2"}
lfiltflag = False
if ((type(lfilt1) == typeNone)&(type(lfilt1_) != typeNone)):
lfilt1 = lfilt1_
lfilt1_ext = lfilt1_ext_
if chatter > 0: print("lenticular filter 1 from search lenticular images"+lfilt1+"+"+str(lfilt1_ext))
lfiltflag = True
lfilt1_aspcorr = None
try:
hdu_1 = pyfits.getheader(indir+"/sw"+obsid+"u"+filt_id[lfilt1]+"_sk.img",lfilt1_ext)
lfilt1_aspcorr = hdu_1["ASPCORR"]
except:
hdu_1 = pyfits.getheader(indir+"/sw"+obsid+"u"+filt_id[lfilt1]+"_sk.img.gz",lfilt1_ext)
lfilt1_aspcorr = hdu_1["ASPCORR"]
if ((type(lfilt2) == typeNone)&(type(lfilt2_) != typeNone)):
lfilt2 = lfilt2_
lfilt2_ext = lfilt2_ext_
if chatter > 0: print("lenticular filter 2 from search lenticular images"+lfilt2+"+"+str(lfilt2_ext))
lfiltflag = True
lfilt2_aspcorr = None
try:
hdu_2 = pyfits.getheader(indir+"/sw"+obsid+"u"+filt_id[lfilt2]+"_sk.img",lfilt2_ext)
lfilt2_aspcorr = hdu_2["ASPCORR"]
except:
hdu_2 = pyfits.getheader(indir+"/sw"+obsid+"u"+filt_id[lfilt2]+"_sk.img.gz",lfilt2_ext)
lfilt2_aspcorr = hdu_2["ASPCORR"]
# report
if chatter > 4:
msg2 += "getSpec: image parameter values\n"
msg2 += "ra, dec = (%6.1f,%6.1f)\n" % (RA,DEC)
msg2 += "filestub, extension = %s[%i]\n"% (filestub, ext)
if lfiltpresent & use_lenticular_image:
msg2 += "first/only lenticular filter = "+lfilt1+" extension first filter = "+str(lfilt1_ext)+'\n'
msg2 += " Aspect correction keyword : %s\n"%(lfilt1_aspcorr)
if lfilt2_ext != None:
msg2 += "second lenticular filter = "+lfilt2+" extension second filter = "+str(lfilt2_ext)+'\n'
msg2 += " Aspect correction keyword : %s\n"%(lfilt2_aspcorr)
if not use_lenticular_image:
msg2 += "anchor position derived without lenticular filter\n"
msg2 += "spectrum extraction preset width = "+str(spextwidth)+'\n'
#msg2 += "optimal extraction "+str(optimal_extraction)+'\n'
hdr = pyfits.getheader(specfile,int(ext))
if chatter > -1:
msg += '\nuvotgetspec version : '+__version__+'\n'
msg += ' Position RA,DEC : '+str(RA)+' '+str(DEC)+'\n'
msg += ' Start date-time : '+str(hdr['date-obs'])+'\n'
msg += ' grism file : '+specfile.split('/')[-1]+'['+str(ext)+']\n'
msg += ' attitude file : '+attfile.split('/')[-1]+'\n'
if lfiltpresent & use_lenticular_image:
if ((lfilt1 != None) & (lfilt1_ext != None)):
msg += ' lenticular file 1: '+lfilt1+'['+str(lfilt1_ext)+']\n'
msg += ' aspcorr: '+lfilt1_aspcorr+'\n'
if ((lfilt2 != None) & (lfilt2_ext != None)):
msg += ' lenticular file 2: '+lfilt2+'['+str(lfilt2_ext)+']\n'
msg += ' aspcorr: '+lfilt2_aspcorr+'\n'
if not use_lenticular_image:
msg += "anchor position derived without lenticular filter\n"
if not 'ASPCORR' in hdr: hdr['ASPCORR'] = 'UNKNOWN'
Yout.update({'hdr':hdr})
tstart = hdr['TSTART']
tstop = hdr['TSTOP']
wheelpos = hdr['WHEELPOS']
expo = hdr['EXPOSURE']
expmap = [hdr['EXPOSURE']]
Yout.update({'wheelpos':wheelpos})
if 'FRAMTIME' not in hdr:
# compute the frametime from the CCD deadtime and deadtime fraction
#deadc = hdr['deadc']
#deadtime = 600*285*1e-9 # 600ns x 285 CCD lines seconds
#framtime = deadtime/(1.0-deadc)
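            # e.g., with a typical DEADC of ~0.9845 the commented formula would give
            #   framtime = (600e-9*285)/(1.0-0.9845) ~ 0.0110 s,
            # consistent with the fixed value adopted below.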
framtime = 0.0110329
            hdr['framtime'] = (framtime, 'frame time computed from deadc')
Yout.update({'hdr':hdr})
if chatter > 1:
print("frame time computed from deadc - added to hdr")
print("with a value of ",hdr['framtime']," ",Yout['hdr']['framtime'])
if not 'detnam' in hdr:
            hdr['detnam'] = str(hdr['wheelpos'])
msg += ' exposuretime : %7.1f \n'%(expo)
maxcounts = 1.1 * expo/framtime
if chatter > 0:
msg += ' wheel position : '+str(wheelpos)+'\n'
msg += ' roll angle : %5.1f\n'% (hdr['pa_pnt'])
msg += 'coincidence loss version: 2 (2014-07-23)\n'
msg += '======================================\n'
try:
if ( (np.abs(RA - hdr['RA_OBJ']) > 0.4) ^ (np.abs(DEC - hdr['DEC_OBJ']) > 0.4) ):
sys.stderr.write("\nWARNING: It looks like the input RA,DEC and target position in header are different fields\n")
except (RuntimeError, TypeError, NameError, KeyError):
pass
msg2 += " cannot read target position from header for verification\n"
if lfiltinput:
# the lenticular filter(s) were specified on the command line.
# check that the lenticular image and grism image are close enough in time.
if type(lfilt1_ext) == typeNone:
lfilt1_ext = int(ext)
lpos = np.where( np.array([lfilt1]) == lfiltnames )
if len(lpos[0]) < 1: sys.stderr.write("WARNING: illegal name for the lenticular filter\n")
lnam = ext_names[lpos]
lfile1 = filestub+lnam[0]+'_sk.img'
hdr_l1 = pyfits.getheader(lfile1,lfilt1_ext)
tstart1 = hdr_l1['TSTART']
tstop1 = hdr_l1['TSTOP']
if not ( (np.abs(tstart-tstop1) < 20) ^ (np.abs(tstart1-tstop) < 20) ):
sys.stderr.write("WARNING: check that "+lfile1+" matches the grism image\n")
if lfilt2 != None:
if type(lfilt2_ext) == typeNone:
lfilt2_ext = lfilt1_ext+1
lpos = np.where( np.array([lfilt2]) == lfiltnames )
                if len(lpos[0]) < 1: sys.stderr.write("WARNING: illegal name for the lenticular filter\n")
lnam = ext_names[lpos]
lfile2 = filestub+lnam[0]+'_sk.img'
                hdr_l2 = pyfits.getheader(lfile2,lfilt2_ext)
tstart2 = hdr_l2['TSTART']
tstop2 = hdr_l2['TSTOP']
                if not ( (np.abs(tstart-tstop2) < 20) ^ (np.abs(tstart2-tstop) < 20) ):
sys.stderr.write("WARNING: check that "+lfile2+" matches the grism image\n")
if (not lfiltpresent) | (not use_lenticular_image):
method = "grism_only"
else:
method = None
if not senscorr: msg += "WARNING: No correction for sensitivity degradation applied.\n"
# get the USNO-B1 catalog data for the field, & find the zeroth orders
if (not skip_field_src):
if chatter > 2: print("============== locate zeroth orders due to field sources =============")
if wheelpos > 500: zeroth_blim_offset = 2.5
ZOpos = find_zeroth_orders(filestub, ext, wheelpos,indir=indir,
set_maglimit=set_maglimit,clobber="yes", chatter=chatter, )
# use for the ftools the downloaded usnob1 catalog in file "search.ub1" using the
# catspec parameter in the calls
if os.access('catalog.spec',os.F_OK) & (catspec == None):
catspec= 'catalog.spec'
# retrieve the input angle relative to the boresight
Xphi, Yphi, date1, msg3, lenticular_anchors = findInputAngle( RA, DEC, filestub, ext,
uvotgraspcorr_on=uvotgraspcorr_on, update_pnt=update_pnt, msg="", \
wheelpos=wheelpos, lfilter=lfilt1, lfilter_ext=lfilt1_ext, \
lfilt2=lfilt2, lfilt2_ext=lfilt2_ext, method=method, \
attfile=attfile, catspec=catspec, indir=indir, chatter=chatter)
Yout.update({"Xphi":Xphi,"Yphi":Yphi})
Yout.update({'lenticular_anchors':lenticular_anchors})
# read the anchor and dispersion out of the wavecal file
anker, anker2, C_1, C_2, angle, calibdat, msg4 = getCalData(Xphi,Yphi,wheelpos, date1, \
calfile=calfile, chatter=chatter)
hdrr = pyfits.getheader(specfile,int(ext))
if (hdrr['aspcorr'] == 'UNKNOWN') & (not lfiltpresent):
msg += "WARNING: No aspect solution found. Anchor uncertainty large.\n"
msg += "first order anchor position on detector in det coordinates:\n"
msg += "anchor1=(%8.2f,%8.2f)\n" % (anker[0],anker[1])
msg += "first order dispersion polynomial (distance anchor, \n"
msg += " highest term first)\n"
for k in range(len(C_1)):
msg += "DISP1_"+str(k)+"=%12.4e\n" % (C_1[k])
msg += "second order anchor position on detector in det coordinates:\n"
msg += "anchor2=(%8.2f,%8.2f)\n" % (anker2[0],anker2[1])
msg += "second order dispersion polynomial (distance anchor2,\n"
msg += " highest term first)\n"
for k in range(len(C_2)):
msg += "DISP2_"+str(k)+"=%12.4e\n" % (C_2[k])
#sys.stderr.write( "first order anchor = %s\n"%(anker))
#sys.stderr.write( "second order anchor = %s\n"%(anker2))
msg += "first order dispersion = %s\n"%(str(C_1))
msg += "second order dispersion = %s\n"%(str(C_2))
if chatter > 1:
sys.stderr.write( "first order dispersion = %s\n"%(str(C_1)) )
sys.stderr.write( "second order dispersion = %s\n"%(str(C_2)) )
msg += "lenticular filter anchor positions (det)\n"
msg += msg3
# override angle
if fixed_angle != None:
msg += "WARNING: overriding calibration file angle for extracting \n\t"\
"spectrum cal: "+str(angle)+'->'+str(fixed_angle)+" \n"
angle = fixed_angle
# override anchor position in det pixel coordinates
if anchor_position[0] != None:
cal_anker = anker
anker = np.array(anchor_position)
msg += "overriding anchor position with value [%8.1f,%8.1f]\n" % (anker[0],anker[1])
anker2 = anker2 -cal_anker + anker
msg += "overriding anchor position 2nd order with value [%8.1f,%8.1f]\n"%(anker2[0],anker2[1])
anker_field = np.array([Xphi,Yphi])
theta=np.zeros(5)+angle # use the angle from first order everywhere.
C_0 = np.zeros(3) # not in calibration file. Use uvotcal/zemax to get.
C_3 = np.zeros(3)
Cmin1 = np.zeros(3)
msg += "field coordinates:\n"
msg += "FIELD=(%9.4f,%9.4f)\n" % (Xphi,Yphi)
# order distance between anchors
dist12 = np.sqrt( (anker[0]-anker2[0])**2 + (anker[1]-anker2[1])**2 )
msg += "order distance 1st-2nd anchors :\n"
msg += "DIST12=%7.1f\n" % (dist12)
Yout.update({"anker":anker,"anker2":anker2,"C_1":C_1,"C_2":C_2,"theta":angle,"dist12":dist12})
# determine x,y locations of certain wavelengths on the image
# TBD: add curvature
if wheelpos < 500:
wavpnt = np.arange(1700,6800,slit_width)
else:
wavpnt = np.arange(2500,6600,slit_width)
dispnt=pixdisFromWave(C_1,wavpnt) # pixel distance to anchor
if chatter > 0: msg2 += 'first order angle at anchor point: = %7.1f\n'%(angle)
crpix = crpix1,crpix2 = hdr['crpix1'],hdr['crpix2']
crpix = np.array(crpix) # centre of image
ankerimg = anker - np.array([1100.5,1100.5])+crpix
xpnt = ankerimg[0] + dispnt*np.cos((180-angle)*np.pi/180)
ypnt = ankerimg[1] + dispnt*np.sin((180-angle)*np.pi/180)
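        # Note: 'anker' is in DET coordinates with the detector centre at (1100.5,1100.5);
        # subtracting that offset and adding CRPIX converts the anchor to image pixel
        # coordinates, after which the wavelength markers (xpnt,ypnt) are laid out
        # along the dispersion direction using the first order angle.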
msg += "1st order anchor on image at (%7.1f,%7.1f)\n"%(ankerimg[0],ankerimg[1])
if chatter > 4: msg += "Found anchor point; now extracting spectrum.\n"
if chatter > 2: print("==========Found anchor point; now extracting spectrum ========")
if type(offsetlimit) == typeNone:
if wheelpos > 300:
offsetlimit = 9
sys.stdout.write("automatically set the value for the offsetlimit = "+str(offsetlimit)+'\n')
# find position zeroth order on detector from WCS-S after update from uvotwcs
#if 'hdr' not in Yout:
# hdr = pyfits.getheader(specfile,int(ext))
# Yout.update({'hdr':hdr})
zero_xy_imgpos = [-1,-1]
if chatter > 1: print("zeroth order position on image...")
try:
wS =wcs.WCS(header=hdr,key='S',relax=True,)
zero_xy_imgpos = wS.wcs_world2pix([[RA,DEC]],0)
print("position not corrected for SIP = ", zero_xy_imgpos[0][0],zero_xy_imgpos[0][1])
zero_xy_imgpos = wS.sip_pix2foc(zero_xy_imgpos, 0)[0]
if chatter > 1:
"print zeroth order position on image:",zero_xy_imgpos
except:
pass
Yout.update({'zeroxy_imgpos':zero_xy_imgpos})
# provide some checks on background inputs:
if background_lower[0] != None:
background_lower = np.abs(background_lower)
if np.sum(background_lower) >= (slit_width-10):
background_lower = [None,None]
msg += "WARNING: background_lower set too close to edge image\n Using default\n"
if background_upper[0] != None:
background_upper = np.abs(background_upper)
if np.sum(background_upper) >= (slit_width-10):
background_upper = [None,None]
msg += "WARNING: background_upper set too close to edge image\n Using default\n"
# in case of summary file:
if (not skip_field_src) & (ZOpos == None):
if chatter > 2: print("DEBUG 802 ================== locate zeroth orders due to field sources =============")
if wheelpos > 500: zeroth_blim_offset = 2.5
try:
ZOpos = find_zeroth_orders(filestub, ext, wheelpos,indir=indir,
set_maglimit=set_maglimit,clobber="yes", chatter=chatter, )
except:
if type(sumimage) == typeNone:
print ("exception to call find_zeroth_orders : skip_field_src = ",skip_field_src)
pass
# use for the ftools the downloaded usnob1 catalog in file "search.ub1" using the
# catspec parameter in the calls
if os.access('catalog.spec',os.F_OK) & (catspec == None):
catspec= 'catalog.spec'
if (not skip_field_src):
Xim,Yim,Xa,Yb,Thet,b2mag,matched,ondetector = ZOpos
pivot_ori=np.array([(ankerimg)[0],(ankerimg)[1]])
Y_ZOpos={"Xim":Xim,"Yim":Yim,"Xa":Xa,"Yb":Yb,"Thet":Thet,"b2mag":b2mag,
"matched":matched,"ondetector":ondetector}
Yout.update({"ZOpos":Y_ZOpos})
else:
Yout.update({"ZOpos":None})
# find background, extract straight slit spectrum
if chatter > 3 : print ("DEBUG 827 compute background")
if sumimage != None:
# initialize parameters for extraction summed extracted image
print('reading summed image file : '+sumimage)
print('ext label for output file is set to : ', ext)
Y6 = sum_Extimage (None, sum_file_name=sumimage, mode='read')
extimg, expmap, exposure, wheelpos, C_1, C_2, dist12, anker, \
(coef0, coef1,coef2,coef3,sig0coef,sig1coef,sig2coef,sig3coef), hdr = Y6
if background_template != None:
background_template = {'extimg': background_template,
'sumimg': True}
if (background_template['extimg'].size != extimg.size):
print("ERROR")
print("background_template.size=",background_template['extimg'].size)
print("extimg.size=",extimg.size)
raise IOError("The template does not match the sumimage dimensions")
msg += "order distance 1st-2nd anchors :\n"
msg += "DIST12=%7.1f\n" % (dist12)
for k in range(len(C_1)):
msg += "DISP1_"+str(k)+"=%12.4e\n" % (C_1[k])
msg += "second order dispersion polynomial (distance anchor2,\n"
msg += " highest term first)\n"
for k in range(len(C_2)):
msg += "DISP2_"+str(k)+"=%12.4e\n" % (C_2[k])
print("first order anchor = ",anker)
print("first order dispersion = %s"%(str(C_1)))
print("second order dispersion = %s"%(str(C_2)))
tstart = hdr['tstart']
ank_c = [100,500,0,2000]
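        # ank_c is taken here to hold [anchor y, anchor x, first usable column,
        # last usable column] in the extracted-image frame; the values above are
        # the assumed defaults for a summed image, and the y value may be
        # adjusted by offsetlimit below.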
if type(offsetlimit) == typeNone:
offset = 0
elif type(offsetlimit) == list:
offset = offsetlimit[0]-96
ank_c[0] = offsetlimit[0]
else:
offset = offsetlimit # for sumimage used offsetlimit to set the offset
ank_c[0] = 96+offsetlimit
dis = np.arange(-500,1500)
img = extimg
# get background
bg, bg1, bg2, bgsig, bgimg, bg_limits_used, bgextra = findBackground(extimg,
background_lower=background_lower,
background_upper=background_upper,)
if singleside_bkg == 'bg1':
bg2 = bg1
elif singleside_bkg == 'bg2':
bg1 = bg2
else:
pass
skip_field_src = True
spnet = bg1 # placeholder
expo = exposure
maxcounts = exposure/0.01
anker2 = anker + [dist12,0]
spimg,spnetimg,anker_field = None, None, (0.,0.)
m1,m2,aa,wav1 = None,None,None,None
if type(outfile) == typeNone:
outfile='sum_image_'
Yfit.update({"coef0":coef0,"coef1":coef1,"coef2":coef2,"coef3":coef3,
"sig0coef":sig0coef,"sig1coef":sig1coef,"sig2coef":sig2coef,"sig3coef":sig3coef} )
Yout.update({"anker":anker,"anker2":None,
"C_1":C_1,"C_2":C_2,
"Xphi":0.0,"Yphi":0.0,
"wheelpos":wheelpos,"dist12":dist12,
"hdr":hdr,"offset":offset})
Yout.update({"background_1":bg1,"background_2":bg2})
dropout_mask = None
Yout.update({"zeroxy_imgpos":[1000,1000]})
else:
# default extraction
if chatter > 2 : print ("DEBUG 894 default extraction")
# start with a quick straight slit extraction
exSpIm = extractSpecImg(specfile,ext,ankerimg,angle,spwid=spextwidth,
background_lower=background_lower, background_upper=background_upper,
template = background_template, x_offset = anchor_x_offset, ank_c_0offset=ank_c_0offset,
offsetlimit=offsetlimit, replace=replace, chatter=chatter, singleside_bkg=singleside_bkg)
dis = exSpIm['dis']
spnet = exSpIm['spnet']
bg = exSpIm['bg']
bg1 = exSpIm['bg1']
bg2 = exSpIm['bg2']
bgsig = exSpIm['bgsigma']
bgimg = exSpIm['bgimg']
bg_limits_used = exSpIm['bg_limits_used']
bgextra = exSpIm['bgextras']
extimg = exSpIm['extimg']
spimg = exSpIm['spimg']
spnetimg = exSpIm['spnetimg']
offset = exSpIm['offset']
ank_c = exSpIm['ank_c']
if background_template != None:
background_template ={"extimg":exSpIm["template_extimg"]}
Yout.update({"template":exSpIm["template_extimg"]})
if exSpIm['dropouts']:
dropout_mask = exSpIm['dropout_mask']
else: dropout_mask = None
Yout.update({"background_1":bg1,"background_2":bg2})
#msg += "1st order anchor offset from spectrum = %7.1f\n"%(offset)
#msg += "anchor position in rotated extracted spectrum (%6.1f,%6.1f)\n"%(ank_c[1],ank_c[0])
calibdat = None # free the memory
if chatter > 2: print("============ straight slit extraction complete =================")
if np.max(spnet) < maxcounts: maxcounts = 2.0*np.max(spnet)
# initial limits spectrum (pixels)
m1 = ank_c[1]-400
if wheelpos > 500: m1 = ank_c[1]-370
if m1 < 0: m1 = 0
if m1 < (ank_c[2]+30): m1 = ank_c[2]+30
m2 = ank_c[1]+2000
if wheelpos > 500: m2 = ank_c[1]+1000
if m2 >= len(dis): m2 = len(dis)-2
if m2 > (ank_c[3]-40): m2=(ank_c[3]-40)
aa = list(range(int(m1),int(m2)))
wav1 = polyval(C_1,dis[aa])
# get grism det image
img = pyfits.getdata(specfile, ext)
if isinstance(replace,np.ndarray):
img = replace
try:
offset = np.asscalar(offset)
except:
pass
Yout.update({"offset":offset})
Zbg = bg, bg1, bg2, bgsig, bgimg, bg_limits_used, bgextra
net = extimg-bgextra[-1]
var = extimg.copy()
dims = np.asarray( img.shape )
dims = np.array([dims[1],dims[0]])
dims2 = np.asarray(extimg.shape)
dims2 = np.array([dims2[1],dims2[0]])
msg += "Lower background from y = %i pix\nLower background to y = %i pix\n" % (bg_limits_used[0],bg_limits_used[1])
msg += "Upper background from y = %i pix\nUpper background to y = %i pix\n" % (bg_limits_used[2],bg_limits_used[3])
msg += "TRACKWID =%4.1f\n" % (trackwidth)
# collect some results:
if sumimage == None:
Y0 = (specfile, lfilt1_, lfilt1_ext_, lfilt2_, lfilt2_ext_, attfile), (method), \
(Xphi, Yphi, date1), (dist12, ankerimg, ZOpos), expmap, bgimg, bg_limits_used, bgextra
else:
Y0 = None, None, None, (dist12, None, None), expmap, bgimg, bg_limits_used, bgextra
angle = 0.0
# curvature from input (TBD how - placeholder with raw_input)
# choose input coef or pick from plot
# choose order to do it for
if (get_curve & interactive) | (get_curve & (get_curve_filename != None)):
if chatter > 3 : print ("DEBUG 978 get user-provided curve coefficients and extract spectrum")
spextwidth = None
# grab coefficients
poly_1 = None
poly_2 = None
poly_3 = None
if get_curve_filename == None:
try:
poly_1 = eval(input("give coefficients of first order polynomial array( [X^3,X^2,X,C] )"))
poly_2 = eval(input("give coefficients of second order polynomial array( [X^2,X,C] )"))
poly_3 = eval(input("give coefficients of third order polynomial array( [X,C] )"))
except:
print("failed")
if (type(poly_1) != list) | (type(poly_2) != list) | (type(poly_3) != list):
print("poly_1 type = ",type(poly_1))
print("poly_2 type = ",type(poly_2))
print("poly_3 type = ",type(poly_3))
raise IOError("the coefficients must be a list")
poly_1 = np.asarray(poly_1)
poly_2 = np.asarray(poly_2)
poly_3 = np.asarray(poly_3)
else:
try:
curfile = rdList(get_curve_filename)
poly_1 = np.array(curfile[0][0].split(','),dtype=float)
poly_2 = np.array(curfile[1][0].split(','),dtype=float)
poly_3 = np.array(curfile[2][0].split(','),dtype=float)
except:
print("There seems to be a problem when readin the coefficients out of the file")
print("The format is a list of coefficient separated by comma's, highest order first")
print("The first line for the first order")
print("The second line for the secons order")
print("The third line for the third order")
print("like, \n1.233e-10,-7.1e-7,3.01e-3,0.0.\n1.233e-5,-2.3e-2,0.03.0\n1.7e-1,0.9\n")
print(get_curve_filename)
print(curfile)
print(poly_1)
print(poly_2)
print(poly_3)
raise IOError("ERROR whilst reading curvature polynomial from file\n")
print("Curvature coefficients were read in...\npoly_1: %s \npoly_2: %s \npoly_3: %s \n"%
(poly_1,poly_2,poly_3))
fitorder, cp2, (coef0,coef1,coef2,coef3), (bg_zeroth,bg_first,\
bg_second,bg_third), (borderup,borderdown), apercorr, expospec, msg, curved \
= curved_extraction(
extimg, ank_c, anker, wheelpos,
ZOpos=ZOpos, skip_field_sources=skip_field_src,
offsetlimit=offsetlimit,
predict_second_order=predict2nd,
background_template=background_template,
angle=angle, offset=offset,
poly_1=poly_1, poly_2=poly_2, poly_3=poly_3,
msg=msg, curved=curved,
outfull=True, expmap=expmap,
fit_second=fit_second,
fit_third=fit_second,
C_1=C_1,C_2=C_2,dist12=dist12,
dropout_mask=dropout_mask, ifmotion=ifmotion,
obsid=obsid,indir=indir,motion_file=motion_file,
ank_c_0offset=ank_c_0offset,
chatter=chatter,ifextended=ifextended,
fixwidth=fixwidth)
# fit_sigmas parameter needs passing
(present0,present1,present2,present3),(q0,q1,q2,q3), (
y0,dlim0L,dlim0U,sig0coef,sp_zeroth,co_zeroth),(
y1,dlim1L,dlim1U,sig1coef,sp_first,co_first),(
y2,dlim2L,dlim2U,sig2coef,sp_second,co_second),(
y3,dlim3L,dlim3U,sig3coef,sp_third,co_third),(
x,xstart,xend,sp_all,quality,co_back) = fitorder
# update the anchor y-coordinate
if chatter > 3 : print ("DEBUG 1048 update anchor coordinate\noriginal ank_c=%s\ny1=%s"%(ank_c,y1))
        ank_c[0] = y1[int(ank_c[1])]
Yfit.update({"coef0":coef0,"coef1":coef1,"coef2":coef2,"coef3":coef3,
"bg_zeroth":bg_zeroth,"bg_first":bg_first,"bg_second":bg_second,"bg_third":bg_third,
"borderup":borderup,"borderdown":borderdown,
"sig0coef":sig0coef,"sig1coef":sig1coef,"sig2coef":sig2coef,"sig3coef":sig3coef,
"present0":present0,"present1":present1,"present2":present2,"present3":present3,
"q0":q0,"q1":q1,"q2":q2,"q3":q3,
"y0":y0,"dlim0L":dlim0L,"dlim0U":dlim0U,"sp_zeroth":sp_zeroth,"bg_zeroth":bg_zeroth,"co_zeroth":co_zeroth,
"y1":y1,"dlim1L":dlim1L,"dlim1U":dlim1U,"sp_first": sp_first, "bg_first": bg_first, "co_first": co_first,
"y2":y2,"dlim2L":dlim2L,"dlim2U":dlim2U,"sp_second":sp_second,"bg_second":bg_second,"co_second":co_second,
"y3":y3,"dlim3L":dlim3L,"dlim3U":dlim3U,"sp_third": sp_third, "bg_third": bg_third, "co_third":co_third,
"x":x,"xstart":xstart,"xend":xend,"sp_all":sp_all,"quality":quality,"co_back":co_back,
"apercorr":apercorr,"expospec":expospec})
Yout.update({"ank_c":ank_c,"extimg":extimg,"expmap":expmap})
# curvature from calibration
if spextwidth != None:
if chatter > 3 : print ("DEBUG 1067 get curve coefficients from cal file and extract spectrum ")
fitorder, cp2, (coef0,coef1,coef2,coef3), (bg_zeroth,bg_first,\
bg_second,bg_third), (borderup,borderdown) , apercorr, expospec, msg, curved \
= curved_extraction(
extimg,ank_c,anker, wheelpos,
ZOpos=ZOpos, skip_field_sources=skip_field_src,
offsetlimit=offsetlimit,
background_lower=background_lower,
background_upper=background_upper, \
background_template=background_template,\
angle=angle, offset=offset,
outfull=True, expmap=expmap,
msg = msg, curved=curved,
fit_second=fit_second,
fit_third=fit_second, C_1=C_1,C_2=C_2,dist12=dist12,
dropout_mask=dropout_mask, ifmotion=ifmotion,
obsid=obsid,indir=indir,motion_file=motion_file,
ank_c_0offset=ank_c_0offset,
chatter=chatter,ifextended=ifextended,
fixwidth=fixwidth)
(present0,present1,present2,present3),(q0,q1,q2,q3), \
(y0,dlim0L,dlim0U,sig0coef,sp_zeroth,co_zeroth),(
y1,dlim1L,dlim1U,sig1coef,sp_first,co_first),\
(y2,dlim2L,dlim2U,sig2coef,sp_second,co_second),(
y3,dlim3L,dlim3U,sig3coef,sp_third,co_third),\
(x,xstart,xend,sp_all,quality,co_back) = fitorder
Yfit.update({"coef0":coef0,"coef1":coef1,"coef2":coef2,"coef3":coef3,
"bg_zeroth":bg_zeroth,"bg_first":bg_first,"bg_second":bg_second,"bg_third":bg_third,
"borderup":borderup,"borderdown":borderdown,
"sig0coef":sig0coef,"sig1coef":sig1coef,"sig2coef":sig2coef,"sig3coef":sig3coef,
"present0":present0,"present1":present1,"present2":present2,"present3":present3,
"q0":q0,"q1":q1,"q2":q2,"q3":q3,
"y0":y0,"dlim0L":dlim0L,"dlim0U":dlim0U,"sp_zeroth":sp_zeroth,"bg_zeroth":bg_zeroth,"co_zeroth":co_zeroth,
"y1":y1,"dlim1L":dlim1L,"dlim1U":dlim1U,"sp_first": sp_first, "bg_first": bg_first, "co_first": co_first,
"y2":y2,"dlim2L":dlim2L,"dlim2U":dlim2U,"sp_second":sp_second,"bg_second":bg_second,"co_second":co_second,
"y3":y3,"dlim3L":dlim3L,"dlim3U":dlim3U,"sp_third": sp_third, "bg_third": bg_third, "co_third":co_third,
"x":x,"xstart":xstart,"xend":xend,"sp_all":sp_all,"quality":quality,"co_back":co_back,
"apercorr":apercorr,"expospec":expospec})
ank_c[0] = y1[int(ank_c[1])]
Yout.update({"ank_c":ank_c,"extimg":extimg,"expmap":expmap})
msg += "orders present:"
if present0: msg += "0th order, "
if present1: msg += "first order"
if present2: msg += ", second order"
if present3: msg += ", third order "
    if chatter > 2:
        # diagnostic output of the fitted first order curvature and anchor positions
        print('1224 CCCCCCCCCCCCC', coef1)
        print(RA,DEC)
        print(anker)
        print(ank_c)
msg += '\nparametrized order curvature:\n'
if present0:
for k in range(len(coef0)):
msg += "COEF0_"+str(k)+"=%12.4e\n" % (coef0[k])
if present1:
for k in range(len(coef1)):
msg += "COEF1_"+str(k)+"=%12.4e\n" % (coef1[k])
if present2:
for k in range(len(coef2)):
msg += "COEF2_"+str(k)+"=%12.4e\n" % (coef2[k])
if present3:
for k in range(len(coef3)):
msg += "COEF3_"+str(k)+"=%12.4e\n" % (coef3[k])
msg += '\nparametrized width slit:\n'
if present0:
for k in range(len(sig0coef)):
msg += "SIGCOEF0_"+str(k)+"=%12.4e\n" % (sig0coef[k])
if present1:
for k in range(len(sig1coef)):
msg += "SIGCOEF1_"+str(k)+"=%12.4e\n" % (sig1coef[k])
if present2:
for k in range(len(sig2coef)):
msg += "SIGCOEF2_"+str(k)+"=%12.4e\n" % (sig2coef[k])
if present3:
for k in range(len(sig3coef)):
msg += "SIGCOEF3_"+str(k)+"=%12.4e\n" % (sig3coef[k])
if chatter > 3 : print ("DEBUG 1142 done spectral extraction, now calibrate")
offset = ank_c[0]-slit_width/2
msg += "best fit 1st order anchor offset from spectrum = %7.1f\n"%(offset)
msg += "anchor position in rotated extracted spectrum (%6.1f,%6.1f)\n"%(ank_c[1],y1[int(ank_c[1])])
msg += msg4
Yout.update({"offset":offset})
#2012-02-20 moved updateFitorder to curved_extraction
#if curved == "update":
# fit = fitorder2
#else:
# fit = fitorder
fit = fitorder
if optimal_extraction:
        # development dropped, since mod8 causes slit width oscillations;
        # it also requires a good second order flux and coi calibration for
        # possible further development of order splitting.
        # The results are currently not consistent.
print("Starting optimal extraction: This can take a few minutes ......\n\t "\
"........\n\t\t .............")
Y3 = get_initspectrum(net,var,fit,160,ankerimg,C_1=C_1,C_2=C_2,dist12=dist12,
predict2nd=predict2nd,
chatter=1)
counts, variance, borderup, borderdown, (fractions,cnts,vars,newsigmas) = Y3
# need to test that C_2 is valid here
if predict2nd:
Y4 = predict_second_order(dis,(sp_first-bg_first), C_1,C_2, dist12, quality,dlim1L, dlim1U,wheelpos)
wav2p, dis2p, flux2p, qual2p, dist12p = Y4[0]
# retrieve the effective area
Y7 = readFluxCalFile(wheelpos,anchor=anker,spectralorder=1,arf=fluxcalfile,msg=msg,chatter=chatter)
EffArea1 = Y7[:-1]
msg = Y7[-1]
Y7 = readFluxCalFile(wheelpos,anchor=anker,spectralorder=2,arf=None,msg=msg,chatter=chatter)
if type(Y7) == tuple:
EffArea2 = Y7[:-1]
else:
if type(Y7) != typeNone: msg = Y7
EffArea2 = None
# note that the output differs depending on parameters given, i.e., arf, anchor
Yout.update({"effarea1":EffArea1,"effarea2":EffArea2})
if interactive:
import matplotlib.pyplot as plt
if (plot_img) & (sumimage == None):
#plt.winter()
# make plot of model on image [figure 1]
#xa = np.where( (dis < 1400) & (dis > -300) )
bga = bg.copy()
fig1 = plt.figure(1); plt.clf()
img[img <=0 ] = 1e-16
plt.imshow(np.log(img),vmin=np.log(bga.mean()*0.1),vmax=np.log(bga.mean()*4))
levs = np.array([5,15,30,60,120,360]) * bg.mean()
if highlight: plt.contour(img,levels=levs)
# plot yellow wavelength marker
# TBD : add curvature
plt.plot(xpnt,ypnt,'+k',markersize=14)
if not skip_field_src:
plot_ellipsoid_regions(Xim,Yim,
Xa,Yb,Thet,b2mag,matched,ondetector,
pivot_ori,pivot_ori,dims,17.,)
if zoom:
#plt.xlim(np.max(np.array([0.,0.])),np.min(np.array([hdr['NAXIS1'],ankerimg[0]+400])))
#plt.ylim(np.max(np.array([0.,ankerimg[1]-400 ])), hdr['NAXIS2'])
plt.xlim(0,2000)
plt.ylim(0,2000)
else:
plt.xlim(0,2000)
plt.ylim(0,2000)
plt.savefig(indir+'/'+obsid+'_map.png',dpi=150)
#plt.show()
plt.close()
if (plot_raw):
#plt.winter()
nsubplots = 2
#if not fit_second: nsubplots=3
# make plot of spectrum [figure 2]
fig2 = plt.figure(2); plt.clf()
plt.subplots_adjust(top=1,hspace=0, wspace=0)
# image slice
ax21 = plt.subplot(nsubplots,1,1)
ac = -ank_c[1]
net[net<=0.] = 1e-16
#plt.imshow(np.log10(net),vmin=-0.8,vmax=0.8, #~FIXME:
# extent=(ac,ac+extimg.shape[1],0,extimg.shape[0]),
# origin='lower',cmap=plt.cm.winter)
plt.imshow(np.log10(net),vmin=-10,vmax=2,
extent=(ac,ac+extimg.shape[1],0,extimg.shape[0]),
origin='lower')#,cmap=plt.cm.winter)
#plt.imshow(extimg,vmin=0,vmax=50,
# extent=(ac,ac+extimg.shape[1],0,extimg.shape[0]),
# origin='lower')#,cmap=plt.cm.winter)
if highlight:
plt.contour(np.log10(net),levels=[1,1.3,1.7,2.0,3.0],
extent=(ac,ac+extimg.shape[1],0,extimg.shape[0]),
origin='lower')
#plt.imshow( extimg,vmin= (bg1.mean())*0.1,vmax= (bg1.mean()+bg1.std())*2, extent=(ac,ac+extimg.shape[1],0,extimg.shape[0]) )
#levels = np.array([5,10,20,40,70,90.])
#levels = spnet[ank_c[2]:ank_c[3]].max() * levels * 0.01
#if highlight: plt.contour(net,levels=levels,extent=(ac,ac+extimg.shape[1],0,extimg.shape[0]))
# cross_section_plot:
cp2 = cp2/np.max(cp2)*100
#plt.plot(ac+cp2+ank_c[1],np.arange(len(cp2)),'k',lw=2,alpha=0.6,ds='steps') #~TODO:
# plot zeroth orders
if not skip_field_src:
pivot= np.array([ank_c[1],ank_c[0]-offset])
#pivot_ori=ankerimg
mlim = 17.
if wheelpos > 500: mlim = 15.5
plot_ellipsoid_regions(Xim,Yim,Xa,Yb,Thet,b2mag,
matched,ondetector,
pivot,pivot_ori,
dims2,mlim,
img_angle=angle-180.0,ax=ax21)
# plot line on anchor location
#plt.plot([ac+ank_c[1],ac+ank_c[1]],[0,slit_width],'k',lw=2)
            plt.plot(0,ank_c[0],'kx',markersize=5) #~TODO:
# plot position centre of orders
#if present0: plt.plot(ac+q0[0],y0[q0[0]],'k--',lw=1.2)
#plt.plot( ac+q1[0],y1[q1[0]],'k--',lw=1.2)
#if present2: plt.plot(ac+q2[0],y2[q2[0]],'k--',alpha=0.6,lw=1.2)
#if present3: plt.plot(ac+q3[0],y3[q3[0]],'k--',alpha=0.3,lw=1.2)
# plot borders slit region
if present0:
plt.plot(ac+q0[0],borderup [0,q0[0]],'r-')
plt.plot(ac+q0[0],borderdown[0,q0[0]],'r-')
if present1:
plt.plot(ac+q1[0],borderup [1,q1[0]],'r-',lw=1.2)
plt.plot(ac+q1[0],borderdown[1,q1[0]],'r-',lw=1.2)
if present2:
plt.plot(ac+q2[0],borderup [2,q2[0]],'r-',alpha=0.6,lw=1)
plt.plot(ac+q2[0],borderdown[2,q2[0]],'r-',alpha=0.6,lw=1)
if present3:
plt.plot(ac+q3[0],borderup [3,q3[0]],'r-',alpha=0.3,lw=1.2)
plt.plot(ac+q3[0],borderdown[3,q3[0]],'r-',alpha=0.3,lw=1.2)
# plot limits background
plt_bg = np.ones(len(q1[0]))
if (background_lower[0] == None) & (background_upper[0] == None):
background_lower = [0,50] ; background_upper = [slit_width-50,slit_width]
plt.plot(ac+q1[0],plt_bg*(background_lower[1]),'-k',lw=1.5 )
plt.plot(ac+q1[0],plt_bg*(background_upper[0]),'-k',lw=1.5 )
else:
if background_lower[0] != None:
plt.plot(ac+q1[0],plt_bg*(y1[int(ank_c[1])]-background_lower[0]),'-k',lw=1.5 )
plt.plot(ac+q1[0],plt_bg*(y1[int(ank_c[1])]-background_lower[1]),'-k',lw=1.5 )
elif background_lower[1] != None:
plt.plot(ac+q1[0],plt_bg*(background_lower[1]),'-k',lw=1.5 )
if background_upper[1] != None:
plt.plot(ac+q1[0],plt_bg*(y1[int(ank_c[1])]+background_upper[0]),'-k',lw=1.5 )
plt.plot(ac+q1[0],plt_bg*(y1[int(ank_c[1])]+background_upper[1]),'-k',lw=1.5 )
elif background_upper[0] != None:
plt.plot(ac+q1[0],plt_bg*(background_upper[0]),'-k',lw=1.5 )
# rescale, title
plt.ylim(0,slit_width)
#plt.ylim(50,150)
if not zoom:
xlim1 = ac+ank_c[2]
xlim2 = ac+ank_c[3]
else:
xlim1 = max(ac+ank_c[2], -420)
xlim2 = min(ac+ank_c[3],1400)
plt.xlim(xlim1,xlim2)
plt.title(obsid+'+'+str(ext))
# first order raw data plot
ax22 = plt.subplot(nsubplots,1,2)
plt.rcParams['legend.fontsize'] = 'small'
if curved == 'straight':
p1, = plt.plot( dis[ank_c[2]:ank_c[3]], spnet[ank_c[2]:ank_c[3]],'k',
ds='steps',lw=0.5,alpha=0.5,label='straight')
p2, = plt.plot( dis[ank_c[2]:ank_c[3]],
spextwidth*(bg1[ank_c[2]:ank_c[3]]+bg2[ank_c[2]:ank_c[3]])*0.5,
'b',alpha=0.5,label='background')
plt.legend([p1,p2],['straight','background'],loc=0,)
if curved != "straight":
p3, = plt.plot(x[q1[0]],(sp_first-bg_first)[q1[0]],'r',ds='steps',label='spectrum')
plt.plot(x[q1[0]],(sp_first-bg_first)[q1[0]],'k',alpha=0.2,ds='steps',label='_nolegend_')
p7, = plt.plot(x[q1[0]], bg_first[q1[0]],'y',alpha=0.5,lw=1.1,ds='steps',label='background')
# bad pixels:
qbad = np.where(quality[q1[0]] > 0)
p4, = plt.plot(x[qbad],(sp_first-bg_first)[qbad],'xk',markersize=4)
#p7, = plt.plot(x[q1[0]],(bg_first)[q1[0]],'r-',alpha=0.3,label='curve_bkg')
# annotation
#plt.legend([p3,p4,p7],['spectrum','suspect','background'],loc=0,)
plt.legend([p3,p7],['spectrum','background'],loc=0,)
maxbg = np.max(bg_first[q1[0]][np.isfinite(bg_first[q1[0]])])
topcnt = 1.2 * np.max([np.max(spnet[q1[0]]),maxbg, np.max((sp_first-bg_first)[q1[0]])])
plt.ylim(np.max([ -20, np.min((sp_first-bg_first)[q1[0]])]), np.min([topcnt, maxcounts]))
if optimal_extraction:
p5, = plt.plot(x[q1[0]],counts[1,q1[0]],'g',alpha=0.5,ds='steps',lw=1.2,label='optimal' )
p6, = plt.plot(x[q1[0]],counts[1,q1[0]],'k',alpha=0.5,ds='steps',lw=1.2,label='_nolegend_' )
p7, = plt.plot(x[q1[0]], bg_first[q1[0]],'y',alpha=0.7,lw=1.1,ds='steps',label='background')
plt.legend([p3,p5,p7],['spectrum','optimal','background'],loc=0,)
topcnt = 1.2 * np.max((sp_first-bg_first)[q1[0]])
ylim1,ylim2 = -10, np.min([topcnt, maxcounts])
plt.ylim( ylim1, ylim2 )
#plt.xlim(ank_c[2]-ank_c[1],ank_c[3]-ank_c[1])
plt.xlim(xlim1,xlim2)
plt.ylabel('1st order counts')
'''
# plot second order
ax23 = plt.subplot(nsubplots,1,3)
plt.rcParams['legend.fontsize'] = 'small'
#plt.xlim(ank_c[2],ank_c[3])
if fit_second:
if curved != 'straight':
p1, = plt.plot(x[q2[0]],(sp_second-bg_second)[q2[0]],'r',label='spectrum')
plt.plot(x[q2[0]],(sp_second-bg_second)[q2[0]],'k',alpha=0.2,label='_nolegend_')
p7, = plt.plot(x[q2[0]],(bg_second)[q2[0]],'y',alpha=0.7,lw=1.1,label='background')
qbad = np.where(quality[q2[0]] > 0)
p2, = plt.plot(x[qbad],(sp_second-bg_second)[qbad],'+k',alpha=0.3,label='suspect')
plt.legend((p1,p7,p2),('spectrum','background','suspect'),loc=2)
plt.ylim(np.max([ -100, np.min((sp_second-bg_second)[q2[0]])]), \
np.min([np.max((sp_second-bg_second)[q2[0]]), maxcounts]))
plt.xlim(ank_c[2]-ank_c[1],ank_c[3]-ank_c[1])
if optimal_extraction:
p3, = plt.plot(x[q2[0]],counts[2,q2[0]],'g',alpha=0.5,ds='steps',label='optimal' )
plt.legend((p1,p7,p2,p3),('spectrum','background','suspect','optimal',),loc=2)
#plt.ylim(np.max([ -10,np.min(counts[2,q2[0]]), np.min((sp_second-bg_second)[q2[0]])]),\
# np.min([np.max(counts[2,q2[0]]), np.max((sp_second-bg_second)[q2[0]]), maxcounts]))
plt.ylim( ylim1,ylim2 )
if predict2nd :
p4, = plt.plot(dis2p+dist12,flux2p, ds='steps',label='predicted')
p5, = plt.plot(dis2p[np.where(qual2p != 0)]+dist12,flux2p[np.where(qual2p != 0)],'+k',label='suspect',markersize=4)
if optimal_extraction & fit_second:
plt.legend((p1,p2,p3,p4,p5),('curved','suspect','optimal','predicted','suspect'),loc=2)
#plt.ylim(np.max([ -100,np.min(counts[2,q2[0]]), np.min((sp_second-bg_second)[q2[0]])]),\
# np.min([np.max(counts[2,q2[0]]), np.max((sp_second-bg_second)[q2[0]]), maxcounts]))
plt.ylim( ylim1,ylim2 )
elif optimal_extraction:
plt.legend((p1,p7,p4,p5),('curved','background','predicted','suspect'),loc=2)
plt.ylim(np.max([ -10, np.min((sp_second-bg_second)[q2[0]])]), \
np.min([np.max((sp_second-bg_second)[q2[0]]), maxcounts]))
elif fit_second:
plt.legend((p1,p2,p4,p5),('curved','suspect','predicted','suspect'),loc=2)
plt.ylim(np.max([ -10, np.min((sp_second-bg_second)[q2[0]])]), \
np.min([np.max((sp_second-bg_second)[q2[0]]), maxcounts]))
else:
plt.legend((p4,p5),('predicted','suspect'),loc=2)
plt.ylim(np.max([ -10, np.min((sp_second-bg_second)[q2[0]])]), \
np.min([np.max((sp_second-bg_second)[q2[0]]), maxcounts]))
plt.xlim(ank_c[2]-ank_c[1],ank_c[3]-ank_c[1])
plt.xlim(xlim1,xlim2)
plt.ylabel('2nd order counts')
'''
'''
if fit_second:
ax24 = plt.subplot(nsubplots,1,4)
plt.rcParams['legend.fontsize'] = 'small'
if (len(q3[0]) > 1) & (curved != "xxx"):
p1, = plt.plot(x[q3[0]],(sp_third-bg_third)[q3[0]],'r',label='spectrum')
plt.plot(x[q3[0]],(sp_third-bg_third)[q3[0]],'k',alpha=0.2,label='_nolegend_')
qbad = np.where(quality[q3[0]] > 0)
p2, = plt.plot(x[qbad],(sp_third-bg_third)[qbad],'xk',alpha=0.3,label='suspect')
p3, = plt.plot(x[q3[0]],bg_third[q3[0]],'y',label='background')
plt.legend([p1,p3,p2],['spectrum','background','suspect'],loc=2)
plt.ylim(np.max([ -100, np.min((sp_second-bg_second)[q3[0]])]),\
np.min([np.max((sp_third-bg_third)[q3[0]]), maxcounts]))
if optimal_extraction:
p4, = plt.plot(x[q3[0]],counts[3,q3[0]],'b',alpha=0.5,ds='steps',label='optimal' )
plt.legend([p1,p3,p2,p4],['spectrum','background','suspect','optimal',],loc=2)
#plt.ylim(np.max([ -100,np.min(counts[3,q3[0]]), np.min((sp_second-bg_second)[q3[0]])]),\
# np.min([np.max(counts[3,q3[0]]), np.max((sp_third-bg_third)[q3[0]]), maxcounts]))
plt.ylim( ylim1,ylim2 )
#plt.xlim(ank_c[2]-ank_c[1],ank_c[3]-ank_c[1])
plt.xlim(xlim1,xlim2)
plt.ylabel(u'3rd order counts')
plt.xlabel(u'pixel distance from anchor position')
'''
plt.savefig(indir+'/'+obsid+'_count.png',dpi=150)
#plt.show()
if (plot_spec):
#plt.winter()
# NEED the flux cal applied!
nsubplots = 1
if not fit_second:
nsubplots = 1
fig3 = plt.figure(3)
plt.clf()
wav1 = polyval(C_1,x[q1[0]])
ax31 = plt.subplot(nsubplots,1,1)
if curved != "xxx":
# PSF aperture correction applies on net rate, but background
# needs to be corrected to default trackwidth linearly
rate1 = ((sp_first[q1[0]]-bg_first[q1[0]] ) * apercorr[1,[q1[0]]]
/expospec[1,[q1[0]]]).flatten()
bkgrate1 = ((bg_first)[q1[0]] * (2.5/trackwidth)
/expospec[1,[q1[0]]]).flatten()
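                # the factor (2.5/trackwidth) rescales the measured background rate to the
                # 2.5-sigma slit width that was used for the flux calibration (see the
                # trackwidth note in the getSpec docstring)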
print("computing flux for plot; frametime =",framtime)
flux1,wav1,coi_valid1 = rate2flux(wav1,rate1, wheelpos,
bkgrate=bkgrate1,
co_sprate = (co_first[q1[0]]/expospec[1,[q1[0]]]).flatten(),
co_bgrate = (co_back [q1[0]]/expospec[1,[q1[0]]]).flatten(),
pixno=x[q1[0]],
#sig1coef=sig1coef, sigma1_limits=[2.6,4.0],
arf1=fluxcalfile, arf2=None, effarea1=EffArea1,
spectralorder=1, swifttime=tstart,
#trackwidth = trackwidth,
anker=anker,
#option=1, fudgespec=1.32,
frametime=framtime,
debug=False,chatter=1)
#flux1_err = 0.5*(rate2flux(,,rate+err,,) - rate2flux(,,rate-err,,))
p1, = plt.plot(wav1[np.isfinite(flux1)],flux1[np.isfinite(flux1)],
color='darkred',label=u'curved')
p11, = plt.plot(wav1[np.isfinite(flux1)&(coi_valid1==False)],
flux1[np.isfinite(flux1)&(coi_valid1==False)],'.',
color='lawngreen',
label="too bright")
# PROBLEM quality flags !!!
qbad1 = np.where((quality[np.array(x[q1[0]],dtype=int)] > 0) & (quality[np.array(x[q1[0]],dtype=int)] < 16))
qbad2 = np.where((quality[np.array(x[q1[0]],dtype=int)] > 0) & (quality[np.array(x[q1[0]],dtype=int)] == qflag.get("bad")))
plt.legend([p1,p11],[u'calibrated spectrum',u'too bright - not calibrated'])
if len(qbad2[0]) > 0:
p2, = plt.plot(wav1[qbad2],flux1[qbad2],
'+k',markersize=4,label=u'bad data')
plt.legend([p1,p2],[u'curved',u'bad data'])
                plt.ylabel(u'1st order flux $(erg\ cm^{-2} s^{-1} \AA^{-1})$')
# find reasonable limits flux
get_flux_limit = flux1[int(len(wav1)*0.3):int(len(wav1)*0.7)]
get_flux_limit[get_flux_limit==np.inf] = np.nan
get_flux_limit[get_flux_limit==-np.inf]= np.nan
qf = np.nanmax(get_flux_limit)
if qf > 2e-12:
qf = 2e-12
plt.ylim(0.001*qf,1.2*qf)
plt.xlim(1600,6000)
if optimal_extraction: # no longer supported (2013-04-24)
print("OPTIMAL EXTRACTION IS NO LONGER SUPPORTED")
wav1 = np.polyval(C_1,x[q1[0]])
#flux1 = rate2flux(wav1, counts[1,q1[0]]/expo, wheelpos, spectralorder=1, arf1=fluxcalfile)
                flux1,wav1,coi_valid1 = rate2flux(wav1,counts[1,q1[0]]/expo, wheelpos, bkgrate=bkgrate1,
co_sprate = (co_first[q1[0]]/expospec[1,[q1[0]]]).flatten(),
co_bgrate = (co_back [q1[0]]/expospec[1,[q1[0]]]).flatten(),
pixno=x[q1[0]], #sig1coef=sig1coef, sigma1_limits=[2.6,4.0],
arf1=fluxcalfile, arf2=None, spectralorder=1, swifttime=tstart,
#trackwidth = trackwidth,
anker=anker, #option=1, fudgespec=1.32,
frametime=framtime,
debug=False,chatter=1)
p3, = plt.plot(wav1, flux1,'g',alpha=0.5,ds='steps',lw=2,label='optimal' )
p4, = plt.plot(wav1,flux1,'k',alpha=0.5,ds='steps',lw=2,label='_nolegend_' )
#plt.legend([p1,p2,p3],['curved','suspect','optimal'],loc=0,)
plt.legend([p1,p3],['curved','optimal'],loc=0,)
qf = (flux1 > 0.) & (flux1 < 1.0e-11)
plt.ylim( -0.01*np.max(flux1[qf]), 1.2*np.max(flux1[qf]) )
plt.ylabel(u'1st order count rate')
plt.xlim(np.min(wav1)-10,np.max(wav1))
plt.title(obsid+'+'+str(ext))
'''
if fit_second:
ax32 = plt.subplot(nsubplots,1,2)
plt.plot([1650,3200],[0,1])
plt.text(2000,0.4,'NO SECOND ORDER DATA',fontsize=16)
if curved != 'xxx':
wav2 = polyval(C_2,x[q2[0]]-dist12)
rate2 = ((sp_second[q2[0]]-bg_second[q2[0]])*
apercorr[2,[q2[0]]].flatten()/expospec[2,[q2[0]]].flatten() )
bkgrate2 = ((bg_second)[q2[0]] * (2.5/trackwidth)
/expospec[2,[q2[0]]]).flatten()
flux2,wav2,coi_valid2 = rate2flux(wav2, rate2, wheelpos,
bkgrate=bkgrate2,
co_sprate = (co_second[q2[0]]/expospec[2,[q2[0]]]).flatten(),
co_bgrate = (co_back [q2[0]]/expospec[2,[q2[0]]]).flatten(),
pixno=x[q2[0]],
arf1=fluxcalfile, arf2=None,
frametime=framtime, effarea2=EffArea2,
spectralorder=2,swifttime=tstart,
anker=anker2,
debug=False,chatter=1)
#flux1_err = rate2flux(wave,rate_err, wheelpos, spectralorder=1,)
plt.cla()
print('#############################')
print(wav2[100],flux2[100],wav2,flux2)
p1, = plt.plot(wav2,flux2,'r',label='curved')
plt.plot(wav2,flux2,'k',alpha=0.2,label='_nolegend_')
qbad1 = np.where((quality[np.array(x[q2[0]],dtype=int)] > 0) & (quality[np.array(x[q2[0]],dtype=int)] < 16))
p2, = plt.plot(wav2[qbad1],flux2[qbad1],'+k',markersize=4,label='suspect data')
plt.legend(['uncalibrated','suspect data'])
plt.ylabel(u'estimated 2nd order flux')
plt.xlim(1600,3200)
qf = (flux1 > 0.) & (flux1 < 1.0e-11)
if np.sum(qf[0]) > 0:
plt.ylim( -0.01*np.max(flux1[qf]), 1.2*np.max(flux1[qf]) )
#else: plt.ylim(1e-16,2e-12)
else: plt.ylim(1e-12,1e-11)
# final fix to limits of fig 3,1
y31a,y31b = ax31.get_ylim()
setylim = False
if y31a < 1e-16:
y31a = 1e-16
setylim = True
if y31b > 1e-12:
y31b = 1e-12
setylim = True
if setylim: ax31.set_ylim(bottom=y31a,top=y31b)
#
'''
plt.xlabel(u'$\lambda(\AA)$',fontsize=16)
plt.savefig(indir+'/'+obsid+'_flux.png',dpi=150)
# to plot the three figures
#plt.show()
# output parameter
Y1 = ( (dis,spnet,angle,anker,anker2,anker_field,ank_c), (bg,bg1,bg2,extimg,spimg,spnetimg,offset),
(C_1,C_2,img), hdr,m1,m2,aa,wav1 )
# output parameter
Y2 = fit, (coef0,coef1,coef2,coef3), (bg_zeroth,bg_first,
bg_second,bg_third), (borderup,borderdown), apercorr, expospec
Yout.update({"Yfit":Yfit})
# writing output to a file
#try:
if wr_outfile: # write output file
if ((chatter > 0) & (not clobber)): print("trying to write output files")
import uvotio
if (curved == 'straight') & (not optimal_extraction):
ank_c2 = np.copy(ank_c) ; ank_c2[1] -= m1
F = uvotio.wr_spec(RA,DEC,filestub,ext,
hdr,anker,anker_field[0],anker_field[1],
dis[aa],wav1,
spnet[aa]/expo,bg[aa]/expo,
bg1[aa]/expo,bg2[aa]/expo,
offset,ank_c2,extimg, C_1,
history=None,chatter=1,
clobber=clobber,
calibration_mode=calmode,
interactive=interactive)
elif not optimal_extraction:
if fileversion == 2:
Y = Yout
elif fileversion == 1:
Y = (Y0,Y1,Y2,Y4)
F = uvotio.writeSpectrum(RA,DEC,filestub,ext, Y,
fileoutstub=outfile,
arf1=fluxcalfile, arf2=None,
fit_second=fit_second,
write_rmffile=write_RMF, fileversion=1,
used_lenticular=use_lenticular_image,
history=msg,
calibration_mode=calmode,
chatter=chatter,
clobber=clobber )
elif optimal_extraction:
Y = (Y0,Y1,Y2,Y3,Y4)
F = uvotio.OldwriteSpectrum(RA,DEC,filestub,ext, Y, mode=2,
quality=quality, interactive=False,fileout=outfile,
updateRMF=write_rmffile, \
history=msg, chatter=5, clobber=clobber)
#except (RuntimeError, IOError, ValueError):
# print "ERROR writing output files. Try to call uvotio.wr_spec."
# pass
# clean up fake file
if tempntags.__contains__('fakefilestub'):
filestub = tempnames[tempntags.index('fakefilestub')]
os.system('rm '+indir+filestub+'ufk_??.img ')
# update Figure 3 to use the flux...
# TBD
# write the summary
sys.stdout.write(msg)
sys.stdout.write(msg2)
flog = open(logfile,'a')
flog.write(msg)
flog.write(msg2)
flog.close()
#plt.show()
if give_result: return Y0, Y1, Y2, Y3, Y4
if give_new_result: return Yout
def extractSpecImg(file,ext,anker,angle,anker0=None,anker2=None, anker3=None,\
searchwidth=35,spwid=13,offsetlimit=None, fixoffset=None,
background_lower=[None,None], background_upper=[None,None],
template=None, x_offset = False, ank_c_0offset=False, replace=None,
clobber=True,chatter=2,singleside_bkg=False):
'''
extract the grism image of spectral orders plus background
using the reference point at 2600A in first order.
Parameters
----------
file : str
input file location
ext : int
extension of image
anker : list, ndarray
X,Y coordinates of the 2600A (1) point on the image in image coordinates
angle : float
angle of the spectrum at 2600A in first order from zemax e.g., 28.8
searchwidth : float
find spectrum with this possible offset ( in crowded fields
it should be set to a smaller value)
template : dictionary
template for the background.
use_rectext : bool
If True then the HEADAS uvotimgrism program rectext is used to extract the image
This is a better way than using ndimage.rotate() which does some weird smoothing.
offsetlimit : None, float/int, list
if None, search for y-offset predicted anchor to spectrum using searchwidth
if float/int number, search for offset only up to a distance as given from y=100
if list, two elements, no more. [y-value, delta-y] for search of offset.
if delta-y < 1, fixoffset = y-value.
History
-------
2011-09-05 NPMK changed interpolation in rotate to linear, added a mask image to
make sure to keep track of the new pixel area.
2011-09-08 NPMK incorporated rectext as new extraction and removed interactive plot,
curved, and optimize which are now elsewhere.
2014-02-28 Add template for the background as an option
2014-08-04 add option to provide a 2-element list for the offsetlimit to constrain
the offset search range.
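Example
-------
A minimal usage sketch (the file name, anchor and offsetlimit values below are
hypothetical; in practice they come from the wavelength calibration):
out = extractSpecImg('sw00032000001ugu_dt.img', 1, [1100.5, 1020.2], 28.8,
offsetlimit=[100, 0.5])
extimg = out['extimg']                 # rotated image strip, dispersion along x
dis, spnet = out['dis'], out['spnet']  # pixel distance to anchor, net counts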
'''
import numpy as np
import os, sys
try:
from astropy.io import fits as pyfits
except:
import pyfits
import scipy.ndimage as ndimage
#out_of_img_val = -1.0123456789 now a global
Tmpl = (template != None)
if Tmpl:
if template['sumimg']:
raise IOError("extractSpecImg should not be called when there is sumimage input")
if chatter > 4:
print('extractSpecImg parameters: file, ext, anker, angle')
print(file,ext)
print(anker,angle)
print('searchwidth,chatter,spwid,offsetlimit, :')
print(searchwidth,chatter,spwid,offsetlimit)
img, hdr = pyfits.getdata(file,ext,header=True)
if isinstance(replace,np.ndarray):
img = replace
# wcs_ = wcs.WCS(header=hdr,) # detector coordinates DETX,DETY in mm
# wcsS = wcs.WCS(header=hdr,key='S',relax=True,) # TAN-SIP coordinate type
if Tmpl:
if (img.shape != template['template'].shape) :
print("ERROR")
print("img.shape=", img.shape)
print("background_template.shape=",template['template'].shape)
raise IOError("The template array does not match the image")
wheelpos = hdr['WHEELPOS']
if chatter > 4: print('wheelpos:', wheelpos)
if not use_rectext:
# now we want to extend the image array and place the anchor at the centre
s1 = 0.5*img.shape[0]
s2 = 0.5*img.shape[1]
d1 = -(s1 - anker[1]) # distance of anker to centre img
d2 = -(s2 - anker[0])
n1 = 2.*abs(d1) + img.shape[0] + 400 # extend img with 2.x the distance of anchor
n2 = 2.*abs(d2) + img.shape[1] + 400
#return img, hdr, s1, s2, d1, d2, n1, n2
if 2*int(n1/2) == int(n1): n1 = n1 + 1
if 2*int(n2/2) == int(n2): n2 = n2 + 1
c1 = n1 / 2 - anker[1]
c2 = n2 / 2 - anker[0]
n1 = int(n1)
n2 = int(n2)
c1 = int(c1)
c2 = int(c2)
if chatter > 3: print('array info : ',img.shape,d1,d2,n1,n2,c1,c2)
# the ankor is now centered in array a; initialize a with out_of_img_val
a = np.zeros( (n1,n2), dtype=float) + cval
if Tmpl : a_ = np.zeros( (n1,n2), dtype=float) + cval
# load array in middle
a[c1:c1+img.shape[0],c2:c2+img.shape[1]] = img
if Tmpl: a_[c1:c1+img.shape[0],c2:c2+img.shape[1]] = template['template']
# patch outer regions with something like mean to get rid of artifacts
mask = abs(a - cval) < 1.e-8
# Kludge:
# test image for bad data and make a fix by putting the image average in its place
dropouts = False
aanan = np.isnan(a) # process further for flagging
aagood = np.isfinite(a)
aaave = a[np.where(aagood)].mean()
a[np.where(aanan)] = aaave
if len( np.where(aanan)[0]) > 0 :
dropouts = True
print("extractSpecImg WARNING: BAD IMAGE DATA fixed by setting to mean of good data whole image ")
# now we want to rotate the array to have the dispersion in the x-direction
if angle < 40. :
theta = 180.0 - angle
else: theta = angle
if not use_rectext:
b = ndimage.rotate(a,theta,reshape = False,order = 1,mode = 'constant',cval = cval)
if Tmpl:
b_ = ndimage.rotate(a_,theta,reshape = False,order = 1,mode = 'constant',cval = cval)
if dropouts: #try to rotate the boolean image
aanan = ndimage.rotate(aanan,theta,reshape = False,order = 1,mode = 'constant',)
e2 = int(0.5*b.shape[0])
c = b[e2-int(slit_width/2):e2+int(slit_width/2),:]
if Tmpl: c_ = b_[e2-int(slit_width/2):e2+int(slit_width/2),:]
if dropouts: aanan = aanan[e2-int(slit_width/2):e2+int(slit_width/2),:]
ank_c = [ (c.shape[0]-1)/2+1, (c.shape[1]-1)/2+1 , 0, c.shape[1]] #~TODO:
if x_offset == False:
pass
else:
ank_c[1] += x_offset
if use_rectext:
# history: rectext is a fortran code that maintains proper density of quantity when
# performing a rotation.
# build the command for extracting the image with rectext
outfile= tempnames[tempntags.index('rectext')]
cosangle = np.cos(theta/180.*np.pi)
sinangle = np.sin(theta/180.*np.pi)
# distance anchor to pivot
dx_ank = - (hdr['naxis1']-anker[0])/cosangle + slit_width/2*sinangle #~FIXME: I am not sure if this is "+ 100.*sinangle" or "+ slit_width/2*sinangle"
if np.abs(dx_ank) > 760: dx_ank = 760 # include zeroth order (375 for just first order)
# distance to end spectrum
dx_2 = -anker[0] /cosangle + slit_width/2/sinangle # to lhs edge #~FIXME: I am not sure if this is "+ 100.*sinangle" or "+ slit_width/2*sinangle"
dy_2 = (hdr['naxis2']-anker[1])/sinangle - slit_width/2/cosangle # to top edge #~FIXME: I am not sure if this is "+ 100.*sinangle" or "+ slit_width/2*sinangle"
dx = int(dx_ank + np.array([dx_2,dy_2]).min() ) # length rotated spectrum
dy = slit_width # width rotated spectrum
# pivot x0,y0
x0 = anker[0] - dx_ank*cosangle + dy/2.*sinangle
y0 = anker[1] - dx_ank*sinangle - dy/2.*cosangle
command= "rectext infile="+file+"+"+str(ext)
command+=" outfile="+outfile
command+=" angle="+str(theta)+" width="+str(dx)
command+=" height="+str(dy)+" x0="+str(x0)+" y0="+str(y0)
command+=" null="+str(cval)
command+=" chatter=5 clobber=yes"
print(command)
os.system(command)
c = extimg = pyfits.getdata(outfile,0)
ank_c = np.array([int(slit_width/2),dx_ank,0,extimg.shape[1]])
# out_of_img_val = 0.
if clobber:
os.system("rm "+outfile)
if Tmpl:
raise RuntimeError("background_template cannot be used with use_rectext option")
# version 2016-01-16 revision:
# the background can be extracted via a method from the strip image
#
# extract the strips with the background on both sides, and the spectral orders
# find optimised place of the spectrum
# first find parts not off the detector -> 'qofd'
eps1 = 1e-15 # remainder after resampling for intel-MAC OSX system (could be jacked up)
qofd = np.where( abs(c[int(slit_width/2),:] - cval) > eps1 )
# define constants for the spectrum in each mode
if wheelpos < 300: # UV grism
disrange = 150 # perhaps make parameter in call?
disscale = 10 # ditto
minrange = disrange/10 # 300 is maximum
maxrange = np.array([disrange*disscale,c.shape[1]-ank_c[1]-2]).min() # 1200 is most of the spectrum
else: # V grism
disrange = 120 # perhaps make parameter in call?
disscale = 5 # ditto
minrange = np.array([disrange/2,ank_c[1]-qofd[0].min() ]).max() # 300 is maximum
maxrange = np.array([disrange*disscale,c.shape[1]-ank_c[1]-2],qofd[0].max()-ank_c[1]).min() # 600 is most of the spectrum
if chatter > 1:
#print 'image was rotated; anchor in extracted image is ', ank_c[:2]
#print 'limits spectrum are ',ank_c[2:]
print('finding location spectrum from a slice around anchor x-sized:',minrange,':',maxrange)
print('offsetlimit = ', offsetlimit)
d = (c[:,int(ank_c[1]-minrange):int(ank_c[1]+maxrange)]).sum(axis=1).squeeze()
if len(qofd[0]) > 0:
ank_c[2] = min(qofd[0])
ank_c[3] = max(qofd[0])
else:
ank_c[2] = -1
ank_c[3] = -1
# y-position of anchor spectrum in strip image (allowed y (= [50,150], but search only in
# range defined by searchwidth (default=35) )
y_default=int(slit_width/2) # reference y
if (type(offsetlimit) == list):
if (len(offsetlimit)==2):
# sane y_default
if (offsetlimit[0] > 50) & (offsetlimit[0] < 150):
y_default=int(offsetlimit[0]+0.5) # round to nearest pixel
else:
raise IOError(("parameter offsetlimit[0]=%i, must be in range [51,149]."+
"\nIs the aspect correction right (in reference images)?")%(offsetlimit[0]))
if offsetlimit[1] < 1:
fixoffset = offsetlimit[0]-int(slit_width/2)
else:
searchwidth=int(offsetlimit[1]+0.5)
if fixoffset == None:
offset = ( (np.where(d == (d[y_default-searchwidth:y_default+searchwidth]).max() ) )[0] - y_default )
if chatter>0: print('offset found from y=%i is %i '%(y_default ,-offset))
if len(offset) == 0:
print('offset problem: offset set to zero')
offset = 0
offset = offset[0]
if (type(offsetlimit) != list):
if (offsetlimit != None):
if abs(offset) >= offsetlimit:
offset = 0
print('This is larger than the offsetlimit. The offset has been set to 0')
if interactive:
offset = float(input('Please give a value for the offset: '))
else:
offset = fixoffset
if ank_c_0offset == True:
offset = 0
if chatter > 0:
print('offset used is : ', -offset)
if (type(offsetlimit) == list) & (fixoffset == None):
ank_c[0] = offsetlimit[0]-offset
else:
ank_c[0] += offset
print('image was rotated; anchor in extracted image is [', ank_c[0],',',ank_c[1],']')
print('limits spectrum on image in dispersion direction are ',ank_c[2],' - ',ank_c[3])
# Straight slit extraction (most basic extraction, no curvature):
sphalfwid = int(spwid-0.5)/2
splim1 = int(slit_width/2)+offset-sphalfwid+1
splim2 = splim1 + spwid
spimg = c[int(splim1):int(splim2),:]
if chatter > 0:
print('Extraction limits across dispersion: splim1,splim2 = ',splim1,' - ',splim2)
bg, bg1, bg2, bgsigma, bgimg, bg_limits, bgextras = findBackground(c,
background_lower=background_lower, background_upper=background_upper,yloc_spectrum=ank_c[0] )
if singleside_bkg == 'bg1':
bg2 = bg1
elif singleside_bkg == 'bg2':
bg1 = bg2
else:
pass
bgmean = bg
bg = 0.5*(bg1+bg2)
if chatter > 0: print('Background : %10.2f +/- %10.2f (1-sigma error)'%( bgmean,bgsigma))
# define the dispersion with origin at the projected position of the
# 2600 point in first order
dis = np.arange((c.shape[1]),dtype=np.int16) - ank_c[1]
# remove the background
#bgimg_ = 0.* spimg.copy()
#for i in range(bgimg_.shape[0]): bgimg_[i,:]=bg
spnetimg = spimg - bg
spnet = spnetimg.sum(axis=0)
result = {"dis":dis,"spnet":spnet,"bg":bg,"bg1":bg1,
"bg2":bg2,"bgsigma":bgsigma,"bgimg":bgimg,
"bg_limits_used":bg_limits,"bgextras":bgextras,
"extimg":c,"spimg":spimg,"spnetimg":spnetimg,
"offset":offset,"ank_c":ank_c,'dropouts':dropouts}
if dropouts: result.update({"dropout_mask":aanan})
if Tmpl: result.update({"template_extimg":c_})
return result
def sigclip1d_mask(array1d, sigma, badval=None, conv=1e-5, maxloop=30):
"""
Sigma-clip the array around its mean, using 'sigma' standard deviations,
after masking the given badval and requiring finite numbers; iterate
until converged or until maxloop is reached.
Returns a boolean mask of the good (unclipped) elements.
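Example (illustrative; note the clip is one-sided, rejecting only high values):
>>> import numpy as np
>>> y = np.concatenate([np.linspace(9., 11., 200), [60.]])
>>> good = sigclip1d_mask(y, 3.0)
>>> bool(good[-1]), int(good.sum())
(False, 200)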
"""
import numpy as np
y = np.asarray(array1d)
if badval != None:
valid = (np.abs(y - badval) > 1e-6) & np.isfinite(y)
else:
valid = np.isfinite(y)
yv = y[valid]
mask = yv < (yv.mean() + sigma * yv.std())
ym_ = yv.mean()
ymean = yv[mask].mean()
yv = yv[mask]
while (np.abs(ym_-ymean) > conv*np.abs(ymean)) & (maxloop > 0):
ym_ = ymean
mask = ( yv < (yv.mean() + sigma * yv.std()) )
yv = yv[mask]
ymean = yv.mean()
maxloop -= 1
valid[valid] = y[valid] < ymean + sigma*yv.std()
return valid
def background_profile(img, smo1=30, badval=None):
"""
Helper routine that determines the background of the rotated image
(spectrum in rows) using sigma clipping.
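Example: a minimal sketch, assuming `extimg` is the rotated strip produced by
extractSpecImg and `cval` is this module's out-of-image fill value:
u_xsum, u_x, u_std = background_profile(extimg, smo1=30, badval=cval)
where u_xsum are the clipped column means, u_x a smoothing spline through them,
and u_std their standard deviations.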
"""
import numpy as np
from scipy import interpolate
bgimg = img.copy()
nx = bgimg.shape[1] # number of points in direction of dispersion
ny = bgimg.shape[0] # width of the image
# look at the summed rows of the image
u_ysum = []
for i in range(ny):
u_ysum.append(bgimg[i,:].mean())
u_ysum = np.asarray(u_ysum)
u_ymask = sigclip1d_mask(u_ysum, 2.5, badval=badval, conv=1e-5, maxloop=30)
u_ymean = u_ysum[u_ymask].mean()
# look at the summed columns after filtering bad rows
u_yindex = np.where(u_ymask)[0]
u_xsum = []
u_std = []
for i in range(nx):
u_x1 = bgimg[u_yindex, i].squeeze()
# clip u_x1
u_x1mask = sigclip1d_mask(u_x1, 2.5, badval=None, conv=1e-5, maxloop=30)
u_xsum.append(u_x1[u_x1mask].mean())
u_std.append(u_x1[u_x1mask].std())
#print u_x1[u_x1mask]
#if np.isfinite(u_x1mask.mean()) & len(u_x1[u_x1mask])>0:
# print "%8.2f %8.2f %8.2f "%(u_x1[u_x1mask].mean(),u_x1[u_x1mask].std(),u_x1[u_x1mask].max())
# the best background estimate of the typical row is now u_xsum
# fit a smooth spline through the u_xsum values (or boxcar?)
#print "u_x means "
#print u_xsum
u_xsum = np.asarray(u_xsum)
u_std = np.asarray(u_std)
u_xsum_ok = np.isfinite(u_xsum)
bg_tcp = interpolate.splrep(np.arange(nx)[u_xsum_ok],
np.asarray(u_xsum)[u_xsum_ok], s=smo1)
# representative background profile in column
u_x = interpolate.splev(np.arange(nx), bg_tcp, )
return u_xsum, u_x, u_std
def findBackground(extimg,background_lower=[None,None], background_upper=[None,None],yloc_spectrum=int(slit_width/2),
smo1=None, smo2=None, chatter=2):
'''Extract the background from the image slice containing the spectrum.
Parameters
----------
extimg : 2D array
image containing spectrum. Dispersion approximately along x-axis.
background_lower : list
distance in pixels from `yloc_spectrum` of the limits of the lower background region.
background_upper : list
distance in pixels from `yloc_spectrum` of the limits of the upper background region.
yloc_spectrum : int
pixel `Y` location of spectrum
smo1 : float
smoothing parameter passed to smoothing spline fitting routine. `None` for default.
smo2 : float
smoothing parameter passed to smoothing spline fitting routine. `None` for default.
chatter : int
verbosity
Returns
-------
bg : float
mean background
bg1, bg2 : 1D arrays
bg1 = lower background; bg2 = upper background
inherits size from extimg.shape x-coordinate
bgsig : float
standard deviation of background
bgimg : 2D array
image of the background constructed from bg1 and/or bg2
bg_limits_used : list, length 4
limits used for the background in the following order: lower background, upper background
(bg1_good, bg1_dis, bg1_dis_good, bg2_good, bg2_dis, bg2_dis_good, bgimg_lin) : tuple
various other background measures
Notes
-----
**Global parameter**
- **background_method** : {'boxcar','splinefit'}
The two background images can be computed 2 ways:
1. 'splinefit': sigma clip image, then fit a smoothing spline to each
row, then average in y for each background region
2. 'boxcar': select the background from the smoothed image created
by method 1 below.
3. 'sigmaclip': do sigma clipping on rows and columns to get column
profile background, then clip image and mask, interpolate over masked
bits.
extimg is the image containing the spectrum in the 1-axis centered in 0-axis
`ank` is the position of the anchor in the image
I create two background images:
1. split the image strip into 40 portions in x, so that the background variation is small
compute the mean
sigma clip (3 sigma) each area to the local mean
replace out-of-image pixels with mean of whole image (2-sigma clipped)
smooth with a boxcar by the smoothing factor
2. compute the background in two regions upper and lower
linearly interpolate in Y between the two regions to create a background image
bg1 = lower background; bg2 = upper background
smo1, smo2 allow one to relax the smoothing factor in computing the smoothing spline fit
History
-------
- 8 Nov 2011 NPM Kuin complete overhaul
things to do: get quality flagging of bad background points, edges perhaps done here?
- 13 Aug 2012: possible problem was seen of very bright sources not getting masked out properly
and causing an error in the background that extends over a large distance due to the smoothing.
The cause is that the sources are more extended than can be handled by this method.
A solution would be to derive a global background
- 30 Sep 2014: background fails in visible grism e.g., 57977004+1 nearby bright spectrum
new method added (4x slower processing) to screen the image using sigma clipping
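Example
-------
A minimal sketch (assuming `extimg` is the rotated strip from extractSpecImg
and the spectrum runs along the row y=100):
bg, bg1, bg2, bgsig, bgimg, bg_lim, extras = findBackground(extimg,
background_lower=[None,None], background_upper=[None,None], yloc_spectrum=100)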
'''
import sys
import numpy as np
try:
from convolve import boxcar
except:
from stsci.convolve import boxcar
from scipy import interpolate
import stsci.imagestats as imagestats
# initialize parameters
bgimg = extimg.copy()
out = np.where( (np.abs(bgimg-cval) <= 1e-6) )
in_img = np.where( (np.abs(bgimg-cval) > 1e-6) & np.isfinite(bgimg) )
nx = bgimg.shape[1] # number of points in direction of dispersion
ny = bgimg.shape[0] # width of the image
# sigma screening of background taking advantage of the dispersion being
# basically along the x-axis
if _PROFILE_BACKGROUND_:
bg, u_x, bg_sig = background_profile(bgimg, smo1=30, badval=cval)
u_mask = np.zeros((ny,nx),dtype=bool)
for i in range(ny):
u_mask[i,(bgimg[i,:].flatten() < u_x) &
np.isfinite(bgimg[i,:].flatten())] = True
bkg_sc = np.zeros((ny,nx),dtype=float)
# the following leaves larger disps in the dispersion but less noise;
# tested but not implemented, as it is not as fast and the mean results
# are comparable:
#for i in range(ny):
# uf = interpolate.interp1d(np.where(u_mask[i,:])[0],bgimg[i,u_mask[i,:]],bounds_error=False,fill_value=cval)
# bkg_sc[i,:] = uf(np.arange(nx))
#for i in range(nx):
# ucol = bkg_sc[:,i]
# if len(ucol[ucol != cval]) > 0:
# ucol[ucol == cval] = ucol[ucol != cval].mean()
for i in range(nx):
ucol = bgimg[:,i]
if len(ucol[u_mask[:,i]]) > 0:
ucol[np.where(u_mask[:,i] == False)[0] ] = ucol[u_mask[:,i]].mean()
bkg_sc[:,i] = ucol
if background_method == 'sigmaclip':
return bkg_sc
else:
# continue now with the with screened image
bgimg = bkg_sc
kx0 = 0 ; kx1 = nx # default limits for valid lower background
kx2 = 0 ; kx3 = nx # default limits for valid upper background
ny4 = int(0.25*ny) # default width of each default background region
sig1 = 1 # unit for background offset, width
bg_limits_used = [0,0,0,0] # return values used
## in the next section I replace the > 2.5 sigma peaks with the mean
## after subdividing the image strip to allow for the
## change in background level which can be > 2 over the
## image. Off-image parts are set to image mean.
# this works most times in the absence of the sigma screening,but
# can lead to overestimates of the background.
# the call to the imagestats package is only done here, and should
# consider replacement. Its not critical for the program.
#
xlist = np.linspace(0,bgimg.shape[1],80)
xlist = np.asarray(xlist,dtype=int)
imgstats = imagestats.ImageStats(bgimg[in_img[0],in_img[1]],nclip=3)
bg = imgstats.mean
bgsig = imgstats.stddev
if chatter > 2:
sys.stderr.write( 'background statistics: mean=%10.2f, sigma=%10.2f '%
(imgstats.mean, imgstats.stddev))
# create boolean image flagging good pixels
img_good = np.ones(extimg.shape,dtype=bool)
# flag area out of picture as bad
img_good[out] = False
# replace high values in image with estimate of mean and flag them as not good
for i in range(78):
# after the sigma screening this is a bit of overkill, leave in for now
sub_bg = boxcar(bgimg[:,xlist[i]:xlist[i+2]] , (5,5), mode='reflect', cval=cval)
sub_bg_use = np.where( np.abs(sub_bg - cval) > 1.0e-5 ) # list of coordinates
imgstats = None
if sub_bg_use[0].size > 0:
imgstats = imagestats.ImageStats(sub_bg[sub_bg_use],nclip=3)
# patch values in image (not out of image) with mean if outliers
aval = 2.0*imgstats.stddev
img_clip_ = (
(np.abs(bgimg[:,xlist[i]:xlist[i+2]]-cval) < 1e-6) |
(np.abs(sub_bg - imgstats.mean) > aval) |
(sub_bg <= 0.) | np.isnan(sub_bg) )
bgimg[:,xlist[i]:xlist[i+2]][img_clip_] = imgstats.mean # patch image
img_good[:,xlist[i]:xlist[i+2]][img_clip_] = False # flag patches
# the next section selects the user-selected or default background for further processing
if chatter > 1:
if background_method == 'boxcar':
sys.stderr.write( "BACKGROUND METHOD: %s; background smoothing = %s\n"%
(background_method,background_smoothing))
else:
sys.stderr.write( "BACKGROUND METHOD:%s\n"%(background_method ))
if not ((background_method == 'splinefit') | (background_method == 'boxcar') ):
sys.stderr.write('background method missing; currently reads : %s\n'%(background_method))
if background_method == 'boxcar':
# boxcar smooth in x,y using the global parameter background_smoothing
bgimg = boxcar(bgimg,background_smoothing,mode='reflect',cval=cval)
if background_lower[0] == None:
bg1 = bgimg[0:ny4,:].copy()
bg_limits_used[0]=0
bg_limits_used[1]=ny4
bg1_good = img_good[0:ny4,:]
kx0 = np.min(np.where(img_good[0,:]))+10 # assuming the spectrum is in the top two thirds of the detector
kx1 = np.max(np.where(img_good[0,:]))-10
else:
# no curvature, no second order: limits
bg1_1= np.max(np.array([yloc_spectrum - sig1*background_lower[0],20 ]))
#bg1_0= np.max(np.array([yloc_spectrum - sig1*(background_lower[0]+background_lower[1]),0]))
bg1_0= np.max(np.array([yloc_spectrum - sig1*(background_lower[1]),0]))
bg1 = bgimg[int(bg1_0):int(bg1_1),:].copy()
bg_limits_used[0]=bg1_0
bg_limits_used[1]=bg1_1
bg1_good = img_good[int(bg1_0):int(bg1_1),:]
kx0 = np.min(np.where(img_good[int(bg1_0),:]))+10 # assuming the spectrum is in the top two thirds of the detector
kx1 = np.max(np.where(img_good[int(bg1_0),:]))-10 # corrected for edge effects
#if ((kx2-kx0) < 20):
# print 'not enough valid upper background points'
if background_upper[0] == None:
bg2 = bgimg[-ny4:ny,:].copy()
bg_limits_used[2]=ny-ny4
bg_limits_used[3]=ny
bg2_good = img_good[-ny4:ny,:]
kx2 = np.min(np.where(img_good[ny-1,:]))+10 # assuming the spectrum is in the top two thirds of the detector
kx3 = np.max(np.where(img_good[ny-1,:]))-10
else:
bg2_0= np.min(np.array([yloc_spectrum + sig1*background_upper[0],(slit_width-20) ]))
#bg2_1= np.min(np.array([yloc_spectrum + sig1*(background_upper[0]+background_upper[1]),ny]))
bg2_1= np.min(np.array([yloc_spectrum + sig1*(background_upper[1]),ny]))
bg2 = bgimg[int(bg2_0):int(bg2_1),:].copy()
bg_limits_used[2]=bg2_0
bg_limits_used[3]=bg2_1
bg2_good = img_good[int(bg2_0):int(bg2_1),:]
kx2 = np.min(np.where(img_good[int(bg2_1),:]))+10 # assuming the spectrum is in the top two thirds of the detector
kx3 = np.max(np.where(img_good[int(bg2_1),:]))-10
#if ((kx3-kx2) < 20):
# print 'not enough valid upper background points'
if background_method == 'boxcar':
bg1 = bg1_dis = bg1.mean(0)
bg2 = bg2_dis = bg2.mean(0)
bg1_dis_good = np.zeros(nx,dtype=bool)
bg2_dis_good = np.zeros(nx,dtype=bool)
for i in range(nx):
bg1_dis_good[i] = bool(int(bg1_good[:,i].mean(0)))
bg2_dis_good[i] = bool(int(bg2_good[:,i].mean(0)))
if background_method == 'splinefit':
# mean bg1_dis, bg2_dis across dispersion
bg1_dis = np.zeros(nx) ; bg2_dis = np.zeros(nx)
for i in range(nx):
bg1_dis[i] = bg1[:,i][bg1_good[:,i]].mean()
if not bool(int(bg1_good[:,i].mean())):
bg1_dis[i] = cval
bg2_dis[i] = bg2[:,i][bg2_good[:,i]].mean()
if not bool(int(bg2_good[:,i].mean())):
bg2_dis[i] = cval
# some parts of the background may have been masked out completely, so
# find the good points and the bad points
bg1_dis_good = np.where( np.isfinite(bg1_dis) & (np.abs(bg1_dis - cval) > 1.e-7) )
bg2_dis_good = np.where( np.isfinite(bg2_dis) & (np.abs(bg2_dis - cval) > 1.e-7) )
bg1_dis_bad = np.where( ~(np.isfinite(bg1_dis) & (np.abs(bg1_dis - cval) > 1.e-7)) )
bg2_dis_bad = np.where( ~(np.isfinite(bg2_dis) & (np.abs(bg2_dis - cval) > 1.e-7)) )
# fit a smoothing spline to each background
x = bg1_dis_good[0]
s = len(x) - np.sqrt(2.*len(x))
if smo1 != None: s = smo1
if len(x) > 40: x = x[7:len(x)-7] # clip end of spectrum where there is downturn
w = np.ones(len(x))
tck1 = interpolate.splrep(x,bg1_dis[x],w=w,xb=bg1_dis_good[0][0],xe=bg1_dis_good[0][-1],k=3,s=s)
bg1 = np.ones(nx) * (bg1_dis[x]).mean()
bg1[np.arange(kx0,kx1)] = interpolate.splev(np.arange(kx0,kx1), tck1)
x = bg2_dis_good[0]
s = len(x) - np.sqrt(2.*len(x))
if smo2 != None: s = smo2
if len(x) > 40: x = x[10:len(x)-10] # clip
w = np.ones(len(x))
tck2 = interpolate.splrep(x,bg2_dis[x],w=w,xb=bg2_dis_good[0][0],xe=bg2_dis_good[0][-1],k=3,s=s)
bg2 = np.ones(nx) * (bg2_dis[x]).mean()
bg2[np.arange(kx2,kx3)] = interpolate.splev(np.arange(kx2,kx3), tck2)
# force bg >= 0:
# spline can do weird things ?
negvals = bg1 < 0.0
if negvals.any():
bg1[negvals] = 0.0
if chatter > 1:
print("background 1 set to zero in ",len(np.where(negvals)[0])," points")
negvals = bg2 < 0.0
if negvals.any():
bg2[negvals] = 0.0
if chatter > 1:
print("background 2 set to zero in ",len(np.where(negvals)[0])," points")
# image constructed from linear inter/extra-polation of bg1 and bg2
bgimg_lin = np.zeros(ny*nx).reshape(ny,nx)
dbgdy = (bg2-bg1)/(ny-1)
for i in range(ny):
bgimg_lin[i,:] = bg1 + dbgdy*i
# interpolate background and generate smooth interpolation image
if ( (background_lower[0] == None) & (background_upper[0] == None)):
# default background region
dbgdy = (bg2-bg1)/150.0 # assuming height spectrum 200 and width extraction regions 30 pix each
for i9 in range(bgimg.shape[0]):
bgimg[i9,kx0:kx1] = bg1[kx0:kx1] + dbgdy[kx0:kx1]*(i9-25)
bgimg[i9,0:kx0] = bg2[0:kx0]
bgimg[i9,kx1:nx] = bg2[kx1:nx]
if chatter > 2: print("1..BACKGROUND DEFAULT from BG1 and BG2")
elif ((background_lower[0] != None) & (background_upper[0] == None)):
# set background to lower background region
for i9 in range(bgimg.shape[0]):
bgimg[i9,:] = bg1
if chatter > 2: print("2..BACKGROUND from lower BG1 only")
elif ((background_upper[0] != None) & (background_lower[0] == None)):
# set background to that of upper background region
for i9 in range(bgimg.shape[0]):
bgimg[i9,:] = bg2
if chatter > 2: print("3..BACKGROUND from upper BG2 only")
else:
# linear interpolation of the two background regions
dbgdy = (bg2-bg1)/(background_upper[0]+0.5*background_upper[1]+background_lower[0]+0.5*background_lower[1])
for i9 in range(bgimg.shape[0]):
bgimg[i9,kx0:kx1] = bg1[kx0:kx1] + dbgdy[kx0:kx1]*(i9-int(int(slit_width/2)-(background_lower[0]+0.5*background_lower[1])))
bgimg[i9,0:kx0] = bg2[0:kx0] # assuming that the spectrum in not in the lower left corner
bgimg[i9,kx1:nx] = bg2[kx1:nx]
if chatter > 2: print("4..BACKGROUND from BG1 and BG2")
return bg, bg1, bg2, bgsig, bgimg, bg_limits_used, (bg1_good, bg1_dis,
bg1_dis_good, bg2_good, bg2_dis, bg2_dis_good, bgimg_lin)
def interpol(xx,x,y):
'''
linearly interpolate a function y(x) to return y(xx)
no special treatment of boundaries
2011-12-10 NPMKuin skip all data points which are not finite
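Example (illustrative; inputs are expected to be numpy arrays):
>>> import numpy as np
>>> y = interpol(np.array([1.5, 2.5]), np.array([1., 2., 3.]), np.array([10., 20., 30.]))
>>> [float(v) for v in y]
[15.0, 25.0]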
'''
import numpy as np
x = np.asarray(x.ravel())
y = np.asarray(y.ravel())
q0 = np.isfinite(x) & np.isfinite(y) # filter out NaN values
q1 = np.where(q0)
if len(q1[0]) == 0:
print("error in arrays to be interpolated")
print("x:",x)
print("y:",y)
print("arg:",xx)
x1 = x[q1[0]]
y1 = y[q1[0]]
q2 = np.where( np.isfinite(xx) ) # filter out NaN values
kk = x1.searchsorted(xx[q2])-1
# should extrapolate if element of k = len(a)
#q = np.where(k == len(a)) ; k[q] = k[q]-1
n = len(kk)
f = np.zeros(n)
f2 = np.zeros(len(xx))
for i in range(n):
k = kk[i]
if k > (len(x1)-2):
k = len(x1) - 2
s = (y1[k+1]-y1[k])/(x1[k+1]-x1[k])
f[i] = y1[k]+s*(xx[q2[0]][i]-x1[k])
f2[q2] = f
f2[~np.isfinite(xx)] = np.NaN   # positions where xx is not finite get NaN
return f2
def hydrogen(n,l):
'''
Return the approximate wavelength (in Angstrom) of a hydrogen line.
Lyman series: l=0, n>1
Balmer series: l=1, n>2
Paschen series: l=2, n>3
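Example: the Balmer H-alpha line (n=3, l=1) is at roughly 6563 A:
>>> round(hydrogen(3, 1))
6563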
'''
# Rydberg constant in m-1 units
R = 1.097e7
inv_lam = R*(1./(l+1)**2 - 1./n**2)
lam = 1./inv_lam * 1e10
return lam
def boresight(filter='uvw1',order=1,wave=260,
r2d=77.0,date=0,chatter=0):
''' provide reference positions on the
UVOT filters for mapping and as function of
time for grisms.
This function name is for historical reasons,
and provides a key mapping function for the
spectral extraction.
The correct boresight of the (lenticular) filters
should be gotten from the Swift UVOT CALDB
as maintained by HEASARC. The positions here
are in some cases substantially different from
the boresight in the CALDB. They are reference
positions for the spectral extraction algorithms
rather than boresight.
The grism boresight positions at 260nm (uv grism)
and 420nm (visible grism) in first order are served
in an uncommon format (in DET pixels)
by adding (77,77) to the lenticular filter
RAW coordinate.(see TELDEF file) the grism
boresight was measured in DET coordinates,
not RAW. (offset correction should be 104,78)
Parameters
----------
filter : str
one of {'ug200','uc160','vg1000','vc955',
'wh','v','b','u','uvw1','uvm2','uvw2'}
order : {0,1,2}
order for which the anchor is needed
wave : float
anchor wavelength in nm
r2d : float
additive factor in x,y to anchor position
date: long
format in swift time (s)
if 0 then provide the first order anchor
coordinates of the boresight for mapping
from the lenticular filter position
chatter : int
verbosity
Returns
-------
When *date* = 0:
For translation: The boresight for a filter
(in DET pixels) by adding (77,77) to the
lenticular filter RAW coordinate (see TELDEF file)
the grism boresight was measured in DET
(The default r2d=77 returns the correct
boresight for the grisms in detector
coordinates. To get the grism boresight in
detector image coordinates, subtract (104,78)
typically. The difference is due to the distortion
correction from RAW to DET)
When *date* is non-zero, and *order*=0:
The zeroth order boresight
NOTE:
-----
THE TRANSLATION OF LENTICULAR IMAGE TO GRISM
IMAGE IS ALWAYS THE SAME, INDEPENDENT OF THE
BORESIGHT.
THEREFORE THE BORESIGHT DRIFT DOES NOT AFFECT
THE GRISM ANCHOR POSITIONS AS LONG AS THE DEFAULT
BORESIGHT POSITIONS ARE USED.
[Because those were used for the calibration].
However, the zeroth order "reference" position
drift affects the "uvotgraspcorr" - derived
WCS-S. The positions used
History:
2014-01-04 NPMK : rewrite to inter/extrapolate
the boresight positions
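Example
-------
Illustrative sketch (these are extraction reference positions, not the CALDB
boresight):
>>> x, y = boresight(filter='uvw1', r2d=77.0, date=0)  # uvw1 reference point (DET pix)
>>> ank = boresight(filter='ug200', order=1, date=0)   # uv grism first-order anchor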
'''
from scipy.interpolate import interp1d
import numpy as np
filterlist = ['ug200','uc160','vg1000','vc955',
'wh','v','b','u','uvw1','uvm2','uvw2']
if filter == 'list': return filterlist
grismfilters = ['ug200','uc160','vg1000','vc955']
lenticular = ['v','b','u','uvw1','uvm2','uvw2']
#old pixel offset anchor based on pre-2010 data
# dates in swift time, drift [x.y] in pixels
#dates=[209952000,179971200,154483349,139968000,121838400]
#drift=[ [0,0], [+2.4,-2.0], [+3.4,-3.0], [+6.4,-10], [+6.4,-10]]
# data from Frank's plot (email 2 dec 2013, uvw1 filter)
# original plot was in arcsec, but the drift converted
# to pixels. uvw1 seems representative (except for white)
swtime = np.array([
1.25000000e+08, 1.39985684e+08, 1.60529672e+08,
1.89248438e+08, 2.23489068e+08, 2.46907209e+08,
2.66126366e+08, 2.79601770e+08, 2.89763794e+08,
3.01251301e+08, 3.13180634e+08, 3.28423998e+08,
3.43445470e+08, 3.59351249e+08, 3.75257678e+08,
4.50000000e+08])
boredx = (np.array([-1.6, -0.870,0.546,1.174,2.328,2.47,
2.813,3.076,3.400,3.805,4.149,4.656,
5.081,5.607,6.072,8.56 ])-1.9)/0.502
boredy = (np.array([ -0.75,-2.197,-4.857,-6.527,
-7.098,-7.252,-7.142,-7.560,
-7.670,-8.000,-8.043,-8.395,
-8.637,-9.142,-9.670,-11.9])+6.8)/0.502
# I assume the same overall drift for the grism
# boresight (in pixels). Perhaps a scale factor for the
# grism would be closer to 0.56 pix/arcsec
# the range has been extrapolated for better interpolation
# and also to support the near future. The early
# time extrapolation is different from the nearly constant
# boresight in the teldef but within about a pixel.
# I think the extrapolation is more accurate.
fx = interp1d(swtime,boredx,bounds_error=False,fill_value="extrapolate")
fy = interp1d(swtime,boredy,bounds_error=False,fill_value="extrapolate")
# reference anchor positions
reference0 = {'ug200': [1449.22, 707.7],
'uc160': [1494.9 , 605.8], #[1501.4 , 593.7], # ?[1494.9, 605.8],
'vg1000':[1506.8 , 664.3],
'vc955': [1542.5 , 556.4]}
# DO NOT CHANGE THE FOLLOWING VALUES AS THE WAVECAL DEPENDS ON THEM !!!
reference1 = {'ug200': [ 928.53,1002.69],
'uc160': [1025.1 , 945.3 ],
'vg1000':[ 969.3 ,1021.3 ],
'vc955': [1063.7 , 952.6 ]}
if (filter in grismfilters):
if (date > 125000000) and (order == 0):
anchor = reference0[filter]
anchor[0] += r2d-fx(date)
anchor[1] += r2d-fy(date)
return anchor
elif (date > 125000000) and (order == 1):
anchor = reference1[filter]
anchor[0] += r2d-fx(date)
anchor[1] += r2d-fy(date)
return anchor
elif order == 1:
anchor = reference1[filter]
anchor[0] += r2d
anchor[1] += r2d
return anchor
elif order == 0:
raise RuntimeError(
"The zeroth order reference position needs a date")
else:
return reference1[filter]
elif (date > 125000000) and (filter in lenticular):
ref_lent = {'v':[951.74,1049.89],
'b':[951.87,1049.67],
'u':[956.98,1047.84],
'uvw1':[951.20,1049.36],
'uvm2':[949.75,1049.30],
'uvw2':[951.11,1050.18]}
anchor = ref_lent[filter]
anchor[0] += r2d-fx(date)
anchor[1] += r2d-fy(date)
return anchor
elif (date > 122000000) and (filter == 'wh'):
print("approximate static white filter boresight")
if date > 209952000:
return 949.902+r2d, 1048.837+r2d
elif date > 179971200:
return 953.315+r2d, 1048.014+r2d
elif date > 154483349:
return 954.506+r2d, 1043.486+r2d
elif date > 139968000:
return 956.000+r2d, 1039.775+r2d
elif date > 121838400:
return 956.000+r2d, 1039.775+r2d
else: return filterlist
else:
# this is the version used initially *(changed 2 june 2009)
# DO NOT CHANGE THESE VALUES AS THE WAVECAL DEPENDS ON THEM !!!
if filter == 'uvw1': return 954.61+r2d, 1044.66+r2d
elif filter == 'wh' : return 954.51+r2d, 1043.49+r2d
elif filter == 'v' : return 955.06+r2d, 1045.98+r2d
elif filter == 'b' : return 955.28+r2d, 1045.08+r2d
elif filter == 'u' : return 960.06+r2d, 1043.33+r2d
elif filter == 'uvm2': return 953.23+r2d, 1044.90+r2d
elif filter == 'uvw2': return 953.23+r2d, 1044.90+r2d
elif filter == 'w1' : return 954.61+r2d, 1044.66+r2d
elif filter == 'm2' : return 953.23+r2d, 1044.90+r2d
elif filter == 'w2' : return 953.23+r2d, 1044.90+r2d
elif filter == 'ug200':
if order == 1:
if wave == 260: return 928.53+r2d,1002.69+r2d
elif filter == 'uc160':
if order == 1:
if wave == 260: return 1025.1+27+r2d,945.3+r2d
elif filter == 'vg1000':
#elif order == 1: return 948.4+r2d, 1025.9+r2d
if order == 1: return 969.3+r2d, 1021.3+r2d
elif filter == 'vc955':
if order == 1: return 1063.7+r2d, 952.6+r2d
raise IOError("valid filter values are 'wh','v',"\
"'b','u','uvw1','uvm2','uvw2','ug200',"\
"'uc160','vg1000','vc955'\n")
def makeXspecInput(lamdasp,countrate,error,lamda_response=None,chatter=1):
''' Convert the count rate spectrum per pixel into a spectrum
on the given bins of the response function.
Parameters
----------
lamdasp : array
wavelengths spectrum
countrate : array
count rates at wavelengths
error : array
errors at wavelengths
kwargs : dict
- **lamda_response** : array
the wavelength for the response bins
- **chatter** : int
verbosity
Returns
-------
lambda : array
wavelengths of the bins
countrate : array
count rate in the bins
error : array
errors in the bins
Notes
-----
errors are summed as sqrt( sum (errors**2 ) )
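Example
-------
A minimal sketch (assuming `wave`, `rate`, `err` hold the extracted spectrum and
`resp_wave` the response bin wavelengths, all numpy arrays):
w, cr, cr_err = makeXspecInput(wave, rate, err, lamda_response=resp_wave)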
'''
# calculate bin size response, data
if type(lamda_response) == typeNone:
print('need to read in response matrix file')
print(' please code it up')
return None
new_countrate = np.zeros(len(lamda_response))
new_error = np.zeros(len(lamda_response))
# find bin widths
dlamresp = lamda_response.copy()*0
for i in range(len(dlamresp) -1):
dlamresp[i+1] = lamda_response[i+1] - lamda_response[i]
dlamresp[0] = dlamresp[1] # set width first two data bins equal (could inter/extrapolate the lot)
dlam = lamdasp.copy()*0
for i in range(len(dlam) -1):
dlam[i+1]=lamdasp[i+1] - lamdasp[i]
dlam[0] = dlam[1]
#
for i in range(len(lamda_response)):
# find the pixels to use that have contributions to the bin
lam1 = lamda_response[i] - dlamresp[i]/2.0
lam2 = lamda_response[i] + dlamresp[i]/2.0
if ( (lam1 >= (np.max(lamdasp)+dlam[len(lamdasp)-1])) ^ (lam2 <= (np.min(lamdasp)-dlam[0]))):
# no count data
new_countrate[i] = 0
if ((chatter > 2) & (i < 450) & (i > 400)) :
print(' i = ',i,' lam1 = ',lam1,' lam2 = ', lam2,' <<< counts set to zero ')
print(' i = ',i,' term 1 ',(np.max(lamdasp)-dlam[len(lamdasp)-1]))
print(' i = ',i,' term 2 ',(np.min(lamdasp)+dlam[0] ))
else:
if chatter > 2: print('new bin ',i,' lam = ',lam1,' - ',lam2)
# find the bits to add
k = np.where( (lamdasp+dlam/2 > lam1) & (lamdasp-dlam/2 <= lam2) )
# the countrate in a bin is proportional to its width; make sure only
# the part of the data array that fall within the new bin is added
if chatter > 2:
print('data in ',k[0],' wavelengths ',lamdasp[k[0]])
print('counts are ',countrate[k[0]])
nk = len(k[0])
factor = np.zeros( nk )
for m in range(nk): # now loop over all bins that might contribute
wbin1 = lamdasp[k[0][m]] - dlam[k[0][m]]/2
wbin2 = lamdasp[k[0][m]] + dlam[k[0][m]]/2
# width bin_form override with limits bin_to
factor[m] = (np.min(np.array( (wbin2,lam2) )) - np.max(np.array((wbin1 ,lam1))))/ (wbin2-wbin1)
if chatter > 2 :
print(' ... m = ',m,' bin= ',wbin1,' - ',wbin2)
print(' ... trimmed ',np.min(np.array( (wbin2,lam2) )),' - ',np.max(np.array((wbin1 ,lam1))))
new_countrate[i] = (factor * countrate[k[0]]).sum()
new_error[i] = np.sqrt( ( (factor * error[k[0]])**2 ).sum() )
if chatter > 2:
print(' scaled factor = ', factor)
print(' new_countrate = ', new_countrate[i])
#
# check that the total number of counts is the same
print('total counts in = ', countrate.sum())
print('total counts out= ', new_countrate.sum())
#
return lamda_response, new_countrate, new_error
def find_zeroth_orders(filestub, ext, wheelpos, region=False,indir='./',
set_maglimit=None, clobber="NO", chatter=0):
'''
The aim is to identify the zeroth order on the grism image.
This is done as follows:
We run uvotdetect to get the zeroth orders in the detector image.
We also grab the USNO B1 source list and predict the positions on the image using the WCSS header.
Based on a histogram of minimum distances, a correction is made to the WCSS header and
also to the USNO-B1 predicted positions.
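Example
-------
A minimal sketch (requires HEASOFT with uvotdetect and the Swift CALDB to be
set up; the file stub and extension below are hypothetical):
Xim, Yim, Xa, Yb, Thet, b2mag, matched, ondet = find_zeroth_orders(
'sw00032000001', 1, 160, region=True, indir='./', chatter=1)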
'''
import os
try:
from astropy.io import fits, ascii
except:
import pyfits as fits
from numpy import array, zeros, log10, where
import datetime
import uvotwcs
from astropy import wcs
if chatter > 0:
print("find_zeroth_orders: determining positions zeroth orders from USNO-B1")
if ((wheelpos == 160) ^ (wheelpos == 200)):
grtype = "ugu"
zp = 19.46 # zeropoint uv nominal zeroth orders for 10 arcsec circular region
else:
grtype = "ugv"
zp = 18.90 # estimated visible grism zeropoint for same
exts = repr(ext)
gfile = os.path.join(indir,filestub+grtype+"_dt.img")
infile = os.path.join(indir,filestub+grtype+"_dt.img["+exts+"]")
outfile = os.path.join(indir,filestub+grtype+"_"+exts+"_detect.fits")
if ((wheelpos == 160) ^ (wheelpos == 200)):
command = "uvotdetect infile="+infile+ " outfile="+outfile + \
' threshold=6 sexargs = "-DEBLEND_MINCONT 0.1" '+ \
" expopt = BETA calibrate=NO expfile=NONE "+ \
" clobber="+clobber+" chatter=0 > /dev/null"
else:
command = "uvotdetect infile="+infile+ " outfile="+outfile + \
' threshold=6 sexargs = "-DEBLEND_MINCONT 0.1" '+ \
" expopt = BETA calibrate=NO expfile=NONE "+ \
" clobber="+clobber+" chatter=0 > /dev/null"
if chatter > 1:
print("find_zeroth_orders: trying to detect the zeroth orders in the grism image")
print(command)
useuvotdetect = True
tt = os.system(command)
if tt != 0:
raise RuntimeError('find_zeroth_orders: uvotdetect had a problem with this image\nIs HEASOFT initialised?')
if not os.access(outfile,os.F_OK):
# so you can provide it another way
useuvotdetect = False
rate = 0
if useuvotdetect:
f = fits.open(outfile)
g = f[1].data
h = f[1].header
refid = g.field('refid')
rate = g.field('rate')
rate_err = g.field('rate_err')
rate_bkg = g.field('rate_bkg') # counts/sec/arcsec**2
x_img = g.field('ux_image')
y_img = g.field('uy_image')
a_img = g.field('ua_image') # semi axis
b_img = g.field('ub_image') # semi axis
theta = g.field('utheta_image') # angle of the detection ellipse
prof_major = g.field('prof_major')
prof_minor = g.field('prof_minor')
prof_theta = g.field('prof_theta')
threshold = g.field('threshold') # sigma
flags = g.field('flags')
f.close()
else:
rate_bkg = array([0.08])
hh = fits.getheader(gfile, ext)
exposure = hh['exposure']
ra = hh['RA_PNT']
dec = hh['DEC_PNT']
if "A_ORDER" in hh:
distortpresent = True
else:
distortpresent = False
if chatter > 1:
print("find_zeroth_orders: pointing position ",ra,dec)
# unfortunately uvotdetect will pick up spurious stuff as well near the spectra
# need real sources.
# get catalog sources (B magnitude most closely matches zeroth order)
CALDB = os.getenv('CALDB')
if CALDB == '':
print('find_zeroth_orders: the CALDB environment variable has not been set')
return None
HEADAS = os.getenv('HEADAS')
if HEADAS == '':
print('find_zeroth_orders: The HEADAS environment variable has not been set')
print('That is needed for the uvot Ftools ')
return None
if set_maglimit == None:
b_background = zp + 2.5*log10( (rate_bkg.std())*1256.6 )
# some typical measure for the image
blim= b_background.mean() + b_background.std() + zeroth_blim_offset
else:
blim = set_maglimit
if blim < background_source_mag: blim = background_source_mag
if np.isnan(blim): blim = 18
# if usno-b1 catalog is present for this position,
# do not retrieve again
if os.access('searchcenter.ub1',os.F_OK):
searchcenterf = open( 'searchcenter.ub1' )
searchcenter= searchcenterf.readline().split(',')
searchcenterf.close()
racen,decen = float(searchcenter[0]),float(searchcenter[1])
if np.abs(ra-racen) + np.abs(dec-decen) < 0.01:
use_previous_search = True
else:
use_previous_search = False
else:
use_previous_search = False
# empty file
if os.access('search.ub1',os.F_OK) :
searchf = open('search.ub1')
stab = searchf.readlines()
searchf.close()
if len(stab) < 3: use_previous_search = False
# retrieve catalog data
if (not os.access('search.ub1',os.F_OK)) | (not use_previous_search):
if (chatter > 4): print ("get_usnob1_cat(%f,%f,%f)"%(ra,dec,blim))
status = get_usnob1_cat(ra, dec, blim)
if status is None:
print('ra={}, dec={}, blim={}'.format(ra, dec, blim))
print("find_zeroth_orders: could not get source list from USNO-B1")
sys.exit()
else:
if chatter > 1:
print("find_zeroth_orders: using the USNO-B1 source list from file search.ub1")
# generate a new catspecfile
_write_catspecfile()
# remove reliance on astropy tables as it fails on debian linux
searchf = open('search.ub1')
stab = searchf.readlines()
searchf.close()
M = len(stab)
ra = []
dec = []
b2mag = []
for row in stab:
row_values = row.split()
if len(row_values) > 6:
ra.append(row_values[1])
dec.append(row_values[2])
b2mag.append(row_values[5])
M = len(ra)
if M == 0:
return
ra = np.asarray(ra,dtype=np.float64)
dec = np.asarray(dec,dtype=np.float64)
b2mag = np.asarray(b2mag,dtype=np.float64)
Xa = zeros(M)
Yb = zeros(M)
Thet= zeros(M)
ondetector = zeros(M,dtype=bool)
matched = zeros(M,dtype=bool)
# now find the image coordinates:
#
wcsS = wcs.WCS(header=hh,key='S',relax=True,) # TAN-SIP coordinate type
Xim,Yim = wcsS.wcs_world2pix(ra,dec,0)
xdim, ydim = hh['naxis1'],hh['naxis2']
wheelpos = hh['wheelpos']
if wheelpos == 200:
q1 = (rate > 2.5*rate_bkg) & (rate < 125*rate_bkg)
defaulttheta = 151.4-180.
bins = np.arange(-29.5,29.5,1)
midbin = np.arange(-29,29,1)
elif wheelpos == 160:
q1 = (rate > 2.5*rate_bkg) & (rate < 125*rate_bkg) & (x_img > 850)
defaulttheta = 144.4-180.
bins = np.arange(-29.5,29.5,1)
midbin = np.arange(-29,29,1)
elif wheelpos == 955:
q1 = (rate > 2.5*rate_bkg) & (rate < 175*rate_bkg) & (x_img > 850)
defaulttheta = 140.5-180
bins = np.arange(-49.5,49.5,1)
midbin = np.arange(-49,49,1)
elif wheelpos == 1000:
q1 = (rate > 2.5*rate_bkg) & (rate < 175*rate_bkg)
defaulttheta = 148.1-180.
bins = np.arange(-49.5,49.5,1)
midbin = np.arange(-49,49,1)
Thet -= defaulttheta
Xa += 17.0
Yb += 5.5
# convert sky coord. to positions (Xim , Yim) , and set flag ondetector
for i in range(M):
if not distortpresent:
# now we need to apply the distortion correction:
Xim[i], Yim[i] = uvotwcs.correct_image_distortion(Xim[i],Yim[i],hh)
ondetector[i] = ((Xim[i] > 8) & (Xim[i] < xdim) & (Yim[i] > 8) & (Yim[i] < ydim-8))
xoff = 0.0
yoff = 0.0
# derive offset :
# find the minimum distances between sources in lists pair-wise
distance = []
distx = []
disty = []
kx = -1
dxlim = 100 # maximum distance in X
dylim = 100 # maximum distance in Y
tol = 5 # tolerance in x and y match
xim = x_img[q1]
yim = y_img[q1]
M2 = int(len(xim)*0.5)
for i2 in range(M2): # loop over the xdetect results
i = 2*i2
i1 = 2*i2+1
if (ondetector[i] and useuvotdetect):
dx = np.abs(Xim - xim[i ])
dy = np.abs(Yim - yim[i ])
dx1 = np.abs(Xim - xim[i1])
dy1 = np.abs(Yim - yim[i1])
op = (dx < dxlim) & (dy < dylim)
if op.sum() != 0:
dis = np.sqrt(dx[op]**2+dy[op]**2)
kx = dis == np.min(dis)
kx = np.arange(len(op))[op][kx]
op1 = (dx1 < dxlim) & (dy1 < dylim)
if op1.sum() != 0:
dis = np.sqrt(dx1[op1]**2+dy1[op1]**2)
kx1 = dis == np.min(dis)
kx1 = np.arange(len(op1))[op1][kx1]
if (np.abs(dx[kx] - dx1[kx1]) < tol ) & (np.abs(dy[kx] - dy1[kx1]) < tol ):
distx.append( Xim[kx] - xim[i ] )
disty.append( Yim[kx] - yim[i ] )
distx.append( Xim[kx1] - xim[i1] )
disty.append( Yim[kx1] - yim[i1] )
if ((type(kx) == int) & (chatter > 3)):
print("Xim: ",Xim[kx])
print("xim:",xim)
print("dx: ",dx)
if len(distx) > 0 :
hisx = np.histogram(distx,bins=bins)
#xoff = hisx[1][:-1][hisx[0] == hisx[0].max()].mean()
xoff = midbin[hisx[0] == hisx[0].max()].mean()
hisy = np.histogram(disty,bins=bins)
#yoff = hisy[1][:-1][hisy[0] == hisy[0].max()].mean()
yoff = midbin[hisy[0] == hisy[0].max()].mean()
# subtract xoff, yoff from Xim, Yim or add to origin ( hh[CRPIX1S],hh[CRPIX2S] ) if offset
# is larger than 1 pix
if (np.sqrt(xoff**2+yoff**2) > 1.0):
if ("forceshi" not in hh):
hh['crpix1s'] += xoff
hh['crpix2s'] += yoff
hh["forceshi"] = "%f,%f"%(xoff,yoff)
hh["forcesh0"] = "%f,%f"%(xoff,yoff)
print("offset (%5.1f,%5.1f) found"%(xoff,yoff))
print("offset found has been applied to the fits header of file: %s\n"%(gfile))
else:
# do not apply shift to crpix*s for subsequent shifts, but record overall shift
# original shift is in "forcesh0" which actually WAS applied. Both items are needed
# to reconstruct shifts between pointing image and the source locations (in case
# we allow interactive adjustments of zeroth orders, that would enable pointing updates
# however, the keyword must be reset at start of reprocessing (not done now)
xoff_,yoff_ = np.array((hh["forceshi"]).split(','),dtype=float)
hh["forceshi"] = "%f,%f"%(xoff_+xoff,yoff_+yoff)
f = fits.open(gfile,mode='update')
f[ext].header = hh
f.close()
print("find_zeroth_orders result (binary matched offset): \n")
print("\tAfter comparing uvotdetect zeroth order positions to USNO-B1 predicted source positions ")
print("\tan overall offset of (%5.1f,%5.1f) pix was found "%(xoff,yoff))
Xim -= xoff
Yim -= yoff
else:
# if binary matched offsets don't pan out at all, compute simple offsets
for i in range(len(xim)): # loop over the xdetect results
if (ondetector[i] and useuvotdetect):
dx = np.abs(Xim - xim[i ])
dy = np.abs(Yim - yim[i ])
op = (dx < dxlim) & (dy < dylim)
if op.sum() != 0:
dis = np.sqrt(dx[op]**2+dy[op]**2)
kx = dis == np.min(dis)
kx = np.arange(len(op))[op][kx]
distx.append( Xim[kx] - xim[i ] )
disty.append( Yim[kx] - yim[i ] )
hisx = np.histogram(distx,bins=bins)
#xoff = hisx[1][hisx[0] == hisx[0].max()].mean()
xoff = midbin[hisx[0] == hisx[0].max()].mean()
hisy = np.histogram(disty,bins=bins)
#yoff = hisy[1][hisy[0] == hisy[0].max()].mean()
yoff = midbin[hisy[0] == hisy[0].max()].mean()
if (np.sqrt(xoff**2+yoff**2) > 1.0):
if ("forceshi" not in hh):
hh['crpix1s'] += xoff
hh['crpix2s'] += yoff
hh["forceshi"] = "%f,%f"%(xoff,yoff)
hh["forcesh0"] = "%f,%f"%(xoff,yoff)
print("offset (%5.1f,%5.1f) found"%(xoff,yoff))
print("offset found has been applied to the fits header of file: %s\n"%(gfile))
else:
# do not apply shift to crpix*s for subsequent shifts, but record overall shift
# original shift is in "forcesh0" which actually WAS applied. Both items are needed
# to reconstruct shifts between pointing image and the source locations (in case
# we allow interactive adjustments of zeroth orders, that would enable pointing updates
# however, the keyword must be reset at start of reprocessing (not done now)
xoff_,yoff_ = np.array((hh["forceshi"]).split(','),dtype=float)
hh["forceshi"] = "%f,%f"%(xoff_+xoff,yoff_+yoff)
f = fits.open(gfile,mode='update')
f[ext].header = hh
f.close()
print("find_zeroth_orders result (simple offset): \n")
print("\tAfter comparing uvotdetect zeroth order positions to USNO-B1 predicted source positions ")
print("\tan overall offset of (%5.1f,%5.1f) pix was found "%(xoff,yoff))
Xim -= xoff
Yim -= yoff
# find ellipse belonging to source from uvotdetect output, or make up one for all ondetector
xacc = 10
yacc = 6
for i in range(M):
if (ondetector[i] and useuvotdetect):
kx = where ( abs(Xim[i] - x_img) < xacc )
if len(kx[0]) != 0:
kxy = where( abs(Yim[i] - y_img[kx]) < yacc)
if len(kxy[0]) == 1:
k = kx[0][kxy[0][0]]
Xa[i] = prof_major[k]*5.
Yb[i] = prof_minor[k]*5.
Thet[i]= -theta[k]
matched[i] = True
else:
# make up some ellipse axes in pix
Xa[i] = 17.0
Yb[i] = 5.0
if chatter > 0:
print("find_zeroth_orders: there were %i matches found between the uvotdetect sources and the USNO B1 list"%(matched.sum()))
if region:
a = datetime.date.today()
datetime = a.isoformat()[0:4]+a.isoformat()[5:7]+a.isoformat()[8:10]
# make region file for sources on detector
f = open(filestub+'_'+exts+'.reg','w')
f.write('# Region file format: DS9 version 4.1\n')
#f.write('# written by uvotgetspec.findzerothorders python program '+datetime+'\n')
f.write('# Filename: '+infile+'\n')
f.write('global color=green dashlist=8 3 width=1 font="helvetica 10 normal" select=1 highlite=1 dash=0 fixed=0 edit=1 move=1 delete=1 include=1 source=1 \n')
f.write('physical\n')
for i in range(M):
if (ondetector[i] and useuvotdetect):
f.write('ellipse(%12.2f,%12.2f,%12.2f,%12.2f,%12.2f)\n' % (Xim[i],Yim[i],Xa[i],Yb[i],180.-Thet[i]) )
f.close()
# make a second region file for sources with first order on detector [TBD]
# the sources on the detector are Xim[ondetector] etc.,
# matched[ondetector] are those sources which have both been found by uvotdetect and in the catalog
# the complete list also includes sources off the detector which may have first orders on the
# detector when the B magnitude > ~14.
# the ellipse parameters for the sources which have no uvotdetection (matched=False) are some
# arbitrary mean values. They should be scaled to brightness.
return Xim,Yim,Xa,Yb,Thet,b2mag,matched,ondetector
def spec_curvature(wheelpos,anchor,order=1,):
'''Find the coefficients of the polynomial for the curvature.
Parameters
----------
wheelpos : int, {160,200,955,1000}
grism filter position in filter wheel
anchor : list, array
anchor position in detector coordinates (pixels)
order : int
the desired spectral order
Returns
-------
Provides the polynomial coefficients for y(x).
Notes
-----
The curvature is defined with argument the pixel coordinate in the dispersion
direction with reference to the anchor coordinates in det-img
coordinates. The polynomial returns the offset normal to the dispersion.
- 2011-03-07 <NAME>, initial version
- 2011-08-02 fixed nominal coefficients order=1
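Example
-------
A minimal sketch: get the first-order curvature coefficients at an anchor
(in det-img coordinates) and evaluate the offset 400 pix from the anchor:
coef = spec_curvature(160, [1100., 1020.], order=1)
dy = np.polyval(coef, 400.)   # offset normal to the dispersion at x=400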
'''
from scipy import interpolate
from numpy import array
xin = anchor[0] -104
yin = anchor[1] -78
if ((wheelpos == 1000) ^ (wheelpos == 955)):
# return y = 0 + 0.0*x coefficient
return array([0.,0.])
elif wheelpos == 160:
if order == 1:
tck_c1= [array([0.,0.,0.,0.,2048., 2048., 2048., 2048.]), \
array([0.,0.,0.,0., 2048., 2048., 2048., 2048.]), \
array([ 0.1329227 , -0.28774943, 0.13672294, -0.18436127, -0.19086855,\
0.23071908, -0.21803703, 0.11983982, 0.16678715, -0.2004285 ,\
0.12813155, -0.13855324, -0.1356009 , 0.11504641, -0.10732287,\
0.03374111]),3,3]
tck_c2 = [array([0.,0.,0.,0., 2048., 2048., 2048., 2048.]),\
array([0.,0.,0.,0., 2048., 2048., 2048., 2048.]),\
array([ -3.17463632e-04, 2.53197376e-04, -3.44611897e-04,\
4.81594388e-04, 2.63206764e-04, -3.03314305e-04,\
3.25032065e-04, -2.97050826e-04, -3.06358032e-04,\
3.32952612e-04, -2.79473410e-04, 3.95150704e-04,\
2.56203495e-04, -2.34524716e-04, 2.75320861e-04,\
-6.64416547e-05]),3,3]
tck_c3 = [array([ 0.,0.,0.,0.,2048., 2048., 2048., 2048.]),\
array([ 0.,0.,0.,0.,2048., 2048., 2048., 2048.]),\
array([ -4.14989592e-07, 5.09851884e-07, -4.86551197e-07,\
1.33727326e-07, 4.87557866e-07, -5.51120320e-07,\
5.76975007e-07, -3.29793632e-07, -3.42589204e-07,\
3.00002959e-07, -2.90718693e-07, 5.57782883e-08,\
2.20540397e-07, -1.62674045e-07, 8.70230076e-08,\
-1.13489556e-07]),3,3]
#coef = array([interpolate.bisplev(xin,yin,tck_c3),interpolate.bisplev(xin,yin,tck_c2),\
# interpolate.bisplev(xin,yin,tck_c1), 0.])
coef = array([interpolate.bisplev(xin,yin,tck_c3)*0.5,interpolate.bisplev(xin,yin,tck_c2)*0.5,\
interpolate.bisplev(xin,yin,tck_c1)*0.5, 0.]) #~FIXME:
return coef
elif order == 2:
tck_c0 = [array([ 0., 0., 0., 0., 1134.78683, 2048., 2048., 2048., 2048.]), \
array([ 0., 0., 0., 0., 871.080060, 2048., 2048., 2048., 2048.]), \
array([-110.94246902, 15.02796289, -56.20252149, -12.04954456,\
311.31851187, -31.09148174, -48.44676102, 85.82835905,\
-73.06964994, 99.58445164, 46.47352776, 11.29231744,\
-68.32631894, 88.68570087, -34.78582366, -33.71033771,\
6.89774103, 25.59082616, 23.37354026, 49.61868235,\
-438.17511696, -31.63936231, 28.8779241 , 51.03055925,\
16.46852299]), 3, 3]
tck_c1 = [array([ 0., 0., 0., 0., 2048., 2048., 2048., 2048.]),\
array([ 0., 0., 0., 0., 2048., 2048., 2048., 2048.]),\
array([ 0.52932582, -0.76118033, 0.38401924, -0.189221 , -0.45446129,\
0.73092481, -0.53433133, 0.12702548, 0.21033591, -0.45067611,\
0.32032545, -0.25744487, -0.06022942, 0.22532666, -0.27174491,\
0.03352306]), 3, 3]
tck_c2 = [array([ 0., 0., 0., 0., 2048., 2048., 2048., 2048.]),\
array([ 0., 0., 0., 0., 2048., 2048., 2048., 2048.]),\
array([ -4.46331730e-04, 3.94044533e-04, -1.77072490e-04,\
2.09823843e-04, 3.02872440e-04, -6.23869655e-04,\
5.44400661e-04, -3.70038727e-04, -1.60398389e-04,\
4.90085648e-04, -4.91436626e-04, 4.62904236e-04,\
4.05692472e-05, -2.34521165e-04, 3.04866621e-04,\
-1.25811263e-04]), 3, 3]
#tck_c0 = [array([0.,0., 1132.60995961, 2048.,2048.]),
# array([0.,0., 814.28303687, 2048.,2048.]),
# array([-49.34868162, -0.22692399, -11.06660953, 5.95510567,
# -3.13109456, 37.63588808, -38.7797533 , 24.43177327, 43.27243297]),1,1]
#tck_c1 = [array([ 0., 0., 2048., 2048.]),
# array([ 0., 0., 2048., 2048.]),
# array([ 0.01418938, -0.06999955, -0.00446343, -0.06662488]),1,1]
#tck_c2 = [array([ 0., 0., 2048., 2048.]),
# array([ 0., 0., 2048., 2048.]),
# array([ -9.99564069e-05, 8.89513468e-05, 4.77910984e-05, 1.44368445e-05]),1,1]
coef = array([interpolate.bisplev(xin,yin,tck_c2),interpolate.bisplev(xin,yin,tck_c1),\
interpolate.bisplev(xin,yin,tck_c0)])
return coef
elif order == 3:
# not a particularly good fit.
tck_c0 = [array([0., 0., 1101.24169141, 2048.,2048.]),
array([0., 0., 952.39879838, 2048.,2048.]),
array([ -74.75453915, 7.63095536, -131.36395787, 11.14709189,
-5.52089337, 73.59327202, -57.25048374, 37.8898465 ,
65.90098406]), 1, 1]
tck_c1 = [array([ 0., 0., 2048., 2048.]),
array([ 0., 0., 2048., 2048.]),
array([-0.04768498, -0.02044308, 0.02984554, -0.04408517]), 1, 1]
coef = array([interpolate.bisplev(xin,yin,tck_c1),interpolate.bisplev(xin,yin,tck_c0)])
return coef
elif order == 0:
tck_c0 = [array([ 0., 0., 1075.07521348, 2048. ,2048.]),
array([ 0., 0., 1013.70915889, 2048. ,2048.]),
array([ 130.89087966, 25.49195385, 5.7585513 , -34.68684878,
-52.13229007, -168.75159696, 711.84382717, -364.9631271 ,
374.9961278 ]),1,1]
tck_c1 = [array([ 0., 0., 2048., 2048.]),
array([ 0., 0., 2048., 2048.]),
array([ 0.08258587, -0.06696916, -0.09968132, -0.31579981]),1,1]
coef = array([interpolate.bisplev(xin,yin,tck_c1),interpolate.bisplev(xin,yin,tck_c0)])
return coef
else:
raise ValueError("spec_curvature: invalid order for wheelpos 160")
elif wheelpos == 200:
if order == 1:
tck_c1 = [array([ 0., 0., 0., 0., 2048., 2048., 2048., 2048.]),\
array([ 0., 0., 0., 0., 2048., 2048., 2048., 2048.]),\
array([-0.00820665, -0.06820851, 0.04475057, -0.06496112, 0.062989 , \
-0.05069771, -0.01397332, 0.03530437, -0.17563673, 0.12602437,\
-0.10312421, -0.02404978, 0.06091811, -0.02879142, -0.06533121,\
0.07355998]), 3, 3]
tck_c2 = [array([ 0., 0., 0., 0., 2048., 2048., 2048., 2048.]),\
array([ 0., 0., 0., 0., 2048., 2048., 2048., 2048.]),\
array([ 1.69259046e-04, -1.67036380e-04, -9.95915869e-05, \
2.87449321e-04, -4.90398133e-04, 3.27190710e-04, \
2.12389405e-04, -3.55245720e-04, 7.41048332e-04, \
-4.68649092e-04, -1.11124841e-04, 6.72174552e-04, \
-3.26167775e-04, 1.15602175e-04, 5.78187743e-04, \
-8.79488201e-04]), 3, 3]
tck_c3 = [array([ 0., 0., 0., 0., 2048., 2048., 2048., 2048.]),\
array([ 0., 0., 0., 0., 2048., 2048., 2048., 2048.]),\
array([ 1.11106098e-07, 2.72305072e-07, -7.24832745e-07,\
4.65025511e-07, -2.35416547e-07, -3.87761080e-07,\
1.05955881e-06, -6.46388216e-07, 3.15103869e-07,\
5.48402086e-07, -1.44488974e-06, 6.52867676e-07,\
1.14004672e-08, -9.48879026e-07, 1.64082320e-06,\
-8.07897628e-07]), 3, 3]
# the linear fit fails at the right side (57020002) but is quite good otherwise:
#tck_c1 = [array([ 0., 0., 2048., 2048.]), array([ 0., 0., 2048., 2048.]),\
# array([-0.02212781, -0.00873168, -0.00377861, -0.02478484]), 1, 1]
#
#tck_c2 = [array([ 0., 0., 2048., 2048.]), array([ 0., 0., 2048., 2048.]),\
# array([ -6.75189230e-05, 6.19498966e-05, 5.22322103e-05, 7.75736030e-05]), 1, 1]
#
#tck_c3 = [array([ 0., 0., 2048., 2048.]), array([ 0., 0., 2048., 2048.]), \
# array([ -1.75056810e-09, -3.61606998e-08, -6.00321832e-09, -1.39611943e-08]), 1, 1]
coef = array([interpolate.bisplev(xin,yin,tck_c3),interpolate.bisplev(xin,yin,tck_c2),\
interpolate.bisplev(xin,yin,tck_c1), 0.])
return coef
elif order == 2:
tck_c0 = [array([0.,0., 956.25596245, 2048.,2048.]),
array([0.,0., 1067.40622524, 2048.,2048.]),
array([ 17.82135471, -4.93884392, 20.55439437, -18.22869669,
13.11429182, 41.2680039 , 9.8050793 , 32.72362507, -6.56524782]), 1, 1]
tck_c1 = [array([ 0., 0., 2048., 2048.]),
array([ 0., 0., 2048., 2048.]),
array([ 0.02362119, -0.03992572, 0.0177935 , -0.10163929]),1, 1]
tck_c2 = [array([ 0., 0., 2048., 2048.]),
array([ 0., 0., 2048., 2048.]),
array([ -6.32035759e-05, 5.28407967e-05, -8.87338917e-06, 8.58873870e-05]),1,1]
coef = array([interpolate.bisplev(xin,yin,tck_c2),interpolate.bisplev(xin,yin,tck_c1),\
interpolate.bisplev(xin,yin,tck_c0)])
return coef
elif order == 3:
tck_c0 = [array([ 0. , 0. , 807.44415249, 2048.,2048.]),
array([ 0. , 0. , 1189.77686531, 2048.,2048.]),
array([-5436.10353688, 218.93823252, -254.71035527, -24.35684969,
23.26131493, 51.66273635, 37.89898456, 46.77095978,
63.22039872]), 1, 1]
tck_c1 = [array([ 0., 0., 2048., 2048.]),
array([ 0., 0., 2048., 2048.]),
array([-0.02591263, -0.03092398, 0.00352404, -0.01171369]), 1, 1]
coef = array([interpolate.bisplev(xin,yin,tck_c1),interpolate.bisplev(xin,yin,tck_c0)])
return coef
elif order == 0:
tck_c0 = [array([0.,0., 798.6983833, 2048., 2048.]),
array([0.,0., 1308.9171309, 2048., 2048.]),
array([ 1244.05322027, 24.35223956, -191.8634177 , -170.68236661,
-4.57013926, 20.35393124, -365.28237355, -235.44828185, -2455.96232688]), 1, 1]
tck_c1 = [array([ 0., 0., 2048., 2048.]),
array([ 0., 0., 2048., 2048.]),
array([ 0.54398146, -0.04547362, -0.63454342, -0.49417562]),1,1]
coef = array([interpolate.bisplev(xin,yin,tck_c1),interpolate.bisplev(xin,yin,tck_c0)])
return coef
else:
raise ValueError("spec_curvature: invalid order for wheelpos 200")
else:
raise ValueError('spec_curvature: illegal wheelpos value')
def get_coi_box(wheelpos):
# provide half-width, length coi-box and factor
# typical angle spectrum varies with wheelpos
# 29,27,31,28 3x8/cos([144.5,151.4,140.5,148.1]) for wheelpos = 160,200,955,1000
coistuff = {'160':(7.5,29,1.11),
'200':(7.5,27,1.12),
'955':(6.5,31,1.09),
'1000':(7.0,28,1.13),}
return coistuff[str(wheelpos)]
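# Illustrative usage (a sketch, not executed by the pipeline): the returned tuple
# gives the coi-box half-width across the dispersion, its length along the
# dispersion, and a multiplicative factor, keyed on the filter wheel position.
#   coi_half_width, coilength, coifactor = get_coi_box(160)   # -> (7.5, 29, 1.11)
#   box_height = 2*coi_half_width                             # pixels across the dispersion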
def curved_extraction(extimg,ank_c,anchor1, wheelpos, expmap=None, offset=0., \
anker0=None, anker2=None, anker3=None, angle=None, offsetlimit=None, \
background_lower=[None,None], background_upper=[None,None],background_template=None,\
trackonly=False, trackfull=False, caldefault=True, curved="noupdate", \
poly_1=None,poly_2=None,poly_3=None, set_offset=False, \
composite_fit=True, test=None, chatter=0, skip_field_sources=False,\
predict_second_order=True, ZOpos=None,outfull=False, msg='',\
fit_second=True,fit_third=True,C_1=None,C_2=None,dist12=None, ifmotion=True,\
dropout_mask=None,obsid=None,indir=None,motion_file=None,ank_c_0offset=False,ifextended=False,fixwidth=False):
'''This routine knows about the curvature of the spectra in the UV filters.
It can provide the coefficients of the tracks of the orders, and
a gaussian fit to the orders.
extimg = extracted image
ank_c = array( [ X pos anchor, Y pos anchor, start position spectrum, end spectrum]) in extimg
anchor1 = anchor position in original image in det coordinates
wheelpos = filter wheel position
ZOpos variables defining Zeroth Order positions
angle [req with ZOpos]
background_template - if provided, the background will be based on this
dropout_mask from extractSpecImg
override curvature polynomial coefficients with poly_1,poly_2,poly_3
i.e., after a call to updateFitorder()
outputs a new array of the sums over a fixed number of pixels across the spectrum,
used for the coincidence-loss correction; the width of that box depends on the
parameter coi_half_width
NPMK, 2010-07-09 initial version
2012-02-20 There was a problem with the offset/track y1 position/borderup,borderdown consistency
when using a prescribed offset. Changing handling. Always make a fine yank adjustment < 3 pix.
disabled for now the set_offset (it does not do anything).
2012-02-20 moved the call to updateFitorder() to curved_extraction. The result is that the
spectrum will be extracted using the updated track parameters.
2014-06-02 add support for fixed box extraction coincidence loss.
2014-08-04 add parameter curved_extraction to limit y-positioning extraction slit with list option
2014-08-06 changed code to correctly adjust y1 position
2014-08-25 fixed error in curve of location orders except first one
2016-01-17 trackcentroiding parameter added to disable centroiding
'''
import pylab as plt
from numpy import array, arange, where, zeros, ones, asarray, abs
from uvotplot import plot_ellipsoid_regions
import uvotmisc
anky,ankx,xstart,xend = ank_c
xstart -= ankx
xend -= ankx
anchor2 = anchor1
if test == 'cal':
from cal3 import get_1stOrderFit, get_2ndOrderFit ,get_3rdOrderFit, get_0thOrderFit
from cal3 import nominaluv, clockeduv
if wheelpos == 160:
curves = clockeduv
elif wheelpos == 200:
curves = nominaluv
else:
print("use straight extraction for V grism modes")
return
if wheelpos > 300:
return
# coincidence loss box
coi_half_width,coilength,coifactor = get_coi_box(wheelpos)
# read the table of coefficients / get the coefficients of the Y(dis) offsets and limits[]
# stored with array of angles used.
# ZEROTH ORDER CURVATURE
if test == 'notyetcal':
coef0 = get_0thOrderFit(xin=anchor2[0],yin=anchor2[1],curvedata=curves)
else:
coef0 = spec_curvature(wheelpos,anchor2,order=0)
dlim0L=-820
dlim0U=-570
present0=True
if (xstart > dlim0U):
present0=False
coef0 = array([0.,0.])
if (xstart > dlim0L): dlim0L = xstart
# FIRST ORDER CURVATURE
if test == 'cal':
coef1 = get_1stOrderFit(xin=anchor2[0],yin=anchor2[1],curvedata=curves)
else:
coef1 = spec_curvature(wheelpos,anchor2,order=1)
#coef1[0] = -3.08e-9
#coef1[1] = 5.89e-6
#coef1[2] = -9.21e-3
dlim1L=-400
dlim1U=1150
present1=True
if (xstart > dlim1L): dlim1L = xstart
if (xend < dlim1U): dlim1U = xend
# SECOND ORDER CURVATURE
if test == 'cal':
coef2 = get_2ndOrderFit(xin=anchor2[0],yin=anchor2[1],curvedata=curves)
else:
coef2 = spec_curvature(wheelpos,anchor2,order=2)
dlim2L=25
dlim2U=3000
if (xstart > dlim2L): dlim2L = xstart
if (xend < dlim2U): dlim2U = xend
if (xend > dlim2L):
present2=True
else: present2=False
# THIRD ORDER CURVATURE
if test == 'cal':
coef3 = get_3rdOrderFit(xin=anchor2[0],yin=anchor2[1],curvedata=curves)
else:
coef3 = spec_curvature(wheelpos,anchor2,order=3)
dlim3L=425
dlim3U=3000
if (xstart > dlim3L): dlim3L = xstart
if (xend < dlim3U): dlim3U = xend
if (xend > dlim3L):
present3=True
else: present3=False
# good first approximation:
# if wheelpos == 160:
sig0coef=array([4.7])
sig1coef=array([-8.22e-09, 6.773e-04, 3.338])
#sig1coef=array([1.6*(-8.22e-09), 1.6*(6.773e-04), 1.6*3.338]) #~FIXME: try changing sigma
#sig1coef=array([ 3.0])
sig2coef=array([-5.44e-07, 2.132e-03, 3.662])
sig3coef=array([0.0059,1.5])
# override coefficients y(x):
print ("DEBUG 3431 type coef1 is ", type(coef1) )
print ("DEBUG 3432 type poly_1 is ",type(poly_1))
if (type(poly_1) != typeNone): coef1 = poly_1
if (type(poly_2) != typeNone): coef2 = poly_2
if (type(poly_3) != typeNone): coef3 = poly_3
#===================================================================
if chatter > 0:
print('================== curvature fits for y ==============')
print('zeroth order poly: ',coef0)
print('first order poly: ',coef1)
print('second order poly: ',coef2)
print('third order poly: ',coef3)
print('======================================================')
#===================================================================
# remove background
#if cval == None: cval = out_of_img_val = -1.0123456789 cval now global
if chatter > 3 : print ("DEBUG 3453 remove background")
bg, bg1, bg2, bgsig, bgimg, bg_limits, \
(bg1_good, bg1_dis, bg1_dis_good, bg2_good, bg2_dis, bg2_dis_good, bgimg_lin) \
= findBackground(extimg,background_lower=background_lower,
background_upper=background_upper,yloc_spectrum=anky, chatter=2)
if background_template is not None:
bgimg = background_template['extimg']
spimg = extimg - bgimg
ny,nx = spimg.shape
# initialise quality array, exposure array for spectrum and flags
quality = zeros(nx,dtype=int)
expospec = zeros(5*nx,dtype=int).reshape(5,nx)
qflag = quality_flags()
# get the mask for zeroth orders in the way
if chatter > 3 : print ("DEBUG 3470 get mask zeroth orders ")
# set bad done while extracting spectra below
set_qual = ((not skip_field_sources) and (ZOpos is not None) and (angle is not None))
if set_qual:
Xim,Yim,Xa,Yb,Thet,b2mag,matched,ondetector = ZOpos
# find_zeroth_orders(filestub, ext, wheelpos,clobber="yes", )
dims = array([nx,ny])
pivot_ori=array([(anchor1)[0],(anchor1)[1]])
pivot= array([ank_c[1],ank_c[0]])
# map down to 18th magnitude in B2 (use global variable uvotgetspec.background_source_mag)
m_lim = background_source_mag
map_all = plot_ellipsoid_regions(Xim.copy(),Yim.copy(),Xa.copy(),Yb.copy(),Thet.copy(),\
b2mag.copy(),matched.copy(), ondetector,pivot,pivot_ori,dims,m_lim,img_angle=angle-180.0,\
lmap=True,makeplot=False,chatter=chatter)
if chatter > 2:
print("zeroth order map all: shape=",map_all.shape," min, max =",map_all.min(), map_all.max())
# map down to 16th magnitude in B2
m_lim = 16.0
map_strong = plot_ellipsoid_regions(Xim.copy(),Yim.copy(),Xa.copy(),Yb.copy(),Thet.copy(),\
b2mag.copy(),matched.copy(), ondetector,pivot,pivot_ori,dims,m_lim,img_angle=angle-180.0,\
lmap=True,makeplot=False,chatter=chatter)
if chatter > 2:
print("zeroth order map strong: shape=",map_strong.shape," min, max =",map_strong.min(), map_strong.max())
# tracks - defined as yi (delta) = 0 at anchor position (ankx,anky)
if chatter > 3 : print ("DEBUG 3500 set up y arrays ")
# shift to first order anchor
x = array(arange(nx))-ankx
y = zeros(nx)+anky
y0 = zeros(nx)+anky - polyval(coef1,0)
y1 = zeros(nx)+anky - polyval(coef1,0)
y2 = zeros(nx)+anky - polyval(coef1,0)
y3 = zeros(nx)+anky - polyval(coef1,0)
q0 = where((x >= dlim0L) & (x <= dlim0U))
x0 = x[q0]
if present0: y0[q0] += polyval(coef0,x[q0])
q1 = where((x >= dlim1L) & (x <= dlim1U))
x1 = x[q1]
if present1: y1[q1] += polyval(coef1,x[q1])
q2 = where((x >= dlim2L) & (x <= dlim2U))
x2 = x[q2]
if present2: y2[q2] += polyval(coef2,x[q2])
q3 = where((x >= dlim3L) & (x <= dlim3U))
x3 = x[q3]
if present3: y3[q3] += polyval(coef3,x[q3])
if trackcentroiding: # global (default = True)
if chatter > 3 : print ("DEBUG 3522 centroid track")
# refine the offset by determining where the peak in the
# first order falls.
# We NEED a map to exclude zeroth orders that fall on/near the spectrum
ny = int(ny)
cp2 = zeros(ny)
cp2_spimg = zeros(spimg.shape) #~TODO:
delpix = 50
if wheelpos == 200: delpix=25 # the accuracy for the nominal uv anchor is not as good.
offsetset = False
if type(offsetlimit) == list:
offsetval = offsetlimit[0]
delpix = array([abs(offsetlimit[1]),1],dtype=int).max() # at least 1
if offsetlimit[1] < 1.:
offsetset = True
else:
print('curved_extraction: offsetlimit=',offsetlimit,' delpix=',delpix)
eo = int(anky-slit_width/2)
if set_offset:
eo = int(offset-slit_width/2)
for q in q1[0]:
if ((x[q] < 600) & (x[q] > -200) & (quality[q] == 0)):
try:
m0 = 0.5*ny-delpix + eo #int( (ny+1)/4)
m1 = 0.5*ny+delpix + eo #int( 3*(ny+1)/4)+1
yoff = y1[q] - anky # this is just the offset from the anchor since y1[x=0] was set to anky
cp2[int(m0-yoff):int(m1-yoff)] += spimg[int(m0):int(m1),q].flatten()
cp2_spimg[int(m0-yoff):int(m1-yoff),q] += spimg[int(m0):int(m1),q].flatten()
except:
print("skipping slice %5i in adjusting first order y-position"%(q))
pass
fig = plt.figure()
plt.title(obsid)
#plt.show()
#print(np.sum(cp2_spimg[:,1632:1832],axis=1),len(np.sum(cp2_spimg[:,200:400],axis=1)))
plt.plot(arange(slit_width),np.sum(cp2_spimg[:,1032:1232],axis=1)/expmap[0],label='-200-0/1032-1232')
plt.plot(arange(slit_width),np.sum(cp2_spimg[:,1232:1432],axis=1)/expmap[0],label='0-200/1232-1432')
plt.plot(arange(slit_width),np.sum(cp2_spimg[:,1432:1632],axis=1)/expmap[0],label='200-400/1432-1632')
plt.plot(arange(slit_width),np.sum(cp2_spimg[:,1632:1832],axis=1)/expmap[0],label='400-600/1632-1832')
plt.legend()
plt.ylabel('count rate per bin')
plt.title(obsid)
plt.savefig(indir+'/'+obsid+'_wing.png')
#plt.show()
plt.close()
if offsetset:
yof = offsetval - anky
if chatter > 1:
print("spectrum location set with input parameter to: y=%5.1f"%(offsetval))
msg += "spectrum location set with input parameter to: y=%5.1f\n"%(offsetval)
else:
if ifmotion:
motion = abs(obsid2motion(obsid,motion_file)['V'])
(p0,p1,p2), ier = leastsq(Fun4, (cp2.max(),anky,3.2), args=(cp2,arange(slit_width),motion) ) #~FIXME:
sigma_mean=np.mean(polyval(sig1coef,x))
#p3= motion
elif fixwidth:
(p0,p1,p2), ier = leastsq(Fun1, (cp2.max(),anky,3.2), args=(cp2,arange(slit_width)) )
sigma_mean=fixwidth/trackwidth #np.mean(polyval(sig1coef,x))
times = sigma_mean/np.mean(polyval(sig1coef,x))
sig0coef = times*sig0coef
sig1coef = times*sig1coef
sig2coef = times*sig2coef
sig3coef = times*sig3coef
elif ifextended:
(p0,p1,p2), ier = leastsq(Fun1, (cp2.max(),anky,3.2), args=(cp2,arange(slit_width)) )
sigma_mean = p2
times = p2/np.mean(polyval(sig1coef,x))
#times = 1.
#sigma_mean = times*np.mean(polyval(sig1coef,x))
sig0coef = times*sig0coef
sig1coef = times*sig1coef
sig2coef = times*sig2coef
sig3coef = times*sig3coef
else:
(p0,p1), ier = leastsq(Fun1b, (cp2.max(),anky), args=(cp2,arange(slit_width),3.2) )
sigma_mean=np.mean(polyval(sig1coef,x))
#print(p0,p1,p2,p3,sigma_mean)
fig = plt.figure()
if ifmotion:
plt.plot(arange(slit_width),cp2)
plt.plot(arange(slit_width),smeargaussian(arange(slit_width),p0,p1,sigma_mean,motion))
plt.vlines(p1-(trackwidth *sigma_mean+motion/2),0,np.max(cp2),color='k')
plt.vlines(p1+(trackwidth *sigma_mean+motion/2),0,np.max(cp2),color='k')
plt.xlabel('y pixels')
plt.ylabel('total counts')
plt.title(obsid+' motion:'+"%.2f"%motion)
elif fixwidth:
np.savetxt(indir+'/'+obsid+'_fit.txt',np.transpose(np.array([arange(slit_width),cp2])),delimiter=',',fmt='%.2f') #~FIXME:
with open(indir+'/'+obsid+'_fit.txt','r+') as f:
content = f.read()
f.seek(0,0)
f.write('A:'+f'{p0:.2f}'+' mu:'+f'{p1:.2f}'+' sigma:'+f'{p2:.2f}'+'\n'+content)
f.close()
plt.plot(arange(slit_width),cp2)
plt.plot(arange(slit_width),singlegaussian(arange(slit_width),p0,p1,p2))
plt.vlines(p1-(trackwidth *sigma_mean),0,np.max(cp2),color='k')
plt.vlines(p1+(trackwidth *sigma_mean),0,np.max(cp2),color='k')
plt.xlabel('y pixels')
plt.ylabel('total counts')
plt.title(obsid)
else:
plt.plot(arange(slit_width),cp2)
plt.plot(arange(slit_width),singlegaussian(arange(slit_width),p0,p1,sigma_mean))
plt.vlines(p1-(trackwidth *sigma_mean),0,np.max(cp2),color='k')
plt.vlines(p1+(trackwidth *sigma_mean),0,np.max(cp2),color='k')
plt.xlabel('y pixels')
plt.ylabel('total counts')
plt.title(obsid)
plt.savefig(indir+'/'+obsid+'_fit.png')
#plt.show()
plt.close()
yof = (p1-anky)
if ank_c_0offset == True:
yof = 0
if chatter > 1:
print("\n *** cross-spectrum gaussian fit parameters: ",p0,p1)
print("the first anchor fit with gaussian peaks at %5.1f, and the Y correction\nis %5.1f (may not be used)" % (p1,yof))
#### should also estimate the likely wavelength error from the offset distance p1 and print
#msg += "cross-spectrum gaussian fit parameters: (%5.1f ,%5.1f)\n" % (p0,p1)
#msg += "the first anchor fit with gaussian peaks at %5.1f, and the Y correction was %5.1f\n" % (p1,yof)
else:
set_offset = True
offsetset = False
# so now shift the location of the curves to match the first order uv part.
if set_offset:
# ignore computed offset and offsetlimit [,] but used passed offset argument
y0 += offset
y1 += offset
y2 += offset
y3 += offset
print("shifting the y-curve with offset passed by parameter")
else:
# assuming the relative position of the orders is correct, just shift the whole bunch
y0 += yof
y1 += yof
y2 += yof
y3 += yof
if not set_qual:
map = None
print("no zeroth order contamination quality information available ")
quality[:] = qflag['good']
# OUTPUT PARAMETER spectra, background, slit init - full dimension retained
if chatter > 3 : print ("DEBUG 3594 set up spectrum arrays ")
# initialize
sp_all = zeros(nx) + cval # straight slit
bg_all = zeros(nx) + cval # straight slit
# spectrum arrays
sp_zeroth = zeros(nx) + cval # curved extraction
sp_first = zeros(nx) + cval # curved extraction
sp_second = zeros(nx) + cval # curved extraction
sp_third = zeros(nx) + cval # curved extraction
bg_zeroth = zeros(nx) + cval # curved extraction
bg_first = zeros(nx) + cval # curved extraction
bg_second = zeros(nx) + cval # curved extraction
bg_third = zeros(nx) + cval # curved extraction
# coi-area arrays
co_zeroth = zeros(nx) + cval
co_first = zeros(nx) + cval
co_second = zeros(nx) + cval
co_third = zeros(nx) + cval
co_back = zeros(nx) + cval
# quality flag arrays
at1 = zeros(nx,dtype=bool)
at2 = zeros(nx,dtype=bool)
at3 = zeros(nx,dtype=bool)
apercorr = zeros(5*nx).reshape(5,nx) + cval
borderup = zeros(5*nx).reshape(5,nx) + cval
borderdown = zeros(5*nx).reshape(5,nx) + cval
fitorder = (present0,present1,present2,present3),(q0,q1,q2,q3),(
y0,dlim0L,dlim0U,sig0coef,sp_zeroth,co_zeroth),(
y1,dlim1L,dlim1U,sig1coef,sp_first, co_first ),(
y2,dlim2L,dlim2U,sig2coef,sp_second,co_second),(
y3,dlim3L,dlim3U,sig3coef,sp_third,co_third ),(
x,xstart,xend,sp_all,quality,co_back)
if trackonly: # output the coordinates on the extimg image which specify the lay of
# each order
if outfull:
return fitorder, cp2, (coef0,coef1,coef2,coef3), (bg_zeroth,bg_first,
bg_second,bg_third), (borderup,borderdown), apercorr #, expospec, msg, curved
else: return fitorder
if not trackfull:
if (curved == "update") & (not trackcentroiding):
# the hope is, that with more data the calibration can be improved to eliminate this step
#try:
fitorder2, fval, fvalerr = updateFitorder(extimg, fitorder, wheelpos, full=True,
predict2nd=predict_second_order, fit_second=fit_second, fit_third=fit_third,
C_1=C_1, C_2=C_2, d12=dist12, chatter=chatter)
msg += "updated the curvature and width fit parameters\n"
(present0,present1,present2,present3),(q0,q1,q2,q3), (
y0,dlim0L,dlim0U,sig0coef,sp_zeroth,co_zeroth),(
y1,dlim1L,dlim1U,sig1coef,sp_first,co_first ),(
y2,dlim2L,dlim2U,sig2coef,sp_second,co_second),(
y3,dlim3L,dlim3U,sig3coef,sp_third,co_third ),(
x,xstart,xend,sp_all,quality,co_back) = fitorder2
# update the anchor y-coordinate
ank_c[0] = y1[int(ank_c[1])]
#except:
# msg += "WARNING: fit order curvature update has failed\n"
# curved = "curve"
if offsetset & (not trackcentroiding):
mess = "%s\nWARNING Using offsetlimit with parameter *curved = 'update'* \n"\
"WARNING Therefore we updated the curvature, and besides the curvature, the\n"\
"Y-position of the extraction region was updated to y1[ankx]=%5.1f and \n"\
"does not equal the offsetlimit value of %5.1f \n%s"%(30*"=*=",
y1[int(ankx)],offsetlimit[0],30*"=*=")
print(mess)
mess = "Updated the curvature, and besides the curvature, the Y-position \n"\
" of the extraction region was updated to y1[ankx]=%5.1f and does\n"\
" not equal the offsetlimit value of %5.1f \n"%(y1[int(ankx)],offsetlimit[0])
msg += mess+"\n"
# default single track extraction
sphalfwid = 4.*polyval(sig1coef,0)   # first-order width evaluated at the anchor
spwid = 2*sphalfwid
splim1 = int(slit_width/2+offset-sphalfwid+1)
splim2 = int(splim1 + spwid)
sp_all = extimg[splim1:splim2,:].sum(axis=0).flatten()
bg_all = bgimg[splim1:splim2,:].sum(axis=0).flatten()
borderup[4,:] = splim2
borderdown[4,:] = splim1
# background for coi-loss box - using a 3x larger sampling region
k1 = int(anky-3*coi_half_width+0.5)
co_back = bgimg[k1:k1+int(6*coi_half_width),:].sum(axis=0)/3.0
if present0:
for i in range(nx):
sphalfwid = trackwidth*polyval(sig0coef,x[i])
spwid = 2*sphalfwid
#splim1 = 100+offset-sphalfwid+1 changes 19-feb-2012
#splim2 = splim1 + spwid
#k1 = splim1+y0[i]-anky
k1 = int(y0[i] - sphalfwid + 0.5)
k2 = k1 + int(spwid+0.5)
k3 = int(y0[i] - coi_half_width + 0.5)
k4 = k3 + int(2*coi_half_width)   # coi box spans 2*coi_half_width centred on the track
if i in q0[0]:
co_zeroth[i] = extimg[k3:k4,i].sum()
sp_zeroth[i] = extimg[k1:k2,i].sum()
bg_zeroth[i] = bgimg[k1:k2,i].sum()
borderup[0,i] = k2
borderdown[0,i] = k1
apercorr[0,i] = x_aperture_correction(k1,k2,sig0coef,x[i],norder=0,wheelpos=wheelpos,fixwidth=fixwidth)
if len(expmap) == 1: expospec[0,i] = expmap[0]
else: expospec[0,i] = expmap[k1:k2,i].mean()
if present1:
#if ifmotion:
# apercorr_value = x_aperture_correction(0,0,sig1coef,100,norder=1,mode='gaussian',
# sigma=p2,motion=motion,tw=trackwidth,ifmotion=ifmotion)
for i in range(nx):
if ifmotion:
sphalfwid = trackwidth *polyval(sig1coef,x[i])+motion/2 #~FIXME:
else:
sphalfwid = trackwidth * polyval(sig1coef,x[i])
# if (x[i] < 30): sphalfwid *= bluetrackwidth
spwid = 2*sphalfwid
#splim1 = 100+offset-sphalfwid+1 changes 19-feb-2012
#splim2 = splim1 + spwid
#k1 = int(splim1+y1[i]-anky+0.5)
k1 = int(y1[i] - sphalfwid + 0.5)
k2 = k1 + int(spwid+0.5)
k3 = int(y1[i] - coi_half_width + 0.5)
k4 = k3 + int(2*coi_half_width) #--TODO:FIXME:
k5 = y1[i]
if i in q1[0]:
co_first[i] = extimg[k3:k4,i].sum()
sp_first[i] = extimg[k1:k2,i].sum()
bg_first[i] = bgimg[k1:k2,i].sum()
borderup[1,i] = k2
borderdown[1,i] = k1
if ifmotion:
apercorr[1,i] = x_aperture_correction(k1,k2,sig1coef,x[i],norder=1,mode='gaussian',
sigma=polyval(sig1coef,x[i]),motion=motion,ifmotion=ifmotion,wheelpos=wheelpos,fixwidth=fixwidth)
# apercorr[1,i] = apercorr_value
else:
apercorr[1,i] = x_aperture_correction(k1,k2,sig1coef,x[i],norder=1,wheelpos=wheelpos,fixwidth=fixwidth)
if len(expmap) == 1: expospec[1,i] = expmap[0]
else: expospec[1,i] = expmap[k1:k2,i].mean()
if dropout_mask is not None:
at3[i] = dropout_mask[k1:k2,i].any()
if set_qual:
k5 = int(y1[i] - 49 + 0.5)
k6 = k5 + int(98+0.5)   # 98-pixel window centred on the track
if ny > 20:
# all zeroth orders of sources within coi-distance:
at1[i] = (map_all[i,k3:k4] == False).any()
if ny > 100:
# strong sources: circle 49 pix radius hits the centre of the track
at2[i] = (map_strong[i,k5:k6] == False).any()
quality[at1] = qflag['weakzeroth']
quality[at2] = qflag['zeroth']
quality[at3] = qflag['bad']
if present2:
for i in range(nx):
sphalfwid = trackwidth * polyval(sig2coef,x[i])
spwid = 2*sphalfwid
#splim1 = 100+offset-sphalfwid+1 changes 19-feb-2012
#splim2 = splim1 + spwid
#k1 = int(splim1+y2[i]-anky+0.5)
k1 = int(y2[i] - sphalfwid +0.5)
k2 = k1 + int(spwid+0.5)
k3 = int(y2[i] - coi_half_width + 0.5)
k4 = k3 + int(2*coi_half_width)
if i in q2[0]:
co_second[i] = extimg[k3:k4,i].sum()
sp_second[i] = extimg[k1:k2,i].sum()
bg_second[i] = bgimg[k1:k2,i].sum()
borderup[2,i] = k2
borderdown[2,i] = k1
apercorr[2,i] = x_aperture_correction(k1,k2,sig2coef,x[i],norder=2,wheelpos=wheelpos,fixwidth=fixwidth)
if len(expmap) == 1: expospec[2,i] = expmap[0]
else: expospec[2,i] = expmap[k1:k2,i].mean()
y1_y2 = np.abs(0.5*(k2+k1) - 0.5*(borderup[1,i]-borderdown[1,i]))
s1_s2 = 0.5*(np.polyval(sig1coef,x[i]) + np.polyval(sig2coef, x[i]) )
if ( y1_y2 < s1_s2) : quality[i] += qflag.get('overlap')
if present3:
for i in range(nx):
sphalfwid = trackwidth * polyval(sig3coef,x[i])
spwid = 2*sphalfwid
#splim1 = 100+offset-sphalfwid+1
#splim2 = splim1 + spwid
#k1 = int(splim1+y3[i]-anky+0.5)
k1 = int(y3[i] - sphalfwid +0.5)
k2 = k1 + int(spwid+0.5)
k3 = int(y3[i] - coi_half_width + 0.5)
k4 = k3 + int(2*coi_half_width)
if i in q3[0]:
co_third[i] = extimg[k3:k4,i].sum(axis=0)
sp_third[i] = extimg[k1:k2,i].sum(axis=0)
bg_third[i] = bgimg[k1:k2,i].sum(axis=0)
borderup[3,i] = k2
borderdown[3,i] = k1
apercorr[3,i] = x_aperture_correction(k1,k2,sig3coef,x[i],norder=3,wheelpos=wheelpos,fixwidth=fixwidth)
if len(expmap) == 1: expospec[3,i] = expmap[0]
else: expospec[3,i] = expmap[k1:k2,i].mean()
# y0,y1,y2,y3 now reflect accurately the center of the slit used.
if chatter > 3 : print ("DEBUG 3792 stacking results in structure fitorder")
fitorder = (present0,present1,present2,present3),(q0,q1,q2,q3), (
y0,dlim0L,dlim0U,sig0coef,sp_zeroth,co_zeroth),(
y1,dlim1L,dlim1U,sig1coef,sp_first, co_first ),(
y2,dlim2L,dlim2U,sig2coef,sp_second,co_second),(
y3,dlim3L,dlim3U,sig3coef,sp_third, co_third ),(
x,xstart,xend,sp_all,quality,co_back)
#~FIXME:
if outfull:
return fitorder, cp2, (coef0,coef1,coef2,coef3), (bg_zeroth,bg_first,
bg_second,bg_third), (borderup,borderdown), apercorr, expospec, msg, curved
else: return fitorder
#===================
# Now calculate the probability distributions across the orders using gaussian fits
# this section was for development only
if trackfull: #~FIXME: # fit the cross profile with gaussians; return the gaussian fit parameters
if chatter > 3 : print ("DEBUG 3810 full-track update with mfit")
# output parameter gfit:
# define output per x[i]: numpy array gfit.shape= (6,nx) of: (x,order,amplitude,y_pix_position,sig,flags)
gfit = np.zeros( 4*6*nx ).reshape(4,6,nx) -1
#check that y1,y2,y3 are full length arrays
if not ( (len(y1) == nx) & (len(y2) == nx) & (len(y3) == nx) ):
print("FATAL error in uvotgetspec.curved_extraction array sizes wrong")
# this parameter allows you to restrict the range along the dispersion being considered
if (test is None) or (test == 'cal'):
ileft = 2
irite = nx -2
else:
ileft = test[0]
irite = test[1]
for i in range(ileft,irite):
if chatter > 3: print("uvotgetspec.curved_extraction [trackfull] fitting i = %2i x=%6.2f"%(i,x[i]))
# do the zeroth order
if i in q0[0]:
Ypos = (array( [y0[i]])).flatten()
Xpos = arange(i-2,i+3)
sigmas = sig0coef
(par, flag), junk = get_components(Xpos,spimg,Ypos,wheelpos,\
caldefault=caldefault,sigmas=sigmas)
flags = str(flag[0])+str(flag[1])+str(flag[2])+str(flag[3])+str(flag[4])+str(flag[5])
iflags = int(flags)
gfit[0,:,i] = [i,0,par[0],par[1],par[2],iflags]
if chatter > 3: print(i, par, flag)
# do the first order
if ((i in q1[0]) & (i not in q2[0])) :
Ypos = array( [y1[i]] ).flatten()
Xpos = arange(i-2,i+3)
sigmas = sig1coef
(par, flag), junk = get_components(Xpos,spimg,Ypos,wheelpos,\
caldefault=caldefault,sigmas=sigmas)
flags = str(flag[0])+str(flag[1])+str(flag[2])+str(flag[3])+str(flag[4])+str(flag[5])
iflags = int(flags)
gfit[1,:,i] = [i,1,par[0],par[1],par[2],iflags]
if chatter > 3: print(i, par, flag)
# do the second order
if ((i in q1[0]) & (i in q2[0]) & (i not in q3[0])):
Ypos = array( [y1[i],y2[i]]).flatten()
Xpos = arange(i-3,i+4)
sigmas = array([ sig1coef[0], sig2coef[0] ])
if chatter > 3: print('++++ second order Xpos:',Xpos,' Ypos: ', Ypos,' wheelpos ',wheelpos)
Z = get_components(Xpos,spimg,Ypos,wheelpos,composite_fit=composite_fit,\
caldefault=caldefault,sigmas=sigmas)
par, flag = Z[0]
flags = str(flag[0])+str(flag[1])+str(flag[2])+str(flag[3])+str(flag[4])+str(flag[5])
iflags = int(flags)
gfit[1,:,i] = [i,1,par[0],par[1],par[2],iflags]
if len(par) == 6:
gfit[2,:,i] = [i,2,par[3],par[4],par[5],iflags]
if chatter > 3: print(i); print(par[0:3]); print(par[3:6]); print(flag)
# do the third order
if ((i in q1[0]) & (i in q2[0]) & (i in q3[0])):
Ypos = array([y1[i],y2[i],y3[i]]).flatten()
Xpos = arange(i-4,i+5)
sigmas = array([sig1coef[0], sig2coef[0], sig3coef[0]])
if chatter > 3: print('+++++ third order Xpos:',Xpos,' Ypos: ', Ypos,' * * * 3 3 3 3 3 * * *')
width = abs( polyval(array([2.0e-05, 0.034, -70]),(anchor2[1]-1200.)))+5.0 # rough limits
try:
Z = get_components(Xpos,spimg,Ypos,wheelpos,chatter=chatter,width=width,\
composite_fit=composite_fit,caldefault=caldefault,sigmas=sigmas)
par, flag = Z[0]
except:
print("failed 3rd order fitting width = ",width)
print("Ypos = ",Ypos)
print("Xpos range ",i-4,i+5, " sigmas = ",sigmas, " wheelpos = ",wheelpos)
print("composite_fit:",composite_fit," caldefault:",caldefault)
print(par)
print(flag)
par = array([0.,y1[i],3.,0.,y2[i],4.,0.,y3[i],6.])
flag = array([9,9,9,9,9,9])
flags = str(flag[0])+str(flag[1])+str(flag[2])+str(flag[3])+str(flag[4])+str(flag[5])
iflags = int(flags)
gfit[1,:,i] = [i,1,par[0],par[1],par[2],iflags]
if len(par) > 4:
gfit[2,:,i] = [i,2,par[3],par[4],par[5],iflags]
if len(par) == 9:
gfit[3,:,i] = [i,3,par[6],par[7],par[8],iflags]
if chatter > 3:
print(i); print(par[0:3]) ; print(par[3:6]) ; print(par[6:9]) ; print(iflags)
# thing not covered (properly):
# -- the second order falls on the first and the third order not
# -- one of the orders is not on the detector
# -- order overlap
# -- minus one order
return fitorder, gfit, (bgimg,)
def x_aperture_correction(k1,k2,sigcoef,x,norder=None, mode='best', coi=None, wheelpos=None, sigma=3.2,motion=10, tw=2.5, ifmotion=True, fixwidth=False):
'''Returns the aperture correction factor
parameters
----------
k1,k2 : int
k1 edge of track, k2 opposite track edge
in pixel coordinates
sigcoef : list
polynomial coefficient of the fit to the track width
so that sigma = polyval(sigcoef,x)
x : float
pixel/channel position
norder: int
order of the spectrum
mode : 'best'|'gaussian'
'gaussian' option causes first order to be treated as a gaussian PSF
coi : None
not implemented
wheelpos : 160|200|955|1000
filter wheel position
Notes
-----
The aperture correction is returned for given sigcoef and position x
Using the measured cumulative profile normal to the dispersion for the
first order (faint spectrum) or gaussians for orders zero,second, third.
History:
2012-02-20 Split out in preparation of non-gaussian aperture correction factor
2012-10-06 Dependence on coi-factor identified as a likely parameter
changing the PSF (no further action)
2013-12-15 revised aperture functions, one for each grism (low coi)
'''
import uvotmisc
import scipy
from scipy.interpolate import interp1d, splev
import numpy as np
apercorr = 1.0
if fixwidth:
apercorr = np.ones(np.shape(apercorr)) #~FIXME: I must remove this line to do apercorr
return apercorr
if norder == 0:
apercorr = 1.0/uvotmisc.GaussianHalfIntegralFraction( 0.5*(k2-k1)/np.polyval(sigcoef,x) )
if norder == 1:
# low coi apertures (normalised to 1 at aperture with half-width 2.5 sigma)
# fitted polynomials to the aperture (low-coi)
#for 0<aperture<6 sig
polycoef160 = np.array([ 1.32112392e-03, -2.69269447e-02, 2.10636905e-01,
-7.89493710e-01, 1.43691688e+00, -2.43239325e-02])
polycoef200 = np.array([ 1.29297314e-03, -2.66018405e-02, 2.10241179e-01,
-7.93941262e-01, 1.44678036e+00, -2.51078365e-02])
#y200 = polyval(polycoef200,x)
polycoef1000a = np.array([ 0.00260494, -0.04792046, 0.33581242, -1.11237223, 1.74086898,
-0.04026319]) # for aperture <= 2.2 sig, and for larger:
polycoef1000b = np.array([ 0.00128903, 0.00107042, 0.98446801])
polycoef955 = np.array([ 0.00213156, -0.03953134, 0.28146284, -0.96044626, 1.58429093,
-0.02412411]) # for aperture < 4 sig
# best curves for the apertures (using aperture.py plots WD1657+343)
aper_160_low = {
# half-width in units of sig
"sig": [0.00,0.30,0.51,0.700,0.90,1.000,1.100,1.200,1.400,
1.600,1.800,2.000,2.20,2.5,2.900,3.31,4.11,6.00],
# aperture correction, normalised
"ape": [0.00,0.30,0.52,0.667,0.77,0.818,0.849,0.872,0.921,
0.947,0.968,0.980,0.99,1.0,1.008,1.01,1.01,1.01]
}
aper_200_low = {
"sig": [0.0,0.300,0.510,0.700,0.800,0.900,1.000,1.10,1.20,
1.40, 1.60, 1.80, 2.0, 2.2, 2.5, 2.7, 3.0,4.0,6.0],
"ape": [0.0,0.308,0.533,0.674,0.742,0.780,0.830,0.86,0.89,
0.929,0.959,0.977,0.986,0.991,1.0,1.002,1.003,1.004,1.005 ]
}
aper_1000_low = {
"sig": [0.0, 0.3, 0.5, 0.7, 0.8, 0.9, 1.0, 1.2, 1.4, 1.6, 2.0,2.2,2.5,3.0 ,4.0 ,6.0 ],
"ape": [0.0,0.37,0.55,0.68,0.74,0.80,0.85,0.91,0.96,0.98,0.995,1. ,1. ,1.004,1.01,1.01]
}
aper_955_med = {
"sig": [0.0,0.30,0.60,0.80,1.00,1.30,1.60,1.80,2.00,2.50,3.00, 4.00,6.00],
"ape": [0.0,0.28,0.47,0.64,0.75,0.86,0.93,0.96,0.97,1.00,1.013,1.02,1.02]
}
aper_1000_med = {
"sig": [0.0,0.30,0.50,0.70,0.80,0.90,1.00,1.20,1.40,1.60,
1.80,2.00,2.20,2.50,3.00,4.00,6.00],
"ape": [0.0,0.34,0.46,0.63,0.68,0.73,0.76,0.87,0.90,0.94,
0.96,0.98,0.99,1.00,1.015,1.027,1.036]
}
renormal = 1.0430 # calibration done with aperture correction 1.043 (sig=2.5)
sig = np.polyval(sigcoef,x) # half width parameter sig in pixels
xx = 0.5*(k2-k1)/sig # half track width in units of sig
if (mode == 'gaussian'):# | (xx > 4.5):
if ifmotion:
apercorr = 1.0/uvotmisc.SmearGaussianHalfIntegralFraction(sigma,motion,tw) #~FIXME:
else:
apercorr = 1.0/uvotmisc.GaussianHalfIntegralFraction( 0.5*(k2-k1)/np.polyval(sigcoef,x) )
elif (wheelpos is not None):
# low coi for wheelpos = 160,200; medium coi for wheelpos = 955, 1000
if wheelpos == 160:
if (type(coi) == typeNone) or (coi < 0.1) :
apercf1 = interp1d(aper_160_low['sig'],aper_160_low['ape'],)
apercorr = renormal / apercf1(xx)
if wheelpos == 200:
if (type(coi) == typeNone) or (coi < 0.1) :
apercf2 = interp1d(aper_200_low['sig'],aper_200_low['ape'],)
apercorr = renormal / apercf2(xx)
if wheelpos == 955:
if (type(coi) == typeNone) or (coi < 0.1) :
apercf3 = interp1d(aper_955_med['sig'],aper_955_med['ape'],)
apercorr = renormal / apercf3(xx)
#apercf3 = interp1d([0,6],[0,1],fill_value=(0,1),bounds_error=False)
#apercorr = 1.0/apercf3(xx) # change psf to test if there is apercorr before coi-corr
if wheelpos == 1000:
if (type(coi) == typeNone) or (coi < 0.1) :
apercf4 = interp1d(aper_1000_low['sig'],aper_1000_low['ape'],)
apercorr = renormal / apercf4(xx)
else:
# when xx<4.5, mode !gaussian, wheelpos==None use the following
# 2012-02-21 PSF best fit at 3500 from cal_psf aper05+aper08 valid for 0.5 < xx < 4.5
# the function does not rise as steeply so has more prominent wings
tck = (np.array([ 0. , 0. , 0. , 0. , 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8,
0.9, 1. , 1.1, 1.2, 1.3, 1.4, 1.5, 1.6, 1.7, 1.8, 1.9,
2. , 2.1, 2.2, 2.3, 2.4, 2.5, 2.6, 2.7, 2.8, 2.9, 3. ,
3.1, 3.2, 3.3, 3.4, 3.5, 3.6, 3.7, 3.8, 3.9, 4. , 4.1,
4.2, 4.3, 4.4, 4.5, 4.6, 4.7, 4.8, 5. , 5. , 5. , 5. ]),
np.array([ -6.45497898e-19, 7.97698047e-02, 1.52208991e-01,
2.56482414e-01, 3.31017197e-01, 4.03222197e-01,
4.72064814e-01, 5.37148347e-01, 5.97906198e-01,
6.53816662e-01, 7.04346413e-01, 7.48964617e-01,
7.87816053e-01, 8.21035507e-01, 8.48805502e-01,
8.71348421e-01, 8.88900296e-01, 9.03143354e-01,
9.16085646e-01, 9.28196443e-01, 9.38406001e-01,
9.45971114e-01, 9.51330905e-01, 9.54947930e-01,
9.57278503e-01, 9.58780477e-01, 9.59911792e-01,
9.60934825e-01, 9.62119406e-01, 9.63707446e-01,
9.66045076e-01, 9.69089467e-01, 9.73684854e-01,
9.75257929e-01, 9.77453939e-01, 9.81061451e-01,
9.80798098e-01, 9.82633805e-01, 9.83725248e-01,
9.84876762e-01, 9.85915295e-01, 9.86929684e-01,
9.87938594e-01, 9.88979493e-01, 9.90084808e-01,
9.91288321e-01, 9.92623448e-01, 9.94123703e-01,
9.96388866e-01, 9.98435907e-01, 1.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00]), 3)
apercorr = 1.0/splev( xx, tck,)
if norder == 2:
apercorr = 1.0/uvotmisc.GaussianHalfIntegralFraction( 0.5*(k2-k1)/np.polyval(sigcoef,x) )
if norder == 3:
apercorr = 1.0/uvotmisc.GaussianHalfIntegralFraction( 0.5*(k2-k1)/np.polyval(sigcoef,x) )
return apercorr
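# Illustrative application (a sketch of the assumed usage, not pipeline code;
# 'col' is a hypothetical column index, the other names are as used in
# curved_extraction above): the returned factor multiplies the counts summed
# over the extraction slit [k1:k2) to correct to the full cross-dispersion profile.
#   sig = np.polyval(sig1coef, x[col])
#   k1  = int(y1[col] - trackwidth*sig + 0.5)
#   k2  = k1 + int(2.*trackwidth*sig + 0.5)
#   counts_corrected = extimg[k1:k2, col].sum() * x_aperture_correction(
#                          k1, k2, sig1coef, x[col], norder=1, wheelpos=160)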
def clipmask(f,sigclip=2.5,fpos=False):
'''Provides mask to clip bad data.
Parameters
----------
f : 2D array
kwargs : dict
optional arguments
- **sigclip** : float
clip data at `sigclip` standard deviations from the mean
- **fpos** : bool
if True, clip negative values
Returns
-------
mask : 2D array, boolean
Array of same size as image, true where within sigclip standard
deviations of mean.
Notes
-----
By default infinities are clipped.
The mask is iterated until it converges. So the effect of outliers
on the standard deviation is nil. This also means that sigma needs
to be chosen large enough or the standard deviation will not be
a good measure of the real noise in the mean.
'''
import numpy as np
bg = f
if fpos:
mask = (np.isfinite(f) & (f >= 0.))
else:
mask = np.isfinite(f)
m0 = len(np.where(mask)[0])
n = 50
bad = True
while (bad & (n > 0)):
n -= 1
mask = abs(f - f[mask].mean()) < sigclip * f[mask].std()
m = len(np.where(mask)[0])
if m == m0: bad = False
else: m0 = m
return mask
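# Illustrative example (a sketch, not part of the pipeline; works on 1-D as well
# as 2-D input): iteratively sigma-clip a noisy cut before taking a robust mean.
#   f = np.array([1.0, 1.1, 0.9, 1.2, 1.0, 0.95, 1.05, 50.0, np.inf])
#   m = clipmask(f, sigclip=2.5)    # the 50.0 outlier and the inf end up masked out
#   robust_mean = f[m].mean()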
def get_components(xpos,ori_img,Ypositions,wheelpos,chatter=0,caldefault=False,\
sigmas=None,noiselevel=None,width=40.0,composite_fit=True, fiterrors = True, \
smoothpix=1, amp2lim=None,fixsig=False,fixpos=False):
''' extract the spectral components for an image slice
at position(s) xpos (dispersion axis) using the Ypositions
of the orders. The value of Ypositions[0] should be the main peak.
Notes: implicit assumption is that the 'y' axis is the pixel number.
if for some reason the data pairs are (z_i,f_meas_i) then the definition of y
changes into z.
if the return value for the centre of the gaussian exceeds some number (sig?),
then the solution is probably suspect. In that case a second fit with sig? held
fixed perhaps should be done.
some tests show that the solution is very sensitive to the first guess of the
position of the peak. It will even find a dip in the noise (neg amplitude)
rather than the main peak or overshoot the peak if the starting guess is too far
off, and fudge sigma to be large.
Error Flag:
flag[0] 0 = ok, 1=solution main peak is offset from Ypositions by more than 'sig' pixels
flag[1] 0 = ok, 1=solution secondary peak is offset from Ypositions by more than 'sig' pixels
flag[2] 0 = ok, 1=solution third peak is offset from Ypositions by more than 'sig' pixels
flag[3] not used
flag[4] number of orders in answer
flag[5] error flag returned by fitting program
noiselevel:
if the fit to the peak has a maximum < noiselevel then the peak will be removed.
fiterrors True implies caldefault=True
smoothpix: the number of pixels along dispersion to smooth over for
fitting gaussians across dispersion
amp2lim: second order prediction of a (minimum, maximum) valid for all xpos
NPMK, 2010-07-15 Fecit
NPMK, 2011-08-16 adding smoothing for improved fitting
NPMK 2011-08-26 replace leastsq with mpfit based routines; clip image outside spectrum width
'''
import numpy
from numpy import array, arange,transpose, where, abs, min, zeros, atleast_1d, atleast_2d, sqrt
try:
from convolve import boxcar
except:
from stsci.convolve import boxcar
xpos = atleast_1d(xpos)
ori_img = atleast_2d(ori_img)
Ypositions = atleast_1d(Ypositions)
xpos = xpos.flatten()
Ypositions = Ypositions.flatten()
nypos = len(Ypositions)
smoothpix = int(smoothpix)
if smoothpix > 1:
spimg = boxcar(ori_img.copy(),(smoothpix,),mode='reflect')
else: spimg = ori_img
if type(sigmas) == typeNone:
sigmas = array([3.1,4.3,4.6])
if chatter > 4:
print("get_components: input prameter wheelpos ", wheelpos)
print("get_components: input parameter xpos ", xpos)
print("get_components: input parameter Ypositions ", Ypositions)
print("get_components: number of orders : ",nypos)
print("get_components: dimension input image ", spimg.shape)
xpos = xpos[ where(xpos < spimg.shape[1])[0] ] # eliminate elements outside range
if len(xpos) <1:
print("get_components: xpos must be at least one number")
raise ValueError
return
elif len(xpos) == 1:
f_meas = spimg[:,xpos]
f_ori = ori_img[:,xpos]
else:
f_meas = spimg[:,xpos].mean(axis=1)
f_ori = ori_img[:,xpos].mean(axis=1)
f_meas = f_meas.flatten()
f_ori = f_ori.flatten()
f_pos = f_meas >= 0
f_err = 9.99e+9 * numpy.ones(len(f_meas))
f_err[f_pos] = 1.4*sqrt(f_meas[f_pos])
bg_mask = clipmask( f_meas, fpos=True)
f_mask = bg_mask
bg = f_meas[bg_mask].mean()
if type(noiselevel) == typeNone:
noiselevel = f_meas[bg_mask].mean()
if chatter > 3: print("get_components: adopted noiselevel = ", noiselevel)
y = arange(spimg.shape[0],dtype=float) # pixel number
flag = zeros(6, dtype=int )
if caldefault:
if type(sigmas) == typeNone:
print("missing parameter fitorder in uvotgetspec.get_components\n")
else:
# the positions of the centre of the fits are given in Ypositions
sigmaas = atleast_1d(sigmas)
if nypos == 1:
if chatter > 3: print('len Ypositions == 1')
sig0 = sigmaas[0]
p0 = Ypositions[0]
a0 = max(f_meas)
f_mask[int(p0-4*sig0):int(p0+4*sig0)] = True
Z = runfit1(y[f_mask],f_meas[f_mask],f_err[f_mask],bg,a0,p0,sig0,\
fixsig=fixsig,fixpos=fixpos)
flag[5] = Z.status
if Z.status > 0:
[bg0,bg1,a0,p0,sig0] = Z.params
else:
if chatter > 4:
print("runfit1 status:",Z.status)
print("runfit1 params:",Z.params)
if fiterrors: return (Z.params,Z.perror,flag), (y,f_meas) # errors in fit = Z.perror
else: return ((a0,p0,sig0),flag), (y,f_meas)
if nypos == 2:
if chatter > 3: print('len Ypositions == 2')
sig0, sig1 = sigmaas[0], sigmaas[1]
p0, p1 = Ypositions
a0 = 0.9 * max(f_meas)
a1 = 0.5*a0
f_mask[int(p0-4*sig0):int(p0+4*sig0)] = True
f_mask[int(p1-4*sig1):int(p1+4*sig1)] = True
Z = runfit2(y[f_mask],f_meas[f_mask],f_err[f_mask],bg,a0,p0,sig0,a1,p1,sig1,\
fixsig=fixsig,fixpos=fixpos,amp2lim=amp2lim)
flag[5] = Z.status
if Z.status > 0:
[bg0,bg1,a0,p0,sig0,a1,p1,sig1] = Z.params
if fiterrors: return (Z.params,Z.perror,flag), (y,f_meas) # errors in fit = Z.perror
else: return ((a0,p0,sig0,a1,p1,sig1),flag), (y,f_meas)
if nypos == 3:
if chatter > 3: print('len Ypositions == 3')
sig0,sig1,sig2 = sigmaas[:]
p0, p1, p2 = Ypositions
a0 = 0.9* max(f_meas)
a1 = a0
a2 = a1
f_mask[int(p0-4*sig0):int(p0+4*sig0)] = True
f_mask[int(p2-4*sig2):int(p2+4*sig2)] = True
Z = runfit3(y[f_mask],f_meas[f_mask],f_err[f_mask],bg,a0,p0,sig0,a1,p1,sig1,a2,p2,sig2,\
fixsig=fixsig,fixpos=fixpos,amp2lim=amp2lim)
flag[5] = Z.status
if Z.status > 0:
[bg0,bg1,a0,p0,sig0,a1,p1,sig1,a2,p2,sig2] = Z.params
if fiterrors: return (Z.params,Z.perror,flag), (y,f_meas) # errors in fit = Z.perror
else: return ((a0,p0,sig0,a1,p1,sig1,a2,p2,sig2),flag), (y,f_meas)
if wheelpos < 500 :
sig = 6
else:
sig = 4
sig0 = sig
Sig = sig
# width = 40 Maximum order distance - parameter in call ?
# start with fitting using a fixed sig
# to get the peaks fixed do them one by one
if len(Ypositions) < 4 :
# FIT ONE PEAK for all observations
# first guess single gaussian fit parameters
a0 = f_meas.max()
y0 = Ypositions[0]
(p0_,p1), ier = leastsq(Fun1b, (a0,y0), args=(f_meas,y,sig) )
# if the "solution" is wrong use the input as best guess:
if abs(Ypositions[0] - p1) > 15:
p1 = y0
flag[0] = 3
else: # shift the input positions
delpos = p1-Ypositions[0]
Ypositions += delpos
# refine the sigma with fixed centre for the peak
(p0,sig_), ier = leastsq(Fun1a, (p0_,sig), args=(f_meas,y,p1) )
if ((sig_ > 0.1*sig) & (sig_ < 6.* sig)):
sig1 = sig_
else: sig1 = sig
Yout = ((p0,p1,sig1), flag), (y,f_meas)
if chatter > 3:
print("highest peak amplitude=%8.1f, position=%8.1f, sigma=%8.2f, ier flag=%2i "%(p0,p1,sig1,ier))
else:
print('Error in number of orders given in Ypositions')
return
# limit acceptable range for searching for maxima
q = where( (y < p1+width) & (y > p1-0.5*width) ) # if direction known, one can be set to 3*sig
yq = y[q[0]]
qok = len(q[0]) > 0
if ( (len(Ypositions) > 1) & qok ):
# TWO PEAKS
# double gaussian fit: remove the first peak from the data and fit the residual
f_meas_reduced = f_meas[q] - singlegaussian(yq, p0, p1, sig1)
a0 = f_meas_reduced.max()
y0 = where(f_meas_reduced == a0)[0][0]
Y2 = (p2,p3) , ier = leastsq(Fun1b, (a0,y0) , args=(f_meas_reduced,yq,sig))
if chatter > 3:
print('position order 2: %8.1f shifted to %8.1f'%(p3,p3+y[q][0]))
p3 += y[q][0]
# check that the refined value is not too far off:
if abs(p3 - Ypositions[1]) > 15:
if chatter > 3: print("problem p3 way off p3=",p3)
p3 = Ypositions[1]
flag[1] = 3
Y2 = (p2,sig2), ier = leastsq(Fun1a, (p2,sig1), args=(f_meas_reduced,yq,p3 ))
if not ((sig2 > 0.25*sig1) & (sig2 < 4.* sig1)):
sig2 = sig1
newsig2 = False
else:
# keep sig2
newsig2 = True
if chatter > 3:
print("second highest peak amplitude=%8.1f, position=%8.1f, sigma=%8.2f ; ier flag=%2i "%(p2,p3,sig2, ier))
Yout = ((p0,p1,sig1,p2,p3,sig2),flag), (y,q,f_meas,f_meas_reduced)
if ((len(Ypositions) > 2) & qok ):
# triple gaussian fit: removed the second peak from the data
(p0,p1,sig1,p2,p3,sig2), ier = \
leastsq(Fun2, (p0,p1,sig1,p2,p3,sig2) , args=(f_meas[q],y[q]))
if chatter > 3:
print("fit double gaussian (%8.2f,%8.2f,%8.2f, %8.2f,%8.2f,%8.2f)"%\
(p0,p1,sig1,p2,p3,sig2))
f_meas_reduced = f_meas[q] - doublegaussian(yq,p0,p1,sig1,p2,p3,sig2)
if not newsig2:
y0 = Ypositions[2]
a0 = 10*noiselevel
else:
a0 = f_meas_reduced.max()
y0 = y[q][where(f_meas_reduced == a0)[0][0]]
if chatter > 3: print("third order input fit: amplitude = %8.2f, position = %8.2f"%(a0,y0))
sig3 = 2*sig2
Y3 = (p4,p5), ier = leastsq(Fun1b, (a0,y0) , args=(f_meas_reduced,y[q],sig3))
p5 += y[q][0]
if abs(p5-Ypositions[2]) > 15:
p5 = Ypositions[2]
flag[2] = 3
Y3 = (p4a,sig3), ier = leastsq(Fun1a, (p4,sig3), args=(f_meas_reduced,y[q],p5 ))
if sig3 > 6*sig: sig3 = 2*sig2
if chatter > 3:
print("third highest peak amplitude=%8.1f, position=%8.1f, sigma=%8.2f, ier flag =%i "\
%(p4,p5,sig3,ier))
Yout = ((p0,p1,sig1,p2,p3,sig2,p4,p5,sig3),flag),(y,q,f_meas,f_meas_reduced)
# now remove odd solutions - TBD: just flagging now
# check that the solutions for the centre are within 'Sig' of the input 'Ypositions'
if chatter > 2:
print("input Ypositions: ", Ypositions)
nposi = len(Ypositions)
if len(Ypositions) < 4 :
dy = min(abs(p1 - Ypositions))
if dy > Sig: flag[0] += 1
if ((len(Ypositions) > 1) & ( len(q[0]) > 0 )):
dy = min(abs(p3 - Ypositions))
if dy > Sig: flag[1] += 1
dy = abs(p3 - p1)
if dy < sig:
flag[1] += 10
ip = where(abs(p3-Ypositions) < 0.9*dy)[0]
indx = list(range(len(Ypositions)))
if len(ip) == 0:
print("problem with fitting peak # 2 ")
else:
indx.pop(ip[-1])
Ypositions = Ypositions[indx]
if p2 < noiselevel:
flag[1] += 20
ip = where(abs(p3-Ypositions) < 0.9*dy)[0]
if len(ip) == 0:
print("problem with fitting peak # 2 ")
else:
indx = list(range(len(Ypositions)))
#return (p0,p1,p2,p3), Ypositions, ip, noiselevel,dy
indx.pop(ip[-1])
Ypositions = Ypositions[indx]
if ((len(Ypositions) > 2) & qok):
dy = min(abs(p5 - Ypositions))
if dy > Sig: flag[2] += 1
dy = abs(p5 - p1)
if dy < sig:
flag[2] += 10
ip = where(abs(p5-Ypositions) < 0.2*dy)[0]
indx = list(range(len(Ypositions)))
if len(ip) == 0:
print("problem with fitting peak # 2 ")
else:
indx.pop(ip[-1])
Ypositions = Ypositions[indx]
if p4 < noiselevel:
flag[2] += 20
ip = where(abs(p5-Ypositions) < 0.9*dy)[0]
if chatter > 2: print('ip = ',ip)
indx = list(range(len(Ypositions)))
if len(ip) == 0:
print("problem with fitting peak # 2 ")
else:
indx.pop(ip[-1])
Ypositions = Ypositions[indx]
if flag[1] != 10:
dy = abs(p5 - p3)
if dy < sig:
flag[2] += 100
ip = where(abs(p5-Ypositions) < 0.9*dy)[0]
if len(ip) == 0:
print("problem with fitting peak # 2 ")
else:
indx = list(range(len(Ypositions)))
indx.pop(ip[-1])
Ypositions = Ypositions[indx]
if chatter > 2:
print("flag: ",flag)
print(" initial fit parameters: \n first peak:", p0, p1, sig1)
if nposi > 1: print(" second peak:", p2,p3, sig2)
if nposi > 2: print(" third peak:", p4,p5, sig3)
print(" intermediate Ypositions: ", Ypositions)
if not composite_fit: # bail out at this point
if len(Ypositions) == 1:
Y1 = ((p0,p1,sig1), flag), 0
elif len(Ypositions) == 2:
Y1 = ((p0,p1,sig1,p2,p3,sig2), flag), 0
elif len(Ypositions) == 3:
Y1 = ((p0,p1,sig1,p2,p3,sig2,p4,p5,sig3), flag), 0
else:
Y1 = Yout
return Y1
# free sig and refit
if ( len(Ypositions) == 1) :
# first guess single gaussian fit parameters in range given by width parameter
a0 = p0
y0 = p1
if chatter > 3:
print("f_meas :", transpose(f_meas))
print("a0: %8.2f \ny0: %8.2f \nsig0 : %8.2f "%(a0,y0,sig))
print(q)
params_fit, ier = leastsq(Fun1, (a0,y0,sig), args=(f_meas[q],y[q]) )
flag[5] = 1
flag[4] = ier
# remove odd solutions
return (params_fit, flag), (f_meas, y)
elif (qok & (len(Ypositions) == 2) ):
# double gaussian fit
a0 = p0
y0 = p1
a1 = p2
y1 = p3
Y0 = params_fit, ier = leastsq(Fun2, (a0,y0,sig,a1,y1,sig) , args=(f_meas[q],y[q]))
flag[5]=2
flag[4]=ier
# remove odd solutions - TBD
return (params_fit, flag), (f_meas, y, f_meas_reduced, q)
elif (qok & (len(Ypositions) == 3)):
# restricting the fitting to a smaller region around the peaks to
# fit will reduce the effect of broadening the fit due to noise.
q = where( (y > p1-3.*sig1) & (y < p3+3*sig3) )
# ====
# triple gaussian fit
a0 = p0
y0 = p1
a1 = p2
y1 = p3
a2 = p4
y2 = p5
Y0 = params_fit, ier = leastsq(Fun3, (a0,y0,sig1,a1,y1,sig2,a2,y2,sig3) , args=(f_meas[q],y[q]))
flag[5] = 3 # number of peaks
flag[4] = ier
# remove odd solutions
return (params_fit, flag), (f_meas, y, f_meas_reduced, q)
else:
# error in call
print("Error in get_components Ypositions not 1,2,or 3")
return Yout
def obsid2motion(obsid, file_path):
''' By Zexi
to obtain motion (pixels) from a precreated motion table
'''
import pandas as pd
data=pd.read_csv(file_path,sep=' ',header=0)
data['OBS_ID']=data['OBS_ID'].astype(str)
data['OBS_ID']='000'+data['OBS_ID']
d = data.set_index(['OBS_ID'])
motion_v = d.loc[obsid]['MOTION_V']
motion_p = d.loc[obsid]['MOTION_P']
motion = {'V':motion_v, 'P':motion_p}
return motion
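# Assumed layout of the motion file (a sketch; only the columns OBS_ID, MOTION_V
# and MOTION_P are required by this routine, and the numbers below are made up):
# space-separated text with a header line, OBS_ID written without the leading
# '000' that full Swift observation IDs carry, e.g.
#
#   OBS_ID MOTION_V MOTION_P
#   94137001 4.2 1.3
#
#   motion = obsid2motion('00094137001', 'motion_table.txt')   # -> {'V': 4.2, 'P': 1.3}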
def Fun1(p,y,x):
'''compute the residuals for gaussian fit in get_components '''
a0, x0, sig0 = p
return y - singlegaussian(x,a0,x0,sig0)
def Fun1a(p,y,x,x0):
'''compute the residuals for gaussian fit with fixed centre in get_components '''
a0, sig0 = p
return y - singlegaussian(x,a0,x0,sig0)
def Fun1b(p,y,x,sig0):
'''compute the residuals for gaussian fit with fixed width in get_components '''
a0, x0 = p
return y - singlegaussian(x,a0,x0,sig0)
def Fun1c(p,y,x,x0,sig0):
'''compute the residuals for gaussian fit with fixed centre and width in get_components '''
a0 = p
return y - singlegaussian(x,a0,x0,sig0)
def DFun1(p,y,x):
'''Jacobian helper for Fun1. Note: there is something wrong with the return value; it should probably be a matrix of partial derivatives. '''
a0, x0, sig0 = p
return -Dsinglegaussian(x,a0,x0,sig0)
def Fun2(p,y,x):
'''compute the residuals for gaussian fit in get_components '''
a0, x0, sig0 ,a1,x1,sig1 = p
return y - doublegaussian(x,a0,x0,sig0,a1,x1,sig1)
def Fun2b(p,y,x,sig):
'''compute the residuals for gaussian fit in get_components for fixed sig '''
a0, x0, a1,x1 = p
return y - doublegaussian(x,a0,x0,sig,a1,x1,sig)
def Fun2bb(p,y,x,sig1,sig2):
'''compute the residuals for gaussian fit in get_components for fixed sig1, and sig2 '''
a0, x0, a1,x1 = p
return y - doublegaussian(x,a0,x0,sig1,a1,x1,sig2)
def Fun2bc(p,y,x,x0,x1):
'''compute the residuals for gaussian fit in get_components for fixed centre x0, x1 '''
a0, sig0, a1,sig1 = p
return y - doublegaussian(x,a0,x0,sig0,a1,x1,sig1)
def Fun2c(p,y,x,x0,sig0,x1,sig1):
'''compute the residuals for gaussian fit in get_components for fixed centre x_i and width sig_i '''
a0, a1 = p
return y - doublegaussian(x,a0,x0,sig0,a1,x1,sig1)
def DFun2(p,y,x):
a0, x0, sig0,a1,x1,sig1 = p
return -Ddoublegaussian(x,a0,x0,sig0,a1,x1,sig1)
def Fun3(p,y,x):
'''compute the residuals for gaussian fit in get_components '''
a0, x0, sig0 ,a1,x1,sig1 ,a2,x2,sig2= p
return y - trigaussian(x,a0,x0,sig0,a1,x1,sig1,a2,x2,sig2)
def Fun3b(p,y,x,sig):
'''compute the residuals for gaussian fit in get_components '''
a0,x0,a1,x1,a2,x2 = p
return y - trigaussian(x,a0,x0,sig,a1,x1,sig,a2,x2,sig)
def Fun3bb(p,y,x,sig1,sig2,sig3):
'''compute the residuals for gaussian fit in get_components '''
a0,x0,a1,x1,a2,x2 = p
return y - trigaussian(x,a0,x0,sig1,a1,x1,sig2,a2,x2,sig3)
def Fun3c(p,y,x,x0,sig0,x1,sig1,x2,sig2):
'''compute the residuals for gaussian fit in get_components for fixed centre x_i and width sig_i '''
a0, a1, a2 = p
return y - trigaussian(x,a0,x0,sig0,a1,x1,sig1,a2,x2,sig2)
def DFun3(p,y,x):
a0, x0, sig0,a1,x1,sig1,a2,x2,sig2 = p
return -Dtrigaussian(x,a0,x0,sig0,a1,x1,sig1,a2,x2,sig2)
def Fun4(p,y,x,motion0):
a0, x0, sig0 = p
return y - smeargaussian(x,a0,x0,sig0,motion0)
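# The Fun* helpers above all return the residual vector y - model(x), which is
# the form scipy.optimize.leastsq expects. Minimal sketch (not pipeline code;
# 'profile' is a hypothetical 1-D cross-dispersion cut):
#   from scipy.optimize import leastsq
#   ygrid  = np.arange(len(profile), dtype=float)
#   p_init = (profile.max(), float(profile.argmax()), 3.2)   # amplitude, centre, width
#   (a0, y0, sig0), ier = leastsq(Fun1, p_init, args=(profile, ygrid))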
def singlegaussian(x, a0, x0, sig0 ):
'''
The function returns the gaussian function
on array x centred on x0 with width sig0
and amplitude a0
'''
x = np.atleast_1d(x)
f = 0. * x.copy()
q = np.where( np.abs(x-x0) < 4.*sig0 )
f[q] = a0 * np.exp( - ((x[q]-x0)/sig0)**2 )
return f
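# Note: the exponent is -((x-x0)/sig0)**2, without the usual factor 1/2, so sig0
# equals sqrt(2) times the standard gaussian sigma, and the profile is truncated
# to zero beyond 4*sig0 from the centre. Illustrative call (a sketch, not
# pipeline code):
#   prof = singlegaussian(np.arange(100.), 10., 50., 4.)   # peak value 10 at pixel 50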
def Dsinglegaussian(x, a0, x0, sig0):
'''partial derivative of singlegaussian to all parameters'''
f = singlegaussian(x, a0, x0, sig0)
dfda0 = f/a0
dfdx0 = 2*(x-x0)*f/sig0**2
dfdsig0 = 2*f*(x-x0)**2/sig0**3
return dfda0, dfdx0, dfdsig0
def doublegaussian(x, a0, x0, sig0, a1, x1, sig1 ):
'''
The function returns the double gaussian function
on array x centred on x0 and x1 with width sig0 and sig1
and amplitude a0, and a1
'''
x = np.atleast_1d(x)
f1 = 0. * x.copy()
f2 = 0. * x.copy()
q = np.where( np.abs(x-x0) < 4.*sig0 )
f1[q] = a0 * np.exp( - ((x[q]-x0)/sig0)**2 )
q = np.where( np.abs(x-x1) < 4.*sig1)
f2[q] = a1 * np.exp( - ((x[q]-x1)/sig1)**2 )
f = f1+f2
return f
def trigaussian(x, a0, x0, sig0, a1, x1, sig1, a2, x2, sig2 ):
'''
The function returns the triple gaussian function
on array x centred on x0, x1, x2 with width sig0, sig1, sig2
and amplitude a0,a1, a2. :
'''
x = np.atleast_1d(x)
f0 = 0. * x.copy()
f1 = 0. * x.copy()
f2 = 0. * x.copy()
q = np.where(np.abs( x-x0 ) < 4.*sig0)
f0[q] = a0 * np.exp( - ((x[q]-x0)/sig0)**2 )
q = np.where(np.abs( x-x1 ) < 4.*sig1)
f1[q] = a1 * np.exp( - ((x[q]-x1)/sig1)**2 )
q= np.where( np.abs(x-x2) < 4.*sig2)
f2[q] = a2 * np.exp( - ((x[q]-x2)/sig2)**2 )
f = f0 + f1 + f2
return f
def Ddoublegaussian(x, a0, x0, sig0, a1, x1, sig1):
'''partial derivative of doublegaussian to all parameters'''
f = singlegaussian(x, a0, x0, sig0)
dfda0 = f/a0
dfdx0 = 2*(x-x0)*f/sig0**2
dfdsig0 = 2*f*(x-x0)**2/sig0**3
f = singlegaussian(x, a1, x1, sig1)
dfda1 = f/a1
dfdx1 = 2*(x-x1)*f/sig1**2
dfdsig1 = 2*f*(x-x1)**2/sig1**3
return dfda0, dfdx0, dfdsig0, dfda1, dfdx1, dfdsig1
def gaussPlusPoly(x, a0, x0, sig0, b, n=2):
'''compute function gaussian*polynomial(n) '''
f = singlegaussian(x, a0, x0, sig0 ) * (b[2]+(b[1]+b[0]*x)*x)
return f
def DgaussPlusPoly(x, a0, x0, sig0, b, n=2):
'''compute Jacobian for gaussPlusPoly '''
dfda0, dfdx0, dfdsig0 = (Dsinglegaussian(x, a0, x0, sig0) ) * (b[2]+(b[1]+b[0]*x)*x)
dfdb2 = singlegaussian(x, a0, x0, sig0)
dfdb1 = singlegaussian(x, a0, x0, sig0) * x
dfdb0 = singlegaussian(x, a0, x0, sig0) * x*x
return (dfda0, dfdx0, dfdsig0, dfdb2, dfdb1,dfdb0)
def smeargaussian(x, A, mu, sigma, motion, normalize=True):
t1, t2 = -motion/2, motion/2
m1, m2 = (t1-(x-mu))/(np.sqrt(2)*sigma), (t2-(x-mu))/(np.sqrt(2)*sigma)
n1, n2 = m1*m1, m2*m2
fifth = -(np.exp(-n2)-np.exp(-n1))
sixth = np.sqrt(np.pi/2)*(x-mu)/sigma*(erf(m2)-erf(m1))
forth = fifth + sixth
third = np.exp(np.power((x-mu)/sigma,2)/2)*2*np.power(sigma,2)*forth
secnd = -1/(2*np.power(sigma,2))*third
def first_f(t):
return np.exp(-np.power(t/sigma,2)/2+t*(x-mu)/np.power(sigma,2))
first = first_f(t2)-first_f(t1)
zeroth = np.power(sigma,2)/(x-mu)*(first - secnd)
if normalize == True:
norm = 1./(sigma*np.sqrt(2*np.pi))
else:
norm = 1.
#q = norm/motion*np.exp(-np.power((x-mu)/sigma,2)/2)*zeroth
q = np.exp(-np.power((x-mu)/sigma,2)/2)*zeroth
a1, a2 = t1/(np.sqrt(2)*sigma), t2/(np.sqrt(2)*sigma)
q_max = np.sqrt(np.pi/2)*sigma*(erf(a2)-erf(a1))
q = A*q/q_max
return q
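# smeargaussian above evaluates, via integration by parts, the convolution of a
# gaussian PSF with a boxcar of width `motion`, renormalised so that the peak
# value equals A (the `normalize` keyword has no effect on the returned value).
# The expression divides by (x-mu) and is therefore indeterminate at x == mu.
# The helper below is an illustrative, mathematically equivalent closed form
# written directly with error functions; it is a sketch, not called by the
# pipeline, and assumes `erf` is the same scipy.special.erf used above.
def smeargaussian_erf(x, A, mu, sigma, motion):
    '''Illustrative equivalent of smeargaussian, using erf directly (sketch).'''
    s = np.sqrt(2.)*sigma
    # integral of exp(-(t-(x-mu))**2/(2*sigma**2)) over t in [-motion/2, +motion/2]
    q = np.sqrt(np.pi/2.)*sigma*(erf((x-mu+motion/2.)/s) - erf((x-mu-motion/2.)/s))
    # the same integral evaluated at x == mu, used to normalise the peak to A
    q_max = np.sqrt(np.pi/2.)*sigma*2.*erf(motion/(2.*s))
    return A*q/q_max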
def pixdisFromWave(C_1,wave):
''' find the pixel distance from the given wavelengths for first order uv grism'''
from numpy import polyval, polyfit, linspace, where
if C_1[-2] < 4.5: d = linspace(-370,1300, num=100)
else: d = linspace(-360,550,num=100)
w = polyval(C_1,d)
w1 = min(wave) - 100
w2 = max(wave) + 100
q = where( (w > w1) & (w < w2) )
Cinv = polyfit(w[q],d[q],4)
return polyval(Cinv,wave)
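# Illustrative usage (a sketch, not pipeline code): C_1 is the first-order
# dispersion polynomial giving wavelength(dis); the routine inverts it
# numerically by refitting dis(wavelength) over the relevant range.
#   dis = pixdisFromWave(C_1, np.array([2000., 2500., 3000.]))   # pixel offsets from the anchor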
def quality_flags():
'''Definition of quality flags for UVOT grism '''
flags = dict(
good=0, # data good, but may need COI correction
bad=1, # data dropout or bad pixel or user marked bad
zeroth=2, # strong zeroth order too close to/overlaps spectrum
weakzeroth=4, # weak zeroth order too close to/overlaps spectrum
first=8, # other first order overlaps and brighter than BG + 5 sigma of noise
overlap=16, # orders overlap to close to separate (first, second) or (first second and third)
too_bright=32, # the counts per frame are too large
unknown=-1
)
return flags
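# The flag values are powers of two, so conditions can be combined and tested with
# bitwise operators, for example:
#
#   flags = quality_flags()
#   q = flags['zeroth'] | flags['overlap']          # pixel affected by both
#   affected_by_zeroth = (q & flags['zeroth']) != 0 # True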
def plotSecondOrder(dis,C_2,anker,anker2, spnet, scale=False):
'''
The aim of this procedure is to plot
the spectrum with the second order wavelength scale.
Second order brightness scaling (scale = True)
'''
from pylab import plot, polyval
# catch when anker2 = NaN
# tbd.
D = np.sqrt((anker[0]-anker2[0])**2+(anker[1]-anker2[1])**2)
dis2 = dis-D
p = np.where( np.abs(dis2) == np.abs(dis2).min() )
p1 = int(p[0][0]) - 700
p2 = len(dis2)
aa = list(range(p1,p2))
plot( polyval(C_2,dis2[aa]),spnet[aa])
def secondOrderPSF_FWHM(wavelength, C_2inv, units = 'angstroem'):
''' returns the second order PSF FWHM
in A (or pixels when units = 'pixels')
C_2inv = inverse function of dispersion coefficients for the second order
Although the PSF is horse-shoe shaped, the PSF fit is by a gaussian.
'''
w = [1900.,2000,2100,2200,2300,2530,2900,4000]
FWHM = [5.9,6.5,7.7,8.7,10,14,22,63]
a = np.polyfit(w,FWHM,2)
pix2lam = 1.76 # this could be improved using the actual dispersion relation
# dis = np.polyval(C_2inv,wavelength)
# pix2lam = np.polyval(C_2,dis+1) - np.polyval(C_2,dis)
if units == 'pixels':
return np.polyval(a,wavelength)
elif units == 'angstroem':
return np.polyval(a,wavelength) * pix2lam
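# Usage sketch; note that C_2inv is currently unused since a fixed pix2lam = 1.76 is
# adopted:
#
#   fwhm_A   = secondOrderPSF_FWHM(2200., None)                   # in Angstrom
#   fwhm_pix = secondOrderPSF_FWHM(2200., None, units='pixels')   # in pixels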
def response21_grcal(wave):
'''
to get 2nd order counts per bin multiply first order peak counts/bin with
the result of this function
broad band measurements with band width > resolution
let band width D_lam = (lambda_max-lambda_min)
first order pixel ~ 3.1 A/pix
second order pixel ~ 1.7 A/pix
so first order CR/pix ~ CR1_band / 3.1
and second order CR/pix ~ CR2_band / 1.7
EWratio = CR2_band/CR1_band
so # pix/band = d_lam / 3.1 for first order and d_lam/1.7 for second order
so in second order pix the CR(2)/pix = CR(1)* (d_lam/3.1) / (d_lam/1.7) * EWratio
= CR(1) * (1.7/3.2) * EW ratio
'''
from numpy import array, exp, polyfit, log, polyval
wmean = array([1925.,2225,2650])
EWratio = array([0.80,0.42,0.22]) # ratio of broad band response ground cal nominal
EWratio_err= array([0.01,0.01,0.005]) # error
C1_over_C2 = 3.2/1.7 # ratio of pixel scales (1)/(2)
a = polyfit(wmean,log(EWratio),2) # logarithmic fit
EW2 = exp( polyval(a, wave) ) # return ratio
return EW2/C1_over_C2
def response21_firstcal(wave,wheelpos=160):
'''Second order flux calibration relative to first order based on
effective areas from 2011-12-18 at offset position uv clocked grism
Near the centre (default position) of the detector, the second order
flux is overestimated. A better value there is perhaps half the predicted
value, though the exact number is impossible to determine at present.
'''
import numpy as np
from scipy import interpolate
print("2nd order response based on offset position uv clocked at (1600,1600)_DET \n")
#if wheelpos != 160:
# do whatever
#
# return R21
coef = np.array([ 3.70653066e-06, -9.56213490e-03, 5.77251517e+00])
# ratio (sp_2/\AA)/ (sp_1/\AA)
R21 = 1./np.polyval(coef,wave)
if (np.min(wave) < 1838.):
q = (wave < 1839.)
wav = np.array([1690, 1691, 1692, 1693, 1694, 1695, 1696, 1697, 1698, 1699, 1700,
1701, 1702, 1703, 1704, 1705, 1706, 1707, 1708, 1709, 1710, 1711,
1712, 1713, 1714, 1715, 1716, 1717, 1718, 1719, 1720, 1721, 1722,
1723, 1724, 1725, 1726, 1727, 1728, 1729, 1730, 1731, 1732, 1733,
1734, 1735, 1736, 1737, 1738, 1739, 1740, 1741, 1742, 1743, 1744,
1745, 1746, 1747, 1748, 1749, 1750, 1751, 1752, 1753, 1754, 1755,
1756, 1757, 1758, 1759, 1760, 1761, 1762, 1763, 1764, 1765, 1766,
1767, 1768, 1769, 1770, 1771, 1772, 1773, 1774, 1775, 1776, 1777,
1778, 1779, 1780, 1781, 1782, 1783, 1784, 1785, 1786, 1787, 1788,
1789, 1790, 1791, 1792, 1793, 1794, 1795, 1796, 1797, 1798, 1799,
1800, 1801, 1802, 1803, 1804, 1805, 1806, 1807, 1808, 1809, 1810,
1811, 1812, 1813, 1814, 1815, 1816, 1817, 1818, 1819, 1820, 1821,
1822, 1823, 1824, 1825, 1826, 1827, 1828, 1829, 1830, 1831, 1832,
1833, 1834, 1835, 1836, 1837, 1838, 1839])
ratio = np.array([ 0.258639 , 0.26471343, 0.27042023, 0.27579628, 0.28086127,
0.28533528, 0.28957406, 0.29359907, 0.29742921, 0.3010812 ,
0.30456987, 0.30790845, 0.31110877, 0.3141814 , 0.31713589,
0.31998082, 0.32010247, 0.32081151, 0.32181713, 0.32280622,
0.32377967, 0.32473829, 0.32568282, 0.32661395, 0.32753234,
0.32843857, 0.32933322, 0.33021679, 0.33108977, 0.33195263,
0.33243225, 0.33252353, 0.33262903, 0.33274794, 0.3328795 ,
0.33302301, 0.33317782, 0.33334329, 0.33351887, 0.33370401,
0.3338982 , 0.33410098, 0.3343119 , 0.33458345, 0.33498466,
0.33538817, 0.33579382, 0.33620149, 0.33661104, 0.33702235,
0.3374353 , 0.33891465, 0.34053073, 0.3421217 , 0.34368845,
0.34663769, 0.35000718, 0.35334531, 0.35665266, 0.3599298 ,
0.3631773 , 0.36639568, 0.36958547, 0.37274719, 0.37588132,
0.37898836, 0.38206878, 0.38512304, 0.38815158, 0.39115485,
0.39413328, 0.39708727, 0.40001724, 0.40292359, 0.40616969,
0.40948579, 0.4123554 , 0.41437097, 0.41637511, 0.41836796,
0.42034965, 0.42232032, 0.42428008, 0.42622906, 0.42816739,
0.43009518, 0.43201256, 0.43391964, 0.43581654, 0.43793192,
0.44004629, 0.44215087, 0.44424574, 0.44633099, 0.44840671,
0.45047299, 0.4525299 , 0.45457754, 0.45661598, 0.45864531,
0.4607006 , 0.46279476, 0.46626514, 0.47005637, 0.47383064,
0.47758809, 0.48132887, 0.48505311, 0.48876095, 0.49245253,
0.49612799, 0.49978745, 0.50343106, 0.50705893, 0.5106712 ,
0.514268 , 0.51784944, 0.52141565, 0.52496675, 0.52850286,
0.53264671, 0.53713253, 0.5416131 , 0.54608843, 0.55055849,
0.55502327, 0.55948277, 0.56393697, 0.56838586, 0.57282942,
0.57737607, 0.58315569, 0.58892863, 0.59469489, 0.60045444,
0.60620727, 0.61195337, 0.61769272, 0.6234253 , 0.6291511 ,
0.63488101, 0.64091211, 0.64694134, 0.65296866, 0.65899403,
0.66501741, 0.67103875, 0.67705802, 0.68307519, 0.6890902 ])
func = interpolate.interp1d(wav, ratio, kind='linear', bounds_error=False )
R21[q] = 1./func(wave[q])
return R21
def response21(wave, version='firstcal',wheelpos=160 ):
'''
second over first order response per unit of angstrom
input:
wave : wavelengths (A) at which the second over first order response is returned
'''
if version == 'groundcal':
return response21_grcal(wave)
elif version == 'firstcal':
return response21_firstcal(wave)
else:
print('\nFatal Error in call to response21: unknown version\n')
raise IOError("response21: version must be 'groundcal' or 'firstcal'")
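# Usage sketch: the returned ratio multiplies the first order counts per Angstrom to
# predict the second order contribution (wavelength grid below is arbitrary):
#
#   wav = np.arange(1700., 2900., 10.)
#   R21 = response21(wav)              # defaults: version='firstcal', wheelpos=160
#   # sp2_per_A ~ sp1_per_A * R21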
def polyinverse( coef, dis):
''' determine the inverse of the polynomial coefficients
of the same order as in input
so w = polyval(coef, d)
and d = polyval(coefinv, w)
Warning
-------
Accuracy is not always good.
'''
import numpy as np
wav = np.polyval(coef, dis)
norder = np.array([len(coef)-1,len(dis)-1])
norder = np.array([norder.max(),9]).min()
coef_inv = np.polyfit(wav, dis, norder)
return coef_inv
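# Round-trip sketch of the intended use (coefficients invented for illustration):
#
#   C_1 = [3.2, 2600.]
#   d = np.arange(-370., 1150.)
#   C_1inv = polyinverse(C_1, d)
#   d_back = np.polyval(C_1inv, np.polyval(C_1, d))   # should be close to d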
def pix_from_wave( disp, wave,spectralorder=1 ):
'''Get the pixel coordinate from wavelengths and dispersion.
Parameters
----------
disp : list
the dispersion polynomial coefficients
wave : array-like
wavelength
spectralorder : int
the spectral order number
returns
-------
pix : array-like
pixel distance corresponding to the given wavelengths
Note
----
polyinverse() was used which is inaccurate
example
-------
d = pix_from_wave([3.2,2600.], wave )
'''
from scipy import interpolate
import numpy as np
from stsci.convolve import boxcar
wave = np.asarray( wave )
wave = np.atleast_1d(wave)
wone = np.ones(len(wave))
grism = None
if (disp[-1] > 2350.0) & (disp[-1] < 2750.) : grism = 'UV'
if (disp[-1] > 4000.0) & (disp[-1] < 4500.) : grism = 'VIS'
if grism == None:
raise RuntimeError("The dispersion coefficients do not seem correct. Aborting.")
if spectralorder == 1:
# initial guess
dinv = polyinverse( disp, np.arange(-370,1150) )
d = np.polyval(dinv, wave )
if len(wave) < 20:
dp = np.polyval(dinv, wave+10 ) # CRAP polyval!
y = (dp-d)/10.0
y[y <= 0] = y[y > 0].mean()
dpdw = y
else:
fd = interpolate.interp1d(wave,d,bounds_error=False,fill_value=0.3,kind='quadratic')
dp = fd(wave+20)
y = (dp-d)/20.0
y[y <= 0] = y[y > 0].mean()
dpdw = boxcar(y,(100,),mode='reflect')
count = 100
while (np.abs(np.polyval(disp,d) - wave) > 0.5 * wone).any() and (count > 0):
dw = np.polyval(disp,d) - wave
d -= dpdw*dw*0.5
count -= 1
return d
if spectralorder == 2:
# initial guess
dinv = polyinverse( disp, np.arange(-640,1300) )
d = np.polyval(dinv, wave )
dp = np.polyval(dinv, wave+1.0 )
dpdw = dp-d
count = 100
while (np.abs(np.polyval(disp,d) - wave) > 0.5 * wone).any() and (count > 0):
dw = np.polyval(disp,d) - wave
d -= dpdw*dw*0.5
count -= 1
return d
# spectral orders other than 1 or 2 are not supported
return None
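# Usage sketch (the dispersion coefficients are invented; the constant term of 2600.
# selects the 'UV' branch):
#
#   disp = [4.1e-10, -1.2e-6, 9.6e-4, 3.2, 2600.]
#   pix = pix_from_wave(disp, [2000., 2600., 3000.])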
def predict_second_order(dis,spnet,C_1,C_2,d12,qual,dismin,dismax,wheelpos):
'''Predict the second order flux in the given wavelength range
Parameters
----------
spnet[dis] : array-like
extracted spectrum of first order (with possibly higher order contributions)
Assume anchor for dis=0, dis in pix units
C_1, C_2 : list, ndarray
dispersion coefficients for the first and second order
d12 : float
distance in pix between anchor and second order reference point
qual[dis] : array-like
quality extracted spectrum
dismin,dismax : float
define the pixel range for the wavelength range of the first order
wheelpos : int {160,200,955,1000}
position filter wheel
calling function
response21 is giving second over first order response for bins determined by dis
polyinverse determines the inverse of the polynomial coefficients
returns
-------
sp2[dis] : array-like
second order flux
wave2[dis] : array-like
second order wavelength
Notes
-----
response21() gives the second over first order response for bins determined by dis
polyinverse determines the inverse of the polynomial coefficients
'''
import numpy as np
from numpy import where, searchsorted
dis = np.asarray(1.0*dis) # ensure floating point array
spnet = np.asarray(spnet)
qual = np.asarray(qual)
wave = np.polyval(C_1,dis)
wmin = np.polyval(C_1,dismin)
wmax = np.polyval(C_1,dismax)
dis2 = dis[where(dis > 1)] - d12
wav2 = np.polyval(C_2,dis2)
n2b = wav2.searchsorted(wmin)
dis2 = dis2[n2b:]
wav2 = wav2[n2b:]
# determine the inverse of the dispersion on the domain with wmin< wav2 < wmax
#C_1inv = polyinverse(C_1,dis )
#C_2inv = polyinverse(C_2,dis2)
# second order limits
wmin2, wmax2 = np.max(np.array([wav2[0],wmin])),wav2[-1]
#compute second order prediction within the limits
# first order points to use to predict second order (range dis and indices)
#dlo, dhi = np.polyval(C_1inv,wmin2), np.polyval(C_1inv,wmax2)
dlo, dhi = pix_from_wave(C_1,wmin2), pix_from_wave(C_1,wmax2)
idlo, idhi = int(dis.searchsorted(dlo)), int(dis.searchsorted(dhi))
wav1cut = wave[idlo:idhi]
dis1cut = dis [idlo:idhi]
qua1cut = qual[idlo:idhi]
# second order dis2 corresponding to wavelength range wav1cut
#dis2cut = polyval(C_2inv,wav1cut)
dis2cut = pix_from_wave(C_2, wav1cut)
# find scale factor (1 pix = x \AA )
pixscale1 = np.polyval(C_1, dis1cut+1) - np.polyval(C_1, dis1cut)
pixscale2 = np.polyval(C_2, dis1cut+1) - np.polyval(C_2, dis1cut)
projflux2 = spnet[idlo:idhi] * pixscale1 * response21( wav1cut,)
projflux2bin = projflux2 /pixscale2
# now interpolate projflux2bin to find the counts/bin in the second order
# the interpolation is needed since the array size is based on the first order
flux2 = interpol(dis2, dis2cut, projflux2bin)
qual2 = np.array( interpol(dis2, dis2cut, qua1cut) + 0.5 , dtype=int )
# remove NaN values from output
q = np.isfinite(wav2) & np.isfinite(dis2) & np.isfinite(flux2)
wav2 = wav2[q]
dis2 = dis2[q]
flux2 = flux2[q]
qual2 = qual2[q]
return (wav2, dis2, flux2, qual2, d12), (wave, dis, spnet),
'''
the gaussian fitting algorithm is from <NAME>
I am limiting the range for fitting the position and width of the gaussians
'''
def runfit3(x,f,err,bg,amp1,pos1,sig1,amp2,pos2,sig2,amp3,pos3,sig3,amp2lim=None,
fixsig=False, fixsiglim=0.2, fixpos=False,chatter=0):
'''Three gaussians plus a linear varying background
for the rotated image, multiply err by 2.77 to get right chi-squared (.fnorm/(nele-nparm))
'''
import numpy as np
#import numpy.oldnumeric as Numeric
import mpfit
if np.isfinite(bg):
bg0 = bg
else: bg0 = 0.0
bg1 = 0.0
if np.isfinite(sig1):
sig1 = np.abs(sig1)
else: sig1 = 3.1
if np.isfinite(sig2):
sig2 = np.abs(sig2)
else: sig2 = 4.2
if np.isfinite(sig3):
sig3 = np.abs(sig3)
else: sig3 = 4.5
p0 = (bg0,bg1,amp1,pos1,sig1,amp2,pos2,sig2,amp3,pos3,sig3)
if fixpos:
pos1a = pos1-0.05
pos1b = pos1+0.05
pos2a = pos2-0.05
pos2b = pos2+0.05
pos3a = pos3-0.05
pos3b = pos3+0.05
else:
# adjust the limits to not cross half the predicted distance of orders
pos1a = pos1-sig1
pos1b = pos1+sig1
pos2a = pos2-sig1
pos2b = pos2+sig1
pos3a = pos3-sig1
pos3b = pos3+sig1
# case : pos1 < pos2 < pos3
if (pos1 < pos2):
pos1b = pos2a = 0.5*(pos1+pos2)
if (pos2 < pos3):
pos2b = pos3a = 0.5*(pos2+pos3)
else:
pos3 = pos2
pos3a = pos2
pos3b = pos2b+3
else:
pos1a = pos2b = 0.5*(pos1+pos2)
if (pos2 > pos3):
pos2a = pos3b = 0.5*(pos2+pos3)
else:
pos3 = pos2
pos3b = pos2
pos3a = pos2a-3
#x = np.arange(len(f))
if fixsig:
sig1_lo = sig1-fixsiglim
sig1_hi = sig1+fixsiglim
sig2_lo = sig2-fixsiglim
sig2_hi = sig2+fixsiglim
sig3_lo = sig3-fixsiglim
sig3_hi = sig3+fixsiglim
else:
# make sure lower limit sigma is OK
sig1_lo = max([sig1-1 ,3.0])
sig2_lo = max([sig2-1.4,3.5])
sig3_lo = max([sig3-1.9,4.0])
sig1_hi = min([sig1+1.1,4.5])
sig2_hi = min([sig2+1.4,6.])
sig3_hi = min([sig3+1.9,8.])
# define the variables for the function 'myfunct'
fa = {'x':x,'y':f,'err':err}
if amp2lim != None:
amp2min, amp2max = amp2lim
parinfo = [{ \
'limited': [1,0], 'limits' : [np.min([0.0,bg0]),0.0],'value': bg, 'parname': 'bg0' },{ \
'limited': [0,0], 'limits' : [0.0,0.0], 'value' : 0.0, 'parname': 'bg1' },{ \
'limited': [1,0], 'limits' : [0.0,0.0], 'value' : amp1, 'parname': 'amp1' },{ \
'limited': [1,1], 'limits' : [pos1a,pos1b], 'value' : pos1, 'parname': 'pos1' },{ \
'limited': [1,1], 'limits' : [sig1_lo,sig1_hi], 'value' : sig1, 'parname': 'sig1' },{ \
'limited': [1,1], 'limits' : [amp2min,amp2max], 'value' : amp2, 'parname': 'amp2' },{ \
'limited': [1,1], 'limits' : [pos2a,pos2b], 'value' : pos2, 'parname': 'pos2' },{ \
'limited': [1,1], 'limits' : [sig2_lo,sig2_hi], 'value' : sig2, 'parname': 'sig2' },{ \
'limited': [1,0], 'limits' : [0.0,0.0], 'value' : amp3, 'parname': 'amp3' },{ \
'limited': [1,1], 'limits' : [pos3a,pos3b], 'value' : pos3, 'parname': 'pos3' },{ \
'limited': [1,1], 'limits' : [sig3_lo,sig3_hi], 'value' : sig3, 'parname': 'sig3' }]
else:
parinfo = [{ \
'limited': [1,0], 'limits' : [np.min([0.0,bg0]),0.0],'value': bg, 'parname': 'bg0' },{ \
'limited': [0,0], 'limits' : [0.0,0.0], 'value' : 0.0, 'parname': 'bg1' },{ \
'limited': [1,0], 'limits' : [0.0,0.0], 'value' : amp1, 'parname': 'amp1' },{ \
'limited': [1,1], 'limits' : [pos1a,pos1b], 'value' : pos1, 'parname': 'pos1' },{ \
'limited': [1,1], 'limits' : [sig1_lo,sig1_hi], 'value' : sig1, 'parname': 'sig1' },{ \
'limited': [1,0], 'limits' : [0.0,0.0], 'value' : amp2, 'parname': 'amp2' },{ \
'limited': [1,1], 'limits' : [pos2a,pos2b], 'value' : pos2, 'parname': 'pos2' },{ \
'limited': [1,1], 'limits' : [sig2_lo,sig2_hi], 'value' : sig2, 'parname': 'sig2' },{ \
'limited': [1,0], 'limits' : [0.0,0.0], 'value' : amp3, 'parname': 'amp3' },{ \
'limited': [1,1], 'limits' : [pos3a,pos3b], 'value' : pos3, 'parname': 'pos3' },{ \
'limited': [1,1], 'limits' : [sig3_lo,sig3_hi], 'value' : sig3, 'parname': 'sig3' }]
if chatter > 4:
print("parinfo has been set to: ")
for par in parinfo: print(par)
Z = mpfit.mpfit(fit3,p0,functkw=fa,parinfo=parinfo,quiet=True)
'''.status :
An integer status code is returned. All values greater than zero can
represent success (however .status == 5 may indicate failure to
converge). It can have one of the following values:
-16
A parameter or function value has become infinite or an undefined
number. This is usually a consequence of numerical overflow in the
user's model function, which must be avoided.
-15 to -1
These are error codes that either MYFUNCT or iterfunct may return to
terminate the fitting process. Values from -15 to -1 are reserved
for the user functions and will not clash with MPFIT.
0 Improper input parameters.
1 Both actual and predicted relative reductions in the sum of squares
are at most ftol.
2 Relative error between two consecutive iterates is at most xtol
3 Conditions for status = 1 and status = 2 both hold.
4 The cosine of the angle between fvec and any column of the jacobian
is at most gtol in absolute value.
5 The maximum number of iterations has been reached.
6 ftol is too small. No further reduction in the sum of squares is
possible.
7 xtol is too small. No further improvement in the approximate solution
x is possible.
8 gtol is too small. fvec is orthogonal to the columns of the jacobian
to machine precision.
'''
if (Z.status <= 0):
print('uvotgetspec.runfit3.mpfit error message = ', Z.errmsg)
print("parinfo has been set to: ")
for par in parinfo: print(par)
elif (chatter > 3):
print("\nparameters and errors : ")
for i in range(len(Z.params)): print("%10.3e +/- %10.3e\n"%(Z.params[i],Z.perror[i]))
return Z
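# Usage sketch for the three-gaussian profile fit (the initial guesses are arbitrary
# illustration values; fprof and eprof stand for the measured cross-dispersion
# profile and its error array):
#
#   Z = runfit3(np.arange(50.), fprof, eprof, 0.0,
#               100., 15., 3.1,    # amp1, pos1, sig1
#                40., 25., 4.2,    # amp2, pos2, sig2
#                10., 35., 4.5)    # amp3, pos3, sig3
#   bg0,bg1,amp1,pos1,sig1,amp2,pos2,sig2,amp3,pos3,sig3 = Z.params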
def fit3(p, fjac=None, x=None, y=None, err=None):
import numpy as np
# Parameter values are passed in "p"
# If fjac==None then partial derivatives should not be
# computed. It will always be None if MPFIT is called with default
# flag.
# model = F(x, p)
(bg0,bg1,amp1,pos1,sig1,amp2,pos2,sig2,amp3,pos3,sig3) = p
model = bg0 + bg1*x + \
amp1 * np.exp( - ((x-pos1)/sig1)**2 ) + \
amp2 * np.exp( - ((x-pos2)/sig2)**2 ) + \
amp3 * np.exp( - ((x-pos3)/sig3)**2 )
# Non-negative status value means MPFIT should continue, negative means
# stop the calculation.
status = 0
return [status, (y-model)/err]
def runfit2(x,f,err,bg,amp1,pos1,sig1,amp2,pos2,sig2,amp2lim=None,fixsig=False,
fixsiglim=0.2, fixpos=False,chatter=0):
'''Two gaussians plus a linearly varying background
for the rotated image, multiply err by 2.77 to get right chi-squared (.fnorm/(nele-nparm))
'''
import numpy as np
#import numpy.oldnumeric as Numeric
import mpfit
if np.isfinite(bg):
bg0 = bg
else: bg0 = 0.0
bg1 = 0.0
if np.isfinite(sig1):
sig1 = np.abs(sig1)
else: sig1 = 3.1
if np.isfinite(sig2):
sig2 = np.abs(sig2)
else: sig2 = 4.2
p0 = (bg0,bg1,amp1,pos1,sig1,amp2,pos2,sig2)
# define the variables for the function 'myfunct'
fa = {'x':x,'y':f,'err':err}
if fixpos:
pos1a = pos1-0.05
pos1b = pos1+0.05
pos2a = pos2-0.05
pos2b = pos2+0.05
else:
# adjust the limits to not cross half the predicted distance of orders
pos1a = pos1-sig1
pos1b = pos1+sig1
pos2a = pos2-sig1
pos2b = pos2+sig1
# case : pos1 < pos2
if (pos1 < pos2):
pos1b = pos2a = 0.5*(pos1+pos2)
else:
pos1a = pos2b = 0.5*(pos1+pos2)
if fixsig:
sig1_lo = sig1-fixsiglim
sig1_hi = sig1+fixsiglim
sig2_lo = sig2-fixsiglim
sig2_hi = sig2+fixsiglim
else:
# make sure lower limit sigma is OK
sig1_lo = max([sig1-1 ,3.0])
sig2_lo = max([sig2-1.4,3.5])
sig1_hi = min([sig1+1.1,4.5])
sig2_hi = min([sig2+1.4,6.])
if amp2lim != None:
amp2min, amp2max = amp2lim
parinfo = [{ \
'limited': [1,0], 'limits' : [np.min([0.0,bg0]),0.0],'value': bg, 'parname': 'bg0' },{ \
'limited': [0,0], 'limits' : [0.0,0.0], 'value' : 0.0, 'parname': 'bg1' },{ \
'limited': [1,0], 'limits' : [0.0,0.0], 'value' : amp1, 'parname': 'amp1' },{ \
'limited': [1,1], 'limits' : [pos1a,pos1b], 'value' : pos1, 'parname': 'pos1' },{ \
'limited': [1,1], 'limits' : [sig1_lo,sig1_hi], 'value' : sig1, 'parname': 'sig1' },{ \
'limited': [1,1], 'limits' : [amp2min,amp2max], 'value' : amp2, 'parname': 'amp2' },{ \
'limited': [1,1], 'limits' : [pos2a,pos2b], 'value' : pos2, 'parname': 'pos2' },{ \
'limited': [1,1], 'limits' : [sig2_lo,sig2_hi], 'value' : sig2, 'parname': 'sig2' }]
else:
parinfo = [{ \
'limited': [1,0], 'limits' : [np.min([0.0,bg0]),0.0],'value': bg, 'parname': 'bg0' },{ \
'limited': [0,0], 'limits' : [0.0,0.0], 'value' : 0.0, 'parname': 'bg1' },{ \
'limited': [1,0], 'limits' : [0.0,0.0], 'value' : amp1, 'parname': 'amp1' },{ \
'limited': [1,1], 'limits' : [pos1a,pos1b], 'value' : pos1, 'parname': 'pos1' },{ \
'limited': [1,1], 'limits' : [sig1_lo,sig1_hi], 'value' : sig1, 'parname': 'sig1' },{ \
'limited': [1,0], 'limits' : [0.0,0.0], 'value' : amp2, 'parname': 'amp2' },{ \
'limited': [1,1], 'limits' : [pos2a,pos2b], 'value' : pos2, 'parname': 'pos2' },{ \
'limited': [1,1], 'limits' : [sig2_lo,sig2_hi], 'value' : sig2, 'parname': 'sig2' }]
if chatter > 4:
print("parinfo has been set to: ")
for par in parinfo: print(par)
Z = mpfit.mpfit(fit2,p0,functkw=fa,parinfo=parinfo,quiet=True)
if (Z.status <= 0):
print('uvotgetspec.runfit2.mpfit error message = ', Z.errmsg)
print("parinfo has been set to: ")
for par in parinfo: print(par)
elif (chatter > 3):
print("\nparameters and errors : ")
for i in range(8): print("%10.3e +/- %10.3e\n"%(Z.params[i],Z.perror[i]))
return Z
def fit2(p, fjac=None, x=None, y=None, err=None):
import numpy as np
(bg0,bg1,amp1,pos1,sig1,amp2,pos2,sig2) = p
model = bg0 + bg1*x + \
amp1 * np.exp( - ((x-pos1)/sig1)**2 ) + \
amp2 * np.exp( - ((x-pos2)/sig2)**2 )
status = 0
return [status, (y-model)/err]
def runfit1(x,f,err,bg,amp1,pos1,sig1,fixsig=False,fixpos=False,fixsiglim=0.2,chatter=0):
'''One gaussian plus a linearly varying background
for the rotated image, multiply err by 2.77 to get right chi-squared (.fnorm/(nele-nparm))
'''
import numpy as np
#import numpy.oldnumeric as Numeric
import mpfit
if np.isfinite(bg):
bg0 = bg
else: bg0 = 0.00
bg1 = 0.0
if np.isfinite(sig1):
sig1 = np.abs(sig1)
else:
sig1 = 3.2
p0 = (bg0,bg1,amp1,pos1,sig1)
# define the variables for the function 'myfunct'
fa = {'x':x,'y':f,'err':err}
if fixsig:
sig1_lo = sig1-fixsiglim
sig1_hi = sig1+fixsiglim
else:
# make sure lower limit sigma is OK
sig1_lo = max([sig1-1 ,2.7])
sig1_hi = min([sig1+1.1,4.5])
if fixpos:
pos1a = pos1-0.05
pos1b = pos1+0.05
else:
# adjust the limits to not cross half the predicted distance of orders
pos1a = pos1-sig1
pos1b = pos1+sig1
parinfo = [{ \
'limited': [1,0], 'limits' : [np.min([0.,bg0]),0.0],'value' : bg, 'parname': 'bg0' },{ \
'limited': [0,0], 'limits' : [0.0,0.0], 'value' : 0.0, 'parname': 'bg1' },{ \
'limited': [1,0], 'limits' : [0.0,0.0], 'value' : amp1, 'parname': 'amp1' },{ \
'limited': [1,1], 'limits' : [pos1a,pos1b], 'value' : pos1, 'parname': 'pos1' },{ \
'limited': [1,1], 'limits' : [sig1_lo,sig1_hi], 'value' : sig1, 'parname': 'sig1' }]
if chatter > 4:
print("parinfo has been set to: ")
for par in parinfo: print(par)
Z = mpfit.mpfit(fit1,p0,functkw=fa,parinfo=parinfo,quiet=True)
if (Z.status <= 0): print('uvotgetspec.runfit1.mpfit error message = ', Z.errmsg)
return Z
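# Usage sketch for the single-gaussian fit (arbitrary initial guesses; fprof and
# eprof are the measured profile and its error array):
#
#   Z = runfit1(np.arange(50.), fprof, eprof, 0.0, 100., 25., 3.2)
#   bg0, bg1, amp1, pos1, sig1 = Z.params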
def fit1(p, fjac=None, x=None, y=None, err=None):
import numpy as np
(bg0,bg1,amp1,pos1,sig1) = p
model = bg0 + bg1*x + amp1 * np.exp( - ((x-pos1)/sig1)**2 )
status = 0
return [status, 1e8*(y-model)]
def getCalData(Xphi, Yphi, wheelpos,date, chatter=3,mode='bilinear',
kx=1,ky=1,s=0,calfile=None,caldir=None, msg=''):
'''Retrieve the calibration data for the anchor and dispersion (wavelengths).
Parameters
----------
Xphi, Yphi : float
input angles in degrees, from, e.g., `findInputAngle`.
wheelpos : int, {160,200,955,1000}
filter wheel position selects grism
date : swifttime in seconds
obsolete - not used
kwargs : dict
optional arguments
- **calfile** : str
calibration file name
- **caldir** : str
path of directory calibration files
- **mode** : str
interpolation method. Use 'bilinear' only.
- **kx**, **ky** : int, {1,2,3}
order of interpolation. Use linear interpolation only.
- **s** : float
smoothing factor, use s=0.
- **chatter** : int
verbosity
Returns
-------
anker, anker2 : list
coordinate of anchor in first order.
C_1, C_2 :
dispersion in first and second order.
theta : float
find angle of dispersion on detector as 180-theta.
data : FITS_rec
the wavecal data table
Notes
-----
Given the input angle Xphi, Yphi in deg., the filterwheel
position, and the date the spectrum was taken (in swift seconds),
this gets the calibration data.
The boresight must be set to the one used in deriving the calibration.
'''
import os
import numpy as np
try:
from astropy.io import fits as pyfits
except:
import pyfits
from scipy import interpolate
#==================================================================
# The following calculation in reverse prepared the zemax model for
# the calibration table lookup. Keep for the record. UV Nominal case.
# first calculate the offset of the rotate the input angles due to
# the difference in boresight of grism and model
# = input_angle + grism_bs_angle - model_bs
# scale = 6554.0 (deg/pix)
# xfi = Xphi + (( 928.53-27) - (1100.5+8))/scale
# yfi = Yphi + ((1002.69- 1) - (1100.5-4))/scale
# rx,ry = uvotmisc.uvotrotvec(xf,yf,-64.6)
#==================================================================
if calfile == None:
#
# get the calibration file
#
try:
uvotpy = os.getenv('UVOTPY')
caldb = os.getenv('CALDB')
if uvotpy is not None:
caldir = uvotpy+'/uvotpy/calfiles'
elif caldb is not None:
caldir = caldb+'/data/swift/uvota/bcf/grism/'
except:
print("Neither the CALDB nor the UVOTPY environment variable is set.")
#if caldir == None:
# # hardcoded development system
# caldir = '/Volumes/users/Users/kuin/dev/uvotpy.latest/calfiles'
if wheelpos == 200:
calfile = 'swugu0200wcal20041120v001.fits'
oldcalfile='swwavcal20090406_v1_mssl_ug200.fits'
calfile = caldir+'/'+calfile
if chatter > 1: print('reading UV Nominal calfile '+calfile)
elif wheelpos == 160:
calfile='swugu0160wcal20041120v002.fits'
oldcalfile= 'swwavcal20090626_v2_mssl_uc160_wlshift6.1.fits'
calfile = caldir+'/'+calfile
if chatter > 1: print('reading UV clocked calfile '+calfile)
elif wheelpos == 955:
calfile='swugv0955wcal20041120v001.fits'
oldcalfile= 'swwavcal20100421_v0_mssl_vc955_wlshift-8.0.fits'
calfile = caldir+'/'+calfile
if chatter > 1: print('reading V clocked calfile '+calfile)
elif wheelpos == 1000:
calfile='swugv1000wcal20041120v001.fits'
oldcalfile= 'swwavcal20100121_v0_mssl_vg1000.fits'
calfile = caldir+'/'+calfile
if chatter > 1: print('reading V Nominal calfile '+calfile)
else:
if chatter > 1:
print("Could not find a valid wave calibration file for wheelpos = ",wheelpos)
print("Aborting")
print("******************************************************************")
raise IOError("missing calibration file")
msg += "wavecal file : %s\n"%(calfile.split('/')[-1])
# look up the data corresponding to the (Xphi,Yphi) point in the
# calibration file (which already has rotated input arrays)
#
cal = pyfits.open(calfile)
if chatter > 0: print("opening the wavelength calibration file: %s"%(calfile))
if chatter > 1: print(cal.info())
hdr0 = cal[0].header
hdr1 = cal[1].header
data = cal[1].data
# the rotated field grid xf,yf (inconsistent naming - use to be xrf,yrf)
xf = xrf = data.field('PHI_X')
N1 = int(np.sqrt( len(xf) ))
if N1**2 != len(xf):
raise RuntimeError("GetCalData: calfile array not square" )
if chatter > 2: print("GetCalData: input array size on detector is %i in x, %i in y"%(N1,N1))
xf = xrf = data.field('PHI_X').reshape(N1,N1)
yf = yrf = data.field('PHI_Y').reshape(N1,N1)
# first order anchor and angle array
xp1 = data.field('DETX1ANK').reshape(N1,N1)
yp1 = data.field('DETY1ANK').reshape(N1,N1)
th = data.field('SP1SLOPE').reshape(N1,N1)
if wheelpos == 955:
# first order dispersion
c10 = data.field('DISP1_0').reshape(N1,N1)
c11 = data.field('DISP1_1').reshape(N1,N1)
c12 = data.field('DISP1_2').reshape(N1,N1)
c13 = data.field('DISP1_3').reshape(N1,N1)
c14 = np.zeros(N1*N1).reshape(N1,N1)
c1n = data.field('DISP1_N').reshape(N1,N1)
# second order
xp2 = data.field('DETX2ANK').reshape(N1,N1)
yp2 = data.field('DETY2ANK').reshape(N1,N1)
c20 = data.field('DISP2_0').reshape(N1,N1)
c21 = data.field('DISP2_1').reshape(N1,N1)
c22 = data.field('DISP2_2').reshape(N1,N1)
c2n = data.field('DISP2_N').reshape(N1,N1)
else:
# first order dispersion
c10 = data.field('disp1_0').reshape(N1,N1)
c11 = data.field('disp1_1').reshape(N1,N1)
c12 = data.field('disp1_2').reshape(N1,N1)
c13 = data.field('disp1_3').reshape(N1,N1)
c14 = data.field('disp1_4').reshape(N1,N1)
c1n = data.field('disp1_N').reshape(N1,N1)
# second order
xp2 = data.field('detx2ank').reshape(N1,N1)
yp2 = data.field('dety2ank').reshape(N1,N1)
c20 = data.field('disp2_0').reshape(N1,N1)
c21 = data.field('disp2_1').reshape(N1,N1)
c22 = data.field('disp2_2').reshape(N1,N1)
c2n = data.field('disp2_n').reshape(N1,N1)
#
# no transform here. but done to lookup array
#
rx, ry = Xphi, Yphi
#
# test if within ARRAY boundaries
#
xfp = xf[0,:]
yfp = yf[:,0]
if ((rx < min(xfp)) ^ (rx > max(xfp))):
inXfp = False
else:
inXfp = True
if ((ry < min(yfp)) ^ (ry > max(yfp))):
inYfp = False
else:
inYfp = True
#
# lower corner (ix,iy)
#
if inXfp :
ix = max( np.where( rx >= xf[0,:] )[0] )
ix_ = min( np.where( rx <= xf[0,:] )[0] )
else:
if rx < min(xfp):
ix = ix_ = 0
print("WARNING: point has xfield lower than calfile provides")
if rx > max(xfp):
ix = ix_ = N1-1
print("WARNING: point has xfield higher than calfile provides")
if inYfp :
iy = max( np.where( ry >= yf[:,0] )[0] )
iy_ = min( np.where( ry <= yf[:,0] )[0] )
else:
if ry < min(yfp):
iy = iy_ = 0
print("WARNING: point has yfield lower than calfile provides")
if ry > max(yfp):
iy = iy_ = N1-1
print("WARNING: point has yfield higher than calfile provides")
if inYfp & inXfp & (chatter > 2):
print('getCalData. rx, ry, Xank, Yank ')
print(ix, ix_, iy, iy_)
print('getCalData. gridpoint 1 position: ', xf[iy_,ix_], yf[iy_,ix_], xp1[iy_,ix_], yp1[iy_,ix_])
print('getCalData. gridpoint 2 position: ', xf[iy ,ix_], yf[iy ,ix_], xp1[iy ,ix_], yp1[iy ,ix_])
print('getCalData. gridpoint 3 position: ', xf[iy ,ix ], yf[iy ,ix ], xp1[iy ,ix ], yp1[iy ,ix ])
print('getCalData. gridpoint 4 position: ', xf[iy_,ix ], yf[iy_,ix ], xp1[iy_,ix ], yp1[iy_,ix ])
#
# exception at outer grid edges:
#
if ((ix == N1-1) ^ (iy == N1-1) ^ (ix_ == 0) ^ (iy_ == 0)):
# select only coefficient with order 4 (or 3 for wheelpos=955)
print("IMPORTANT:")
print("\nanchor point is outside the calibration array: extrapolating all data")
try:
if wheelpos == 955 :
# first order solution
q4 = np.where( c1n.flatten() == 3 )
xf = xf.flatten()[q4]
yf = yf.flatten()[q4]
xp1 = xp1.flatten()[q4]
yp1 = yp1.flatten()[q4]
th = th.flatten()[q4]
c10 = c10.flatten()[q4]
c11 = c11.flatten()[q4]
c12 = c12.flatten()[q4]
c13 = c13.flatten()[q4]
c14 = np.zeros(len(q4[0]))
c1n = c1n.flatten()[q4]
mode = 'bisplines'
# second order solution only when at lower or right boundary
if (ix == N1-1) ^ (iy == 0):
q2 = np.where( c2n.flatten() == 2 )[0]
xp2 = xp2.flatten()[q2]
yp2 = yp2.flatten()[q2]
c20 = c20.flatten()[q2]
c21 = c21.flatten()[q2]
c22 = c22.flatten()[q2]
c2n = c2n.flatten()[q2]
else:
N2 = N1//2
xp2 = np.zeros(N2)
yp2 = np.zeros(N2)
c20 = np.zeros(N2)
c21 = np.zeros(N2)
c22 = np.zeros(N2)
c2n = np.zeros(N2)
else:
q4 = np.where( c1n.flatten() == 4 )
xf = xf.flatten()[q4]
yf = yf.flatten()[q4]
xp1 = xp1.flatten()[q4]
yp1 = yp1.flatten()[q4]
th = th.flatten()[q4]
c10 = c10.flatten()[q4]
c11 = c11.flatten()[q4]
c12 = c12.flatten()[q4]
c13 = c13.flatten()[q4]
c14 = np.zeros(len(q4[0]))
c1n = c1n.flatten()[q4]
xp2 = xp2.flatten()[q4]
yp2 = yp2.flatten()[q4]
c20 = c20.flatten()[q4]
c21 = c21.flatten()[q4]
c22 = c22.flatten()[q4]
c2n = c2n.flatten()[q4]
# find the anchor positions by extrapolation
anker = np.zeros(2)
anker2 = np.zeros(2)
tck1x = interpolate.bisplrep(xf, yf, xp1, xb=-0.19,xe=+0.19,yb=-0.19,ye=0.19,kx=3,ky=3,s=None)
tck1y = interpolate.bisplrep(xf, yf, yp1, xb=-0.19,xe=+0.19,yb=-0.19,ye=0.19,kx=3,ky=3,s=None)
tck2x = interpolate.bisplrep(xf, yf, xp2, xb=-0.19,xe=+0.19,yb=-0.19,ye=0.19,kx=3,ky=3,s=None)
tck2y = interpolate.bisplrep(xf, yf, yp2, xb=-0.19,xe=+0.19,yb=-0.19,ye=0.19,kx=3,ky=3,s=None)
anker[0] = xp1i = interpolate.bisplev(rx,ry, tck1x)
anker[1] = yp1i = interpolate.bisplev(rx,ry, tck1y)
anker2[0] = xp2i = interpolate.bisplev(rx,ry, tck2x)
anker2[1] = yp2i = interpolate.bisplev(rx,ry, tck2y)
# find the angle
tck = interpolate.bisplrep(xf, yf, th,xb=-0.19,xe=+0.19,yb=-0.19,ye=0.19, kx=3,ky=3,s=None)
thi = interpolate.bisplev(rx,ry, tck)
# find the dispersion
tck = interpolate.bisplrep(xf, yf, c10,xb=-0.19,xe=+0.19,yb=-0.19,ye=0.19, kx=3,ky=3,s=None)
c10i = interpolate.bisplev(rx,ry, tck)
tck = interpolate.bisplrep(xf, yf, c11,xb=-0.19,xe=+0.19,yb=-0.19,ye=0.19, kx=3,ky=3,s=None)
c11i = interpolate.bisplev(rx,ry, tck)
tck = interpolate.bisplrep(xf, yf, c12,xb=-0.19,xe=+0.19,yb=-0.19,ye=0.19, kx=3,ky=3,s=None)
c12i = interpolate.bisplev(rx,ry, tck)
tck = interpolate.bisplrep(xf, yf, c13,xb=-0.19,xe=+0.19,yb=-0.19,ye=0.19, kx=3,ky=3,s=None)
c13i = interpolate.bisplev(rx,ry, tck)
tck = interpolate.bisplrep(xf, yf, c14,xb=-0.19,xe=+0.19,yb=-0.19,ye=0.19, kx=3,ky=3,s=None)
c14i = interpolate.bisplev(rx,ry, tck)
if ((ix == N1-1) ^ (iy == 0)):
tck = interpolate.bisplrep(xf, yf, c20,xb=-0.19,xe=+0.19,yb=-0.19,ye=0.19, kx=3,ky=3,s=None)
c20i = interpolate.bisplev(rx,ry, tck)
tck = interpolate.bisplrep(xf, yf, c21,xb=-0.19,xe=+0.19,yb=-0.19,ye=0.19, kx=3,ky=3,s=None)
c21i = interpolate.bisplev(rx,ry, tck)
tck = interpolate.bisplrep(xf, yf, c22,xb=-0.19,xe=+0.19,yb=-0.19,ye=0.19, kx=3,ky=3,s=None)
c22i = interpolate.bisplev(rx,ry, tck)
else:
c20i = c21i = c22i = np.NaN
if chatter > 2:
print('getCalData. bicubic extrapolation ')
print('getCalData. first order anchor position = (%8.1f,%8.1f), angle theta = %7.1f ' % (xp1i,yp1i,thi ))
print('getCalData. dispersion first order = ',c10i,c11i,c12i,c13i,c14i)
if np.isnan(c20i):
print(" no second order extracted ")
else:
print('getCalData. second order anchor position = (%8.1f,%8.1f) ' % (xp2i,yp2i))
print('getCalData. dispersion second order = ', c20i,c21i, c22i)
except:
print("failed - ABORTING")
raise
return
else:
#
# reduce arrays to section surrounding point
# get interpolated quantities and pass them on
#
if mode == 'bisplines':
# compute the Bivariate-spline coefficients
# kx = ky = 3 # cubic splines (smoothing) and =1 is linear
task = 0 # find spline for given smoothing factor
# s = 0 # 0=spline goes through the given points
# eps = 1.0e-6 (0 < eps < 1)
m = N1*N1
if chatter > 2: print('\n getCalData. splines ')
qx = qy = np.where( (np.isfinite(xrf.reshape(m))) & (np.isfinite(yrf.reshape(m)) ) )
tck1 = interpolate.bisplrep(xrf.reshape(m)[qx], yrf.reshape(m)[qy], xp1.reshape(m)[qx],xb=-0.19,xe=+0.19,yb=-0.19,ye=0.19, kx=kx,ky=ky,s=s)
tck2 = interpolate.bisplrep(xrf.reshape(m)[qx], yrf.reshape(m)[qy], yp1.reshape(m)[qx],xb=-0.19,xe=+0.19,yb=-0.19,ye=0.19, kx=kx,ky=ky,s=s)
xp1i = interpolate.bisplev(rx,ry, tck1)
yp1i = interpolate.bisplev(rx,ry, tck2)
tck3 = interpolate.bisplrep(xrf.reshape(m)[qx], yrf.reshape(m)[qy], th.reshape(m),xb=-0.19,xe=+0.19,yb=-0.19,ye=0.19, kx=kx,ky=ky,s=s)
thi = interpolate.bisplev(rx,ry, tck3)
xp2i = 0
yp2i = 0
if chatter > 2: print('getCalData. x,y,theta = ',xp1i,yp1i,thi, ' second order ', xp2i, yp2i)
tck = interpolate.bisplrep(xrf.reshape(m)[qx], yrf.reshape(m)[qy], c10.reshape(m),xb=-0.19,xe=+0.19,yb=-0.19,ye=0.19, kx=kx,ky=ky,s=s)
c10i = interpolate.bisplev(rx,ry, tck)
tck = interpolate.bisplrep(xrf.reshape(m)[qx], yrf.reshape(m)[qy], c11.reshape(m),xb=-0.19,xe=+0.19,yb=-0.19,ye=0.19, kx=kx,ky=ky,s=s)
c11i = interpolate.bisplev(rx,ry, tck)
tck = interpolate.bisplrep(xrf.reshape(m)[qx], yrf.reshape(m)[qy], c12.reshape(m),xb=-0.19,xe=+0.19,yb=-0.19,ye=0.19, kx=kx,ky=ky,s=s)
c12i = interpolate.bisplev(rx,ry, tck)
tck = interpolate.bisplrep(xrf.reshape(m)[qx], yrf.reshape(m)[qy], c13.reshape(m),xb=-0.19,xe=+0.19,yb=-0.19,ye=0.19, kx=kx,ky=ky,s=s)
c13i = interpolate.bisplev(rx,ry, tck)
tck = interpolate.bisplrep(xrf.reshape(m)[qx], yrf.reshape(m)[qy], c14.reshape(m),xb=-0.19,xe=+0.19,yb=-0.19,ye=0.19, kx=kx,ky=ky,s=s)
c14i = interpolate.bisplev(rx,ry, tck)
if chatter > 2: print('getCalData. dispersion first order = ',c10i,c11i,c12i,c13i,c14i)
tck = interpolate.bisplrep(xrf.reshape(m)[qx], yrf.reshape(m)[qy], c20.reshape(m),xb=-0.19,xe=+0.19,yb=-0.19,ye=0.19, kx=kx,ky=ky,s=s)
c20i = interpolate.bisplev(rx,ry, tck)
tck = interpolate.bisplrep(xrf.reshape(m)[qx], yrf.reshape(m)[qy], c21.reshape(m),xb=-0.19,xe=+0.19,yb=-0.19,ye=0.19, kx=kx,ky=ky,s=s)
c21i = interpolate.bisplev(rx,ry, tck)
tck = interpolate.bisplrep(xrf.reshape(m)[qx], yrf.reshape(m)[qy], c22.reshape(m),xb=-0.19,xe=+0.19,yb=-0.19,ye=0.19, kx=kx,ky=ky,s=s)
c22i = interpolate.bisplev(rx,ry, tck)
if chatter > 2: print('getCalData. dispersion second order = ', c20i,c21i, c22i)
#
if mode == 'bilinear':
xp1i = bilinear( rx, ry, xf[0,:].squeeze(), yf[:,0].squeeze(), xp1 ,chatter=chatter)
yp1i = bilinear( rx, ry, xf[0,:].squeeze(), yf[:,0].squeeze(), yp1 ,chatter=chatter)
thi = bilinear( rx, ry, xf[0,:].squeeze(), yf[:,0].squeeze(), th )# ,chatter=chatter)
c10i = bilinear( rx, ry, xf[0,:].squeeze(), yf[:,0].squeeze(), c10 )#,chatter=chatter)
c11i = bilinear( rx, ry, xf[0,:].squeeze(), yf[:,0].squeeze(), c11 )#,chatter=chatter)
c12i = bilinear( rx, ry, xf[0,:].squeeze(), yf[:,0].squeeze(), c12 )#,chatter=chatter)
c13i = bilinear( rx, ry, xf[0,:].squeeze(), yf[:,0].squeeze(), c13 )#,chatter=chatter)
c14i = bilinear( rx, ry, xf[0,:].squeeze(), yf[:,0].squeeze(), c14 )#,chatter=chatter)
xp2i = bilinear( rx, ry, xf[0,:].squeeze(), yf[:,0].squeeze(), xp2 )#,chatter=chatter)
yp2i = bilinear( rx, ry, xf[0,:].squeeze(), yf[:,0].squeeze(), yp2 )#,chatter=chatter)
c20i = bilinear( rx, ry, xf[0,:].squeeze(), yf[:,0].squeeze(), c20 )#,chatter=chatter)
c21i = bilinear( rx, ry, xf[0,:].squeeze(), yf[:,0].squeeze(), c21 )#,chatter=chatter)
c22i = bilinear( rx, ry, xf[0,:].squeeze(), yf[:,0].squeeze(), c22 )#,chatter=chatter)
if chatter > 1:
print('getCalData. bilinear interpolation')
print('getCalData. first order anchor position = (%8.1f,%8.1f), angle theta = %7.1f ' % (xp1i,yp1i,thi ))
print('getCalData. dispersion first order = ',c10i,c11i,c12i,c13i,c14i)
print('getCalData. second order anchor position = (%8.1f,%8.1f) ' % (xp2i,yp2i))
print('getCalData. dispersion second order = ', c20i,c21i, c22i)
if mode == 'interp2d':
x1 = xf[0,:].squeeze()
x2 = yf[:,0].squeeze()
xp1i = interpolate.interp2d(x1,x2,xp1,kind='linear')
#same as bisplines with s=0 and k=1
return
C_1 = np.array([c14i,c13i,c12i,c11i,c10i])
C_2 = np.array([c22i,c21i,c20i])
#
# only theta for the first order is available
cal.close()
anker = np.array([xp1i,yp1i])
anker2 = np.array([xp2i,yp2i])
if chatter > 0:
print('getCalData. anker [DET-pix] = ', anker)
print('getCalData. anker [DET-img] = ', anker - [77+27,77+1])
print('getCalData. second order anker at = ', anker2, ' [DET-pix] ')
return anker, anker2, C_1, C_2, thi, data, msg
def bilinear(x1,x2,x1a,x2a,f,chatter=0):
'''
Given function f(i,j) given as a 2d array of function values at
points x1a[i],x2a[j], derive the function value y=f(x1,x2)
by bilinear interpolation.
requirement: x1a[i] is increasing with i
x2a[j] is increasing with j
20080303 NPMK
'''
import numpy as np
# check that the arrays are numpy arrays
x1a = np.asarray(x1a)
x2a = np.asarray(x2a)
# find the index for sorting the arrays
n1 = len(x1a)
n2 = len(x2a)
x1a_ind = x1a.argsort()
x2a_ind = x2a.argsort()
# make a sorted copy
x1as = x1a.copy()[x1a_ind]
x2as = x2a.copy()[x2a_ind]
# find indices i,j for the square containing (x1, x2)
k1s = x1as.searchsorted(x1)-1
k2s = x2as.searchsorted(x2)-1
# find the indices of the four points in the original array
ki = x1a_ind[k1s]
kip1 = x1a_ind[k1s+1]
kj = x2a_ind[k2s]
kjp1 = x2a_ind[k2s+1]
if chatter > 2:
print('FIND solution in (x,y) = (',x1,x2,')')
print('array x1a[k-5 .. k+5] ',x1a[ki-5:ki+5])
print('array x2a[k-5 .. k+5] ',x2a[kj-5:kj+5])
print('length x1a=',n1,' x2a=',n2)
print('indices in sorted arrays = (',k1s,',',k2s,')')
print('indices in array x1a: ',ki, kip1)
print('indices in array x2a: ',kj, kjp1)
# exception at border:
if ((k1s+1 >= n1) ^ (k2s+1 >= n2) ^ (k1s < 0) ^ (k2s < 0) ):
print('bilinear. point outside grid x - use nearest neighbor ')
if ki + 1 > len(x1a) : ki = len(x1a) - 1
if ki < 0 : ki = 0
if kj + 1 > len(x2a) : kj = len(x2a) - 1
if kj < 0 : kj = 0
return f[ki, kj]
# Find interpolated solution
y1 = f[kj ,ki ]
y2 = f[kj ,kip1]
y3 = f[kjp1,kip1]
y4 = f[kjp1,ki ]
t = (x1 - x1a[ki])/(x1a[kip1]-x1a[ki])
u = (x2 - x2a[kj])/(x2a[kjp1]-x2a[kj])
y = (1.-t)*(1.-u)*y1 + t*(1.-u)*y2 + t*u*y3 + (1.-t)*u*y4
if chatter > 2:
print('bilinear. x y f[x,y] ')
print('bilinear. first point ',x1a[ki ],x2a[kj], f[ki,kj])
print('bilinear. second point ',x1a[kip1],x2a[kj], f[kip1,kj])
print('bilinear. third point ',x1a[kip1],x2a[kjp1], f[kip1,kjp1])
print('bilinear. fourth point ',x1a[ki ],x2a[kjp1], f[ki,kjp1])
print('bilinear. fractions t, u ', t, u)
print('bilinear. interpolate at ', x1, x2, y)
return y
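# Minimal sketch of bilinear() on a tiny grid; note the function values are indexed
# as f[j,i] with the first axis running along x2a:
#
#   x1a = np.array([0., 1., 2.])
#   x2a = np.array([0., 1.])
#   fgrid = np.array([[0., 1., 2.],
#                     [1., 2., 3.]])      # f = x1 + x2 on the grid
#   bilinear(0.5, 0.5, x1a, x2a, fgrid)   # -> 1.0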
def findInputAngle(RA,DEC,filestub, ext, wheelpos=200,
lfilter='uvw1', lfilter_ext=None,
lfilt2=None, lfilt2_ext=None,
method=None, attfile=None, msg="",
uvotgraspcorr_on=True,
update_pnt=True,
catspec=None, indir='./', chatter=2):
'''Find the angles along the X,Y axis for the target distance from the bore sight.
Parameters
----------
RA,DEC : float
sky position, epoch J2000, decimal degrees
filestub : str
part of filename consisting of "sw"+`obsid`
ext : int
number of the extension
kwargs : dict
- **wheelpos** : int, {160,200,955,1000}
grism filter selected in filter wheel
- **lfilter**, **lfilt2** : str, {'uvw2','uvm2','uvw1','u','b','v'}
lenticular filter name before and after grism exposure
- **lfilter_ext**, **lfilt2_ext** : int
lenticular filter extension before and after grism exposure
- **method** : str, {'grism_only'}
if set to `grism_only`, create a temporary header to compute the
target input angles, otherwise use the lenticular file image.
- **attfile** : str, path
full path+filename of attitude file
- **catspec** : path
optional full path to catalog spec file to use with uvotgraspcorr
- **indir** : str, path
data directory path
- **uvotgraspcorr_on** : bool
enable/disable update of the WCS keywords in the grism file using uvotgraspcorr
- **update_pnt** : bool
enable/disable and update to the WCS keywords in the grism file from the
attitude file, prior to running uvotgraspcorr (if enabled)
- **chatter** : int
verbosity
Returns
-------
anker_as : array
offset (DX,DY) in arcsec in DET coordinate system of the source
from the boresight
needs to be converted to input rays by applying transform.
anker_field : array
offset(theta,phi) in degrees from the axis for
the input field coordinates for the zemax model lookup
tstart : float
start time exposure (swift time in seconds)
msg : string
messages
Notes
-----
Provided a combined observation is available in a lenticular filter and a grism
(i.e., they were acquired in the same observation), this routine determines the
input angles from the boresight. It is assumed that the grism and lenticular filter
images have the same extension.
If no lenticular filter image was taken just before or after the grism exposure,
the input angles are determined from the grism aspect only. In that case, run
uvotgraspcorr on the grism image beforehand to get a better aspect solution.
'''
# 2017-05-17 an error was found in fits header read of the extension of a second filter
# which was introduced when converting to astropy wcs transformations
# 2015-06-10 output the lenticular filter anchor position
# and fix deleted second lenticular filter
# 2015-07-16 changeover to astropy.wcs from ftools
# 2010-07-11 added code to move existing uvw1 raw and sky files out of the way and cleanup afterwards.
# <EMAIL>
import numpy as np
try:
from astropy.io import fits
except:
import pyfits as fits
from uvotwcs import makewcshdr
import os, sys
__version__ = '1.2 NPMK 20170517 NPMK(MSSL)'
msg = ""
lenticular_anchors = {}
if (chatter > 1):
print("uvotgetspec.getSpec(",RA,DEC,filestub, ext, wheelpos, lfilter, lfilter_ext, \
lfilt2, lfilt2_ext, method, attfile, catspec, chatter,')')
if ( (wheelpos == 160) ^ (wheelpos == 200) ):
gfile = indir+'/'+filestub+'ugu_dt.img'
elif ( (wheelpos == 955) ^ (wheelpos == 1000) ):
gfile = indir+'/'+filestub+'ugv_dt.img'
else:
sys.stderr.write("uvotgetspec.findInputAngle: \n\tThe wheelpos=%s is wrong! \n"+\
"\tAborting... could not determine grism type\n\n"%(wheelpos))
return
if ((lfilter == None) & (lfilt2 == None)) | (method == 'grism_only') :
lfilter = 'fk'
method = 'grism_only'
lfilter_ext = 1
uw1rawrenamed = False
uw1skyrenamed = False
if method == 'grism_only':
if chatter > 1: print("grism only method. Creating fake lenticular uvw1 file for grism position")
# test if there is already a uvw1 raw or sky file before proceeding
if chatter > 2:
print('wheelpos ',wheelpos)
print('attfile ',attfile)
wheelp1 = wheelpos
rawfile = makewcshdr(filestub,ext,
attfile,
wheelpos=wheelp1,
indir=indir,
catspec=catspec,
uvotgraspcorr_on=uvotgraspcorr_on,
update_pnt=update_pnt,
chatter=chatter)
# note that the path rawfile = indir+'/'+filestub+'ufk_sk.img'
tempnames.append(filestub)
tempntags.append('fakefilestub')
if lfilter_ext == None:
lfext = ext
else:
lfext = lfilter_ext
ffile = indir+'/'+filestub+'uw1_sk.img'
if lfilter == 'wh' : ffile = indir+'/'+filestub+'uwh_sk.img'
if lfilter == 'u' : ffile = indir+'/'+filestub+'uuu_sk.img'
if lfilter == 'v' : ffile = indir+'/'+filestub+'uvv_sk.img'
if lfilter == 'b' : ffile = indir+'/'+filestub+'ubb_sk.img'
if lfilter == 'uvm2' : ffile = indir+'/'+filestub+'um2_sk.img'
if lfilter == 'uvw2' : ffile = indir+'/'+filestub+'uw2_sk.img'
if lfilter == 'fk' : ffile = indir+'/'+filestub+'ufk_sk.img'
hf = fits.getheader(ffile,lfext)
hg = fits.getheader(gfile,ext)
# check for losses in grism image
if (' BLOCLOSS' in hg):
if float(hg['BLOCLOSS']) != 0:
print('#### BLOCLOSS = '+repr(hg['BLOCLOSS']))
msg += "BLOCLOSS=%4.1f\n"%(hg['BLOCLOSS'])
if ('STALLOSS' in hg):
if (float(hg['STALLOSS']) != 0):
print('#### STALLOSS = '+repr(hg['STALLOSS']))
msg += "STALLOSS=%4.1f\n"%(hg['STALLOSS'])
if ('TOSSLOSS' in hg):
if float(hg['TOSSLOSS']) != 0:
print('#### TOSSLOSS = '+repr(hg['TOSSLOSS']))
msg += "TOSSLOSS=%4.1f\n"%(hg['TOSSLOSS'])
tstart = hg['TSTART']
if chatter > 1: print('grism exposure time = ',hg['EXPOSURE'],' seconds')
RA_PNT = hg['RA_PNT']
DEC_PNT = hg['DEC_PNT']
PA_PNT = hg['PA_PNT'] # roll angle
time = hg['TSTART'] # time observation
ra_diff = RA - RA_PNT
dec_diff = DEC - DEC_PNT
if (np.abs(ra_diff) > 0.4) | (np.abs(dec_diff) > 0.4):
sys.stderr.write(
"\nWARNING: \n\tthe difference in the pointing from the header to the RA,DEC parameter is \n"+\
"\tlarge delta-RA = %f deg, delta-Dec = %f deg\n\n"%(ra_diff,dec_diff))
W1 = wcs.WCS(hf,)
xpix_, ypix_ = W1.wcs_world2pix(RA,DEC,0)
W2 = wcs.WCS(hf,key='D',relax=True)
x1, y1 = W2.wcs_pix2world(xpix_,ypix_,0)
RAs = repr(RA)
DECs= repr(DEC)
exts = repr(ext)
lfexts = repr(lfext)
# tbd - get random number for temp file name
from os import getenv,system
#system('echo '+RAs+' '+DECs+' > radec.txt ' )
CALDB = getenv('CALDB')
if (CALDB is None) or (CALDB == ''):
print('the CALDB environment variable has not been set')
return None
HEADAS = getenv('HEADAS')
if (HEADAS is None) or (HEADAS == ''):
print('The HEADAS environment variable has not been set')
print('That is needed for the uvot Ftools ')
return None
#command = HEADAS+'/bin/uvotapplywcs infile=radec.txt outfile=skyfits.out wcsfile=\"'\
# +ffile+'['+lfexts+']\" operation=WORLD_TO_PIX chatter='+str(chatter)
#if chatter > 0: print command
#system( command )
#f = open('skyfits.out', "r")
#line = f.read()
#if chatter > 1: print 'skyfits.out: '+line
#x1, y1 = (line.split())[2:4]
#f.close
#system( 'echo '+repr(x1)+' '+repr(y1)+' > skyfits.in' )
##
#command = HEADAS+'/bin/uvotapplywcs infile=skyfits.in outfile=detmm.txt wcsfile=\"'\
# +ffile+'['+lfexts+']\" operation=PIX_TO_WORLD to=D chatter='+str(chatter)
#if chatter > 1: print command
#system( command )
#f = open('detmm.txt', "r")
#line = f.read()
#if chatter > 1: print 'detmm: '+line
#x1, y1 = line.split()[2:4]
#f.close
#x1 = float(x1)
#y1 = float(y1)
if chatter > 1:
print("\t The [det]coordinates in mm are (%8.4f,%8.4f) " % ( x1, y1))
# convert anchor in DET coordinate mm to pixels and arcsec from boresight
anker_uvw1det = np.array([x1,y1])/0.009075+np.array((1100.5,1100.5))
msg += "LFILT1_ANCHOR= [%6.1f,%6.1f]\n"%(anker_uvw1det[0],anker_uvw1det[1])
lenticular_anchors.update({"lfilt1":lfilter,"lfilt1_anker":anker_uvw1det})
if (x1 < -14) | (x1 > 14) | (y1 < -14) | (y1 > 14) :
# outside detector
print("\nERROR: source position is not on the detector! Aborting...",(x1,y1))
raise IOError("\nERROR: source position is not on the detector! ")
if lfilter == "fk" :
l2filter = "uvw1"
else: l2filter = lfilter
if wheelpos != 160:
anker_uvw1det_offset = anker_uvw1det - np.array( boresight(filter=l2filter)) # use fixed default value boresight
else:
anker_uvw1det_offset = anker_uvw1det - np.array( boresight(filter=l2filter,date=209952100) )
Xphi, Yphi = anker_uvw1det_offset*0.502
as2deg = 1./3600.
# second lenticular filter
if lfilt2 != None:
if lfilt2 == 'wh' : f2ile = indir+'/'+filestub+'uwh_sk.img'
if lfilt2 == 'u' : f2ile = indir+'/'+filestub+'uuu_sk.img'
if lfilt2 == 'v' : f2ile = indir+'/'+filestub+'uvv_sk.img'
if lfilt2 == 'b' : f2ile = indir+'/'+filestub+'ubb_sk.img'
if lfilt2 == 'uvw1' : f2ile = indir+'/'+filestub+'uw1_sk.img'
if lfilt2 == 'uvm2' : f2ile = indir+'/'+filestub+'um2_sk.img'
if lfilt2 == 'uvw2' : f2ile = indir+'/'+filestub+'uw2_sk.img'
if lfilt2 == 'fk' : f2ile = indir+'/'+filestub+'ufk_sk.img'
if lfilt2_ext == None:
lf2ext = ext
else:
lf2ext = lfilt2_ext
if chatter > 4: print("getting fits header for %s + %i\n"%(f2ile,lf2ext))
hf2 = fits.getheader(f2ile,lf2ext)
W1 = wcs.WCS(hf2,)
xpix_, ypix_ = W1.wcs_world2pix(RA,DEC,0)
W2 = wcs.WCS(hf2,key='D',relax=True)
x2, y2 = W2.wcs_pix2world(xpix_,ypix_,0)
#command = HEADAS+'/bin/uvotapplywcs infile=radec.txt outfile=skyfits.out wcsfile=\"'\
# +f2ile+'['+str(lf2ext)+']\" operation=WORLD_TO_PIX chatter='+str(chatter)
#if chatter > 0: print command
#system( command )
#f = open('skyfits.out', "r")
#line = f.read()
#if chatter > 1: print 'skyfits.out: '+line
#x2, y2 = (line.split())[2:4]
#f.close
#system( 'echo '+repr(x2)+' '+repr(y2)+' > skyfits.in' )
#
#command = HEADAS+'/bin/uvotapplywcs infile=skyfits.in outfile=detmm.txt wcsfile=\"'\
# +f2ile+'['+str(lf2ext)+']\" operation=PIX_TO_WORLD to=D chatter='+str(chatter)
#if chatter > 1: print command
#system( command )
#f = open('detmm.txt', "r")
#line = f.read()
#if chatter > 1: print 'detmm: '+line
#x2, y2 = line.split()[2:4]
#f.close
#x2 = float(x1)
#y2 = float(y1)
if chatter > 2:
print(" The [det]coordinates in mm are (%8.4f,%8.4f) " % ( x2, y2))
# convert anchor in DET coordinate mm to pixels and arcsec from boresight
anker_lf2det = np.array([x2,y2])/0.009075+np.array((1100.5,1100.5))
msg += "LFILT2_ANCHOR= [%6.1f,%6.1f]\n"%(anker_lf2det[0],anker_lf2det[1])
lenticular_anchors.update({'lfilt2':lfilt2,'lfilt2_anker':anker_lf2det})
if (x2 < -14) | (x2 > 14) | (y2 < -14) | (y2 > 14) :
# outside detector
print("/nERROR: source position is not on the detector! Aborting...")
raise IOError("/nERROR: source position in second lenticular filter is not on the detector! ")
# combine lenticular filter anchors, compute (mean) offset, convert in units of degrees
if lfilt2 != None:
anker_uvw1det = (anker_uvw1det+anker_lf2det)*0.5
if lfilter == "fk" :
l2filter = "uvw1"
else: l2filter = lfilter
if wheelpos != 160:
anker_uvw1det_offset = anker_uvw1det - np.array( boresight(filter=l2filter)) # use fixed default value boresight
else:
anker_uvw1det_offset = anker_uvw1det - np.array( boresight(filter=l2filter,date=209952100) )
Xphi, Yphi = anker_uvw1det_offset*0.502
as2deg = 1./3600.
# cleanup
# taken out since file is needed still:
# if method == 'grism_only': os.system('rm '+filestub+'uw1_??.img ')
if uw1rawrenamed: os.system('mv '+uw1newraw+' '+uw1oldraw)
if uw1skyrenamed: os.system('mv '+uw1newsky+' '+uw1oldsky)
crpix = crpix1,crpix2 = hg['crpix1'],hg['crpix2']
crpix = np.array(crpix) # centre of image
cent_ref_2img = np.array([1100.5,1100.5])-crpix
if chatter > 4:
sys.stderr.write('findInputAngle. derived undistorted detector coord source in lenticular filter 1 = (%8.5f,%8.5f) mm '%(x1,y1))
if lfilt2 != None:
sys.stderr.write('findInputAngle. derived undistorted detector coord source in lenticular filter 2 = (%8.5f,%8.5f) mm '%(x2,y2))
if chatter > 2:
print('findInputAngle. derived undistorted detector coord lenticular filter 1 = ',anker_uvw1det)
print('findInputAngle. derived undistorted physical image coord lenticular filter 1 = ',anker_uvw1det-cent_ref_2img)
if lfilt2 != None:
print('findInputAngle. derived undistorted detector coord lenticular filter 2 = ',anker_lf2det)
print('findInputAngle. derived undistorted physical image coord lenticular filter 1 = ',anker_lf2det -cent_ref_2img)
print('findInputAngle. derived boresight offset lenticular filter ',lfilter,' (DET pix): ',anker_uvw1det_offset)
print('findInputAngle. derived boresight offset: (', Xphi, Yphi,') in \" = (',Xphi*as2deg, Yphi*as2deg,') degrees')
# cleanup temp files:
#system('rm radec.txt skyfits.out skyfits.in detmm.txt')
return Xphi*as2deg, Yphi*as2deg, tstart, msg, lenticular_anchors
def get_radec(file='radec.usno', objectid=None, tool='astropy', chatter=0):
'''Read the decimal ra,dec from a file
or look it up using the objectid name from CDS
Parameters
----------
file: str, optional
path, filename of ascii file with just the ra, dec position in decimal degrees
objectid : str, optional
name of object that is recognized by the (astropy.coordinates/CDS Sesame) service
if not supplied a file name is required
tool : str
name tool to use; either 'astropy' or 'cdsclient'
chatter : int
verbosity
Returns
-------
ra,dec : float
Position (epoch J2000) in decimal degrees
Note
----
requires network service
either the file present or the objectid is required
'''
if objectid == None:
try:
f = open(file)
line = f.readline()
f.close()
ra,dec = line.split(',')
ra = float( ra)
dec = float(dec)
if chatter > 0:
print("reading from ",file," : ", ra,dec)
return ra,dec
except:
raise IOError("Error reading ra,dec from file. Please supply an objectid or filename with the coordinates")
elif tool == 'cdsclient' :
import os
# see http://cdsarc.u-strasbg.fr/doc/sesame.htx
# using 'sesame' script from cdsclient package
# -- tbd: need to probe internet connection present or bail out ?
command = "sesame -o2 "+objectid+" > radec.sesame"
if chatter > 1:
print(command)
try:
if not os.system(command):
os.system('cat radec.sesame')
f = open('radec.sesame')
lines = f.readlines()
things = lines[1].split()
f.close()
command = "scat -c ub1 -ad "+things[0]+" "+things[1]+" > radec.usnofull"
if chatter > 0: print(command)
if not os.system(command):
f = open('radec.usnofull')
line = f.readline()
f.close()
if len( line.split() ) == 0:
if chatter > 3: print("ra,dec not found in usno-b1: returning sesame result")
return float(things[0]),float(things[1])
ra,dec, = line.split()[1:3]
f = open('radec.usno','w')
f.write("%s,%s" % (ra,dec) )
f.close()
ra = float( ra)
dec = float(dec)
return ra,dec
else:
if chatter > 0: print('get_radec() error call sesame ')
else:
if chatter > 0: print("get_radec() error main call ")
return None,None
except:
raise RuntimeError("no RA and DEC were found")
elif tool == 'astropy' :
if objectid == None:
raise RuntimeError("objectid is needed for position lookup")
from astropy import coordinates
pos = coordinates.ICRS.from_name(objectid)
return pos.ra.degree, pos.dec.degree
else:
raise IOError("improper tool or file in calling parameters ")
def get_initspectrum(net,var,fitorder, wheelpos, anchor, C_1=None,C_2=None,dist12=None,
xrange=None, nave = 3, predict2nd=True, chatter=0):
""" wrapper for call
boxcar smooth image over -nave- pixels
"""
try:
from convolve import boxcar
except:
from stsci.convolve import boxcar
return splitspectrum(boxcar(net,(nave,)),boxcar(var,(nave,)),fitorder,wheelpos,
anchor, C_1=C_1, C_2=C_2, dist12=dist12,
xrange=xrange,predict2nd=predict2nd, chatter=chatter)
def splitspectrum(net,var,fitorder,wheelpos,anchor,C_1=None,C_2=None,dist12=None,
xrange=None,predict2nd=True,plotit=-790,chatter=0):
''' This routine will compute the counts in the spectrum
using the mean profiles of the orders modeled as gaussians with fixed sigma
for each order. The counts are weighted according to the position in the
profile and the variance in the image (see Eq. 8, <NAME>,1986, PASP 98, 609.)
WARNING: No attempt is made to improve the fit of the profile to the data.
if the distance of the orders is less than a fraction of the order width sigma,
the second order is estimated from the first, and the third order is neglected.
assumed fitorder arrays (from curved_extraction) include (first) guess spectrum.
output array of counts[order, dispersion]
anchor is needed to decide if the orders split up or down
2010-08-21 NPMKuin (MSSL) initial code
2011-08-23 to do: quality in output
2011-09-05 mods to handle order merging
2011-09-11 normal extraction added as well as optimal extraction for region [-sig,+sig] wide.
larger widths violate assumption of gaussian profile. Lorentzian profile might work
for more extended widths.
'''
from numpy import zeros,sqrt,pi,arange, array, where, isfinite, polyval, log10
# the typical width of the orders as gaussian sigma [see singlegaussian()] in pixels
sig0 = 4.8
sig1 = 3.25
sig2 = 4.3
sig3 = 6.0
# required order distance to run non-linear fit (about half the sigma)
req_dist_12 = 2.0
req_dist_13 = 2.0
req_dist_23 = 2.0
# factor to account for reduction in variance due to resampling/rotation
varFudgeFactor = 0.5
# approximate width of extended spectral feature in a line profile (in pix) poly
# width = polyval(widthcoef, lambda) ; while the main peak ~ 3 pix (~sigma)
widthcoef = array([-8.919e-11,2.637e-06,-1.168e-02,15.2])
# extract simple sum to n x sigma
nxsig = 1.0
# set amplitude limits second order
amp2lim = None
top = True
if (anchor[0] < 1400) & (anchor[1] < 800) : top = False
try:
(present0,present1,present2,present3),(q0,q1,q2,q3), (
y0,dlim0L,dlim0U,sig0coef,sp_zeroth,co_zeroth),(
y1,dlim1L,dlim1U,sig1coef,sp_first, co_first ),(
y2,dlim2L,dlim2U,sig2coef,sp_second,co_second),(
y3,dlim3L,dlim3U,sig3coef,sp_third, co_third ),(
x,xstart,xend,sp_all,quality,co_back) = fitorder
x0 = x1 = x2 = x3 = x
except (ValueError, TypeError):
print("get_cuspectrum: input parameter fitorder is not right\n ABORTING . . . ")
raise RuntimeError("input parameter fitorder is not right")
nx = len(x0)
x0 = x0[q0]
x1 = x1[q1]
x2 = x2[q2]
x3 = x3[q3]
# check that the dimension size is right
if nx != net.shape[1]:
print("get_cuspectrum: size of input image %4i and fitorder %4i not compatible "%(nx,net.shape[1]))
raise RuntimeError
return
# force var to be positive; assume var is in counts/pix
q = where(var <= 0)
var[q] = 1.e-10
# initialize
counts = zeros(nx*4).reshape(4,nx)
variance = zeros(nx*4).reshape(4,nx)
borderup = zeros(nx*4).reshape(4,nx) - 99
borderdown = zeros(nx*4).reshape(4,nx) - 99
newsigmas = zeros(nx*4).reshape(4,nx)
bs = 1.0 # borderoffset in sigma for plot
qflag = quality_flags()
# here counts[0,:] = zeroth order
# counts[1,:] = first order
# etc. for 2nd, 3rd orders.
fractions = zeros(nx*4).reshape(4,nx) -1
count_opt = zeros(nx*4).reshape(4,nx)
var_opt = zeros(nx*4).reshape(4,nx)
# predict the second order amplitude
if (predict2nd & present2 & (sp_first[q2].mean() > 0.0) & (type(C_1) != typeNone) & (type(C_2) != typeNone)):
# dis = q1[0]
# spnet = sp_first[q1[0]]
# qual = quality[q1[0]]
# dismin = dlim1L
# dismax = dlim1U
# (wav2, dis2, flux2, qual2, d12), (wave, dis, spnet) = predict_second_order(dis,spnet,C_1,C_2,d12,qual,dismin,dismax,wheelpos)
SO = predict_second_order(x[q1[0]], sp_first[q1[0]], C_1,C_2,dist12,quality[q1[0]], dlim1L,dlim1U,wheelpos)
dis2 = (SO[1][1]+dist12)
if type(xrange) == typeNone:
ileft = 2
irite = nx -2
else:
ileft = xrange[0]
irite = xrange[1]
for i in range(ileft,irite):
#if i in q3[0]:
# ans = raw_input('continue?')
# chatter = 5
if chatter > 3: print("get_initspectrum.curved_extraction [trackfull] fitting i = %2i x=%6.2f"%(i,x[i]))
# do/try the zeroth order
if i in q0[0]:
if chatter > 4: print(" zeroth order")
# normalization factor for singlegaussian is sqrt(pi).sigma.amplitude
# but use the measured counts within 3 sigma.
sig0 = polyval(sig0coef, i)
j1 = int(y0[i] - nxsig*sig0)
j2 = int(y0[i] + nxsig*sig0 + 1)
# get weighted sum now. Renormalize to get total counts in norm.
yr = arange(j1,j2)
prob = singlegaussian(yr,1.0,y0[i],sig0)
P = (prob/prob.sum()).flatten()
V = var[j1:j2,i].flatten()*varFudgeFactor
net0 = net[j1:j2,i].flatten()
net0[net0 < 0.] = 0.
qfin = isfinite(net0)
variance[0,i] = (V[qfin]).sum()
counts[0,i] = net0[qfin].sum()
# optimal extraction
j1 = int(y0[i] - sig0)
j2 = int(y0[i] + sig0)
yr = arange(j1,j2)
prob = singlegaussian(yr,1.0,y0[i],sig0)
P = (prob/prob.sum()).flatten()
V = var[j1:j2,i].flatten()*varFudgeFactor
net0 = net[j1:j2,i].flatten()
net0[net0 < 0.] = 0.
qfin = isfinite(net0)
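# Optimal (inverse-variance profile-weighted) extraction, cf. Eq. 8 of Horne (1986)
# cited in the splitspectrum docstring: with normalised profile P and pixel variance V,
#   count_opt = sum(P*net/V) / sum(P*P/V)   and   var_opt = 1 / sum(P*P/V),
# which is what the two statements below evaluate for the zeroth order.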
var_opt[0,i] = 1.0/ (( P[qfin]*P[qfin]/V[qfin]).sum())
count_opt[0,i] = var_opt[0,i] * ( P[qfin] * net0[qfin] / V[qfin] ).sum()
newsigmas[0,i] = sig0
borderup [0,i] = y0[i] - bs*sig0
borderdown[0,i] = y0[i] + bs*sig0
# do the first order
if ((i in q1[0]) & (i not in q2[0])) :
if chatter > 4: print(" first order")
sig1 = polyval(sig1coef,i)
j1 = int(y1[i] - nxsig*sig1)
j2 = int(y1[i] + nxsig*sig1 + 1)
Xpos = array([i])
Ypos = array(y1[i])
sigmas = array([sig1])
Z = get_components(Xpos,net,Ypos,wheelpos,chatter=chatter,\
composite_fit=True,caldefault=True,sigmas=sigmas,
fiterrors=False,fixsig=True,fixpos=True,amp2lim=None)
a1 = Z[0][0][0]
sig1 = Z[0][0][2]
# get weighted sum now. Renormalize to get total counts in norm.
yr = arange(j1,j2)
prob = singlegaussian(yr,1.0,y1[i],sig1)
P = (prob/prob.sum()).flatten()
V = var[j1:j2,i].flatten()*varFudgeFactor
net1 = net[j1:j2,i].flatten()
net1[net1 < 0.] = 0.
qfin = isfinite(net1)
counts[1,i] = net1[qfin].sum()
variance[1,i] = (V[qfin]).sum()
# optimal extraction
j1 = int(y1[i] - sig1)
j2 = int(y1[i] + sig1 + 1)
# get weighted sum now. Renormalize to get total counts in norm.
yr = arange(j1,j2)
prob = singlegaussian(yr,1.0,y1[i],sig1)
P = (prob/prob.sum()).flatten()
V = var[j1:j2,i].flatten()*varFudgeFactor
net1 = net[j1:j2,i].flatten()
net1[net1 < 0.] = 0.
qfin = isfinite(net1)
var_opt[1,i] = 1.0/ (( P[qfin]*P[qfin]/V[qfin]).sum())
count_opt[1,i] = var_opt[1,i] * ( P[qfin] * net1[qfin] / V[qfin] ).sum()
newsigmas [1,i] = sig1
borderup [1,i] = y1[i] - bs*sig1
borderdown[1,i] = y1[i] + bs*sig1
fractions [1,i] = 1.
# do the first and second order
if ((i in q1[0]) & (i in q2[0]) & (i not in q3[0])):
if chatter > 4: print(" first and second orders")
sig1 = polyval(sig1coef,i)
sig2 = polyval(sig2coef,i)
if abs(y2[i]-y1[i]) < req_dist_12:
# do not fit profiles; use predicted second order
# first order fit
Xpos = array([i])
if top:
j1 = int(y1[i] - nxsig*sig1)
j2 = int(y2[i] + nxsig*sig2 + 1)
Ypos = array([y1[i]])
sigmas = array([sig1])
else:
j1 = int(y2[i] - nxsig * sig2)
j2 = int(y1[i] + nxsig * sig1)
Ypos = array([y1[i]])
sigmas = array([sig1])
Z = get_components(Xpos,net,Ypos,wheelpos,chatter=chatter,\
composite_fit=True,caldefault=True,sigmas=sigmas,
fixsig=True,fixpos=True,fiterrors=False)
a1 = Z[0][0][2]
sig1 = Z[0][0][4]
quality[i] += qflag['overlap']
# find second order prediction min, max -> amp2lim
ilo = dis2.searchsorted(i)
a2 = SO[1][2][ilo-1:ilo+1].mean()  # [1][2] matches the three-order branch below; [1][3] is out of range for SO[1]
if a1 > a2:
a1 -= a2
else: a1 = 0.
else:
# orders 1,2 separated enough to fit profiles
if top:
j1 = int(y1[i] - nxsig*sig1)
j2 = int(y2[i] + nxsig*sig2 + 1)
Ypos = array([y1[i],y2[i]])
sigmas = array([sig1,sig2])
else:
j1 = int(y2[i] - nxsig * sig2)
j2 = int(y1[i] + nxsig * sig1)
Ypos = array([y2[i],y1[i]])
sigmas = array([sig2,sig1])
# fit for the amplitudes of first and second order
Xpos = array([i])
Z = get_components(Xpos,net,Ypos,wheelpos,chatter=chatter,\
composite_fit=True,caldefault=True,sigmas=sigmas,
fiterrors=False,fixsig=True,fixpos=True,amp2lim=amp2lim)
# amplitudes of first and second order determine the flux ratio
if top:
a1 = Z[0][0][0]
a2 = Z[0][0][3]
sig1 = Z[0][0][2]
sig2 = Z[0][0][5]
else:
a2 = Z[0][0][0]
a1 = Z[0][0][3]
sig2 = Z[0][0][2]
sig1 = Z[0][0][5]
if a1 <= 0. : a1 = 1.e-6
if a2 <= 0. : a2 = 1.e-7
if chatter > 4:
print('get_initspectrum: i=%5i a1=%6.1f a2=%6.1f y1=%6.1f y2=%6.1f ' % (i,a1,a2,y1[i],y2[i]))
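# Overlapping first and second orders: model both orders as gaussians (ff1, ff2) over the
# combined window, split each pixel's net counts between the orders in proportion to the
# model fractions frac1, frac2, and inflate each order's variance by the fractional
# contamination from the other order before summing.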
yr = arange( max([int(y1[i]-3.*sig1),0]) , min([int(y2[i]+3.*sig1),slit_width]) ) # base 1 pixels
ff1 = singlegaussian(yr,a1,y1[i],sig1)
ff2 = singlegaussian(yr,a2,y2[i],sig2)
fft = ff1+ff2 # total
frac1 = ff1/fft # fraction of counts belonging to first order for each pixel
frac2 = ff2/fft # fractional contribution of other order to counts
# normalised by total for each pixel (= divide by ff1t)
Var = var[yr,i] * varFudgeFactor
P1 = (ff1/fft.sum()).flatten() # probability normalised fraction per pixel
net1 = net[yr ,i].flatten() * frac1 # counts that belong to first order
net1[net1 < 0.] = 0.
qfin = isfinite(net1)
net1_tot = net1[qfin].sum()
V1 = Var * (1.+ frac2) # variance of pixel - add other order as noise source
counts[1,i] = net1_tot
# compute a simple weighted pixel-by-pixel variance, and add it. Weight by normalized net counts/pixel.
variance[1,i] = (V1[qfin]).sum()
P2 = (ff2/fft.sum()).flatten()
V2 = Var * (1.+ frac1)
net2 = net[yr ,i].flatten() * frac2
net2[net2 < 0.] = 0.
qfin = isfinite(net2)
net2_tot = net2[qfin].sum()
counts[2,i] = net2_tot
variance[2,i] = (V2[qfin]).sum()
fractions [1,i] = frac1.sum()
fractions [2,i] = frac2.sum()
# optimal extraction order 1
yr1 = arange( max([0,int(y1[i]-sig1)]) , min([int(y1[i]+sig1),slit_width]) ) # base 1 pixels
Var = var[yr1,i] * varFudgeFactor
ff1 = singlegaussian(yr1,a1,y1[i],sig1)
ff2 = singlegaussian(yr1,a2,y2[i],sig2)
fft = ff1+ff2 # total
frac1 = ff1/fft # fraction of counts belonging to first order for each pixel
frac2 = ff2/fft # fractional contribution of other order to counts
# normalised by total for each pixel (= divide by ff1t)
P1 = (ff1/fft.sum()).flatten() # probability normalised fraction per pixel
net1 = net[yr1 ,i].flatten() * frac1 # counts that belong to first order
net1[net1 < 0.] = 0.
qfin = isfinite(net1)
net1_tot = net1[qfin].sum()
V1 = Var * (1.+ frac2) # variance of pixel - add other order as noise source
var_opt[1,i] = 1.0/ (( P1[qfin]*P1[qfin]/V1[qfin]).sum())
count_opt[1,i] = var_opt[1,i] * ( P1[qfin] * net1[qfin] / V1[qfin] ).sum()
newsigmas[1,i] = sig1
yr2 = arange( max([0,int(y2[i]-sig2)]) , min([int(y2[i]+sig2),slit_width]) ) # base 1 pixels
Var = var[yr2,i] * varFudgeFactor
ff1 = singlegaussian(yr2,a1,y1[i],sig1)
ff2 = singlegaussian(yr2,a2,y2[i],sig2)
fft = ff1+ff2 # total
frac1 = ff1/fft # fraction of counts belonging to first order for each pixel
frac2 = ff2/fft # fractional contribution of other order to counts
# normalised by total for each pixel (= divide by ff1t)
P2 = (ff2/fft.sum()).flatten()
V2 = Var * (1.+ frac1)
net2 = net[yr2 ,i].flatten() * frac2
net2[net2 < 0.] = 0.
qfin = isfinite(net2)
net2_tot = net2[qfin].sum()
var_opt[2,i] = 1.0/ (( P2[qfin]*P2[qfin]/V2[qfin]).sum())
count_opt[2,i] = var_opt[2,i] * ( P2[qfin] * net2[qfin] / V2[qfin] ).sum()
newsigmas[2,i] = sig2
borderup [1,i] = y1[i] - bs*sig1
borderdown[1,i] = y1[i] + bs*sig1
borderup [2,i] = y2[i] - bs*sig2
borderdown[2,i] = y2[i] + bs*sig2
if ((plotit > 0) & (i >= plotit)):
from pylab import plot, legend, figure, clf,title,text
print(Z[0])
print('*********************')
print(qfin)
print(net1)
print(counts[1,i],count_opt[1,i],variance[2,i],var_opt[2,i])
figure(11) ; clf()
plot(yr,net[yr,i],'y',lw=2)
plot(yr,ff1,'k')
plot(yr,ff2,'r')
plot(yr,net1/P1,'bv')
plot(yr,net2/P2,'c^',alpha=0.7)
legend(['net','ff1','ff2','net1/P1','net2/P2'])
title("%7.1e %6.1f %4.1f %7.1e %6.1f %4.1f"%(a1,y1[i],sig1,a2,y2[i],sig2))
figure(12) ; clf()
plot(yr,P1,'k')
plot(yr,P2,'r')
plot(yr,frac1,'b')
plot(yr,frac2,'m')
legend(['P1','P2','frac1','frac2'])
gogo = input('continue?')
# do the first, second and third order case
if ((i in q1[0]) & (i in q2[0]) & (i in q3[0])):
if chatter > 4: print("first, second and third order")
sig1 = polyval(sig1coef,i)
sig2 = polyval(sig2coef,i)
sig3 = polyval(sig3coef,i)
if ((abs(y2[i]-y1[i]) < req_dist_12) & (abs(y3[i]-y1[i]) < req_dist_13)):
# do not fit profiles; use only predicted second order
# first order fit
Xpos = array([i])
if top:
j1 = int(y1[i] - nxsig*sig1)
j2 = int(y2[i] + nxsig*sig2 + 1)
Ypos = array([y1[i]])
sigmas = array([sig1])
else:
j1 = int(y2[i] - nxsig * sig2)
j2 = int(y1[i] + nxsig * sig1)
Ypos = array([y1[i]])
sigmas = array([sig1])
Z = get_components(Xpos,net,Ypos,wheelpos,chatter=chatter,\
composite_fit=True,caldefault=True,sigmas=sigmas,
fiterrors=False,fixsig=True,fixpos=True)
#a1 = Z[0][0][2]
#sig1 = Z[0][0][4]
a1 = Z[0][0][0]
sig1 = Z[0][0][2]
# find second order prediction min, max -> amp2lim
ilo = dis2.searchsorted(i)
a2 = SO[1][2][ilo-1:ilo+1].mean()
if a1 > a2:
a1 -= a2
else: a1 = 0.
a3 = 0.
quality[i] += qflag['overlap']
else:
if top:
j1 = int(y1[i] - nxsig*sig1)
j2 = int(y3[i] + nxsig*sig3 + 1)
Ypos = array([y1[i],y2[i],y3[i]])
sigmas = array([sig1,sig2,sig3])
else:
j1 = int(y3[i] - nxsig*sig3)
j2 = int(y1[i] + nxsig*sig1)
Ypos = array([y3[i],y2[i],y1[i]])
sigmas = array([sig3,sig2,sig1])
# fit for the amplitudes of first and second order
Xpos = array([i])
Z = get_components(Xpos,net,Ypos,wheelpos,chatter=chatter,\
composite_fit=True,caldefault=True,sigmas=sigmas,
fiterrors=False,amp2lim=amp2lim,fixsig=True,fixpos=True)
if top:
a1 = Z[0][0][0]
a2 = Z[0][0][3]
a3 = Z[0][0][6]
sig1 = Z[0][0][2]
sig2 = Z[0][0][5]
sig3 = Z[0][0][8]
else:
a1 = Z[0][0][6]
a2 = Z[0][0][3]
a3 = Z[0][0][0]
sig1 = Z[0][0][8]
sig2 = Z[0][0][5]
sig3 = Z[0][0][2]
yr1 = arange(int( y1[i]-nxsig*sig1) , int(y1[i]+nxsig*sig1) )
ff1 = singlegaussian(yr1,a1,y1[i],sig1)
ff1t = ff1+singlegaussian(yr1,a2,y2[i],sig2)+singlegaussian(yr1,a3,y3[i],sig3)
frac1 = ff1/ff1t
yr2 = arange( int(y2[i]-nxsig*sig2) , int(y2[i]+nxsig*sig2) )
ff2 = singlegaussian(yr2,a2,y2[i],sig2)
ff2t = ff2 + singlegaussian(yr2,a1,y1[i],sig1) + singlegaussian(yr2,a3,y3[i],sig3)
frac2 = ff2/ff2t
yr3 = arange( int(y3[i]-nxsig*sig3 ),int( y3[i]+nxsig*sig3 ))
ff3 = singlegaussian(yr3,a3,y3[i],sig3)
ff3t = ff3+singlegaussian(yr3,a1,y1[i],sig1)+singlegaussian(yr3,a2,y2[i],sig2)
frac3 = ff3/ff3t
fra21 = singlegaussian(yr2,a1,y1[i],sig1)
fra21 /= (fra21+singlegaussian(yr2,a2,y2[i],sig2)+singlegaussian(yr2,a3,y3[i],sig3))
fra31 = singlegaussian(yr3,a1,y1[i],sig1)
fra31 /= (fra31+singlegaussian(yr3,a2,y2[i],sig2)+singlegaussian(yr3,a3,y3[i],sig3))
fra12 = singlegaussian(yr1,a2,y2[i],sig2)
fra12 /= (fra12+singlegaussian(yr1,a1,y1[i],sig1) + singlegaussian(yr1,a3,y3[i],sig3))
fra32 = singlegaussian(yr3,a2,y2[i],sig2)
fra32 /= (fra32+singlegaussian(yr3,a1,y1[i],sig1) + singlegaussian(yr3,a3,y3[i],sig3))
fra13 = singlegaussian(yr1,a3,y3[i],sig3)
fra13 /= (fra13+singlegaussian(yr1,a1,y1[i],sig1)+singlegaussian(yr1,a2,y2[i],sig2))
fra23 = singlegaussian(yr2,a3,y3[i],sig3)
fra23 /= (fra23+singlegaussian(yr2,a1,y1[i],sig1)+singlegaussian(yr2,a2,y2[i],sig2))
Var1 = var[yr1,i].flatten()* varFudgeFactor
Var2 = var[yr2,i].flatten()* varFudgeFactor
Var3 = var[yr3,i].flatten()* varFudgeFactor
P1 = (ff1/ff1.sum()).flatten() # probability of first order photon
P2 = (ff2/ff2.sum()).flatten()
P3 = (ff3/ff3.sum()).flatten()
V1 = Var1 * (1.+ fra12+fra13) # variance of pixel
V2 = Var2 * (1.+ fra21+fra23)
V3 = Var3 * (1.+ fra31+fra32)
net1 = net[yr1 ,i].flatten() * frac1 # counts that belong to first order
net2 = net[yr2 ,i].flatten() * frac2
net3 = net[yr3 ,i].flatten() * frac3
net1[ net1 < 0.] = 0.
net2[ net2 < 0.] = 0.
net3[ net3 < 0.] = 0.
qfin1 = isfinite(net1)
qfin2 = isfinite(net2)
qfin3 = isfinite(net3)
counts[1,i] = net1[qfin1].sum()
counts[2,i] = net2[qfin2].sum()
counts[3,i] = net3[qfin3].sum()
variance[1,i] = (V1[qfin1]).sum()
variance[2,i] = (V2[qfin2]).sum()
variance[3,i] = (V3[qfin3]).sum()
borderup [1,i] = y1[i] - bs*sig1
borderdown[1,i] = y1[i] + bs*sig1
borderup [2,i] = y2[i] - bs*sig2
borderdown[2,i] = y2[i] + bs*sig2
borderup [3,i] = y3[i] - bs*sig3
borderdown[3,i] = y3[i] + bs*sig3
fractions [1,i] = frac1.sum()
fractions [2,i] = frac2.sum()
fractions [3,i] = frac3.sum()
# optimal extraction
yr1 = arange(int( y1[i]-sig1) , int(y1[i]+sig1) )
ff1 = singlegaussian(yr1,a1,y1[i],sig1)
ff1t = ff1+singlegaussian(yr1,a2,y2[i],sig2)+singlegaussian(yr1,a3,y3[i],sig3)
frac1 = ff1/ff1t
yr2 = arange( int(y2[i]-sig2) , int(y2[i]+sig2) )
ff2 = singlegaussian(yr2,a2,y2[i],sig2)
ff2t = ff2 + singlegaussian(yr2,a1,y1[i],sig1) + singlegaussian(yr2,a3,y3[i],sig3)
frac2 = ff2/ff2t
yr3 = arange( int(y3[i]-sig3 ),int( y3[i]+sig3 ))
ff3 = singlegaussian(yr3,a3,y3[i],sig3)
ff3t = ff3+singlegaussian(yr3,a1,y1[i],sig1)+singlegaussian(yr3,a2,y2[i],sig2)
frac3 = ff3/ff3t
fra21 = singlegaussian(yr2,a1,y1[i],sig1)
fra21 /= (fra21+singlegaussian(yr2,a2,y2[i],sig2)+singlegaussian(yr2,a3,y3[i],sig3))
fra31 = singlegaussian(yr3,a1,y1[i],sig1)
fra31 /= (fra31+singlegaussian(yr3,a2,y2[i],sig2)+singlegaussian(yr3,a3,y3[i],sig3))
fra12 = singlegaussian(yr1,a2,y2[i],sig2)
fra12 /= (fra12+singlegaussian(yr1,a1,y1[i],sig1) + singlegaussian(yr1,a3,y3[i],sig3))
fra32 = singlegaussian(yr3,a2,y2[i],sig2)
fra32 /= (fra32+singlegaussian(yr3,a1,y1[i],sig1) + singlegaussian(yr3,a3,y3[i],sig3))
fra13 = singlegaussian(yr1,a3,y3[i],sig3)
fra13 /= (fra13+singlegaussian(yr1,a1,y1[i],sig1)+singlegaussian(yr1,a2,y2[i],sig2))
fra23 = singlegaussian(yr2,a3,y3[i],sig3)
fra23 /= (fra23+singlegaussian(yr2,a1,y1[i],sig1)+singlegaussian(yr2,a2,y2[i],sig2))
Var1 = var[yr1,i].flatten()* varFudgeFactor
Var2 = var[yr2,i].flatten()* varFudgeFactor
Var3 = var[yr3,i].flatten()* varFudgeFactor
P1 = (ff1/ff1.sum()).flatten() # probability of first order photon
P2 = (ff2/ff2.sum()).flatten()
P3 = (ff3/ff3.sum()).flatten()
V1 = Var1 * (1.+ fra12+fra13) # variance of pixel
V2 = Var2 * (1.+ fra21+fra23)
V3 = Var3 * (1.+ fra31+fra32)
net1 = net[yr1 ,i].flatten() * frac1 # counts that belong to first order
net2 = net[yr2 ,i].flatten() * frac2
net3 = net[yr3 ,i].flatten() * frac3
net1[ net1 < 0.] = 0.
net2[ net2 < 0.] = 0.
net3[ net3 < 0.] = 0.
qfin1 = isfinite(net1)
qfin2 = isfinite(net2)
qfin3 = isfinite(net3)
var_opt[1,i] = 1.0/ (( P1[qfin1]*P1[qfin1]/V1[qfin1]).sum())
count_opt[1,i] = var_opt[1,i] * ( P1[qfin1] * net1[qfin1] / V1[qfin1] ).sum()
newsigmas[1,i] = sig1
var_opt[2,i] = 1.0/ (( P2[qfin2]*P2[qfin2]/V2[qfin2]).sum())
count_opt[2,i] = var_opt[2,i] * ( P2[qfin2] * net2[qfin2] / V2[qfin2] ).sum()
newsigmas[2,i] = sig2
var_opt[3,i] = 1.0/ (( P3[qfin3]*P3[qfin3]/V3[qfin3]).sum())
count_opt[3,i] = var_opt[3,i] * ( P3[qfin3] * net3[qfin3] / V3[qfin3] ).sum()
newsigmas[3,i] = sig3
return count_opt, var_opt, borderup, borderdown, (fractions,counts, variance, newsigmas)
def updateFitorder(extimg, fitorder1, wheelpos, predict2nd=False, fit_second=False, \
fit_third=False, C_1=None, C_2=None, d12=None, full=False, chatter=0):
'''
2011-08-26 NPMKuin (MSSL/UCL) fine-tune the parameters determining
the overall profile of the orders, especially the position of the
centre and the width by fitting gaussians to a limited number of bands.
Return an updated fitorder array, and new background. Won't work when
the orders overlap too much. (TBD what exactly is -too much-)
Use the predicted second order if predict2nd is set - requires C_1, C_2, d12
2012-01-05 NPMK
'''
from numpy import zeros,sqrt,pi,arange, array, where, isfinite,linspace
import numpy as np
# the typical width of the orders as gaussian sigma in pixels
sig0 = 4.8
sig1 = 3.25
sig2 = 4.3
sig3 = 4.9
try: (present0,present1,present2,present3),(q0,q1,q2,q3),(
y0,dlim0L,dlim0U,sig0coef,sp_zeroth,co_zeroth),(
y1,dlim1L,dlim1U,sig1coef,sp_first, co_first ),(
y2,dlim2L,dlim2U,sig2coef,sp_second,co_second),(
y3,dlim3L,dlim3U,sig3coef,sp_third, co_third ),(
x,xstart,xend,sp_all,quality,co_back) = fitorder1
except (ValueError, TypeError):
print("updateFitorder: input parameter fitorder is not right\n ABORTING . . . ")
raise RuntimeError("input parameter fitorder is not right")
fsig0coef = array([4.2])
nx = len(x)
amp2lim = None # default
# check that the dimension size is right
if nx != extimg.shape[1]:
print("spectrumProfile: size of input image %4i and fitorder %4i not compatible "%(nx,extimg.shape[1]))
raise RuntimeError
return
oldpres2, oldpres3 = present2, present3
# do not update third order when it is too short or fit_second false
if present3 & ((abs(dlim3U-dlim3L) < 100) | (not fit_second) | (not fit_third)):
present3 = False
if chatter > 2:
print("third order update curvature disabled: not enough points")
# do not update second order when it is too short
if present2 & ((abs(dlim2U-dlim2L) < 100) | (not fit_second)) :
if chatter > 2:
print("second order update curvature disabled: not enough points")
present2 = False
# define some list to tuck the newly fitted parameters into
fx0=list() ; fx1=list() ; fx2=list() ; fx3=list() # position along dispersion direction
fy0=list() ; fy1=list() ; fy2=list() ; fy3=list() # position normal to dispersion direction
bg0=list() ; bg1=list() ; e_bg0=list() ; e_bg1=list()
fsig0=list(); fsig1=list(); fsig2=list(); fsig3=list() # sigma
e_fx0=list() ; e_fx1=list() ; e_fx2=list() ;e_fx3=list() # errors
e_fy0=list() ; e_fy1=list() ; e_fy2=list() ; e_fy3=list() # errors
e_fsig0=list(); e_fsig1=list(); e_fsig2=list(); e_fsig3=list() # errors
# Fit the orders with gaussians based on the approximate positions to get
# a trusted solution for the position of the orders and the sigmas of the
# orders.
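# The refits below work on 30-pixel wide bands along the dispersion direction: for each
# band centre the subimage extimg[:, i-15:i+15] is fitted with one gaussian per visible
# order (via get_components), and the fitted centres and sigmas are afterwards combined
# into new low-order polynomials for the curvature and width of each order.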
# do the zeroth order
xpos = arange(30)
if present0:
for i in range(q0[0][0]+15,q0[0][-1],30):
if chatter > 4: print(" refit zeroth order position and sigma")
# only fit high quality
q = where(quality[i-15:i+15] == 0)[0] + (i-15)
Z = get_components(xpos,extimg[:,i-15:i+15],y0[i],wheelpos,chatter=chatter,\
composite_fit=True,caldefault=True,sigmas=None)
(params,e_params,flag),input = Z
status = flag[5]
# here [bg0,bg1,a0,p0,sig0] = params
# here [e_bg0,e_bg1,e_a0,e_p0,e_sig0] = e_params
if status > 0:
fx0.append( x[i] )
fy0.append( params[3] )
fsig0.append( params[4] )
e_fx0.append( 15 )
e_fy0.append( e_params[3] )
e_fsig0.append( e_params[4] )
bg0.append(params[0])
bg1.append(params[1])
e_bg0.append(e_params[0])
e_bg1.append(e_params[1])
elif chatter > 1:
print('updateFitorder zeroth order failure fit: ')
print('INPUT i: ',i,', xpos : ',xpos,' ypos : ',y0[i])
print('params : ',params)
print('e_params : ',e_params)
fx0q = np.isfinite(np.array(fx0)) & np.isfinite(np.array(fy0))
if len(fx0) > 0:
# re-fit the zeroth order y-offset (remove bad points ???)
fcoef0 = np.polyfit(np.array(fx0)[fx0q],np.array(fy0)[fx0q]-slit_width/2,2)
fsig0coef = np.polyfit(np.array(fx0)[fx0q],np.array(fsig0)[fx0q],2)
else:
if chatter > 1: print("updateFitorder: no success refitting zeroth order")
fcoef0 = array([-0.07,-49.])
fsig0coef = sig0coef
else:
fcoef0 = array([-0.07,-49.])
fsig0coef = sig0coef
# positions in first order (no second / third order to fit)
# implied present1
if chatter > 4:
print("updateFitorder: refit first order position and sigma")
print("updateFitorder: centre bins ",list(range(q1[0][0]+15,q2[0][0],30)))
if present2:
uprange1 = q2[0][0]
else:
uprange1 = q1[0][-1]
for i in range(q1[0][0]+15,uprange1,30):
if chatter > 4: print("bin: ",i," x[i] = ",x[i])
# only fit high quality
q = where(quality[i-15:i+15] == 0)[0] + (i-15)
Z = get_components(xpos,extimg[:,i-15:i+15],y1[i],wheelpos,chatter=chatter,\
composite_fit=True,caldefault=True,sigmas=None)
(params,e_params,flag),input = Z
status = flag[5]
if chatter > 4:
print("updateFitorder: 1st, status = ",flag)
print("params = ",params)
print("errors = ",e_params)
# here [bg0,bg1,a1,p1,sig1] = params
# here [e_bg0,e_bg1,e_a1,e_p1,e_sig1] = e_params
if status > 0:
fx1.append( x[i] )
fy1.append( params[3] )
fsig1.append( params[4] )
e_fx1.append( 15 )
e_fy1.append( e_params[3] )
e_fsig1.append( e_params[4] )
bg0.append(params[0])
bg1.append(params[1])
e_bg0.append(e_params[0])
e_bg1.append(e_params[1])
elif chatter > 1:
print('updateFitorder 1st order failure fit: ')
print('INPUT i: ',i,', xpos : ',xpos,' ypos : ',y1[i])
print('params : ',params)
print('e_params : ',e_params)
# predict the second order amplitude
if (predict2nd & present2 & (type(C_1) != typeNone) & (type(C_2) != typeNone) & (type(d12) != typeNone)):
print("updateFitorder: calling predict_second_order()")
# here the arguments are: dis = q1[0]
# spnet = sp_first[q1[0]]
# qual = quality[q1[0]] ? or ... x[q1[0]] argument?
# dismin = dlim1L
# dismax = dlim1U
# (wav2, dis2, flux2, qual2, d12), (wave, dis, spnet) = predict_second_order(dis,spnet,C_1,C_2,d12,qual,dismin,dismax,wheelpos)
SO = predict_second_order(x[q1[0]], sp_first[q1[0]], C_1, C_2, d12, quality[q1[0]], dlim1L,dlim1U,wheelpos)
dis2 = (SO[0][1]+d12)
flx2 = SO[0][2]
sq = isfinite(dis2+flx2)
#dis2 = dis2[sq]
flx2 = flx2[sq]
else:
print("updateFitorder: skipped call to predict_second_order()")
# positions in first and second orders before third order appears
if present2:
if present3: uprange2 = q3[0][0]
else: uprange2 = q2[0][-1]
if chatter > 4:
print("updateFitorder: refit first + second order position and sigma")
print("updateFitorder: centre bins ",list(range(q2[0][0]+15,uprange2,30)))
for i in range(q2[0][0]+15,uprange2,30):
if chatter > 4: print("bin: ",i," x[i] = ",x[i])
# only fit high quality
q = where(quality[i-15:i+15] == 0)[0] + (i-15)
# use the predicted second order to define limits to the amplitude for fitting
if isfinite(y2[i]) & isfinite(y1[i]):
if ( (abs(y2[i]-y1[i]) < 5) & (abs(y2[i]-y1[i]) >= 1.5) ):
# find second order prediction for this range, min, max -> amp2lim
if predict2nd:
if dis2[0] <= i-15:
ilo = dis2.searchsorted(i-15)
else: ilo=0
if dis2[-1] > i+15:
iup = dis2.searchsorted(i+15)+1
else: iup = dis2[-1]
if chatter > 4:
print("ilo:iup = ",ilo,iup)
print(" min: ",np.min(flx2))
print(" max: ",np.max(flx2))
amp2lim = array([np.min(flx2),np.max(flx2)])
else:
print("Error: need to predict 2nd order")
amp2lim=None
elif ( abs(y2[i]-y1[i]) < 1.5 ):
if predict2nd:
# find second order prediction for this range,but restrict range min, max -> amp2lim
if dis2[0] <= i-15:
ilo = dis2.searchsorted(i-15)
else: ilo=0
if dis2[-1] > i+15:
iup = dis2.searchsorted(i+15)+1
else: iup = dis2[-1]
amp2range = abs(np.min(flx2) - np.max(flx2))
amp2lim = amp2range*array([-0.5,0.25]) + (flx2).mean()
else:
print("Error: need to predict 2nd order")
amp2lim=None
else:
amp2lim = None
else:
amp2lim = None
Z = get_components(xpos,extimg[:,i-15:i+15],array([y1[i],y2[i]]),wheelpos,chatter=chatter,\
composite_fit=True,caldefault=True,sigmas=None,amp2lim=amp2lim)
(params,e_params,flag),input = Z
status = flag[5]
# here [bg0,bg1,a1,p1,sig1,a2,p2,sig2] = params
# here [e_bg0,e_bg1,e_a1,e_p1,e_sig1,e_a2,e_p2,e_sig2] = e_params
if status > 0:
fx1.append( x[i] )
fy1.append( params[3] )
fsig1.append( params[4] )
e_fx1.append( 15 )
e_fy1.append( e_params[3] )
e_fsig1.append( e_params[4] )
fx2.append( x[i] )
fy2.append( params[6] )
fsig2.append( params[7] )
e_fx2.append( 15 )
e_fy2.append( e_params[6] )
e_fsig2.append( e_params[7] )
bg0.append(params[0])
bg1.append(params[1])
e_bg0.append(e_params[0])
e_bg1.append(e_params[1])
elif chatter > 1:
print('updateFitorder: 1+2nd order updateFitorder failure fit: ')
print('updateFitorder: INPUT i: ',i,', xpos : ',xpos,' ypos : ',array([y1[i],y2[i]]))
print('updateFitorder: params : ',params)
print('updateFitorder: e_params : ',e_params)
# positions in first, second and third orders
if present3:
for i in range(q3[0][0]+15,q3[0][-1],30):
if chatter > 4:
print(" refit first + second + third orders position and sigma")
print(" centre bins ",list(range(q3[0][0]+15,q3[0][-1],30)))
# only fit high quality
q = where(quality[i-15:i+15] == 0)[0] + (i-15)
if isfinite(y2[i]) & isfinite(y1[i]):
if ( (abs(y2[i]-y1[i]) < 5) & (abs(y2[i]-y1[i]) >= 1.5) ):
if predict2nd & (len(SO[0][2]) > 0):
# find second order prediction for this range, min, max -> amp2lim
try:
if dis2[0] <= i-15:
ilo = dis2.searchsorted(i-15)
else: ilo=0
if dis2[-1] > i+15:
iup = dis2.searchsorted(i+15)+1
else: iup = dis2[-1]
if iup != ilo:
amp2lim = array([min(SO[0][2][ilo:iup]),max(SO[0][2][ilo:iup])])
else:
amp2lim = None
except:
amp2lim = None
else:
print("Error: need to predict 2nd order")
amp2lim = None
elif ( abs(y2[i]-y1[i]) < 1.5 ):
if predict2nd:
# find second order prediction for this range,but restrict range min, max -> amp2lim
try:
if dis2[0] <= i-15:
ilo = dis2.searchsorted(i-15)
else: ilo=0
if dis2[-1] > i+15:
iup = dis2.searchsorted(i+15)
else: iup = dis2[-1]
amp2range = abs(min(SO[0][2][ilo:iup])-max(SO[0][2][ilo:iup]))
amp2lim = amp2range*array([-0.25,0.25]) + (SO[0][2][ilo:iup]).mean()
except:
amp2lim = None
else:
print("Error: need to predict 2nd order")
amp2lim = None
else:
amp2lim = None
if isfinite(y3[i]):
Z = get_components(xpos,extimg[:,i-15:i+15],array([y1[i],y2[i],y3[i]]),wheelpos,chatter=chatter,\
composite_fit=True,caldefault=True,sigmas=None,amp2lim=amp2lim)
(params,e_params,flag),input = Z
status = flag[5]
# here [bg0,bg1,a1,p1,sig1,a2,p2,sig2,a3,p3,sig3] = params
# here [e_bg0,e_bg1,e_a1,e_p1,e_sig1,e_a2,e_p2,e_sig2,e_a3,e_p3,e_sig3] = e_params
if status > 0:
fx1.append( x[i] )
fy1.append( params[3] )
fsig1.append( params[4] )
e_fx1.append( 15 )
e_fy1.append( e_params[3] )
e_fsig1.append( e_params[4] )
fx2.append( x[i] )
fy2.append( params[6] )
fsig2.append( params[7] )
e_fx2.append( 15 )
e_fy2.append( e_params[6] )
e_fsig2.append( e_params[7] )
fx3.append( x[i] )
fy3.append( params[9] )
fsig3.append( params[10] )
e_fx3.append( 15 )
e_fy3.append( e_params[9] )
e_fsig3.append( e_params[10] )
bg0.append(params[0])
bg1.append(params[1])
e_bg0.append(e_params[0])
e_bg1.append(e_params[1])
elif chatter > 1:
print('updateFitorder failure fit 1,2,3rd: ')
print('INPUT i: ',i,', xpos : ',xpos,' ypos : ',array([y1[i],y2[i],y3[i]]))
print('params : ',params)
print('e_params : ',e_params)
# re-fit the 1,2, 3 order y-offset and fit background coefficients (remove bad points ???)
if len(fx1) > 0:
fcoef1 = np.polyfit(array(fx1),array(fy1)-slit_width/2,3)
fsig1coef = np.polyfit(array(fx1),array(fsig1),3)
fx4 = list(fx0)  # copy, so appending the first-order positions does not also modify fx0
for i in fx1: fx4.append(i)
fbg0coef = np.polyfit(array(fx4),array(bg0),3)
fbg1coef = np.polyfit(array(fx4),array(bg1),3)
y1[q1] = np.polyval(fcoef1,x[q1]) + slit_width/2
else:
fsig1coef = sig1coef
if fit_second & (len(fx2) > 0):
fcoef2 = np.polyfit(array(fx2),array(fy2)-slit_width/2,2)
fsig2coef = np.polyfit(array(fx2),array(fsig2),2)
y2[q2] = np.polyval(fcoef2,x[q2]) + slit_width/2
else:
fsig2coef = sig2coef
if fit_third & (len(fx3) > 0):
fcoef3 = np.polyfit(array(fx3),array(fy3)-slit_width/2,1)
fsig3coef = np.polyfit(array(fx3),array(fsig3),1)
y3[q3] = np.polyval(fcoef3,x[q3]) + slit_width/2
else:
fsig3coef = sig3coef
values=(bg0,bg1),(fx0,fx1,fx2,fx3),(fy0,fy1,fy2,fy3),(fsig0,fsig1,fsig2,fsig3)
errors=(e_bg0,e_bg1),(e_fx0,e_fx1,e_fx2,e_fx3),(e_fy0,e_fy1,e_fy2,e_fy3),(e_fsig0,e_fsig1,e_fsig2,e_fsig3)
y0[q0] = np.polyval(fcoef0,x[q0]) + slit_width/2
#y1[q1] = np.polyval(fcoef1,x[q1]) + 100.
#y2[q2] = np.polyval(fcoef2,x[q2]) + 100.
#y3[q3] = np.polyval(fcoef3,x[q3]) + 100.
fitorder = (present0,present1,oldpres2,oldpres3),(q0,q1,q2,q3), (
y0,dlim0L,dlim0U,fsig0coef,sp_zeroth,co_zeroth),(
y1,dlim1L,dlim1U,fsig1coef,sp_first ,co_first ),(
y2,dlim2L,dlim2U,fsig2coef,sp_second,co_second),(
y3,dlim3L,dlim3U,fsig3coef,sp_third, co_third ),(
x,xstart,xend,sp_all,quality,co_back)
if full: return fitorder, values, errors
else: return fitorder
def dAngstrom_dpix_pix (pix,disp,):
"""
Input pix = distance to anchor in pix units
Input disp = polynomial for dispersion
Return Angstroms per pix as a function of x
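Example (illustrative only -- the dispersion coefficients below are hypothetical):
disp = [4.1e-10, -1.2e-6, 9.6e-4, 3.2, 2600.]  # np.polyval convention, highest power first
dAdpix = dAngstrom_dpix_pix(np.arange(-370., 1150.), disp)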
"""
import numpy as np
w1 = np.polyval(disp,pix-0.5) # wavelengths half a pix from centre
w2 = np.polyval(disp,pix+0.5)
return w2-w1 # angstroms per pix
def dAngstrom_dpix_wave (wave, disp, sp_order=1):
"""
Input wave = wavelengths
Input disp = polynomial for dispersion
Return Angstroms per pix as a function of wave
"""
import numpy as np
#if sp_order == 1:
# x = np.arange(-370,1250)
#elif sp_order == 2:
# x = np.arange(30,1500)
#else:
# print "error in dAngstrom_dpix_wave: wrong order: ", sp_order
# raise
#Dinv = polyinverse(disp,x)
#pix = np.polyval(Dinv, wave)
pix = pix_from_wave(disp,wave,spectralorder=sp_order)
return dAngstrom_dpix_pix(pix,disp)
def rebin(binin,func,binout, mode='interpolate',N=20):
'''
Given func(binin) rebin the data to func(binout)
Either
'redistribute' the func values to the new bins (conserve the integral)
or
'interpolate' the func to the new bins
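Example (illustrative only; wave_in/flux_in are assumed to be existing wavelength/flux arrays):
wave_out = np.arange(1700., 6800., 10.)
flux_out = rebin(wave_in, flux_in, wave_out, mode='interpolate', N=20)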
'''
try:
from convolve import boxcar
except:
from stsci.convolve import boxcar
if mode == 'interpolate':
f = boxcar(func,(N,))
return interpol(binout,binin,f)
elif mode == 'redistribute':
# see xspec prep routine for method
raise NotImplementedError("rebin: 'redistribute' mode is not yet implemented")
else:
print('rebin: wrong mode')
raise ValueError("rebin: mode must be 'interpolate' or 'redistribute'")
def spectrumpixshift(w1,spec1, w2,spec2, wmin=None, wmax=None, spectrum=False,
delwav=False, chatter=0):
'''Accurately determine relative wavelength/pixel shift between 2 spectra.
Parameters
----------
w1,spec1, w2,spec2 : array-like
wavelength, spectrum pairs
kwargs : dict
- **wmin,wmax**: float
limits to region to use
- **spectrum** : bool
resample 2nd spectra and return second spectrum shifted
- **delwav** : bool
return the wavelength shift in angstroms instead of the pixel shift
- **chatter** : int
verbosity
Returns
-------
k : int
shift in pixels (returned when `spectrum=False` and `delwav=False`)
delwav : float
shift in angstroms (returned when `spectrum=False` and `delwav=True`)
k, (w1,s2) : int, tuple
pixel shift, plus a tuple of wavelength and flux for the second spectrum, shifted and
resampled onto the wavelength grid of the first spectrum (returned when `spectrum=True`)
Notes
-----
k ~ 1/6 pixel
[option: resample 2nd spectra ]
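Example (w1,s1 and w2,s2 are assumed wavelength/flux arrays from two observations):
k = spectrumpixshift(w1, s1, w2, s2, wmin=2300, wmax=4000)   # shift in (resampled) pixels
dw = spectrumpixshift(w1, s1, w2, s2, delwav=True)           # shift in angstroms
k, (wave, flux2) = spectrumpixshift(w1, s1, w2, s2, spectrum=True)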
'''
from scipy.signal import correlate
import numpy as np
from scipy import interpolate
# valid fluxes
q1 = np.isfinite(spec1)
w1 = w1[q1].flatten()
spec1 = spec1[q1].flatten()
q2 = np.isfinite(spec2)
w2 = w2[q2].flatten()
spec2 = spec2[q2].flatten()
if chatter > 2: print(" * len before min, max - ",len(w1),len(spec1),len(w2),len(spec2))
# interpolating functions
tck1 = interpolate.splrep(w1, spec1, )
tck2 = interpolate.splrep(w2, spec2, )
# limits
if type(wmin) == typeNone:
wmin = np.max([w1[0],w2[0]])
if chatter > 0: print("spectrumpixshift: wmin = ",wmin)
if type(wmax) == typeNone:
wmax = np.min([w1[-1],w2[-1]])
if chatter > 0: print("spectrumpixshift: wmax = ",wmax)
q1 = (w1 > wmin) & (w1 < wmax)
#print "q1:--> ",np.where(q1)
# put both spectra on the same footing
w1 = np.arange(int(w1[q1][0]+0.5),int(w1[q1][-1]+0.5),0.5)
if len(w1) < 1:
print("ERROR in spectrumpixshift; set to 0")
print("q1 = ",q1)
k = 0
if spectrum:
return k, (w2,spec2)
else: return k
s1 = interpolate.splev(w1,tck1,)
s2 = interpolate.splev(w1,tck2,)
n = len(s1)
# find peak in correlation
k = np.argmax(correlate(s1,s2))+1
k = n - k
# shift spectrum s1 by k to match s2
dw = 0
try:
if k > 0:
dw = (w1[k:]-w1[:-k]).mean()
elif k < 0:
dw = (w1[0:k] - w1[-k:]).mean()
except: pass
if chatter > 2:
print("spectrumpixshift: k, dw : ",k,dw)
if spectrum: # return second spectrum shifted
if k < 0:
w1 = w1[0:n+k]
s2 = s2[-k:n]
if k > 0:
w1 = w1[k:n]
s2 = s2[0:n-k]
return k, (w1,s2)
elif delwav:
return dw
else:
return k
def sum_Extimage( pha_file_list, sum_file_name='extracted_image_sum.fit', mode='create',
ankerlist=None, plotimage=True,correlate=True, correlate_wavewindow=[None,None] ,
figno=20, shiftlist=[] ,clobber=False, chatter=1 ):
''' This routine will create/update/read a summed extracted image.
Parameters
----------
pha_file_list : list
list of PHA filenames written by calls of `getSpec`
kwargs : dict
- **sum_file_name** : str
file name for sum
- **mode** : str, {'create','read'}
when 'create' make the sum file; when 'read' read the sum file
- **ankerlist** : list, optional
list of anchor positions
- **plotimage** : bool, optional
make a plot of the image
- **correlate** : bool, optional
try to determine shifts by correlating the image
- **correlate_wavewindow** : list
when correlate `True` then use only the part of the spectrum within [wavemin, wavemax]
- **figno** : int, optional
figure number to use
- **shiftlist** : list, optional
list of shifts to apply
- **clobber** : bool
write over existing file
- **chatter** : int
verbosity
Returns
-------
When `option=read` the following are returned:
- sumimg : 2D array
summed image
- expmap : 2D array
exposure map for each pixel in summed image
- exposure : float
exposure time (maximum)
- wheelpos : int
grism wheel position
- C_1, C_2 : list
dispersion coefficients
- dist12 : float
distance in pixels between the first and second order anchors
- anker : list
anchor position in summed image
- coefficients : tuple
(coef0,coef1,coef2,coef3,sig0coef,sig1coef,sig2coef,sig3coef)
curvature and sigma coefficients for the summed image
- hdr : fits header
Notes
-----
The anchor point, by default, will be at point [100,500]
mode = 'create' <make new sum file>, 'read' <read sum file>
The anchor position in the pha_file will need to be passed via ankerlist or
be given as keyword ANKXIMG, ANKYIMG in the header of the PHA file (it is).
when correlate_wavewindow = [none,none] nothing is done
= [2300,4000] wavelength range where to do cross correlation on flux to
generate corrections to ankx
shiftlist = [None, 0, -2, None ] can be used to force the shifts (in pix)
of the given number in the list of spectra (here assumed to be four).
List length must equal pha_file_list length.
Example:
phafiles = ['sw00032150003ugu_1_pha.fits','sw00032150003ugu_2_pha.fits',
'sw00032150003ugu_3_pha.fits', 'sw00032150003ugu_4_pha.fits',
'sw00032150003ugu_5_pha.fits', 'sw00032150003ugu_6_pha.fits',
'sw00032150003ugu_7_pha.fits', 'sw00032150003ugu_8_pha.fits',
'sw00032150003ugu_9_pha.fits', 'sw00032150003ugu_10_pha.fits',
'sw00032150003ugu_11_pha.fits', 'sw00032150003ugu_12_pha.fits',
'sw00032150003ugu_13_pha.fits']
uvotgetspec.sumimage( phafiles, mode='create',chatter=1,clobber=True)
NPMKuin 2011 (MSSL/UCL)
'''
from astropy.io import fits
import numpy as np
from uvotpy import uvotmisc
import pylab as plt
if plotimage & (mode == 'create'):
fig1 = plt.figure(figno)
plt.clf()
fig2 = plt.figure(figno+1)
plt.clf()
m = -1
img = np.zeros([200,2000],dtype=float)
img2 = np.zeros([200,2000],dtype=float)
expmap = np.zeros([200,2000],dtype=float)
# quamap = np.zeros([200,2000],dtype=float) # need quality map to extracted image in the pha file
tot_exposure = 0.
tstart = 999999999.
tstop = 0.
headers = list()
legend= []
ysh = [0]
yshift = 0.
if mode == 'create':
for m in range(len(pha_file_list)):
pha_file = pha_file_list[m]
d = fits.getdata(pha_file,2)
#print m," - ",pha_file
if m == 0:
w1 = d['lambda']
f1 = d['flux']
w1 = w1[np.isfinite(f1)]
f1 = f1[np.isfinite(f1)]
norm = f1[(np.abs(w1-w1.mean()) < 0.35 * w1.mean())].mean()
f1 /= norm
#print " len w1, f1 = (",len(w1),',',len(f1),')'
else:
w2 = d['lambda']
f2 = d['flux']
w2 = w2[np.isfinite(f2)]
f2 = f2[np.isfinite(f2)]/norm
#print " len w+, f+ = (",len(w2),',',len(f2),')'
ysh.append( spectrumpixshift(w1,f1, w2,f2, wmin=correlate_wavewindow[0], wmax=correlate_wavewindow[1], ) )
# adjust ysh to the mean
if len(shiftlist) == len(pha_file_list):
for ys in range(len(shiftlist)):
if shiftlist[ys] != None:
ysh[ys] = shiftlist[ys]
print("updated shift for "+pha_file_list[ys]+" to ",ysh[ys])
print("shifts are now (in A):",ysh)
ysh -= np.mean(ysh)
# convert ysh (per 0.5 angstrom) to pixels
ysh = np.array( ysh/6+0.5 , dtype=int )
print("plan to apply pixel shifts to images of magnitude = ",ysh)
if not correlate:
ysh = 0 * ysh
print("reset shifts ",ysh)
for m in range(len(pha_file_list)):
pha_file = pha_file_list[m]
f = fits.open(pha_file)
headers.append( f[1].header )
if chatter > 0 :
print('reading '+pha_file+' in mode='+mode)
f.info()
try:
ankx = f[3].header['ANKXIMG'] + ysh[m]
anky = f[3].header['ANKYIMG']
except:
ankx,anky = ankerlist[m]
pass
ankx = int(ankx+0.5)
anky = int(anky+0.5)
expo = f[1].header['exposure']
if chatter > 0:
print('ankx, anky = [',ankx,', ',anky,' ]')
print('exposure = ',expo)
print('ankx was shifted by ',ysh[m],' pix')
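# Align this exposure on the common anchor of the summed frame: the anchor pixel of each
# extracted image is mapped onto [slit_width/2, 500] in the 200x2000 sum arrays. The index
# ranges computed below select the overlapping regions -- rows/cols [y0:y1, x0:x1] of the
# output (img, expmap) and [y2:y3, x2:x3] of the input image -- clipped at the array edges.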
if anky <= int(slit_width/2):
y0 = int(slit_width/2)-anky
y1 = int(slit_width)
y2 = 0
y3 = int(slit_width/2)+anky
else:
y0 = 0
y1 = int(slit_width*3/2)-anky
y2 = anky-int(slit_width/2)
y3 = int(slit_width)
x0 = 0
x2 = ankx-500
if ankx <= 500:
x0 = 500-ankx
x2 = 0
y23,x3 = f[3].data.shape
x1 = x3 - x2
if x1 > 2000:
x1=2000
x3=x2+2000
if chatter > 2:
print(img[y0:y1,x0:x1].shape)
print(f[3].data[y2:y3,x2:x3].shape)
print(y0,y1,y2,y3)
print(x0,x1,x2,x3)
# add to sum
tot_exposure += expo
img[y0:y1,x0:x1] += f[3].data[y2:y3,x2:x3]
expmap[y0:y1,x0:x1] += expo
img2[y0:y1,x0:x1] = f[3].data[y2:y3,x2:x3]
#quamap[y0:y1,x0:x1] += f[4].data[y2:y3,x2:x3]
if m == 0: # calculate a sensible value for the shift of the spectra
xlam = f[2].data['lambda']
qys = abs(xlam - xlam.mean()) < 0.2*xlam.mean()
yshift = f[2].data['flux'][qys].mean()
plt.figure(figno)
p1 = plt.plot(f[2].data['lambda'],(m-1)*yshift+f[2].data['flux'],)
legend.append(pha_file)
plt.legend(legend)
plt.title("images offset in flux by %10.3e"%(yshift))
plt.xlabel(r'uncorrected wavelength ($\AA$)')
plt.ylabel(r'flux + shift (erg cm-2 s-1 A-1)')
plt.figure(figno+1)
plt.plot( img2[80:120,:].sum(0) )
plt.grid()
plt.legend(legend)
plt.title('adding image: pixels summed y[80:120] to check x-alignment')
f.close()
# create file with sum extracted image
hdr = headers[0]
fsum = fits.PrimaryHDU(data=img,header=hdr)
hdulist = fits.HDUList(fsum)
hdr0 = hdulist[0].header
hdr0['EXPOSURE'] = (tot_exposure,'total exposure time')
hdr0['EXTNAME'] = 'SPECTRUMSUM'
hdr0['EXPID'] = ('989979969','completely bogus id')
for head in headers:
hist = head['history']
filetag = head['filetag']
hdulist[0].header.add_history(" copy header[1] of filetag "+filetag)
tstart = min([head['tstart'],tstart])
tstop = max([head['tstop'],tstop])
for h in hist:
hdulist[0].header.add_history(h)
for pha_file in pha_file_list:
hdulist[0].header.add_history('added file'+pha_file)
hdulist[0].header['TSTART']=tstart
hdulist[0].header['TSTOP']=tstop
exthdu = fits.ImageHDU(expmap) # add extension for the expmap
hdulist.append(exthdu)
hdulist[1].header['EXTNAME']='EXPOSUREMAP'
# quahdu = fits.ImageHDU( quahdu )
# hdulist.append(quahdu)
#hdulist[2].header['EXTNAME']='QUALITYMAP'
hdulist.writeto(sum_file_name,clobber=clobber)
hdulist.close()
print("total exposure of images = ",tot_exposure)
elif mode == 'read': # read the summed, extracted image and header
hdulist = fits.open(sum_file_name)
hdr = hdulist[0].header
exposure = hdr['exposure']
wheelpos = hdulist[0].header['wheelpos']
sumimg = hdulist[0].data
hist = hdulist[0].header['history']
if len(hdulist) > 1:
expmap = hdulist[1].data
else:
expmap = None
C_1 = list([])
C_2 = list([])
coef0 = list()
coef1 = list()
coef2 = list()
coef3 = list()
sig0coef = list()
sig1coef = list()
sig2coef = list()
sig3coef = list()
dist12 = None
C_1.append(uvotmisc.get_keyword_from_history(hist,'DISP1_0'))
C_1.append(uvotmisc.get_keyword_from_history(hist,'DISP1_1'))
C_1.append(uvotmisc.get_keyword_from_history(hist,'DISP1_2'))
C_1.append(uvotmisc.get_keyword_from_history(hist,'DISP1_3'))
C_1.append(uvotmisc.get_keyword_from_history(hist,'DISP1_4'))
C_1 = np.array(C_1,dtype=float)
C_2.append(uvotmisc.get_keyword_from_history(hist,'DISP2_0'))
C_2.append(uvotmisc.get_keyword_from_history(hist,'DISP2_1'))
C_2.append(uvotmisc.get_keyword_from_history(hist,'DISP2_2'))
C_2 = np.array(C_2,dtype=float)
dist12 = float(uvotmisc.get_keyword_from_history(hist,'DIST12'))
anchor1 = uvotmisc.get_keyword_from_history(hist,'anchor1')
anker = np.array([ float(anchor1.split(',')[0].split('(')[1]), float(anchor1.split(',')[1].split(')')[0]) ] )
coef0.append(uvotmisc.get_keyword_from_history(hist,'COEF0_0'))
coef0.append(uvotmisc.get_keyword_from_history(hist,'COEF0_1'))
coef1.append(uvotmisc.get_keyword_from_history(hist,'COEF1_0'))
coef1.append(uvotmisc.get_keyword_from_history(hist,'COEF1_1'))
coef1.append(uvotmisc.get_keyword_from_history(hist,'COEF1_2'))
coef1.append(uvotmisc.get_keyword_from_history(hist,'COEF1_3'))
coef2.append(uvotmisc.get_keyword_from_history(hist,'COEF2_0'))
coef2.append(uvotmisc.get_keyword_from_history(hist,'COEF2_1'))
coef2.append(uvotmisc.get_keyword_from_history(hist,'COEF2_2'))
coef3.append(uvotmisc.get_keyword_from_history(hist,'COEF3_0'))
coef3.append(uvotmisc.get_keyword_from_history(hist,'COEF3_1'))
coef0 = np.array(coef0,dtype=float)
coef1 = np.array(coef1,dtype=float)
coef2 = np.array(coef2,dtype=float)
coef3 = np.array(coef3,dtype=float)
sig0coef.append(uvotmisc.get_keyword_from_history(hist,'SIGCOEF0_0'))
sig0coef.append(uvotmisc.get_keyword_from_history(hist,'SIGCOEF0_1'))
sig0coef.append(uvotmisc.get_keyword_from_history(hist,'SIGCOEF0_2'))
sig1coef.append(uvotmisc.get_keyword_from_history(hist,'SIGCOEF1_0'))
sig1coef.append(uvotmisc.get_keyword_from_history(hist,'SIGCOEF1_1'))
sig1coef.append(uvotmisc.get_keyword_from_history(hist,'SIGCOEF1_2'))
sig1coef.append(uvotmisc.get_keyword_from_history(hist,'SIGCOEF1_3'))
sig2coef.append(uvotmisc.get_keyword_from_history(hist,'SIGCOEF2_0'))
sig2coef.append(uvotmisc.get_keyword_from_history(hist,'SIGCOEF2_1'))
sig2coef.append(uvotmisc.get_keyword_from_history(hist,'SIGCOEF2_2'))
sig3coef.append(uvotmisc.get_keyword_from_history(hist,'SIGCOEF3_0'))
sig3coef.append(uvotmisc.get_keyword_from_history(hist,'SIGCOEF3_1'))
sig0coef = np.array(sig0coef,dtype=float)
sig1coef = np.array(sig1coef,dtype=float)
sig2coef = np.array(sig2coef,dtype=float)
sig3coef = np.array(sig3coef,dtype=float)
if chatter > 0:
print('first order dispersion = ',C_1)
print('second order dispersion= ',C_2)
print('1-2 order distance = ',dist12)
return sumimg, expmap, exposure, wheelpos, C_1, C_2, dist12, anker, (coef0,
coef1,coef2,coef3,sig0coef,sig1coef,sig2coef,sig3coef), hdr
def sum_PHAspectra(phafiles, wave_shifts=[], exclude_wave=[],
ignore_flags=True, use_flags=['bad'],
interactive=True, outfile=None, returnout = False,
figno=[14], ylim=[-0.2e-14,5e-13],chatter=1, clobber=True):
'''Read a list of phafiles. Sum the spectra after applying optional wave_shifts.
The sum is weighted by the errors.
Parameters
----------
phafiles : list
list of filenames
wave_shifts : list
list of shifts to add to the wavelength scale; same length as phafiles
exclude_wave : list
list of lists of exclude regions; same length as pha files; one list per file
for an individual file the list element is like [[1600,1900],[2700,2750],]
ignore_flags : bool
do not automatically convert flagged sections of spectrum to exclude_wave regions
use_flags : list
list of flags (except - 'good') to exclude.
Valid keyword values for the flags are defined in quality_flags(),
interactive : bool
if False, the program will only use the given wave_shifts, and exclude_regions
outfile : str
name for output file. If "None" then write to 'sumpha.txt'
ylim : list
force limits of Y-axis figure
figno : int, or list
numbers for figures or (if only one) the start number of figures
Returns
-------
debug information when `outfile=None`.
example
-------
phafiles = ['sw00031935002ugu_1ord_1_f.pha',
'sw00031935002ugu_1ord_2_f.pha',
'sw00031935002ugu_1ord_3_f.pha',
'sw00031935002ugu_1ord_4_f.pha',]
sum_PHAspectra(phafiles)
This will interactively ask for changes to the wavelengths of one spectra compared
to one chosen as reference.
Notes
-----
Two figures are shown: one with the flux of all spectra after the shifts, and one with
a broad sum of the counts in a region that includes the spectrum, unscaled, not even
normalised by exposure.
** fails quietly for interactive=T, ignore_flags=F, exclude_wave=[],
wave_shifts=[0,0,..], use interactive=F
** not yet implemented: selection on flags using use-flags
'''
import os, sys
try:
from astropy.io import fits as pyfits
except:
import pyfits
import numpy as np
from scipy import interpolate
import pylab as plt
import copy
from uvotspec import quality_flags_to_ranges
sys.stderr.write("Notice: further development of sum_PHAspectra is now done in the uvotspec module.\n")
# first create the wave_shifts and exclude_wave lists; then call routine again to
# create output file (or if None, return result)
if outfile == None:
outfile = 'sumpha.txt'
returnout = True
nfiles = len(phafiles)
# check phafiles are all valid paths
for phafile in phafiles:
if not os.access(phafile,os.F_OK):
raise IOError("input file : %s not found \n"%(phafile))
# check wave_shifts and exclude_wave are lists
if (type(wave_shifts) != list) | (type(exclude_wave) != list):
raise IOError("parameters wave_list and exclude_wave must be a list")
if chatter > 2:
sys.stderr.write(" INPUT =============================================================================\n")
sys.stderr.write("sum_PHAspectra(\nphafiles;%s,\nwave_shifts=%s,\nexclude_wave=%s,\nignore_flags=%s\n" %(
phafiles,wave_shifts,exclude_wave,ignore_flags))
sys.stderr.write("interactive=%s, outfile=%s, \nfigno=%s, chatter=%i, clobber=%s)\n" % (
interactive,outfile,figno,chatter,clobber) )
sys.stderr.write("====================================================================================\n")
exclude_wave_copy = copy.deepcopy(exclude_wave)
if (interactive == False) & (len(wave_shifts) == nfiles) & (len(exclude_wave) == nfiles):
if chatter > 1 : print("merging spectra ")
# create the summed spectrum
result = None
# find wavelength range
wmin = 7000; wmax = 1500
f = [] # list of open fits file handles
for fx in phafiles:
f.append( pyfits.open(fx) )
for fx in f:
q = np.isfinite(fx[2].data['flux'])
wmin = np.min([wmin, np.min(fx[2].data['lambda'][q])])
import numpy as np
import scipy.sparse as spa
import scipy.sparse.linalg as sla
import utils.codegen_utils as cu
# Set numpy seed for reproducibility
np.random.seed(2)
# Test sparse matrix construction vs dense
test_sp_matrix_Adns = np.around(.6*np.random.rand(5, 6)) + np.random.randn(5,6)
test_sp_matrix_A = spa.csc_matrix(test_sp_matrix_Adns)
# Test vector operations
test_vec_ops_n = 10
test_vec_ops_v1 = np.random.randn(test_vec_ops_n)
test_vec_ops_v2 = np.random.randn(test_vec_ops_n)
test_vec_ops_sc = np.random.randn()
test_vec_ops_norm_inf = np.linalg.norm(test_vec_ops_v1, np.inf)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import argparse
import numpy as np
from astropy.io import fits
import dask
import dask.array as da
from dask.diagnostics import ProgressBar
from africanus.dft.dask import im_to_vis
from daskms import xds_from_ms, xds_from_table, xds_to_table
def create_parser():
p = argparse.ArgumentParser()
p.add_argument("ms", help="Name of MS")
p.add_argument("--fitsmodel", help="Fits file to predict from")
p.add_argument("--row_chunks", default=30000, type=int,
help="How to chunks up row dimension.")
p.add_argument("--ncpu", default=0, type=int,
help="Number of threads to use for predict")
p.add_argument("--colname", default="MODEL_DATA",
help="Name of column to write data to.")
p.add_argument('--field', default=0, type=int,
help="Field ID to predict to.")
return p
args = create_parser().parse_args()
if args.ncpu:
ncpu = args.ncpu
from multiprocessing.pool import ThreadPool
dask.config.set(pool=ThreadPool(ncpu))
else:
import multiprocessing
ncpu = multiprocessing.cpu_count()
print("Using %i threads" % ncpu)
# Get MS frequencies
spw_ds = list(xds_from_table("::".join((args.ms, "SPECTRAL_WINDOW")),
group_cols="__row__"))[0]
# Get frequencies in the measurement set
# If these do not match those in the fits
# file we need to interpolate
ms_freqs = spw_ds.CHAN_FREQ.data[0].compute()
nchan = ms_freqs.size
# load in the fits file
model = fits.getdata(args.fitsmodel)
# get header
hdr = fits.getheader(args.fitsmodel)
# TODO - check that PHASE_DIR in MS matches that in fits
# get image coordinates
if hdr['CUNIT1'] != "DEG" and hdr['CUNIT1'] != "deg":
raise ValueError("Image units must be in degrees")
npix_l = hdr['NAXIS1']
refpix_l = hdr['CRPIX1']
delta_l = hdr['CDELT1'] * np.pi/180  # assumes units are deg
l0 = hdr['CRVAL1'] * np.pi/180
l_coord = np.sort(np.arange(1 - refpix_l, 1 + npix_l - refpix_l)*delta_l)
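# l_coord holds pixel-centre offsets from the reference pixel (CRPIX1), scaled to radians;
# np.sort is applied because CDELT1 is typically negative for an RA axis, which would
# otherwise leave the coordinates in descending order.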
if hdr['CUNIT2'] != "DEG" and hdr['CUNIT2'] != "deg":
raise ValueError("Image units must be in degrees")
npix_m = hdr['NAXIS2']
refpix_m = hdr['CRPIX2']
delta_m = hdr['CDELT2'] * np.pi/180  # assumes units are deg
m0 = hdr['CRVAL2'] * np.pi/180
m_coord = np.arange(1 - refpix_m, 1 + npix_m - refpix_m)*delta_m
npix_tot = npix_l * npix_m
# get frequencies
if hdr["CTYPE4"] == 'FREQ':
nband = hdr['NAXIS4']
refpix_nu = hdr['CRPIX4']
delta_nu = hdr['CDELT4'] # assumes units are Hz
ref_freq = hdr['CRVAL4']
ncorr = hdr['NAXIS3']
freq_axis = str(4)
elif hdr["CTYPE3"] == 'FREQ':
nband = hdr['NAXIS3']
refpix_nu = hdr['CRPIX3']
delta_nu = hdr['CDELT3'] # assumes units are Hz
ref_freq = hdr['CRVAL3']
ncorr = hdr['NAXIS4']
freq_axis = str(3)
else:
raise ValueError("Freq axis must be 3rd or 4th")
freqs = ref_freq + np.arange(1 - refpix_nu, 1 + nband - refpix_nu) * delta_nu
print("Reference frequency is ", ref_freq)
# TODO - need to use convert for this
if ncorr > 1:
raise ValueError("Currently only works on a single correlation")
# if frequencies do not match we need to reprojects fits cube
if np.any(ms_freqs != freqs):
'''
Created on 11 Oct 2016
@author: <NAME>
'''
# pylint: disable=missing-docstring
import argparse
import collections
import csv
import json
import logging
import os
import re
import shutil
import sys
import zipfile
import zlib
import jinja2
import msgpack
import numpy
import pandas
import yaml
import pubtransit
LOG = logging.getLogger(__name__)
OUT_STREAM = sys.stdout
TEMPLATE_MANAGER = jinja2.Environment(
loader=jinja2.PackageLoader(pubtransit.__name__, ''))
TARGET_METHODS = {}
DEFAULT_STOPS_PER_TILE = 128
def main():
logging.basicConfig(
level=logging.WARNING, format="%(asctime)-15s | %(message)s")
parser = argparse.ArgumentParser(
description='Departures transit feed compiler.')
parser.add_argument(
'--target', type=str, nargs=1, choices=TARGET_METHODS,
default='all',
help='One between: {}'.format(', '.join(TARGET_METHODS)))
parser.add_argument(
'--build-dir', type=str, default='build',
help='Folder where to put produced data.')
parser.add_argument(
'--quiet', dest='logging_level', action='store_const',
const=logging.ERROR, default=None, help='Show only error messages.')
parser.add_argument(
'--logging-level', dest='logging_level', default=logging.WARNING,
type=int,
help='Set logging level (from {min} to {max}.'.format(
min=logging.DEBUG, max=logging.FATAL))
parser.add_argument(
'--max-stops', dest='max_stops', default=DEFAULT_STOPS_PER_TILE,
type=int, help='Set maximum number of stops-per-tile.')
parser.add_argument(
'--verbose', dest='logging_level', action='store_const',
const=logging.INFO, default=None, help='Show verbose messages.')
parser.add_argument(
'--debug', dest='logging_level', action='store_const',
const=logging.DEBUG, default=None, help='Show debug messages.')
parser.add_argument(
'files', type=str, default=['site.yml'], nargs='*',
help='Feed file to extract feed rules from.')
parser.add_argument(
'--dest', type=str, help='Destination feed file.')
args = parser.parse_args()
if args.logging_level:
# Raise logging level
logging.getLogger().setLevel(args.logging_level)
method = TARGET_METHODS[args.target[0]]
try:
for inpute_file in args.files or [None]:
method(args, inpute_file)
except Exception as error: # pylint: disable=broad-except
if args.logging_level is logging.DEBUG:
LOG.fatal("Unhandled exception.", exc_info=1)
else:
LOG.fatal(str(error) or str(type(error)))
exit(1)
except BaseException:
logging.warning('interrupted', exc_info=1)
raise
else:
logging.debug('SUCCESS')
def target_method(name):
def decorator(func):
TARGET_METHODS[name] = func
return func
return decorator
MethodParameters = collections.namedtuple(
'MethodParameters', ['site', 'feed', 'target_path'])
@target_method("version")
def print_version(args):
# pylint: disable=unused-argument
OUT_STREAM.write(pubtransit.__version__ + '\n')
@target_method("makefile")
def make_makefiles(args, site_file=None):
feeds_conf = read_yaml_file(site_file or 'site.yml')
for site in feeds_conf['feed']:
for feed in site["feeds"]:
target_path = os.path.join(
args.build_dir, site["name"], feed["name"])
if not os.path.isdir(target_path):
os.makedirs(target_path)
url = feed.get("url") or (site["url"] + '/' + feed["path"])
# pylint: disable=unused-argument,no-member
OUT_STREAM.write(target_path + ".mk ")
target_template = TEMPLATE_MANAGER.get_template("feed_item.mk")
target_make = target_template.render(
install_dir=os.path.join('$(INSTALL_DIR)', 'feed'),
build_dir=args.build_dir,
target=os.path.join(site["name"], feed["name"]),
url=url,
make_flags="--logging-level " + str(args.logging_level),
make_me='python -m pubtransit ' + ' '.join(
repr(arg) for arg in sys.argv[1:]),
script_name="pubtransit")
with open(target_path + ".mk", 'wt') as target_stream:
target_stream.write(target_make)
@target_method("datastore")
def generate_datastores(args, feed_file):
dest_dir = os.path.splitext(args.dest or args.source)[0]
if os.path.isdir(dest_dir):
shutil.rmtree(dest_dir)
os.makedirs(dest_dir)
with zipfile.ZipFile(feed_file) as zip_file:
routes = read_routes(zip_file)
generate_routes(dest_dir=dest_dir, routes=routes)
trips = read_trips(zip_file)
generate_trips(dest_dir=dest_dir, trips=trips, route_id=routes.id)
stops = read_stops(zip_file)
tiles = generate_tiled_stops(
dest_dir=dest_dir, stops=stops, max_rows=args.max_stops)
stop_times = read_stop_times(zip_file)
generate_tiled_stop_times(
dest_dir=dest_dir, stop_times=stop_times, trip_id=trips.id,
tiles=tiles)
feed_info = dict(
west=stops.west, east=stops.east, south=stops.south, north=stops.north)
store_object(feed_info, dest_dir, 'feed')
@target_method("index")
def make_index(args, feed_file=None):
feeds_conf = read_yaml_file(feed_file or 'feeds.yaml')
paths = []
west = []
east = []
south = []
north = []
for site in feeds_conf['feed']:
for feed in site["feeds"]:
target_path = os.path.join(
args.build_dir, site["name"], feed["name"])
if not os.path.isdir(target_path):
LOG.error('Target feed dir not found: %r', target_path)
os.makedirs(target_path)
with open(os.path.join(target_path, 'feed.gz')) as in_stream:
zipped = in_stream.read()
packed = zlib.decompress(zipped)
feed_info = msgpack.unpackb(packed)
paths.append(os.path.join(site["name"], feed["name"]))
west.append(feed_info['west'])
east.append(feed_info['east'])
south.append(feed_info['south'])
north.append(feed_info['north'])
store_column(paths, args.build_dir, 'index', 'path')
store_column(west, args.build_dir, 'index', 'west', float)
store_column(east, args.build_dir, 'index', 'east', float)
store_column(south, args.build_dir, 'index', 'south', float)
store_column(north, args.build_dir, 'index', 'north', float)
def generate_routes(dest_dir, routes):
store_column(routes.name, dest_dir, 'routes', 'name')
def generate_trips(dest_dir, trips, route_id):
trip_route_id = numpy.searchsorted(route_id, trips.route_id)
store_column(trip_route_id, dest_dir, 'trips', 'route_id', int)
store_column(trips.name, dest_dir, 'trips', 'name')
def generate_tiled_stops(dest_dir, stops, max_rows=None):
if not max_rows:
max_rows = len(stops.id)
max_rows = max(max_rows, 4)
tiles_tree, tiles = create_tree(
stops, index_columns=['lon', 'lat'], max_rows=max_rows)
tiles_num = len(tiles)
tiles_shape = tiles_num,
tiles_id_format = '0' + str(len(str(tiles_num)))
tiles_west = numpy.zeros(tiles_shape, dtype=float)
tiles_east = numpy.zeros(tiles_shape, dtype=float)
tiles_south = numpy.zeros(tiles_shape, dtype=float)
tiles_north = numpy.zeros(tiles_shape, dtype=float)
for i, tile in enumerate(tiles):
tile_dir = os.path.join(dest_dir, format(i, tiles_id_format))
store_column(tile.name, tile_dir, 'stops', 'name')
store_column(tile.lon, tile_dir, 'stops', 'lon', float)
store_column(tile.lat, tile_dir, 'stops', 'lat', float)
tiles_west[i] = tile.west
tiles_east[i] = tile.east
tiles_south[i] = tile.south
tiles_north[i] = tile.north
store_object(tiles_tree, os.path.join(dest_dir, 'tiles'), 'tree')
store_column(tiles_west, dest_dir, 'tiles', 'west')
store_column(tiles_east, dest_dir, 'tiles', 'east')
store_column(tiles_south, dest_dir, 'tiles', 'south')
store_column(tiles_north, dest_dir, 'tiles', 'north')
return tiles
def generate_tiled_stop_times(dest_dir, stop_times, trip_id, tiles):
# pylint: disable=too-many-locals
tiles_num = len(tiles)
tiles_id_format = '0' + str(len(str(tiles_num)))
stop_times = stop_times.sort_by('stop_id')
    trip_id_sorter = numpy.argsort(trip_id)
import igraph
import numpy as np
import pandas as pd
import geopandas
from shapely.geometry import LineString
from skimage.graph import MCP_Geometric, MCP
from skimage import graph
from pyproj import Transformer
from scipy import stats
def cost_tobler_hiking_function(S,symmetric=True):
"""
Applies Tobler's Hiking Function to slope data supplied in DEGREES.
From Tobler. 1993. Three Presentation on Geographical Analysis and Modeling.
Simple Example:
C = lcp.cost_tobler_hiking_function(S,symmetric=True)
Parameters:
- 'S' is an array (any dimension) of slope values in DEGREES.
    - 'symmetric' flags whether to consider slope values symmetrically. Note that this
is NOT the same as just taking the positive values. This returns an average
of the positive and negative value for the given slope.
Returns:
- 'C' a cost surface of velocity in km/hr
"""
# Convert to dz/dx
S = np.tan(np.deg2rad(S))
V = 6 * np.exp(-3.5 * np.abs(S + .05))
if symmetric:
V2 = 6 * np.exp(-3.5 * np.abs(-S + .05))
V = (V + V2) / 2
return 1 / V
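# Hedged usage sketch (added for illustration; not part of the original module):
# the returned cost is 1/velocity in hours per km, so flat ground should come out
# near 1/5 (Tobler's ~5 km/hr walking speed). The slope values below are arbitrary.
def _example_cost_tobler_hiking_function():
    S = np.array([0.0, 5.0, 10.0, 20.0])  # slope in degrees
    return cost_tobler_hiking_function(S, symmetric=True)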
def cost_rademaker(S,weight=50,pack_weight=0,terrain_coefficient=1.1,velocity=1.2):
"""
Applies Rademaker et al's model (2012) to slope values for LCP calculation.
Simple Example:
C = lcp.cost_rademaker(S,weight=50,pack_weight=0,terrain_coefficient=1.1,velocity=1.2)
Parameters:
- 'S' is an array (any dimension) of slope values in DEGREES.
- 'weight' is weight of traveler is given in kg
- 'pack_weight' is cargo weight, given in kg
- 'terrain_coefficient' is a value to introduce "friction". Values greater than
one have more than 'average' friction.
- 'velocity' is mean walking speed in meters per second
Returns:
- 'C' a cost surface of shape S.
"""
# Rademaker assumes a grade in percent (0 to 100, rather than 0 to 1):
G = 100 * np.arctan(np.deg2rad(S))
W = weight
L = pack_weight
tc = terrain_coefficient
V = velocity
# Cost, in MWatts
MW = 1.5*W + 2.0 * (W + L) * ((L/W)**2) + tc * (W+L) * (1.5 * V**2 + .35 * V * G)
return MW
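# Hedged usage sketch (added for illustration; not part of the original module):
# energetic cost for a 70 kg walker carrying a 10 kg pack at 1.2 m/s over a small
# range of slopes. All parameter values here are illustrative, not recommendations.
def _example_cost_rademaker():
    S = np.array([-10.0, 0.0, 10.0])  # slope in degrees
    return cost_rademaker(S, weight=70, pack_weight=10,
                          terrain_coefficient=1.1, velocity=1.2)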
def cost_pingel_exponential(S,scale_factor=9.25):
"""
Applies the exponental LCP cost function described by Pingel (2010).
Simple Example:
C = lcp.cost_pingel_exponential(S,scale_factor=9.25)
Parameters:
- 'S' is an array (any dimension) of slope values in DEGREES.
- 'scale_factor' is a value in degrees that generally corresponds to the mean slope
(in degrees) of a path network. Larger values represent a larger tolerance for
steeper slopes. Smaller values will cause an LCP to avoid steeper slopes.
"""
EXP = stats.expon.pdf(0,0,scale_factor) / stats.expon.pdf(S,0,scale_factor)
return EXP
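# Hedged usage sketch (added for illustration; not part of the original module):
# with the default scale factor the cost is 1.0 on flat ground and grows
# exponentially with slope, so steeper cells are increasingly avoided by an LCP.
def _example_cost_pingel_exponential():
    S = np.array([0.0, 9.25, 30.0])  # slope in degrees
    return cost_pingel_exponential(S, scale_factor=9.25)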
def ve(S,ve=2.3):
"""
Applies a vertical exaggeration to a slope raster and returns it. Slope raster must be in DEGREES.
Simple Example:
S_ve = lcp.ve(S,2.3)
"""
S = np.tan(np.deg2rad(S))
S = np.rad2deg(np.arctan(ve * S))
return S
def get_lists(nodes,edges):
"""
Simple Example:
start_list, end_list, ids, start_coords, end_coords = lcp.get_lists(nodes, edges)
Internal method to transform nodes and edges into lists of start coords and lists of lists of end coords.
Returns: start_list, end_list, ids, start_coords, end_coords
"""
nodes['coords'] = list(zip(nodes.iloc[:,0], nodes.iloc[:,1]))
start_list = edges.iloc[:,0].unique()
end_list = [edges.iloc[:,1].loc[edges.iloc[:,0]==item].values for item in start_list]
start_coords = []
end_coords = []
ids = []
for i, this_start in enumerate(start_list):
these_ends = end_list[i]
these_ids = [this_start + '_to_' + te for te in these_ends]
these_start_coords = nodes.loc[this_start,'coords']
these_end_coords = nodes.loc[these_ends,'coords'].values
start_coords.append(these_start_coords)
end_coords.append(these_end_coords)
ids.append(these_ids)
return start_list, end_list, ids, start_coords, end_coords
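# Hedged usage sketch (added for illustration; not part of the original module):
# get_lists() looks nodes up by label, so this sketch puts the node IDs in the
# DataFrame index with x (lon) in the first column and y (lat) in the second;
# edges pair source and target IDs. The IDs and coordinates are made up.
def _example_get_lists():
    nodes = pd.DataFrame({'lon': [-88.9, -88.8, -88.7],
                          'lat': [40.1, 40.2, 40.3]},
                         index=['A', 'B', 'C'])
    edges = pd.DataFrame({'source': ['A', 'A'], 'target': ['B', 'C']})
    start_list, end_list, ids, start_coords, end_coords = get_lists(nodes, edges)
    return ids  # e.g. [['A_to_B', 'A_to_C']]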
def direct_routes(nodes,edges):
"""
Returns a straight-line path between edges.
Simple Example:
gdf = lcp.direct_routes(nodes, edges)
Parameters:
- 'nodes' is a Pandas DataFrame where the first column is a unique ID, the second is
an x coordinate (e.g., longitude) and the third is a y coordinate (e.g.,
latitude).
    - 'edges' is a Pandas DataFrame where the first column is a source ID (matching a node)
               and the second column is a destination. At the moment, we assume no
               directionality / edges are symmetric.
Output:
- 'gdf' is a GeoPandas GeoDataFrame with fields 'ids' describing the source and target
, 'label' corresponding to the label, and a geometry field containing the
path in shapely / WKT format.
"""
start_list, end_list, ids, start_coords, end_coords = get_lists(nodes,edges)
gdf = pd.DataFrame()
for i,this_start in enumerate(start_coords):
df = pd.DataFrame()
these_end_coords = end_coords[i]
df['ids'] = ids[i]
df['label'] = 'direct'
df['geometry'] = [LineString([this_start,this_end]) for this_end in these_end_coords]
gdf = gdf.append(df,ignore_index=True)
gdf = geopandas.GeoDataFrame(gdf,geometry=gdf['geometry'],crs=4326)
return gdf
def lcp_coordinate_conversion(start_coords,end_coords,crs,transform):
"""
Simple Example:
        converted_start_coords, converted_end_coords = lcp.lcp_coordinate_conversion(start_coords, end_coords, crs, transform)
Parameters:
- 'start_coords' is a list of tuples (lon,lat)
- 'end_coords' is a list of lists of tuples. Each list of end points corresponds to
a start point, so len(start_coords) must equal len(end_coords), although each
list OF end points can be of any length one or greater.
- 'crs' is a Coordinate Reference System of the type returned by rasterio (or neilpy).
- 'transform' is an Affine transformation matrix as returned by rasterio (or neilpy).
Output:
- 'converted_start_coords' is a list of tuples of PIXEL coordinates.
    - 'converted_end_coords' is a list of lists of tuples of pixel coordinates.
"""
converted_start_coords = []
converted_end_coords = []
for i,this_start_coord in enumerate(start_coords):
these_end_coords = end_coords[i]
# Convert from lat/lon to map coordinates
this_start_coord = coord_transform(*this_start_coord,4326,crs)
these_end_coords = [coord_transform(*item,4326,crs) for item in these_end_coords]
# Convert from map coordinates to pixel coordinates
this_start_coord = (~transform*this_start_coord)[::-1]
these_end_coords = [(~transform*item)[::-1] for item in these_end_coords]
# Round them to ints
this_start_coord = tuple(np.round(this_start_coord).astype(np.uint32))
these_end_coords = [tuple(item) for item in np.round(these_end_coords).astype(np.uint32)]
converted_start_coords.append(this_start_coord)
converted_end_coords.append(these_end_coords)
return converted_start_coords, converted_end_coords
def get_areal_routes(nodes,edges,surface,meta,label='areal'):
"""
Simple Example:
gdf = lcp.get_areal_routes(nodes, edges, array, meta, label)
Parameters:
- 'nodes' is a Pandas DataFrame where the first column is a unique ID, the second is
an x coordinate (e.g., longitude) and the third is a y coordinate (e.g.,
latitude).
    - 'edges' is a Pandas DataFrame where the first column is a source ID (matching a node)
and the second column is a destination. At the moment, we assume no
directionality / edges are symmetric.
- 'array' is a numpy array representing the cost surface.
- 'meta' is a dictionary, that must contain 'crs' and 'transform' items corresponding
to those returned by rasterio. neilpy.imread returns such a dictionary
by default.
- 'label' is used to identify the type of cost path/surface in the GeoDataFrame output
rows.
Output:
- 'gdf' is a GeoPandas GeoDataFrame with fields 'ids' describing the source and target
, 'label' corresponding to the label, and a geometry field containing the
path in shapely / WKT format.
"""
gdf = pd.DataFrame()
print('Creating surface network for',label)
m = MCP_Geometric(surface,fully_connected=True)
print('Done creating surface network.')
start_list, end_list, ids, start_coords, end_coords = get_lists(nodes,edges)
conv_start_coords, conv_end_coords = lcp_coordinate_conversion(start_coords,end_coords,meta['crs'],meta['transform'])
for i,this_start_coord in enumerate(conv_start_coords):
these_end_coords = conv_end_coords[i]
print('Calculating costs and routes.')
costs, traceback_array = m.find_costs([this_start_coord],these_end_coords,find_all_ends=True)
print('Done calculating costs and routes.')
# Pull routes and convert
routes = [m.traceback(this_end_coord) for this_end_coord in these_end_coords]
geometries= [LineString(np.vstack(meta['transform']*np.fliplr(route).T).T) for route in routes]
df = pd.DataFrame()
df['ids'] = ids[i]
df['label'] = label
df['geometry'] = geometries
gdf = gdf.append(df,ignore_index=True)
gdf = geopandas.GeoDataFrame(gdf,geometry=gdf['geometry'],crs=meta['crs'])
return gdf
def create_raster_network(X):
"""
Simple Example:
network = lcp.create_raster_network(array)
Parameters:
- 'array' is a numpy array representing the cost surface.
Output:
- 'network' is a Pandas DataFrame with fields 'source' and 'target' representing 1D
(flattened) indices, source_value and target_value for pixel data, 'distance'
which is the pixel distance (1 for orthogonal, 2**.5 for diagonal). These
should be used directly by the operator to calculate a 'weight' field
before passing to lcp.get_linear_routes()
"""
m,n = np.shape(X)
I = np.reshape(np.arange(np.size(X),dtype=np.int32),np.shape(X))
df = pd.DataFrame()
df['source'] = np.hstack((I[1:,1:].flatten(),
I[1:,:].flatten(),
I[1:,:-1].flatten(),
I[:,:-1].flatten()))
df['target'] = np.hstack((ashift(I,0)[1:,1:].flatten(),
ashift(I,1)[1:,:].flatten(),
ashift(I,2)[1:,:-1].flatten(),
ashift(I,3)[:,:-1].flatten()))
df['source_value'] = X.flatten()[df['source'].values]
df['target_value'] = X.flatten()[df['target'].values]
df['distance'] = np.hstack((2**.5*np.ones((m-1)*(n-1)),
np.ones(n*(m-1)),
2**.5*np.ones((m-1)*(n-1)),
np.ones(m*(n-1))))
return df
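# Hedged usage sketch (added for illustration; not part of the original module):
# builds the pixel adjacency table for a tiny synthetic surface and derives a
# slope-like 'weight' column, which get_linear_routes() requires. The weighting
# scheme here is only an example, not a prescribed cost model.
def _example_create_raster_network():
    Z = np.arange(16, dtype=float).reshape(4, 4)  # toy cost/elevation surface
    network = create_raster_network(Z)
    network['weight'] = np.abs(network['source_value'] -
                               network['target_value']) / network['distance']
    return network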
def get_linear_routes(nodes,edges,df,meta,label='linear'):
"""
Simple Example:
network = lcp.create_raster_network(array)
network['weight'] = np.abs(network['source_value'] - network['target_value']) / network['distance']
gdf = lcp.get_linear_routes(nodes, edges, network, meta, label)
Parameters:
- 'nodes' is a Pandas DataFrame where the first column is a unique ID, the second is
an x coordinate (e.g., longitude) and the third is a y coordinate (e.g.,
latitude).
    - 'edges' is a Pandas DataFrame where the first column is a source ID (matching a node)
and the second column is a destination. At the moment, we assume no
directionality / edges are symmetric.
- 'network' is a Pandas DataFrame created by lcp.create_raster_network(). It MUST
include a column called 'weight'.
- 'meta' is a dictionary, that must contain 'crs' and 'transform' items corresponding
to those returned by rasterio. It must also contain 'height' and
'width' items. neilpy.imread returns such a dictionary by default.
- 'label' is used to identify the type of cost path/surface in the GeoDataFrame output
rows.
Output:
- 'gdf' is a GeoPandas GeoDataFrame with fields 'ids' describing the source and target
, 'label' corresponding to the label, and a geometry field containing the
path in shapely / WKT format.
"""
img_dim = (meta['height'],meta['width'])
G = igraph.Graph()
G.add_vertices(img_dim[0] * img_dim[1])
G.add_edges(list(zip(df.source,df.target)),attributes={'weight':df.weight})
del df
gdf = pd.DataFrame()
start_list, end_list, ids, start_coords, end_coords = get_lists(nodes,edges)
conv_start_coords, conv_end_coords = lcp_coordinate_conversion(start_coords,end_coords,meta['crs'],meta['transform'])
for i,this_start_coord in enumerate(conv_start_coords):
these_end_coords = conv_end_coords[i]
flat_start = np.ravel_multi_index(this_start_coord,img_dim)
flat_ends = [np.ravel_multi_index(item,img_dim) for item in these_end_coords]
routes = G.get_shortest_paths(flat_start,flat_ends,weights='weight')
routes2 = [np.flipud(np.vstack(np.unravel_index(route,img_dim))) for route in routes]
        geometries = [LineString(np.vstack(meta['transform']*route2).T) for route2 in routes2]
# -*- coding: utf-8 -*-
"""
<NAME>
Computational Biologist
Target Sciences
GSK
<EMAIL>
"""
import numpy as np
import statsmodels.sandbox.stats.multicomp as multicomp
from scipy import stats
def mad(X, axis, keepdims=False):
return 1.4826*np.median(np.abs(X - np.median(X, axis=axis, keepdims=True)), axis=axis, keepdims=keepdims)
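# Hedged usage sketch (added for illustration; not part of the original module):
# the 1.4826 factor rescales the median absolute deviation so that, for normally
# distributed data, it estimates the standard deviation (close to 2.0 here).
def _example_mad():
    rng = np.random.RandomState(0)
    X = rng.normal(loc=0.0, scale=2.0, size=(10000, 3))
    return mad(X, axis=0)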
def corr_pearson(X, Y=np.zeros(0), axis=0, getpvalues=False):
if axis == 0:
X = X.T
elif axis == 1:
pass
else:
raise ValueError('invalid axis')
X = X - X.mean(axis=0)
X = X/np.linalg.norm(X, ord=2, axis=0) # same as X = X/np.sqrt((X**2).sum(axis=0))
n = X.shape[0]
if Y.size == 0:
r = (X.T).dot(X)
else:
if axis == 0:
Y = Y.T
Y = Y - Y.mean(axis=0)
Y = Y/np.linalg.norm(Y, ord=2, axis=0)
r = (X.T).dot(Y)
if getpvalues:
p = 2*stats.norm.cdf(-np.abs((np.log((np.float64(1) + r)/(np.float64(1) - r))/np.float64(2))*np.sqrt(np.float64(n) - np.float64(3))))
return r, p
else:
return r
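# Hedged usage sketch (added for illustration; not part of the original module):
# with axis=0 each row of X is treated as a variable and each column as an
# observation; correlating X against a noisy copy of its first two rows gives a
# 4x2 matrix of correlations, plus Fisher-transform based p-values.
def _example_corr_pearson():
    rng = np.random.RandomState(0)
    X = rng.normal(size=(4, 200))                 # 4 variables, 200 observations
    Y = X[:2] + 0.1*rng.normal(size=(2, 200))     # 2 variables correlated with X
    r, p = corr_pearson(X, Y, axis=0, getpvalues=True)
    return r, p  # r has shape (4, 2)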
def corr_spearman(X, Y=np.zeros(0), axis=0, getpvalues=False):
if axis == 0:
X = X.T
elif axis == 1:
pass
else:
raise ValueError('invalid axis')
X = np.apply_along_axis(stats.rankdata, 0, X, method='average')
if Y.size > 0:
if axis == 0:
Y = Y.T
Y = np.apply_along_axis(stats.rankdata, 0, Y, method='average')
return corr_pearson(X, Y, axis=1, getpvalues=getpvalues)
def corr_cosine(X, Y=np.zeros(0), axis=0, getpvalues=False):
if axis == 0:
X = X.T
elif axis == 1:
pass
else:
raise ValueError('invalid axis')
    X = X/np.linalg.norm(X, ord=2, axis=0)
import os
import subprocess
import tempfile
from typing import Hashable, Sequence
import click
import numpy as np
import xarray as xr
from .gsutil import authenticate, download_directory, cp
MOSAIC_FILES_URL_DEFAULT = (
"gs://vcm-ml-raw/2020-11-12-gridspec-orography-and-mosaic-data"
)
class FregridLatLon:
def __init__(
self,
resolution: str,
nlat: int,
nlon: int,
mosaic_files_url: str = MOSAIC_FILES_URL_DEFAULT,
):
"""Cubed-sphere to lat-lon interpolation using the command-line fregrid tool.
Note:
Input mosaic file is downloaded from GCS and remapping coefficients are
computed during object initialization.
Args:
resolution: one of "C48", "C96" or "C384".
nlat: length of target latitude dimension.
nlon: length of target longitude dimension.
mosaic_files_url: (optional) local or remote directory containing mosaic
files. Defaults to 'gs://vcm-ml-raw/2020-11-12-gridspec-orography-and-
mosaic-data'.
"""
self.resolution = resolution
self.nlat = nlat
self.nlon = nlon
mosaic_files_url_for_resolution = os.path.join(mosaic_files_url, resolution)
# download mosaic and generate remapping file for future interpolation
with tempfile.TemporaryDirectory() as tmpdir:
mosaic_dir = os.path.join(tmpdir, "mosaic")
mosaic_grid_spec_path = os.path.join(mosaic_dir, "grid_spec.nc")
remap_file_path = os.path.join(tmpdir, "remap.nc")
download_directory(mosaic_files_url_for_resolution, mosaic_dir)
args = self._get_initialize_args(mosaic_grid_spec_path, remap_file_path)
subprocess.check_call(["fregrid"] + args)
self.mosaic = {
os.path.basename(path): xr.open_dataset(path).load()
for path in self._get_mosaic_paths(mosaic_dir)
}
self.remap = xr.open_dataset(remap_file_path).load()
def regrid_scalar(
self,
ds: xr.Dataset,
x_dim: str = "x",
y_dim: str = "y",
lon_dim: str = "longitude",
lat_dim: str = "latitude",
fields: Sequence[Hashable] = (),
) -> xr.Dataset:
"""Regrid scalar variables in dataset from cubed-sphere grid to lat-lon grid.
Note:
Saves dataset to disk and uses command-line fregrid to do regridding.
Args:
ds: dataset to be regridded. Must have 'tile' dimension.
x_dim (optional): name of x-dimension. Defaults to 'x'.
y_dim (optional): name of y-dimension. Defaults to 'y'.
lon_dim (optional): name of output longitude dim. Defaults to 'longitude'.
lat_dim (optional): name of output latitude dim. Defaults to 'latitude'.
fields (optional): sequence of variable names to regrid. Defaults to
all variables in ds whose dimensions include x_dim, y_dim and 'tile'
Returns:
Dataset on a lat-lon grid. Dimension names are 'longitude' and 'latitude'.
"""
if not fields:
fields = [
v for v in ds.data_vars if {x_dim, y_dim, "tile"} <= set(ds[v].dims)
]
with tempfile.TemporaryDirectory() as tmpdir:
input_prefix = os.path.join(tmpdir, "input_data")
remap_file_path = os.path.join(tmpdir, "remap.nc")
output_file_path = os.path.join(tmpdir, "regridded_data.nc")
mosaic_grid_spec_path = os.path.join(tmpdir, "grid_spec.nc")
args = self._get_regrid_args(
mosaic_grid_spec_path,
remap_file_path,
input_prefix,
output_file_path,
fields,
)
ds = self._standardize_dataset_for_fregrid(ds, x_dim, y_dim)
for filename, mosaic_file in self.mosaic.items():
path = os.path.join(tmpdir, filename)
mosaic_file.to_netcdf(path)
self.remap.to_netcdf(remap_file_path)
self._write_dataset_to_tiles(ds, input_prefix)
subprocess.check_call(["fregrid"] + args)
ds_latlon = xr.open_dataset(output_file_path)
return ds_latlon.rename(
{
x_dim: lon_dim,
y_dim: lat_dim,
f"{x_dim}_bnds": f"{lon_dim}_bnds",
f"{y_dim}_bnds": f"{lat_dim}_bnds",
}
)
@staticmethod
def _standardize_dataset_for_fregrid(ds, x_dim, y_dim):
required_attrs = {
x_dim: {"cartesian_axis": "X"},
y_dim: {"cartesian_axis": "Y"},
}
for dim, attrs in required_attrs.items():
if dim in ds.dims:
                ds = ds.assign_coords({dim: np.arange(1.0, ds.sizes[dim] + 1)})
# -*- coding: utf-8 -*-
# Copyright (c) 2015-2016 MIT Probabilistic Computing Project
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
from math import isnan
import numpy as np
from cgpm.cgpm import CGpm
from cgpm.mixtures.dim import Dim
from cgpm.network.importance import ImportanceNetwork
from cgpm.utils import config as cu
from cgpm.utils import general as gu
from cgpm.utils.config import cctype_class
from cgpm.utils.general import merged
class View(CGpm):
"""CGpm represnting a multivariate Dirichlet process mixture of CGpms."""
def __init__(
self, X, outputs=None, inputs=None, alpha=None,
cctypes=None, distargs=None, hypers=None, Zr=None, rng=None):
"""View constructor provides a convenience method for bulk incorporate
and unincorporate by specifying the data and optional row partition.
Parameters
----------
X : dict{int:list}
Dataset, where the cell `X[outputs[i]][rowid]` contains the value
            for column outputs[i] and row index `rowid`. All rows are
incorporated by default.
outputs : list<int>
List of output variables. The first item is mandatory, corresponding
to the token of the exposed cluster. outputs[1:] are the observable
output variables.
inputs : list<int>
Currently disabled.
alpha : float, optional.
Concentration parameter for row CRP.
cctypes : list<str>, optional.
A `len(outputs[1:])` list of cctypes, see `utils.config` for names.
distargs : list<str>, optional.
A `len(outputs[1:])` list of distargs.
hypers : list<dict>, optional.
A `len(outputs[1:])` list of hyperparameters.
Zr : list<int>, optional.
Row partition, where `Zr[rowid]` is the cluster identity of rowid.
rng : np.random.RandomState, optional.
Source of entropy.
"""
# -- Seed --------------------------------------------------------------
self.rng = gu.gen_rng() if rng is None else rng
# -- Inputs ------------------------------------------------------------
if inputs:
raise ValueError('View does not accept inputs.')
self.inputs = []
# -- Dataset -----------------------------------------------------------
self.X = X
# -- Outputs -----------------------------------------------------------
if len(outputs) < 1:
raise ValueError('View needs at least one output.')
if len(outputs) > 1:
if not distargs:
distargs = [None] * len(cctypes)
if not hypers:
hypers = [None] * len(cctypes)
assert len(outputs[1:])==len(cctypes)
assert len(distargs) == len(cctypes)
assert len(hypers) == len(cctypes)
self.outputs = list(outputs)
# -- Row CRP -----------------------------------------------------------
self.crp = Dim(
outputs=[self.outputs[0]],
inputs=[-1],
cctype='crp',
hypers=None if alpha is None else {'alpha': alpha},
rng=self.rng
)
n_rows = len(self.X[self.X.keys()[0]])
self.crp.transition_hyper_grids([1]*n_rows)
if Zr is None:
for i in xrange(n_rows):
s = self.crp.simulate(i, [self.outputs[0]], None, {-1:0})
self.crp.incorporate(i, s, {-1:0})
else:
for i, z in enumerate(Zr):
self.crp.incorporate(i, {self.outputs[0]: z}, {-1:0})
# -- Dimensions --------------------------------------------------------
self.dims = dict()
for i, c in enumerate(self.outputs[1:]):
# Prepare inputs for dim, if necessary.
dim_inputs = []
if distargs[i] is not None and 'inputs' in distargs[i]:
dim_inputs = distargs[i]['inputs']['indexes']
dim_inputs = [self.outputs[0]] + dim_inputs
# Construct the Dim.
dim = Dim(
outputs=[c],
inputs=dim_inputs,
cctype=cctypes[i],
hypers=hypers[i],
distargs=distargs[i],
rng=self.rng
)
dim.transition_hyper_grids(self.X[c])
self.incorporate_dim(dim)
# -- Validation --------------------------------------------------------
self._check_partitions()
# --------------------------------------------------------------------------
# Observe
def incorporate_dim(self, dim, reassign=True):
"""Incorporate dim into View. If not reassign, partition should match."""
dim.inputs[0] = self.outputs[0]
if reassign:
self._bulk_incorporate(dim)
self.dims[dim.index] = dim
self.outputs = self.outputs[:1] + self.dims.keys()
return dim.logpdf_score()
def unincorporate_dim(self, dim):
"""Remove dim from this View (does not modify)."""
del self.dims[dim.index]
self.outputs = self.outputs[:1] + self.dims.keys()
return dim.logpdf_score()
def incorporate(self, rowid, observation, inputs=None):
"""Incorporate an observation into the View.
Parameters
----------
rowid : int
Fresh, non-negative rowid.
observation : dict{output:val}
Keys of the observation must exactly be the output (Github #89).
Optionally, use {self.outputs[0]: k} to specify the latent cluster
assignment of rowid. The cluster is an observation variable since
View has a generative model for k, unlike Dim which requires k as
inputs.
"""
k = observation.get(self.outputs[0], 0)
self.crp.incorporate(rowid, {self.outputs[0]: k}, {-1: 0})
for d in self.dims:
self.dims[d].incorporate(
rowid,
observation={d: observation[d]},
inputs=self._get_input_values(rowid, self.dims[d], k))
# If the user did not specify a cluster assignment, sample one.
if self.outputs[0] not in observation:
self.transition_rows(rows=[rowid])
def unincorporate(self, rowid):
# Unincorporate from dims.
for dim in self.dims.itervalues():
dim.unincorporate(rowid)
# Account.
k = self.Zr(rowid)
self.crp.unincorporate(rowid)
if k not in self.Nk():
for dim in self.dims.itervalues():
del dim.clusters[k] # XXX Abstract me!
# XXX Major hack to force values of NaN cells in incorporated rowids.
def force_cell(self, rowid, observation):
k = self.Zr(rowid)
for d in observation:
self.dims[d].unincorporate(rowid)
inputs = self._get_input_values(rowid, self.dims[d], k)
self.dims[d].incorporate(rowid, {d: observation[d]}, inputs)
# --------------------------------------------------------------------------
# Update schema.
def update_cctype(self, col, cctype, distargs=None):
"""Update the distribution type of self.dims[col] to cctype."""
if distargs is None:
distargs = {}
distargs_dim = dict(distargs)
inputs = []
# XXX Horrid hack.
if cctype_class(cctype).is_conditional():
inputs = distargs_dim.get('inputs', [
d for d in sorted(self.dims)
if d != col and not self.dims[d].is_conditional()
])
if len(self.dims) == 0 or len(inputs) == 0:
raise ValueError('No inputs for conditional dimension.')
distargs_dim['inputs'] = {
'indexes' : inputs,
'stattypes': [self.dims[i].cctype for i in inputs],
'statargs': [self.dims[i].get_distargs() for i in inputs]
}
D_old = self.dims[col]
D_new = Dim(
outputs=[col], inputs=[self.outputs[0]]+inputs,
cctype=cctype, distargs=distargs_dim, rng=self.rng)
self.unincorporate_dim(D_old)
self.incorporate_dim(D_new)
# --------------------------------------------------------------------------
# Inference
def transition(self, N):
for _ in xrange(N):
self.transition_rows()
self.transition_crp_alpha()
self.transition_dim_hypers()
def transition_crp_alpha(self):
self.crp.transition_hypers()
self.crp.transition_hypers()
def transition_dim_hypers(self, cols=None):
if cols is None:
cols = self.dims.keys()
for c in cols:
self.dims[c].transition_hypers()
def transition_dim_grids(self, cols=None):
if cols is None:
cols = self.dims.keys()
for c in cols:
self.dims[c].transition_hyper_grids(self.X[c])
def transition_rows(self, rows=None):
if rows is None:
rows = self.Zr().keys()
rows = self.rng.permutation(rows)
for rowid in rows:
self._gibbs_transition_row(rowid)
# --------------------------------------------------------------------------
# logscore.
def logpdf_likelihood(self):
"""Compute the logpdf of the observations only."""
logp_dims = [dim.logpdf_score() for dim in self.dims.itervalues()]
return sum(logp_dims)
def logpdf_prior(self):
logp_crp = self.crp.logpdf_score()
return logp_crp
def logpdf_score(self):
"""Compute the marginal logpdf CRP assignment and data."""
lp_prior = self.logpdf_prior()
lp_likelihood = self.logpdf_likelihood()
return lp_prior + lp_likelihood
# --------------------------------------------------------------------------
# logpdf
def logpdf(self, rowid, targets, constraints=None, inputs=None):
# As discussed in https://github.com/probcomp/cgpm/issues/116 for an
        # observed rowid, we synthesize a new hypothetical row which is
# identical (in terms of observed and latent values) to the observed
# rowid. In this version of the implementation, the user may not
# override any non-null values in the observed rowid
# (_populate_constraints returns an error in this case). A user should
# either (i) use another rowid, since overriding existing values in the
# observed rowid no longer specifies that rowid, or (ii) use some
        # sequence of incorporate/unincorporate depending on their query.
constraints = self._populate_constraints(rowid, targets, constraints)
if not self.hypothetical(rowid):
rowid = None
# Prepare the importance network.
network = self.build_network()
if self.outputs[0] in constraints:
# Condition on the cluster assignment.
# p(xT|xC,z=k) computed directly by network.
return network.logpdf(rowid, targets, constraints, inputs)
elif self.outputs[0] in targets:
# Query the cluster assignment.
# p(z=k,xT|xC)
# = p(z=k,xT,xC) / p(xC) Bayes rule
# = p(z=k)p(xT,xC|z=k) / p(xC) chain rule on numerator
# The terms are then:
# p(z=k) lp_cluster
# p(xT,xC|z=k) lp_numer
# p(xC) lp_denom
k = targets[self.outputs[0]]
constraints_z = {self.outputs[0]: k}
targets_nz = {c: targets[c] for c in targets if c != self.outputs[0]}
targets_numer = merged(targets_nz, constraints)
lp_cluster = network.logpdf(rowid, constraints_z, inputs)
lp_numer = \
network.logpdf(rowid, targets_numer, constraints_z, inputs) \
if targets_numer else 0
lp_denom = self.logpdf(rowid, constraints) if constraints else 0
return (lp_cluster + lp_numer) - lp_denom
else:
# Marginalize over cluster assignment by enumeration.
# Let K be a list of values for the support of z:
# P(xT|xC)
# = \sum_k p(xT|z=k,xC)p(z=k|xC) marginalization
# Now consider p(z=k|xC) \propto p(z=k,xC) Bayes rule
# p(z=K[i],xC) lp_constraints_unorm[i]
# p(z=K[i]|xC) lp_constraints[i]
# p(xT|z=K[i],xC) lp_targets[i]
K = self.crp.clusters[0].gibbs_tables(-1)
constraints = [merged(constraints, {self.outputs[0]: k}) for k in K]
lp_constraints_unorm = [network.logpdf(rowid, const, None, inputs)
for const in constraints]
lp_constraints = gu.log_normalize(lp_constraints_unorm)
lp_targets = [network.logpdf(rowid, targets, const, inputs)
for const in constraints]
return gu.logsumexp(np.add(lp_constraints, lp_targets))
# --------------------------------------------------------------------------
# simulate
def simulate(self, rowid, targets, constraints=None, inputs=None, N=None):
# Refer to comment in logpdf.
constraints = self._populate_constraints(rowid, targets, constraints)
if not self.hypothetical(rowid):
rowid = None
network = self.build_network()
# Condition on the cluster assignment.
if self.outputs[0] in constraints:
return network.simulate(rowid, targets, constraints, inputs, N)
# Determine how many samples to return.
unwrap_result = N is None
if unwrap_result:
N = 1
# Expose cluster assignments to the samples?
exposed = self.outputs[0] in targets
if exposed:
targets = [q for q in targets if q != self.outputs[0]]
# Weight clusters by probability of constraints in each cluster.
K = self.crp.clusters[0].gibbs_tables(-1)
constr2 = [merged(constraints, {self.outputs[0]: k}) for k in K]
lp_constraints_unorm = [network.logpdf(rowid, ev) for ev in constr2]
# Find number of samples in each cluster.
Ks = gu.log_pflip(lp_constraints_unorm, array=K, size=N, rng=self.rng)
counts = {k:n for k, n in enumerate(np.bincount(Ks)) if n > 0}
# Add the cluster assignment to the constraints and sample the rest.
constr3 = {k: merged(constraints, {self.outputs[0]: k}) for k in counts}
samples = [network.simulate(rowid, targets, constr3[k], inputs, counts[k])
for k in counts]
# If cluster assignments are exposed, append them to the samples.
if exposed:
samples = [[merged(l, {self.outputs[0]: k}) for l in s]
for s, k in zip(samples, counts)]
# Return 1 sample if N is None, otherwise a list.
result = list(itertools.chain.from_iterable(samples))
return result[0] if unwrap_result else result
# --------------------------------------------------------------------------
# Internal simulate/logpdf helpers
def relevance_probability(self, rowid_target, rowid_query, col):
"""Compute probability of rows in same cluster."""
if col not in self.outputs:
raise ValueError('Unknown column: %s' % (col,))
from relevance import relevance_probability
return relevance_probability(self, rowid_target, rowid_query)
# --------------------------------------------------------------------------
# Internal simulate/logpdf helpers
def build_network(self):
return ImportanceNetwork(
cgpms=[self.crp.clusters[0]] + self.dims.values(),
accuracy=1,
rng=self.rng)
# --------------------------------------------------------------------------
# Internal row transition.
def _gibbs_transition_row(self, rowid):
# Probability of row crp assignment to each cluster.
K = self.crp.clusters[0].gibbs_tables(rowid)
logp_crp = self.crp.clusters[0].gibbs_logps(rowid)
# Probability of row data in each cluster.
logp_data = self._logpdf_row_gibbs(rowid, K)
assert len(logp_data) == len(logp_crp)
# Sample new cluster.
        p_cluster = np.add(logp_data, logp_crp)
from astropy import cosmology as cosmo
import numpy as np
import pytest
import autolens as al
class TestTracedGridListFrom:
def test__x2_planes__no_galaxy__image_and_source_planes_setup__same_coordinates(
self, sub_grid_2d_7x7
):
galaxies = [al.Galaxy(redshift=0.5), al.Galaxy(redshift=1.0)]
planes = al.util.plane.planes_via_galaxies_from(galaxies=galaxies)
traced_grid_list = al.util.ray_tracing.traced_grid_2d_list_from(
planes=planes, grid=sub_grid_2d_7x7
)
assert traced_grid_list[0][0] == pytest.approx(np.array([1.25, -1.25]), 1e-3)
assert traced_grid_list[0][1] == pytest.approx(np.array([1.25, -0.75]), 1e-3)
assert traced_grid_list[0][2] == pytest.approx(np.array([0.75, -1.25]), 1e-3)
assert traced_grid_list[0][3] == pytest.approx(np.array([0.75, -0.75]), 1e-3)
assert traced_grid_list[1][0] == pytest.approx(np.array([1.25, -1.25]), 1e-3)
assert traced_grid_list[1][1] == pytest.approx(np.array([1.25, -0.75]), 1e-3)
assert traced_grid_list[1][2] == pytest.approx(np.array([0.75, -1.25]), 1e-3)
assert traced_grid_list[1][3] == pytest.approx(np.array([0.75, -0.75]), 1e-3)
def test__x2_planes__sis_lens__traced_grid_includes_deflections__on_planes_setup(
self, sub_grid_2d_7x7_simple, gal_x1_mp
):
galaxies = [gal_x1_mp, al.Galaxy(redshift=1.0)]
planes = al.util.plane.planes_via_galaxies_from(galaxies=galaxies)
traced_grid_list = al.util.ray_tracing.traced_grid_2d_list_from(
planes=planes, grid=sub_grid_2d_7x7_simple
)
assert traced_grid_list[0][0] == pytest.approx(np.array([1.0, 1.0]), 1e-3)
assert traced_grid_list[0][1] == pytest.approx(np.array([1.0, 0.0]), 1e-3)
assert traced_grid_list[0][2] == pytest.approx(np.array([1.0, 1.0]), 1e-3)
assert traced_grid_list[0][3] == pytest.approx(np.array([1.0, 0.0]), 1e-3)
assert traced_grid_list[1][0] == pytest.approx(
np.array([1.0 - 0.707, 1.0 - 0.707]), 1e-3
)
assert traced_grid_list[1][1] == pytest.approx(np.array([0.0, 0.0]), 1e-3)
assert traced_grid_list[1][2] == pytest.approx(
np.array([1.0 - 0.707, 1.0 - 0.707]), 1e-3
)
assert traced_grid_list[1][3] == pytest.approx(np.array([0.0, 0.0]), 1e-3)
galaxies = [gal_x1_mp, gal_x1_mp, al.Galaxy(redshift=1.0)]
planes = al.util.plane.planes_via_galaxies_from(galaxies=galaxies)
traced_grid_list = al.util.ray_tracing.traced_grid_2d_list_from(
planes=planes, grid=sub_grid_2d_7x7_simple
)
        assert traced_grid_list[0][0] == pytest.approx(np.array([1.0, 1.0]), 1e-3)
import numpy as np
from numba import njit
###########
# 1. base #
###########
@njit
def mean_var_skew_kurt(x):
""" calculate mean, variance, skewness and kurtosis """
# a. allocate memory and initialize
out = np.zeros(4)
mean = out[0:]
var = out[1:]
skew = out[2:]
kurt = out[3:]
Nactive = 0
# b. determine sum and active
for i in range(x.size):
if ~np.isnan(x[i]):
Nactive += 1
mean[0] += x[i]
# c. means
if Nactive == 0:
mean[0] = np.nan
else:
mean[0] /= Nactive
# d. variance, skewness and kurtosis
for i in range(x.size):
if Nactive == 0: continue
if ~np.isnan(x[i]):
diff = x[i]-mean[0]
diff2 = diff*diff
var[0] += diff2
skew[0] += diff2*diff
kurt[0] += diff2*diff2
# e. results
if Nactive > 0:
var[0] /= Nactive-1
else:
var[0] = np.nan
if Nactive > 2:
cor_fac = Nactive/((Nactive-1)*(Nactive-2))
skew[0] *= cor_fac
skew[0] /= var[0]**(3/2)
else:
skew[0] = np.nan
if Nactive > 3:
cor_fac = (((Nactive-1)/Nactive)*((Nactive-2)/(Nactive+1))*(Nactive-3))
cor_sub = 3*(Nactive-1)*(Nactive-1) / ((Nactive-2)*(Nactive-3))
kurt[0] /= cor_fac
kurt[0] /= var[0]*var[0]
kurt[0] -= cor_sub
else:
kurt[0] = np.nan
return out
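# Hedged usage sketch (added for illustration; not part of the original module):
# the function is NaN-aware, so missing observations are skipped rather than
# propagated. The numbers below are arbitrary.
def _example_mean_var_skew_kurt():
    x = np.array([1.0, 2.0, np.nan, 4.0, 8.0])
    return mean_var_skew_kurt(x)  # array([mean, variance, skewness, kurtosis])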
@njit
def cov(a,b):
""" calculate covariance """
# a. initialize
mean_a = 0.0
mean_b = 0.0
Nactive = 0
# b. determine sums and active
for i in range(a.size):
if ~np.isnan(a[i]) and ~np.isnan(b[i]):
Nactive += 1
mean_a += a[i]
mean_b += b[i]
# c. means
if Nactive == 0:
return np.nan
mean_a /= Nactive
mean_b /= Nactive
# d. covariance
cov = 0.0
for i in range(a.size):
if ~np.isnan(a[i]) and ~np.isnan(b[i]):
cov += (a[i]-mean_a)*(b[i]-mean_b)
# e. result
cov /= (Nactive-1)
return cov
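# Hedged usage sketch (added for illustration; not part of the original module):
# pairs containing a NaN in either series are dropped before the covariance is
# computed, mirroring pairwise-complete behaviour.
def _example_cov():
    a = np.array([1.0, 2.0, 3.0, np.nan])
    b = np.array([2.0, 4.0, 6.0, 8.0])
    return cov(a, b)  # 2.0 for this toy input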
@njit
def share_in_range(x,omegas_low,omegas_high):
""" calculate share in range """
# a. allocate memory and inialize
Nomegas = omegas_low.size
Ntrue = np.zeros(Nomegas)
Nactive = 0
# b. compute
for i in range(x.size):
if ~np.isnan(x[i]):
Nactive += 1
for h in range(Nomegas):
if (x[i] >= omegas_low[h]) and (x[i] <= omegas_high[h]):
Ntrue[h] += 1
else:
break # assuming ordered from high to low
# c. result
out = np.zeros(Nomegas)
if Nactive > 0:
for h in range(Nomegas):
out[h] = Ntrue[h]/Nactive
else:
for h in range(Nomegas):
out[h] = np.nan
return out
@njit
def share_in_range_cond(x,y,omegas_low,omegas_high,cond_low,cond_high):
""" calculate conditional share in range """
# a. allocate memory and inialize
Nomegas = omegas_low.size
    Ntrue = np.zeros(Nomegas)
import numpy as np
import matplotlib.pyplot as plt
import math
import matplotlib.patches as patches
import matplotlib.animation as animation
import gym
import random
from datetime import date
float_formatter = "{:.2f}".format
np.set_printoptions(formatter={'float_kind':float_formatter})
import numpy as np
#%matplotlib qt
import matplotlib.pyplot as plt
import math
import matplotlib.patches as patches
from matplotlib.collections import PatchCollection
import matplotlib.animation as animation
from matplotlib.offsetbox import TextArea, DrawingArea, OffsetImage, AnnotationBbox
import matplotlib.image as mpimg
float_formatter = "{:.2f}".format
np.set_printoptions(formatter={'float_kind':float_formatter})
class UnbreakableSeaweed(gym.Env):
"""
Description:
A one D model of Aplysia californica feeding.
The goal is to ingest the most edible food
Source:
        This environment corresponds to the model of Aplysia feeding presented in
        Control for Multifunctionality: Bioinspired Control Based on Feeding in Aplysia californica
Observation (7-element):
Type:
Num Observation Min Max
0 x_h 0 1
1 x_g 0 1
2 force_on_object -Inf Inf
3 pressure_grasper -Inf Inf
4 pressure_jaws -Inf Inf
5 edible -1 1
6 grasper_friction_state 0 1
Actions (32-element):
Type: 5 element array
element 0 - B7 state
element 1 - B6/B9/B3 state
element 2 - B 8a/b state
element 3 - B31/B32 state
element 4 - B38 state
Control frequency: 20 Hz
Reward:
Reward is proportional to the amount of seaweed ingested
Episode Termination:
Episode is greater than max_steps_per_iteration. Default: 1000
"""
##properties for visualization
#define the location of the ground plane
x_ground = np.array([[0],[0]])
len_ground_line = 5
#define the location of the force transducer
x_transducer = x_ground + np.array([[8],[0]])
len_transducer_line = 5
#define location and shape of head
x_H = x_ground + np.array([[0],[0]])
x_H_width = 1
x_H_height = 4
#define the extents of the grasper protraction/retraction path
grasper_origin = x_H + np.array([[0],[0]])
grasper_full = grasper_origin + np.array([[1],[0]])
#define the starting position for the bottom of the grasper along this track
x_R = grasper_origin + np.array([[0],[0]])
#specify vectors based on the grasper in the upright position
theta_grasper_initial = math.pi/2
#specify the grasper radius
r_grasper = 1
grasper_offset = 1
#define the positions of the I2 muscle origins
x_I2_Borigin = grasper_origin + np.array([[0],[0]])
x_I2_Aorigin = grasper_origin + np.array([[0],[2*r_grasper]])
#define the position of the hinge origin
x_hinge_origin = grasper_origin + np.array([[0],[0]])
#specify the angle relative to horizontal for each of the attachment points fixed on the grasper surface
theta_s = 0
theta_I2_A = math.pi/6
theta_I2_B = math.pi/6
#plot line representing ground
line_ground =[]
#plot a dot at the origin
dot_ground =[]
#plot line representing force transducer
line_transducer =[]
#plot line representing track
line_grapser_track =[]
#plot line from R to G
line_RG =[]
#plot dot at point R
dot_R =[]
#plot dot at point G
dot_G =[]
#plot dot at point S
dot_S =[]
#plot dot at I2 attachment point A
dot_I2_A =[]
#plot dot at I2 attachment point B
dot_I2_B =[]
#plot dot at I2 attachment point A
dot_I2_Aorigin =[]
#plot dot at I2 attachment point B
dot_I2_Borigin =[]
#draw grasper
draw_circle =[]
#draw head
head =[]
dot_H_spring =[]
#draw head spring as dashed line
line_H_spring =[]
#draw grasper to head spring as dashed line
line_G_spring =[]
preset_inputs = 0
generat_plots_toggle = 0
init_reward = 0.0
init_force_level = 'low'
high_threshold = 4
low_threshold = 40
output_expert_mean = np.load('output_expert_mean.npy')
output_expert_std = np.load('output_expert_std.npy')
def __init__(self, foo=0, max_steps=1000, threshold=-1000.0, delay=1, patience = 20, cr_threshold = -1000):
self.output_expert_mean = np.load('output_expert_mean.npy')
self.output_expert_std = np.load('output_expert_std.npy')
self.biomechanicsModel = 1
self.generat_plots_toggle = 0
self.verbose = 0
self.cr_threshold = cr_threshold
self.patience = patience
self.delta_gm = 0
self.idle_count = 0
self.gfs = 0
self.foo = foo
self.threshold = threshold
self.total_reward = 0
self.total_reward_log = [self.total_reward]
self.reward_range = (-1e6, 1e6)
self.P_I4 = 0
self.A_I4 = 0.05
self.P_I3_anterior = 0
self.A_I3_anterior = 0.05
self.T_I3 = 0.05
self.A_I3 = 0.05
self.T_I2 = 0.05
self.A_I2 = 0.05
self.T_hinge = 0
self.A_hinge = 0.05
self.x_h = 0.0
self.x_g = 0.0
self.force_on_object = 0
#Friction coefficients
self.mu_s_g = 0.4 #mu_s coefficient of static friction at grasper
self.mu_k_g = 0.3 #mu_k coefficient of kinetic friction at grasper
self.mu_s_h = 0.3 #mu_s coefficient of static friction at jaws
self.mu_k_h = 0.3 #mu_k coefficient of kinetic friction at jaws
#Maximum muscle forces
self.max_I4 = 1.75 #Maximum pressure grasper can exert on food
self.max_I3ant = 0.6 #Maximum I3 anterior force
self.max_I3 = 1 #Maximum I3 force
self.max_I2 = 1.5 #Maximum I2 force
self.max_hinge = 0.2 #Maximum hinge force
#Muscle time constants
self.tau_I4 = 1.0/np.sqrt(2) #time constant (in seconds) for I4 activation
self.tau_I3anterior = 2.0/np.sqrt(2) #time constant (in seconds) for I3anterior activation
        self.tau_I2_ingestion = 0.5*1/np.sqrt(2)
import tequila as tq
import numpy
def make_expval_list():
H = tq.paulis.X(0)
Hz = tq.paulis.Z(0)
U1 = tq.gates.Ry(angle='a',target=0)
U2 = tq.gates.X(0)+U1
U3 = tq.gates.Ry(angle='b',target=0)
E1 = tq.ExpectationValue(H=H, U=U1)
E2 = tq.ExpectationValue(H=H, U=U2)
E3 = tq.ExpectationValue(H=H, U=U1+U3)
return [E1, E2, E3, E1]
def test_qtensor_with_numbers():
list1 = [0.5+tq.Objective(),1.5+tq.Objective(),0.75+tq.Objective(),2+tq.Objective()]
list2 = [1+tq.Objective(),1+tq.Objective()]
mat1 = tq.QTensor(objective_list=list1,shape = [2,2])
vec1 = tq.QTensor(objective_list=list2,shape = [2])
res = tq.simulate(numpy.dot(mat1,vec1))
numpy.testing.assert_allclose(res,[2, 2.75],atol=1e-05)
def test_qtensor_with_objectives():
list1 = make_expval_list()
mat1 = tq.QTensor(objective_list=list1, shape=(2,2))
E = tq.simulate(mat1, {'a':1.0,'b':0.5})
F = numpy.array([[0.84147098, -0.84147098],[0.99749499, 0.84147098]])
numpy.testing.assert_allclose(E,F,atol=1e-05)
def test_apply():
list1 = make_expval_list()
mat1 = tq.QTensor(objective_list=list1, shape=(2,2))
mat2 = mat1.apply(numpy.exp)
E = tq.simulate(mat2, {'a':1.0,'b':0.5})
F = numpy.array([[2.31977682, 0.43107595], [2.71148102, 2.31977682]])
    numpy.testing.assert_allclose(E,F,atol=1e-05)
import os, sys
import json
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
class Horns(object):
wind_pdf = np.array([[0, 30, 60, 90, 120, 150, 180, 210, 240, 270, 300, 330, 360],
[8.89, 9.27, 8.23, 9.78, 11.64, 11.03, 11.50,
11.92, 11.49, 11.08, 11.34, 10.76, 8.89],
[2.09, 2.13, 2.29, 2.30, 2.67, 2.45,
2.51, 2.40, 2.35, 2.27, 2.24, 2.19, 2.09],
[4.82, 4.06, 3.59, 5.27, 9.12, 6.97, 9.17,
11.84, 12.41, 11.34, 11.70, 9.69, 4.82]])
ref_pdf = {'single': np.array([[1.90, 1.90, 1.90, 1.90, 1.90, 1.90, 1.90,
1.90, 79.10, 1.90, 1.90, 1.90, 1.90]]),
'average': np.array([[8.33, 8.33, 8.33, 8.33, 8.33, 8.33, 8.33,
8.33, 8.33, 8.33, 8.33, 8.33, 8.33]])}
@classmethod
def layout(cls):
# Wind turbines labelling
c_n, r_n = 8, 10
labels = []
for i in range(1, r_n + 1):
for j in range(1, c_n + 1):
l = "c{}_r{}".format(j, i)
labels.append(l)
# Wind turbines location generating wt_c1_r1 = (0., 4500.)
locations = np.zeros((c_n * r_n, 2))
num = 0
for i in range(r_n):
for j in range(c_n):
loc_x = 0. + 68.589 * j + 7 * 80. * i
loc_y = 3911. - j * 558.616
locations[num, :] = [loc_x, loc_y]
num += 1
return np.array(locations)
@classmethod
def params(cls):
params = dict()
params["D_r"] = [80.]
params["z_hub"] = [70.]
params["v_in"] = [4.]
params["v_rated"] = [15.]
params["v_out"] = [25.]
params["P_rated"] = [2.] # 2WM
params["power_curve"] = ["horns"]
params["ct_curve"] = ["horns"]
return pd.DataFrame(params)
@classmethod
def pow_curve(cls, vel):
if vel <= 4.:
return 0.
elif vel >= 15.:
return 2.
else:
return 1.45096246e-07 * vel**8 - 1.34886923e-05 * vel**7 + \
5.23407966e-04 * vel**6 - 1.09843946e-02 * vel**5 + \
1.35266234e-01 * vel**4 - 9.95826651e-01 * vel**3 + \
4.29176920e+00 * vel**2 - 9.84035534e+00 * vel + \
9.14526132e+00
@classmethod
def ct_curve(cls, vel):
if vel <= 10.:
vel = 10.
elif vel >= 20.:
vel = 20.
return np.array([-2.98723724e-11, 5.03056185e-09, -3.78603307e-07, 1.68050026e-05,
-4.88921388e-04, 9.80076811e-03, -1.38497930e-01, 1.38736280e+00,
-9.76054549e+00, 4.69713775e+01, -1.46641177e+02, 2.66548591e+02,
-2.12536408e+02]).dot(np.array([vel**12, vel**11, vel**10, vel**9,
vel**8, vel**7, vel**6, vel**5,
vel**4, vel**3, vel**2, vel, 1.]))
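# Hedged usage sketch (added for illustration; not part of the original module):
# evaluates the fitted Horns Rev power and thrust-coefficient curves at a few
# wind speeds and builds the 80-turbine layout. The speeds are arbitrary.
def _example_horns():
    speeds = [5.0, 10.0, 15.0]
    power_mw = [Horns.pow_curve(v) for v in speeds]
    ct = [Horns.ct_curve(v) for v in speeds]
    layout_xy = Horns.layout()  # (80, 2) array of turbine coordinates
    return power_mw, ct, layout_xy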
def power_to_cpct(curves, temp='Vesta_2MW'):
pow_curve, ct_curve = curves
air_density = 1.225
generator_efficiency = 1.0
input_json = f"./{temp}.json"
with open(input_json, 'r+') as jsonfile:
turbine_data = json.load(jsonfile)
radius = turbine_data["turbine"]["properties"]["rotor_diameter"] / 2
wind_speed = np.array(
turbine_data["turbine"]["properties"]["power_thrust_table"]["wind_speed"])
power = np.vectorize(pow_curve)(wind_speed) * 1e6 # change units of MW to W
cp = 2 * power / (air_density * np.pi * radius ** 2 * generator_efficiency * wind_speed ** 3)
        ct = np.vectorize(ct_curve)(wind_speed)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import numpy as np
from sigmoid import sigmoid
def feed_forward(theta1, theta2, X):
# Reshape nn_params back into the parameters theta1 and theta2, the weight 2-D arrays
# for our two layer neural network
    # Forward propagation algorithm
m = X.shape[0]
    a1 = np.c_[np.ones(m), X]
#! /usr/bin/env python3
# Author: <NAME> (srinivas . zinka [at] gmail . com)
# Copyright (c) 2014 <NAME>
# License: New BSD License.
import numpy as np
from scipy import integrate, special
import matplotlib.pyplot as plt
from . import planar as pl
from . import Zolotarev as zl
# from mayavi import mlab
import warnings
# adjusting "matplotlib" label fonts
from matplotlib import rc
rc('text', usetex=True)
def ip_format_c(N, radius, A="uniform", starts_at_zero=True, plot_type="2D",
color='b', linewidth=1, linestyle='-', alpha=1, show=True,
stem=False, stemline='g--', stemmarker='ro', fgcolor=(1, 1, 1),
bgcolor=(0.5, 0.5, 0.5), mayavi_app=False):
r"""
Function to generate the 'Arraytool' input format for circular ring arrays.
:param N: number of elements in the uniformly spaced circular ring array
:param radius: radius of the circular ring in wavelengths
:param A: a 'column matrix' specifying the excitation values of the
circular ring array; by default it will be uniform excitation
:param plot_type: can be '2D'/'3D' ... if False, nothing happens
:param stem: if True, the array excitation is plotted as 'stem plot'
:param mayavi_app: if True, the 3D plot will be opened in the MayaVi application
:param starts_at_zero: 'True' if array starts at beta=0
All other parameters are nothing but the 'Matplotlib/Mayavi' parameters.
These should be familiar to 'Matlab' or 'Matplotlib/Mayavi' users.
:rtype: array_ip, a Numpy array of size (Number of elements(A),4)
"""
# Creating Arraytool input form 'array_ip' for the circular ring array
if (A == "uniform"):
A = np.ones((N, 1))
if (starts_at_zero):
position_beta = (np.linspace(1, N, num=N) - 1) * (2 * np.pi / N)
else:
position_beta = (np.linspace(1, N, num=N) - 0.5) * (2 * np.pi / N)
position_beta = np.reshape(position_beta, (N, -1))
position_x = radius * np.cos(position_beta)
position_y = radius * np.sin(position_beta)
position_z = np.zeros_like(position_x)
array_ip = np.hstack((position_x, position_y, position_z, A))
# Plotting 2D/3D plots
if (plot_type):
# checking whether 'A' has any imaginary values
if ((abs(A.imag) > 1e-10).sum()):
A_plt = abs(A) # if A.imag are significant, then '|A|' will be plotted
warnings.warn(
'Since, the given excitation "A" has significant imaginary parts, stem plot for abs(A) is plotted')
else:
A_plt = A.real # if A.imag are negligible, then 'A' will be plotted
warnings.warn(
'Since, the given excitation "A" has very small imaginary parts, stem plot for "A.real" is plotted')
if (plot_type == "2D"): # plot 2D plot in Matplotlib
plt.plot(position_beta, A_plt, color=color, linewidth=linewidth,
linestyle=linestyle, alpha=alpha)
if (stem): plt.stem(position_beta, A_plt, linefmt=stemline, markerfmt=stemmarker)
plt.axis('tight');
plt.grid(True)
plt.xlabel(r'$y$', fontsize=16);
plt.ylabel(r'$\left|A_{n}\right|$', fontsize=16)
if (show): plt.title(r'$\mathrm{Array}\ \mathrm{Excitation}$', fontsize=18); plt.show()
else:
if (mayavi_app): # this option opens the 3D plot in MayaVi Application
mlab.options.backend = 'envisage'
mlab.figure(fgcolor=fgcolor, bgcolor=bgcolor)
s1 = mlab.quiver3d(position_x, position_y, position_z, position_z, position_z,
A_plt) # stem3D representation
ranges1 = [position_x.min(), position_x.max(), position_y.min(), position_y.max(), A_plt.min(), A_plt.max()]
mlab.axes(xlabel="x", ylabel="y", zlabel="A", ranges=ranges1, nb_labels=3)
mlab.colorbar(orientation="vertical", nb_labels=5)
s1.scene.isometric_view()
if (show): mlab.show()
return array_ip
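# Hedged usage sketch (added for illustration; not part of the original function):
# generates the Arraytool input matrix for an 8-element uniform circular ring of
# half-wavelength radius without opening a plot window. Values are illustrative.
def _example_ip_format_c():
    array_ip = ip_format_c(N=8, radius=0.5, A="uniform", plot_type=False)
    return array_ip  # shape (8, 4): x, y, z positions plus excitation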
def FS(fun_str_re, fun_str_im='0', T0=2 * np.pi, m_start=-5, m_stop=5, err_lim=1e-8):
"""Function to generate a finite number of Fourier series coefficients of
a periodic function."""
N = m_stop - m_start + 1
FS = np.zeros((N, 1), dtype='complex')
m_index = list(range(m_start, m_stop + 1))
w0 = 2 * np.pi / T0
for m in m_index:
fun_re = lambda x: (eval(fun_str_re)) * np.cos(m * w0 * x) + (eval(fun_str_im)) * np.sin(m * w0 * x)
fun_img = lambda x: -(eval(fun_str_re)) * np.sin(m * w0 * x) + (eval(fun_str_im)) * np.cos(m * w0 * x)
FS_re = integrate.quad(fun_re, 0, 2 * np.pi)
FS_img = integrate.quad(fun_img, 0, 2 * np.pi)
if ((FS_re[1] + FS_img[1]) < err_lim):
FS[m - m_start] = (1 / T0) * (FS_re[0] + 1j * FS_img[0])
else:
print("Absolute error of the integration is not less than 1e-10 while calculating Fourier series")
print("error(FS_re): ", FS_re[1])
print("error(FS_img): ", FS_img[1])
    m_index = np.array(m_index)
"""
Tests for net.ssd module
"""
import numpy as np
import tensorflow as tf
import net.ssd
def test_get_single_shot_detector_loss_op_with_no_positives_matches():
"""
Test ssd loss op when no positive match was present - means loss should be 0
"""
default_boxes_categories_ids_vector = np.array([0, 0])
predictions_logits_matrix = np.array([
[0.8, 0.5],
[0.4, 0.2]
])
loss_op = net.ssd.get_single_shot_detector_loss_op(
default_boxes_categories_ids_vector_op=tf.constant(default_boxes_categories_ids_vector, dtype=tf.int32),
predictions_logits_matrix_op=tf.constant(predictions_logits_matrix, dtype=tf.float32),
hard_negatives_mining_ratio=3)
with tf.Session() as session:
loss = session.run(loss_op)
assert loss == 0
def test_get_single_shot_detector_loss_op_with_no_hard_negatives_mining():
"""
Test ssd loss op when no negative samples are used
"""
default_boxes_categories_ids_vector = np.array([1, 0])
predictions_logits_matrix = np.array([
[0.5, 0.1],
[0.4, 0.2]
])
loss_op = net.ssd.get_single_shot_detector_loss_op(
default_boxes_categories_ids_vector_op=tf.constant(default_boxes_categories_ids_vector, dtype=tf.int32),
predictions_logits_matrix_op=tf.constant(predictions_logits_matrix, dtype=tf.float32),
hard_negatives_mining_ratio=0)
with tf.Session() as session:
expected = -np.log(0.4013)
actual = session.run(loss_op)
assert np.isclose(expected, actual, atol=0.001)
def test_get_single_shot_detector_loss_op_with_all_samples_used():
"""
Test ssd loss op when all positive and negative samples are used
"""
default_boxes_categories_ids_vector = np.array([1, 0])
predictions_logits_matrix = np.array([
[0.5, 0.1],
[0.4, 0.2]
])
loss_op = net.ssd.get_single_shot_detector_loss_op(
default_boxes_categories_ids_vector_op=tf.constant(default_boxes_categories_ids_vector, dtype=tf.int32),
predictions_logits_matrix_op=tf.constant(predictions_logits_matrix, dtype=tf.float32),
hard_negatives_mining_ratio=1)
with tf.Session() as session:
expected = -(np.log(0.4013) + np.log(0.5498)) / 2.0
actual = session.run(loss_op)
assert np.isclose(expected, actual, atol=0.001)
def test_get_single_shot_detector_loss_op_with_not_all_negative_samples_used():
"""
Test ssd loss op when not all negative samples are used
"""
    default_boxes_categories_ids_vector = np.array([1, 0, 0])
# -*- coding: utf-8 -*-
"""
Defines unit tests for :mod:`colour.colorimetry.lightness` module.
"""
import numpy as np
import unittest
from colour.colorimetry import (lightness_Glasser1958, lightness_Wyszecki1963,
intermediate_lightness_function_CIE1976,
lightness_CIE1976, lightness_Fairchild2010,
lightness_Fairchild2011)
from colour.colorimetry.lightness import lightness
from colour.utilities import domain_range_scale, ignore_numpy_errors
__author__ = 'Colour Developers'
__copyright__ = 'Copyright (C) 2013-2021 - Colour Developers'
__license__ = 'New BSD License - https://opensource.org/licenses/BSD-3-Clause'
__maintainer__ = 'Colour Developers'
__email__ = '<EMAIL>'
__status__ = 'Production'
__all__ = [
'TestLightnessGlasser1958', 'TestLightnessWyszecki1963',
'TestIntermediateLightnessFunctionCIE1976', 'TestLightnessCIE1976',
'TestLightnessFairchild2010', 'TestLightnessFairchild2011', 'TestLightness'
]
class TestLightnessGlasser1958(unittest.TestCase):
"""
Defines :func:`colour.colorimetry.lightness.lightness_Glasser1958`
definition unit tests methods.
"""
def test_lightness_Glasser1958(self):
"""
Tests :func:`colour.colorimetry.lightness.lightness_Glasser1958`
definition.
"""
self.assertAlmostEqual(
lightness_Glasser1958(12.19722535), 39.83512646492521, places=7)
self.assertAlmostEqual(
lightness_Glasser1958(23.04276781), 53.585946877480623, places=7)
self.assertAlmostEqual(
lightness_Glasser1958(6.15720079), 27.972867038082629, places=7)
def test_n_dimensional_lightness_Glasser1958(self):
"""
Tests :func:`colour.colorimetry.lightness.lightness_Glasser1958`
definition n-dimensional arrays support.
"""
Y = 12.19722535
L = lightness_Glasser1958(Y)
Y = np.tile(Y, 6)
L = np.tile(L, 6)
np.testing.assert_almost_equal(lightness_Glasser1958(Y), L, decimal=7)
        Y = np.reshape(Y, (2, 3))
# -*- coding: utf-8 -*-
"""
Functionality for reading Sentinel-1 data into a SICD model.
"""
import os
import logging
from datetime import datetime
from xml.etree import ElementTree
from typing import List, Tuple, Union
import numpy
from numpy.polynomial import polynomial
from scipy.constants import speed_of_light
from scipy.interpolate import griddata
from sarpy.compliance import string_types
from sarpy.io.general.base import SubsetReader, BaseReader
from sarpy.io.general.tiff import TiffDetails, TiffReader
from sarpy.io.general.utils import get_seconds, parse_timestring
from sarpy.io.complex.sicd_elements.blocks import Poly1DType, Poly2DType
from sarpy.io.complex.sicd_elements.SICD import SICDType
from sarpy.io.complex.sicd_elements.CollectionInfo import CollectionInfoType, RadarModeType
from sarpy.io.complex.sicd_elements.ImageCreation import ImageCreationType
from sarpy.io.complex.sicd_elements.RadarCollection import RadarCollectionType, WaveformParametersType, \
TxFrequencyType, ChanParametersType
from sarpy.io.complex.sicd_elements.ImageData import ImageDataType
from sarpy.io.complex.sicd_elements.GeoData import GeoDataType, SCPType
from sarpy.io.complex.sicd_elements.Position import PositionType, XYZPolyType
from sarpy.io.complex.sicd_elements.Grid import GridType, DirParamType, WgtTypeType
from sarpy.io.complex.sicd_elements.Timeline import TimelineType, IPPSetType
from sarpy.io.complex.sicd_elements.ImageFormation import ImageFormationType, RcvChanProcType, TxFrequencyProcType
from sarpy.io.complex.sicd_elements.RMA import RMAType, INCAType
from sarpy.io.complex.sicd_elements.Radiometric import RadiometricType, NoiseLevelType_
from sarpy.geometry.geocoords import geodetic_to_ecf
from sarpy.io.complex.utils import two_dim_poly_fit, get_im_physical_coords
__classification__ = "UNCLASSIFIED"
__author__ = ("<NAME>", "<NAME>")
########
# base expected functionality for a module with an implemented Reader
def is_a(file_name):
"""
Tests whether a given file_name corresponds to a Sentinel file. Returns a reader instance, if so.
Parameters
----------
file_name : str
the file_name to check
Returns
-------
SentinelReader|None
`SentinelReader` instance if Sentinel-1 file, `None` otherwise
"""
try:
sentinel_details = SentinelDetails(file_name)
print('Path {} is determined to be or contain a Sentinel-1 manifest.safe file.'.format(file_name))
return SentinelReader(sentinel_details)
except (IOError, AttributeError, SyntaxError, ElementTree.ParseError):
return None
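# A minimal usage sketch of `is_a` (the path below is hypothetical, for illustration only):
#
#     reader = is_a('/data/S1A_IW_SLC_example.SAFE/manifest.safe')
#     if reader is None:
#         print('not a Sentinel-1 SLC product')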
##########
# helper functions
def _parse_xml(file_name, without_ns=False):
root_node = ElementTree.parse(file_name).getroot()
if without_ns:
return root_node
else:
ns = dict([node for _, node in ElementTree.iterparse(file_name, events=('start-ns', ))])
return ns, root_node
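# Note: when without_ns is False, the returned `ns` dictionary maps the namespace
# prefixes declared in the document (e.g. 'safe', 's1sarl1', 'xfdu' in a Sentinel-1
# manifest.safe) to their URIs, so it can be passed straight to find()/findall().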
###########
# parser and interpreter for sentinel-1 manifest.safe file
class SentinelDetails(object):
__slots__ = ('_file_name', '_root_node', '_ns', '_satellite', '_product_type', '_base_sicd')
def __init__(self, file_name):
"""
Parameters
----------
file_name : str
"""
        if os.path.isdir(file_name):  # it's the directory - point it at the manifest.safe file
t_file_name = os.path.join(file_name, 'manifest.safe')
if os.path.exists(t_file_name):
file_name = t_file_name
if not os.path.exists(file_name) or not os.path.isfile(file_name):
raise IOError('path {} does not exist or is not a file'.format(file_name))
if os.path.split(file_name)[1] != 'manifest.safe':
raise IOError('The sentinel file is expected to be named manifest.safe, got path {}'.format(file_name))
self._file_name = file_name
self._ns, self._root_node = _parse_xml(file_name)
# note that the manifest.safe apparently does not have a default namespace,
# so we have to explicitly enter no prefix in the namespace dictionary
self._ns[''] = ''
self._satellite = self._find('./metadataSection'
'/metadataObject[@ID="platform"]'
'/metadataWrap'
'/xmlData'
'/safe:platform'
'/safe:familyName').text
if self._satellite != 'SENTINEL-1':
raise ValueError('The platform in the manifest.safe file is required '
'to be SENTINEL-1, got {}'.format(self._satellite))
self._product_type = self._find('./metadataSection'
'/metadataObject[@ID="generalProductInformation"]'
'/metadataWrap'
'/xmlData'
'/s1sarl1:standAloneProductInformation'
'/s1sarl1:productType').text
if self._product_type != 'SLC':
raise ValueError('The product type in the manifest.safe file is required '
'to be "SLC", got {}'.format(self._product_type))
self._base_sicd = self._get_base_sicd()
@property
def file_name(self):
"""
str: the file name
"""
return self._file_name
@property
def satellite(self):
"""
str: the satellite
"""
return self._satellite
@property
def product_type(self):
"""
str: the product type
"""
return self._product_type
def _find(self, tag):
"""
Pass through to ElementTree.Element.find(tag, ns).
Parameters
----------
tag : str
Returns
-------
ElementTree.Element
"""
return self._root_node.find(tag, self._ns)
def _findall(self, tag):
"""
Pass through to ElementTree.Element.findall(tag, ns).
Parameters
----------
tag : str
Returns
-------
        List[ElementTree.Element]
"""
return self._root_node.findall(tag, self._ns)
@staticmethod
def _parse_pol(str_in):
# type: (str) -> str
return '{}:{}'.format(str_in[0], str_in[1])
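    # e.g. _parse_pol('VH') -> 'V:H' (transmit polarization : receive polarization)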
def _get_file_sets(self):
"""
Extracts paths for measurement and metadata files from a Sentinel manifest.safe file.
These files will be grouped according to "measurement data unit" implicit in the
Sentinel structure.
Returns
-------
List[dict]
"""
def get_file_location(schema_type, tids):
if isinstance(tids, string_types):
tids = [tids, ]
for tid in tids:
do = self._find('dataObjectSection/dataObject[@repID="{}"]/[@ID="{}"]'.format(schema_type, tid))
if do is None:
continue
return os.path.join(base_path, do.find('./byteStream/fileLocation').attrib['href'])
return None
base_path = os.path.dirname(self._file_name)
files = []
for mdu in self._findall('./informationPackageMap'
'/xfdu:contentUnit'
'/xfdu:contentUnit/[@repID="s1Level1MeasurementSchema"]'):
# get the data file for this measurement
fnames = {'data': get_file_location('s1Level1MeasurementSchema',
mdu.find('dataObjectPointer').attrib['dataObjectID'])}
# get the ids for product, noise, and calibration associated with this measurement data unit
ids = mdu.attrib['dmdID'].split()
# translate these ids to data object ids=file ids for the data files
fids = [self._find('./metadataSection'
'/metadataObject[@ID="{}"]'
'/dataObjectPointer'.format(did)).attrib['dataObjectID'] for did in ids]
# NB: there is (at most) one of these per measurement data unit
fnames['product'] = get_file_location('s1Level1ProductSchema', fids)
fnames['noise'] = get_file_location('s1Level1NoiseSchema', fids)
fnames['calibration'] = get_file_location('s1Level1CalibrationSchema', fids)
files.append(fnames)
return files
def _get_base_sicd(self):
"""
Gets the base SICD element.
Returns
-------
SICDType
"""
from sarpy.__about__ import __version__
# CollectionInfo
platform = self._find('./metadataSection'
'/metadataObject[@ID="platform"]'
'/metadataWrap'
'/xmlData/safe:platform')
collector_name = platform.find('safe:familyName', self._ns).text + platform.find('safe:number', self._ns).text
mode_id = platform.find('./safe:instrument'
'/safe:extension'
'/s1sarl1:instrumentMode'
'/s1sarl1:mode', self._ns).text
if mode_id == 'SM':
mode_type = 'STRIPMAP'
else:
# TOPSAR - closest SICD analog is Dynamic Stripmap
mode_type = 'DYNAMIC STRIPMAP'
collection_info = CollectionInfoType(Classification='UNCLASSIFIED',
CollectorName=collector_name,
CollectType='MONOSTATIC',
RadarMode=RadarModeType(ModeID=mode_id, ModeType=mode_type))
# ImageCreation
processing = self._find('./metadataSection'
'/metadataObject[@ID="processing"]'
'/metadataWrap'
'/xmlData'
'/safe:processing')
facility = processing.find('safe:facility', self._ns)
software = facility.find('safe:software', self._ns)
image_creation = ImageCreationType(
Application='{name} {version}'.format(**software.attrib),
DateTime=processing.attrib['stop'],
Site='{name}, {site}, {country}'.format(**facility.attrib),
Profile='sarpy {}'.format(__version__))
# RadarCollection
polarizations = self._findall('./metadataSection'
'/metadataObject[@ID="generalProductInformation"]'
'/metadataWrap'
'/xmlData'
'/s1sarl1:standAloneProductInformation'
'/s1sarl1:transmitterReceiverPolarisation')
radar_collection = RadarCollectionType(RcvChannels=[
ChanParametersType(TxRcvPolarization=self._parse_pol(pol.text), index=i)
for i, pol in enumerate(polarizations)])
return SICDType(CollectionInfo=collection_info, ImageCreation=image_creation, RadarCollection=radar_collection)
def _parse_product_sicd(self, product_file_name):
"""
Parameters
----------
product_file_name : str
Returns
-------
SICDType|List[SICDType]
"""
DT_FMT = '%Y-%m-%dT%H:%M:%S.%f'
root_node = _parse_xml(product_file_name, without_ns=True)
burst_list = root_node.findall('./swathTiming/burstList/burst')
        # parse the geolocation information - for SCP calculation
geo_grid_point_list = root_node.findall('./geolocationGrid/geolocationGridPointList/geolocationGridPoint')
geo_pixels = numpy.zeros((len(geo_grid_point_list), 2), dtype=numpy.float64)
geo_coords = numpy.zeros((len(geo_grid_point_list), 3), dtype=numpy.float64)
for i, grid_point in enumerate(geo_grid_point_list):
geo_pixels[i, :] = (float(grid_point.find('./pixel').text),
float(grid_point.find('./line').text)) # (row, col) order
geo_coords[i, :] = (float(grid_point.find('./latitude').text),
float(grid_point.find('./longitude').text),
float(grid_point.find('./height').text))
geo_coords = geodetic_to_ecf(geo_coords)
def get_center_frequency(): # type: () -> float
return float(root_node.find('./generalAnnotation/productInformation/radarFrequency').text)
def get_image_col_spacing_zdt(): # type: () -> float
# Image column spacing in zero doppler time (seconds)
# Sentinel-1 is always right-looking, so this should always be positive
return float(root_node.find('./imageAnnotation/imageInformation/azimuthTimeInterval').text)
def get_image_data(): # type: () -> ImageDataType
_pv = root_node.find('./imageAnnotation/imageInformation/pixelValue').text
if _pv == 'Complex':
pixel_type = 'RE16I_IM16I'
else:
# NB: we only handle SLC
raise ValueError('SLC data should be 16-bit complex, got pixelValue = {}.'.format(_pv))
if len(burst_list) > 0:
# should be TOPSAR
num_rows = int(root_node.find('./swathTiming/samplesPerBurst').text)
num_cols = int(root_node.find('./swathTiming/linesPerBurst').text)
else:
# STRIPMAP
# NB - these fields always contain the number of rows/cols in the entire tiff,
# even if there are multiple bursts
num_rows = int(root_node.find('./imageAnnotation/imageInformation/numberOfSamples').text)
num_cols = int(root_node.find('./imageAnnotation/imageInformation/numberOfLines').text)
            # SCP pixel within single burst image is the same for all bursts
return ImageDataType(PixelType=pixel_type,
NumRows=num_rows,
NumCols=num_cols,
FirstRow=0,
FirstCol=0,
FullImage=(num_rows, num_cols),
SCPPixel=(int((num_rows - 1)/2), int((num_cols - 1)/2)))
def get_common_grid(): # type: () -> GridType
center_frequency = get_center_frequency()
image_plane = 'SLANT' if root_node.find('./generalAnnotation/productInformation/projection').text == \
'Slant Range' else None
# get range processing node
range_proc = root_node.find('./imageAnnotation'
'/processingInformation'
'/swathProcParamsList'
'/swathProcParams'
'/rangeProcessing')
delta_tau_s = 1. / float(root_node.find('./generalAnnotation/productInformation/rangeSamplingRate').text)
row_window_name = range_proc.find('./windowType').text.upper()
row_params = None
if row_window_name == 'NONE':
row_window_name = 'UNIFORM'
elif row_window_name == 'HAMMING':
row_params = {'COEFFICIENT': range_proc.find('./windowCoefficient').text}
row = DirParamType(SS=(speed_of_light/2)*delta_tau_s,
Sgn=-1,
KCtr=2*center_frequency/speed_of_light,
ImpRespBW=2. * float(range_proc.find('./processingBandwidth').text) / speed_of_light,
DeltaKCOAPoly=Poly2DType(Coefs=[[0, ]]),
WgtType=WgtTypeType(WindowName=row_window_name, Parameters=row_params))
# get azimuth processing node
az_proc = root_node.find('./imageAnnotation'
'/processingInformation'
'/swathProcParamsList'
'/swathProcParams'
'/azimuthProcessing')
col_ss = float(root_node.find('./imageAnnotation/imageInformation/azimuthPixelSpacing').text)
dop_bw = float(az_proc.find('./processingBandwidth').text) # Doppler bandwidth
ss_zd_s = get_image_col_spacing_zdt()
col_window_name = az_proc.find('./windowType').text.upper()
col_params = None
if col_window_name == 'NONE':
col_window_name = 'UNIFORM'
elif col_window_name == 'HAMMING':
col_params = {'COEFFICIENT': az_proc.find('./windowCoefficient').text}
col = DirParamType(SS=col_ss,
Sgn=-1,
KCtr=0,
ImpRespBW=dop_bw*ss_zd_s/col_ss,
WgtType=WgtTypeType(WindowName=col_window_name, Parameters=col_params))
return GridType(ImagePlane=image_plane, Type='RGZERO', Row=row, Col=col)
def get_common_timeline(): # type: () -> TimelineType
prf = float(root_node.find('./generalAnnotation'
'/downlinkInformationList'
'/downlinkInformation'
'/prf').text)
# NB: TEnd and IPPEnd are nonsense values which will be corrected
return TimelineType(IPP=[IPPSetType(TStart=0, TEnd=0, IPPStart=0, IPPEnd=0, IPPPoly=(0, prf), index=0), ])
def get_common_radar_collection(): # type: () -> RadarCollectionType
radar_collection = out_sicd.RadarCollection.copy()
center_frequency = get_center_frequency()
min_frequency = center_frequency + \
float(root_node.find('./generalAnnotation/downlinkInformationList/downlinkInformation'
'/downlinkValues/txPulseStartFrequency').text)
tx_pulse_length = float(root_node.find('./generalAnnotation'
'/downlinkInformationList'
'/downlinkInformation'
'/downlinkValues'
'/txPulseLength').text)
tx_fm_rate = float(root_node.find('./generalAnnotation'
'/downlinkInformationList'
'/downlinkInformation'
'/downlinkValues'
'/txPulseRampRate').text)
band_width = tx_pulse_length*tx_fm_rate
pol = root_node.find('./adsHeader/polarisation').text
radar_collection.TxPolarization = pol[0]
radar_collection.TxFrequency = TxFrequencyType(Min=min_frequency, Max=min_frequency+band_width)
adc_sample_rate = float(root_node.find('./generalAnnotation'
'/productInformation'
'/rangeSamplingRate').text) # Raw not decimated
swl_list = root_node.findall('./generalAnnotation/downlinkInformationList/' +
'downlinkInformation/downlinkValues/swlList/swl')
radar_collection.Waveform = [
WaveformParametersType(index=j,
TxFreqStart=min_frequency,
TxPulseLength=tx_pulse_length,
TxFMRate=tx_fm_rate,
TxRFBandwidth=band_width,
RcvFMRate=0,
ADCSampleRate=adc_sample_rate,
RcvWindowLength=float(swl.find('./value').text))
for j, swl in enumerate(swl_list)]
return radar_collection
def get_image_formation(): # type: () -> ImageFormationType
st_beam_comp = 'GLOBAL' if out_sicd.CollectionInfo.RadarMode.ModeID[0] == 'S' else 'SV'
pol = self._parse_pol(root_node.find('./adsHeader/polarisation').text)
# which channel does this pol correspond to?
chan_indices = None
for element in out_sicd.RadarCollection.RcvChannels:
if element.TxRcvPolarization == pol:
chan_indices = [element.index, ]
return ImageFormationType(RcvChanProc=RcvChanProcType(NumChanProc=1,
PRFScaleFactor=1,
ChanIndices=chan_indices),
TxRcvPolarizationProc=pol,
TStartProc=0,
TxFrequencyProc=TxFrequencyProcType(
MinProc=out_sicd.RadarCollection.TxFrequency.Min,
MaxProc=out_sicd.RadarCollection.TxFrequency.Max),
ImageFormAlgo='RMA',
ImageBeamComp='SV',
AzAutofocus='NO',
RgAutofocus='NO',
STBeamComp=st_beam_comp)
def get_rma(): # type: () -> RMAType
center_frequency = get_center_frequency()
tau_0 = float(root_node.find('./imageAnnotation/imageInformation/slantRangeTime').text)
delta_tau_s = 1. / float(root_node.find('./generalAnnotation/productInformation/rangeSamplingRate').text)
return RMAType(
RMAlgoType='RG_DOP',
INCA=INCAType(
FreqZero=center_frequency,
DopCentroidCOA=True,
R_CA_SCP=(0.5*speed_of_light)*(tau_0 + out_sicd.ImageData.SCPPixel.Row*delta_tau_s))
)
def get_slice(): # type: () -> str
slice_number = root_node.find('./imageAnnotation/imageInformation/sliceNumber')
if slice_number is None:
return '0'
else:
return slice_number.text
def get_swath(): # type: () -> str
return root_node.find('./adsHeader/swath').text
def get_collection_info(): # type: () -> CollectionInfoType
collection_info = out_sicd.CollectionInfo.copy()
collection_info.CollectorName = root_node.find('./adsHeader/missionId').text
collection_info.RadarMode.ModeID = root_node.find('./adsHeader/mode').text
t_slice = get_slice()
swath = get_swath()
collection_info.Parameters = {
'SLICE': t_slice, 'BURST': '1', 'SWATH': swath, 'ORBIT_SOURCE': 'SLC_INTERNAL'}
return collection_info
def get_state_vectors(start):
# type: (numpy.datetime64) -> Tuple[numpy.ndarray, numpy.ndarray, numpy.ndarray, numpy.ndarray]
orbit_list = root_node.findall('./generalAnnotation/orbitList/orbit')
shp = (len(orbit_list), )
Ts = numpy.empty(shp, dtype=numpy.float64)
Xs = numpy.empty(shp, dtype=numpy.float64)
Ys = numpy.empty(shp, dtype=numpy.float64)
Zs = numpy.empty(shp, dtype=numpy.float64)
for j, orbit in enumerate(orbit_list):
Ts[j] = get_seconds(parse_timestring(orbit.find('./time').text), start, precision='us')
Xs[j] = float(orbit.find('./position/x').text)
Ys[j] = float(orbit.find('./position/y').text)
Zs[j] = float(orbit.find('./position/z').text)
return Ts, Xs, Ys, Zs
def get_doppler_estimates(start):
# type: (numpy.datetime64) -> Tuple[numpy.ndarray, numpy.ndarray, List[numpy.ndarray]]
dc_estimate_list = root_node.findall('./dopplerCentroid/dcEstimateList/dcEstimate')
shp = (len(dc_estimate_list), )
dc_az_time = numpy.empty(shp, dtype=numpy.float64)
dc_t0 = numpy.empty(shp, dtype=numpy.float64)
data_dc_poly = []
for j, dc_estimate in enumerate(dc_estimate_list):
dc_az_time[j] = get_seconds(parse_timestring(dc_estimate.find('./azimuthTime').text),
start, precision='us')
dc_t0[j] = float(dc_estimate.find('./t0').text)
data_dc_poly.append(numpy.fromstring(dc_estimate.find('./dataDcPolynomial').text, sep=' '))
return dc_az_time, dc_t0, data_dc_poly
def get_azimuth_fm_estimates(start):
# type: (numpy.datetime64) -> Tuple[numpy.ndarray, numpy.ndarray, List[numpy.ndarray]]
azimuth_fm_rate_list = root_node.findall('./generalAnnotation/azimuthFmRateList/azimuthFmRate')
shp = (len(azimuth_fm_rate_list), )
az_t = numpy.empty(shp, dtype=numpy.float64)
az_t0 = numpy.empty(shp, dtype=numpy.float64)
k_a_poly = []
for j, az_fm_rate in enumerate(azimuth_fm_rate_list):
az_t[j] = get_seconds(parse_timestring(az_fm_rate.find('./azimuthTime').text),
start, precision='us')
az_t0[j] = float(az_fm_rate.find('./t0').text)
if az_fm_rate.find('c0') is not None:
# old style annotation xml file
k_a_poly.append(numpy.array([float(az_fm_rate.find('./c0').text),
float(az_fm_rate.find('./c1').text),
float(az_fm_rate.find('./c2').text)], dtype=numpy.float64))
else:
k_a_poly.append(numpy.fromstring(az_fm_rate.find('./azimuthFmRatePolynomial').text, sep=' '))
return az_t, az_t0, k_a_poly
def set_core_name(sicd, start_dt, burst_num):
# type: (SICDType, datetime, int) -> None
t_slice = int(get_slice())
swath = get_swath()
sicd.CollectionInfo.CoreName = '{0:s}{1:s}{2:s}_{3:02d}_{4:s}_{5:02d}'.format(
start_dt.strftime('%d%b%y'),
root_node.find('./adsHeader/missionId').text,
root_node.find('./adsHeader/missionDataTakeId').text,
t_slice,
swath,
burst_num+1)
sicd.CollectionInfo.Parameters['BURST'] = '{0:d}'.format(burst_num+1)
def set_timeline(sicd, start, duration):
# type: (SICDType, numpy.datetime64, float) -> None
prf = float(root_node.find('./generalAnnotation/downlinkInformationList/downlinkInformation/prf').text)
timeline = sicd.Timeline
timeline.CollectStart = start
timeline.CollectDuration = duration
timeline.IPP[0].TEnd = duration
timeline.IPP[0].IPPEnd = int(timeline.CollectDuration*prf)
sicd.ImageFormation.TEndProc = duration
def set_position(sicd, start):
# type: (SICDType, numpy.datetime64) -> None
Ts, Xs, Ys, Zs = get_state_vectors(start)
poly_order = min(5, Ts.size-1)
P_X = polynomial.polyfit(Ts, Xs, poly_order)
P_Y = polynomial.polyfit(Ts, Ys, poly_order)
P_Z = polynomial.polyfit(Ts, Zs, poly_order)
sicd.Position = PositionType(ARPPoly=XYZPolyType(X=P_X, Y=P_Y, Z=P_Z))
def update_rma_and_grid(sicd, first_line_relative_start, start, return_time_dets=False):
# type: (SICDType, Union[float, int], numpy.datetime64, bool) -> Union[None, Tuple[float, float]]
center_frequency = get_center_frequency()
# set TimeCAPoly
ss_zd_s = get_image_col_spacing_zdt()
eta_mid = ss_zd_s * float(out_sicd.ImageData.SCPPixel.Col)
sicd.RMA.INCA.TimeCAPoly = Poly1DType(
Coefs=[first_line_relative_start+eta_mid, ss_zd_s/out_sicd.Grid.Col.SS])
range_time_scp = sicd.RMA.INCA.R_CA_SCP*2/speed_of_light
# get velocity polynomial
vel_poly = sicd.Position.ARPPoly.derivative(1, return_poly=True)
# We pick a single velocity magnitude at closest approach to represent
# the entire burst. This is valid, since the magnitude of the velocity
# changes very little.
vm_ca = numpy.linalg.norm(vel_poly(sicd.RMA.INCA.TimeCAPoly[0]))
az_rate_times, az_rate_t0, k_a_poly = get_azimuth_fm_estimates(start)
# find the closest fm rate polynomial
az_rate_poly_ind = int(numpy.argmin(numpy.abs(az_rate_times - sicd.RMA.INCA.TimeCAPoly[0])))
az_rate_poly = Poly1DType(Coefs=k_a_poly[az_rate_poly_ind])
dr_ca_poly = az_rate_poly.shift(t_0=az_rate_t0[az_rate_poly_ind] - range_time_scp,
alpha=2/speed_of_light,
return_poly=False)
r_ca = numpy.array([sicd.RMA.INCA.R_CA_SCP, 1], dtype=numpy.float64)
sicd.RMA.INCA.DRateSFPoly = numpy.reshape(
-numpy.convolve(dr_ca_poly, r_ca)*(speed_of_light/(2*center_frequency*vm_ca*vm_ca)),
(-1, 1))
# Doppler Centroid
dc_est_times, dc_t0, data_dc_poly = get_doppler_estimates(start)
# find the closest doppler centroid polynomial
            dc_poly_ind = int(numpy.argmin(numpy.abs(dc_est_times - sicd.RMA.INCA.TimeCAPoly[0])))
import pytest
import numpy as np
import numpy.testing as npt
import pandas as pd
import pandas.testing as pdt
import networkx as nx
from mossspider import NetworkTMLE
@pytest.fixture
def sm_network():
"""Loads a small network for short test runs and checks of data set creations"""
G = nx.Graph()
G.add_nodes_from([(1, {'W': 1, 'A': 1, 'Y': 1, 'C': 1}),
(2, {'W': 0, 'A': 0, 'Y': 0, 'C': -1}),
(3, {'W': 0, 'A': 1, 'Y': 0, 'C': 5}),
(4, {'W': 0, 'A': 0, 'Y': 1, 'C': 0}),
(5, {'W': 1, 'A': 0, 'Y': 0, 'C': 0}),
(6, {'W': 1, 'A': 0, 'Y': 1, 'C': 0}),
(7, {'W': 0, 'A': 1, 'Y': 0, 'C': 10}),
(8, {'W': 0, 'A': 0, 'Y': 0, 'C': -5}),
(9, {'W': 1, 'A': 1, 'Y': 0, 'C': -5})])
G.add_edges_from([(1, 2), (1, 3), (1, 9),
(2, 3), (2, 6),
(3, 4),
(4, 7),
(5, 7), (5, 9)
])
return G
@pytest.fixture
def r_network():
"""Loads network from the R library tmlenet for comparison"""
df = pd.read_csv("tests/tmlenet_r_data.csv")
df['IDs'] = df['IDs'].str[1:].astype(int)
df['NETID_split'] = df['Net_str'].str.split()
G = nx.DiGraph()
G.add_nodes_from(df['IDs'])
for i, c in zip(df['IDs'], df['NETID_split']):
if type(c) is list:
for j in c:
G.add_edge(i, int(j[1:]))
# Adding attributes
for node in G.nodes():
G.nodes[node]['W'] = np.int(df.loc[df['IDs'] == node, 'W1'])
G.nodes[node]['A'] = np.int(df.loc[df['IDs'] == node, 'A'])
G.nodes[node]['Y'] = np.int(df.loc[df['IDs'] == node, 'Y'])
return G
class TestNetworkTMLE:
def test_error_node_ids(self):
G = nx.Graph()
G.add_nodes_from([(1, {'A': 1, 'Y': 1}), (2, {'A': 0, 'Y': 1}), ("N", {'A': 1, 'Y': 0}), (4, {'A': 0, 'Y': 0})])
with pytest.raises(ValueError):
NetworkTMLE(network=G, exposure='A', outcome='Y')
def test_error_self_loops(self):
G = nx.Graph()
G.add_nodes_from([(1, {'A': 1, 'Y': 1}), (2, {'A': 0, 'Y': 1}), (3, {'A': 1, 'Y': 0}), (4, {'A': 0, 'Y': 0})])
G.add_edges_from([(1, 1), (1, 2), (3, 4)])
with pytest.raises(ValueError):
NetworkTMLE(network=G, exposure='A', outcome='Y')
def test_error_nonbinary_a(self):
G = nx.Graph()
G.add_nodes_from([(1, {'A': 2, 'Y': 1}), (2, {'A': 5, 'Y': 1}), (3, {'A': 1, 'Y': 0}), (4, {'A': 0, 'Y': 0})])
with pytest.raises(ValueError):
NetworkTMLE(network=G, exposure='A', outcome='Y')
def test_error_degree_restrictions(self, r_network):
with pytest.raises(ValueError):
NetworkTMLE(network=r_network, exposure='A', outcome='Y', degree_restrict=2)
with pytest.raises(ValueError):
NetworkTMLE(network=r_network, exposure='A', outcome='Y', degree_restrict=[0, 1, 2])
with pytest.raises(ValueError):
NetworkTMLE(network=r_network, exposure='A', outcome='Y', degree_restrict=[2, 0])
def test_error_fit_gimodel(self, r_network):
tmle = NetworkTMLE(network=r_network, exposure='A', outcome='Y')
# tmle.exposure_model('W')
tmle.exposure_map_model('W', distribution=None)
tmle.outcome_model('A + W')
with pytest.raises(ValueError):
tmle.fit(p=0.0, samples=10)
def test_error_fit_gsmodel(self, r_network):
tmle = NetworkTMLE(network=r_network, exposure='A', outcome='Y')
tmle.exposure_model('W')
# tmle.exposure_map_model('W', distribution=None)
tmle.outcome_model('A + W')
with pytest.raises(ValueError):
tmle.fit(p=0.0, samples=10)
def test_error_gs_distributions(self, r_network):
tmle = NetworkTMLE(network=r_network, exposure='A', outcome='Y')
with pytest.raises(ValueError):
tmle.exposure_map_model('W', measure='mean', distribution=None)
with pytest.raises(ValueError):
tmle.exposure_map_model('W', measure='mean', distribution='multinomial')
def test_error_fit_qmodel(self, r_network):
tmle = NetworkTMLE(network=r_network, exposure='A', outcome='Y')
tmle.exposure_model('W')
tmle.exposure_map_model('W', distribution=None)
# tmle.outcome_model('A + W')
with pytest.raises(ValueError):
tmle.fit(p=0.0, samples=10)
def test_error_p_bound(self, r_network):
tmle = NetworkTMLE(network=r_network, exposure='A', outcome='Y')
tmle.exposure_model('W')
tmle.exposure_map_model('W', distribution=None)
tmle.outcome_model('A + W')
# For single 'p'
with pytest.raises(ValueError):
tmle.fit(p=1.5, samples=10)
# For multiple 'p'
with pytest.raises(ValueError):
tmle.fit(p=[0.1, 1.5, 0.1,
0.1, 0.1, 0.1,
0.1, 0.1, 0.1], samples=100)
def test_error_p_type(self, r_network):
tmle = NetworkTMLE(network=r_network, exposure='A', outcome='Y')
tmle.exposure_model('W')
tmle.exposure_map_model('W', distribution=None)
tmle.outcome_model('A + W')
with pytest.raises(ValueError):
tmle.fit(p=5, samples=10)
def test_error_summary(self, r_network):
tmle = NetworkTMLE(network=r_network, exposure='A', outcome='Y')
tmle.exposure_model('W')
tmle.exposure_map_model('W', distribution=None)
tmle.outcome_model('A + W')
with pytest.raises(ValueError):
tmle.summary()
def test_df_creation(self, sm_network):
columns = ["_original_id_", "W", "A", "Y", "A_sum", "A_mean", "W_sum", "W_mean", "degree"]
expected = pd.DataFrame([[1, 1, 1, 1, 2, 2/3, 1, 1/3, 3],
[2, 0, 0, 0, 2, 2/3, 2, 2/3, 3],
[3, 0, 1, 0, 1, 1/3, 1, 1/3, 3],
[4, 0, 0, 1, 2, 1, 0, 0, 2],
[5, 1, 0, 0, 2, 1, 1, 1/2, 2],
[6, 1, 0, 1, 0, 0, 0, 0, 1],
[7, 0, 1, 0, 0, 0, 1, 1/2, 2],
[8, 0, 0, 0, 0, 0, 0, 0, 0],
[9, 1, 1, 0, 1, 1/2, 2, 1, 2]],
columns=columns,
index=[0, 1, 2, 3, 4, 5, 6, 7, 8])
tmle = NetworkTMLE(network=sm_network, exposure='A', outcome='Y')
created = tmle.df
# Checking that expected is the same as the created
assert tmle._continuous_outcome is False
pdt.assert_frame_equal(expected,
created[columns],
check_dtype=False)
def test_df_creation_restricted(self, sm_network):
expected = pd.DataFrame([[1, 1, 1, 2, 2/3, 1, 1/3, 3],
[0, 0, 0, 2, 2/3, 2, 2/3, 3],
[0, 1, 0, 1, 1/3, 1, 1/3, 3],
[0, 0, 1, 2, 1, 0, 0, 2],
[1, 0, 0, 2, 1, 1, 1/2, 2],
[1, 0, 1, 0, 0, 0, 0, 1],
[0, 1, 0, 0, 0, 1, 1/2, 2],
[0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 0, 1, 1/2, 2, 1, 2]],
columns=["W", "A", "Y", "A_sum", "A_mean", "W_sum", "W_mean", "degree"],
index=[0, 1, 2, 3, 4, 5, 6, 7, 8])
expected_r = pd.DataFrame([[0, 0, 1, 2, 1, 0, 0, 2],
[1, 0, 0, 2, 1, 1, 1/2, 2],
[1, 0, 1, 0, 0, 0, 0, 1],
[0, 1, 0, 0, 0, 1, 1/2, 2],
[0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 0, 1, 1/2, 2, 1, 2]],
columns=["W", "A", "Y", "A_sum", "A_mean", "W_sum", "W_mean", "degree"],
index=[3, 4, 5, 6, 7, 8])
tmle = NetworkTMLE(network=sm_network, exposure='A', outcome='Y', degree_restrict=[0, 2])
created = tmle.df
created_r = tmle.df_restricted
# Checking that expected is the same as the created
pdt.assert_frame_equal(expected,
created[["W", "A", "Y", "A_sum", "A_mean", "W_sum", "W_mean", "degree"]],
check_dtype=False)
pdt.assert_frame_equal(expected_r,
created_r[["W", "A", "Y", "A_sum", "A_mean", "W_sum", "W_mean", "degree"]],
check_dtype=False)
def test_restricted_number(self, sm_network):
tmle = NetworkTMLE(network=sm_network, exposure='A', outcome='Y', degree_restrict=[0, 2])
n_created = tmle.df.shape[0]
n_created_r = tmle.df_restricted.shape[0]
assert 6 == n_created_r
assert 3 == n_created - n_created_r
tmle = NetworkTMLE(network=sm_network, exposure='A', outcome='Y', degree_restrict=[1, 3])
n_created = tmle.df.shape[0]
n_created_r = tmle.df_restricted.shape[0]
assert 8 == n_created_r
assert 1 == n_created - n_created_r
def test_continuous_processing(self):
G = nx.Graph()
y_list = [1, -1, 5, 0, 0, 0, 10, -5]
G.add_nodes_from([(1, {'A': 0, 'Y': y_list[0]}), (2, {'A': 1, 'Y': y_list[1]}),
(3, {'A': 1, 'Y': y_list[2]}), (4, {'A': 0, 'Y': y_list[3]}),
(5, {'A': 1, 'Y': y_list[4]}), (6, {'A': 1, 'Y': y_list[5]}),
(7, {'A': 0, 'Y': y_list[6]}), (8, {'A': 0, 'Y': y_list[7]})])
tmle = NetworkTMLE(network=G, exposure='A', outcome='Y', continuous_bound=0.0001)
# Checking all flagged parts are correct
assert tmle._continuous_outcome is True
assert tmle._continuous_min_ == -5.0001
assert tmle._continuous_max_ == 10.0001
assert tmle._cb_ == 0.0001
# Checking that TMLE bounding works as intended
maximum = 10.0001
minimum = -5.0001
y_bound = (np.array(y_list) - minimum) / (maximum - minimum)
pdt.assert_series_equal(pd.Series(y_bound, index=[0, 1, 2, 3, 4, 5, 6, 7]),
tmle.df['Y'],
check_dtype=False, check_names=False)
def test_df_creation_continuous(self, sm_network):
expected = pd.DataFrame([[1, 1, 2, 1, 3],
[0, 0, 2, 2, 3],
[0, 1, 1, 1, 3],
[0, 0, 2, 0, 2],
[1, 0, 2, 1, 2],
[1, 0, 0, 0, 1],
[0, 1, 0, 1, 2],
[0, 0, 0, 0, 0],
[1, 1, 1, 2, 2]],
columns=["W", "A", "A_sum", "W_sum", "degree"],
index=[0, 1, 2, 3, 4, 5, 6, 7, 8])
expected["C"] = [4.00001333e-01, 2.66669778e-01, 6.66664444e-01, 3.33335556e-01, 3.33335556e-01,
3.33335556e-01, 9.99993333e-01, 6.66657778e-06, 6.66657778e-06]
tmle = NetworkTMLE(network=sm_network, exposure='A', outcome='C', continuous_bound=0.0001)
created = tmle.df
# Checking that expected is the same as the created
assert tmle._continuous_outcome is True
pdt.assert_frame_equal(expected[["W", "A", "C", "A_sum", "W_sum", "degree"]],
created[["W", "A", "C", "A_sum", "W_sum", "degree"]],
check_dtype=False)
def test_no_consecutive_ids(self):
G = nx.Graph()
G.add_nodes_from([(1, {'W': 1, 'A': 1, 'Y': 1}), (2, {'W': 0, 'A': 0, 'Y': 0}),
(3, {'W': 0, 'A': 1, 'Y': 0}), (4, {'W': 0, 'A': 0, 'Y': 1}),
(5, {'W': 1, 'A': 0, 'Y': 0}), (7, {'W': 1, 'A': 0, 'Y': 1}),
(9, {'W': 0, 'A': 1, 'Y': 0}), (11, {'W': 0, 'A': 0, 'Y': 0}),
(12, {'W': 1, 'A': 1, 'Y': 0})])
G.add_edges_from([(1, 2), (1, 3), (1, 12), (2, 3), (2, 7),
(3, 4), (4, 9), (5, 9), (5, 12)])
expected = pd.DataFrame([[1, 1, 1, 1, 2, 2 / 3, 1, 1 / 3, 3],
[2, 0, 0, 0, 2, 2/3, 2, 2/3, 3],
[3, 0, 1, 0, 1, 1 / 3, 1, 1 / 3, 3],
[4, 0, 0, 1, 2, 1, 0, 0, 2],
[5, 1, 0, 0, 2, 1, 1, 1 / 2, 2],
[7, 1, 0, 1, 0, 0, 0, 0, 1],
                                 [9, 0, 1, 0, 0, 0, 1, 1 / 2, 2],
[11, 0, 0, 0, 0, 0, 0, 0, 0],
[12, 1, 1, 0, 1, 1 / 2, 2, 1, 2]
],
columns=["_original_id_", "W", "A", "Y", "A_sum",
"A_mean", "W_sum", "W_mean", "degree"],
index=[0, 1, 2, 3, 4, 5, 6, 7, 8])
tmle = NetworkTMLE(network=G, exposure='A', outcome='Y')
created = tmle.df.sort_values(by='_original_id_').reset_index()
pdt.assert_frame_equal(expected[["W", "A", "Y", "A_sum", "A_mean", "W_sum", "W_mean", "degree"]],
created[["W", "A", "Y", "A_sum", "A_mean", "W_sum", "W_mean", "degree"]],
check_dtype=False)
def test_df_creation_nonparametric(self, sm_network):
columns = ["_original_id_", "A", "A_map1", "A_map2", "A_map3"]
expected = pd.DataFrame([[1, 1, 0, 1, 1],
[2, 0, 1, 1, 0],
[3, 1, 1, 0, 0],
[4, 0, 1, 1, 0],
[5, 0, 1, 1, 0],
[6, 0, 0, 0, 0],
[7, 1, 0, 0, 0],
[8, 0, 0, 0, 0],
[9, 1, 1, 0, 0]],
columns=columns,
index=[0, 1, 2, 3, 4, 5, 6, 7, 8])
tmle = NetworkTMLE(network=sm_network, exposure='A', outcome='Y')
created = tmle.df.sort_values(by='_original_id_').reset_index()
# Checking that expected is the same as the created
pdt.assert_frame_equal(expected[columns], created[columns], check_dtype=False)
def test_summary_measures_creation(self, sm_network):
columns = ["_original_id_", "A_sum", "A_mean", "A_var", "W_sum", "W_mean", "W_var"]
neighbors_w = {1: np.array([0, 0, 1]), 2: np.array([0, 1, 1]), 3: np.array([0, 0, 1]), 4: np.array([0, 0]),
5: np.array([0, 1]), 6: np.array([0]), 7: np.array([0, 1]), 9: np.array([1, 1])}
neighbors_a = {1: np.array([0, 1, 1]), 2: np.array([0, 1, 1]), 3: np.array([0, 0, 1]), 4: np.array([1, 1]),
5: np.array([1, 1]), 6: np.array([0]), 7: np.array([0, 0]), 9: np.array([0, 1])}
expected = pd.DataFrame([[1, np.sum(neighbors_a[1]), np.mean(neighbors_a[1]), np.var(neighbors_a[1]),
np.sum(neighbors_w[1]), np.mean(neighbors_w[1]), np.var(neighbors_w[1])],
[2, np.sum(neighbors_a[2]), np.mean(neighbors_a[2]), np.var(neighbors_a[2]),
np.sum(neighbors_w[2]), np.mean(neighbors_w[2]), np.var(neighbors_w[2])],
[3, np.sum(neighbors_a[3]), np.mean(neighbors_a[3]), np.var(neighbors_a[3]),
np.sum(neighbors_w[3]), np.mean(neighbors_w[3]), np.var(neighbors_w[3])],
[4, np.sum(neighbors_a[4]), np.mean(neighbors_a[4]), np.var(neighbors_a[4]),
np.sum(neighbors_w[4]), np.mean(neighbors_w[4]), np.var(neighbors_w[4])],
[5, np.sum(neighbors_a[5]), np.mean(neighbors_a[5]), np.var(neighbors_a[5]),
np.sum(neighbors_w[5]), np.mean(neighbors_w[5]), np.var(neighbors_w[5])],
[6, np.sum(neighbors_a[6]), np.mean(neighbors_a[6]), np.var(neighbors_a[6]),
np.sum(neighbors_w[6]), np.mean(neighbors_w[6]), np.var(neighbors_w[6])],
[7, np.sum(neighbors_a[7]), np.mean(neighbors_a[7]), np.var(neighbors_a[7]),
np.sum(neighbors_w[7]), np.mean(neighbors_w[7]), np.var(neighbors_w[7])],
[8, 0, 0, 0, 0, 0, 0], # Isolates are = 0
[9, np.sum(neighbors_a[9]), np.mean(neighbors_a[9]), np.var(neighbors_a[9]),
                                  np.sum(neighbors_w[9]), np.mean(neighbors_w[9]), np.var(neighbors_w[9])]],
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
import matplotlib
#matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
from matplotlib.animation import FuncAnimation
from matplotlib.colors import colorConverter
import os
import ast
from scipy import ndimage
import paras_dorsoventral as dors
import paras_rostrocaudal as ros
import colourmix as colmix
matplotlib.rcParams.update({'font.size': 18})
time = 4000.0
slicenr = 5
tstep=50.0
axis = 'dorso'#'dorso'
runThrough = 'space'
scale = 0.5
StaticDataPath = 'cooltube_0.5_1'
if axis == 'dorso':
fig = plt.figure(figsize = [6.5, 8])
if axis == 'rostro':
fig = plt.figure(figsize = [4, 14])
ax1 = fig.add_subplot(111)
#DORSOVENTRAL
# generate the colors for your colormap
colorP = colorConverter.to_rgba(dors.colours[0])
colorO = colorConverter.to_rgba(dors.colours[1])
colorN = colorConverter.to_rgba(dors.colours[2])
white='white'
# make the colormaps
cmapP = matplotlib.colors.LinearSegmentedColormap.from_list('my_cmapP',[white,colorP],256)
cmapO = matplotlib.colors.LinearSegmentedColormap.from_list('my_cmapO',[white,colorO],256)
cmapN = matplotlib.colors.LinearSegmentedColormap.from_list('my_cmapN',[white,colorN],256)
cmapP._init()
cmapO._init()
cmapN._init() # create the _lut array, with rgba values
# create your alpha array and fill the colormap with them.
# here it is progressive, but you can create whatever you want
dAlphas = np.linspace(0, 0.8, cmapO.N+3)
cmapP._lut[:,-1] = dAlphas
cmapO._lut[:,-1] = dAlphas
cmapN._lut[:,-1] = dAlphas
#ROSTROCAUDAL
colorFB = colorConverter.to_rgba(ros.colours[0])
colorMB = colorConverter.to_rgba(ros.colours[1])
colorHB = colorConverter.to_rgba(ros.colours[2])
white='white'
# make the colormaps
cmapFB = matplotlib.colors.LinearSegmentedColormap.from_list('my_cmapFB',[white,colorFB],256)
cmapMB = matplotlib.colors.LinearSegmentedColormap.from_list('my_cmapMB',[white,colorMB],256)
cmapHB = matplotlib.colors.LinearSegmentedColormap.from_list('my_cmapHB',[white,colorHB],256)
cmapFB._init()
cmapMB._init()
cmapHB._init() # create the _lut array, with rgba values
# create your alpha array and fill the colormap with them.
# here it is progressive, but you can create whatever you want
rAlphas = np.linspace(0, 0.8, cmapMB.N+3)
cmapFB._lut[:,-1] = rAlphas
cmapMB._lut[:,-1] = rAlphas
cmapHB._lut[:,-1] = rAlphas
def compare(matrices):
dimy = len(matrices[0])
dimx = len(matrices[0][0])
dimz = len(matrices[0][0][0])
show= np.zeros_like(matrices)
for i in range(dimy):
for j in range(dimx):
for k in range(dimz):
comparevalues =[m[i][j][k] for m in matrices]
gene = np.argmax(comparevalues)
show[gene][i][j][k] = np.max(comparevalues)
return show
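# A vectorized sketch equivalent to the voxel loop above (assuming `matrices` stacks
# into a single array of shape (n_genes, dimy, dimx, dimz) and numpy >= 1.15 for
# np.put_along_axis):
#
#     stacked = np.asarray(matrices)
#     winners = stacked.argmax(axis=0)[None, ...]
#     show = np.zeros_like(stacked)
#     np.put_along_axis(show, winners, stacked.max(axis=0, keepdims=True), axis=0)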
def getCut(axis,t=0,s=0,dataPath=StaticDataPath):
if axis == 'dorso':
dorsoDir = dataPath + '/dorso/'
dcFile = dorsoDir + 'T%1.1f' %t + '_dComp.npy'
pFile = dorsoDir + 'T%1.1f' %t + '_P.npy'
oFile = dorsoDir + 'T%1.1f' %t + '_O.npy'
nFile = dorsoDir + 'T%1.1f' %t + '_N.npy'
if os.path.isfile(dcFile):
dComp = np.load(dcFile)
else:
pArray = np.load(pFile)
oArray = np.load(oFile)
nArray = np.load(nFile)
dComp = compare([pArray,oArray,nArray])
np.save(dcFile,dComp)
arrA = dComp[0]
arrB = dComp[1]
arrC = dComp[2]
arrA = arrA[s,:,:]
arrB = arrB[s,:,:]
arrC = arrC[s,:,:]
if axis == 'rostro':
rostroDir = dataPath + '/rostro/'
rcFile = rostroDir + 'T%1.1f' %t + '_rComp.npy'
FBFile = rostroDir + 'T%1.1f' %t + '_FB.npy'
MBFile = rostroDir + 'T%1.1f' %t + '_MB.npy'
HBFile = rostroDir + 'T%1.1f' %t + '_HB.npy'
if os.path.isfile(rcFile):
rComp = np.load(rcFile)
else:
FBArray = np.load(FBFile)
MBArray = np.load(MBFile)
HBArray = np.load(HBFile)
rComp = compare([FBArray,MBArray,HBArray])
np.save(rcFile,rComp)
arrA = rComp[0]
arrB = rComp[1]
arrC = rComp[2]
# arrA = arrA[:,s,:]
# arrB = arrB[:,s,:]
# arrC = arrC[:,s,:]
arrA = arrA[:,:,s]
arrB = arrB[:,:,s]
arrC = arrC[:,:,s]
if axis == 'rostro2':
rostroDir = dataPath + '/rostro/'
rcFile = rostroDir + 'T%1.1f' %t + '_rComp.npy'
FBFile = rostroDir + 'T%1.1f' %t + '_FB'
MBFile = rostroDir + 'T%1.1f' %t + '_MB'
HBFile = rostroDir + 'T%1.1f' %t + '_HB'
if os.path.isfile(rcFile):
rComp = np.load(rcFile)
else:
FBArray = np.load(FBFile)
MBArray = np.load(MBFile)
HBArray = np.load(HBFile)
rComp = compare([FBArray,MBArray,HBArray])
np.save(rcFile,rComp)
arrA = rComp[0]
arrB = rComp[1]
arrC = rComp[2]
arrA = arrA[:,s,:]
arrB = arrB[:,s,:]
arrC = arrC[:,s,:]
return arrA,arrB,arrC
def getTS(ts, rate, t=time, s=slicenr):
"""ts = what is looped over in the animation"""
if ts == 'time':
t_ret = rate*tstep
s_ret = slicenr
if ts =='space':
t_ret = t
s_ret = rate
return t_ret,s_ret
def update(rate):
ax1.clear()
t,s = getTS(runThrough,rate)
#print(rate,t,s)
cut = getCut(axis,t,s)
ax1.set_title("slice nr %d time %1.1f" %(s,t))
#if t < len(data[0][0]):
#ax1.matshow(data[:,t,:])
#t+=1
#else:
#t=0
# ax1.imshow(arrFB[rate,:,:],interpolation='bilinear',cmap=cmap1)
# ax1.imshow(arrMB[rate,:,:],interpolation='bilinear',cmap=cmap2)
# ax1.imshow(arrHB[rate,:,:],interpolation='bilinear',cmap=cmap3)
if axis == 'dorso':
cmap1,cmap2,cmap3 = cmapP,cmapO,cmapN
size = 500
if axis == 'rostro':
cmap1,cmap2,cmap3 = cmapFB,cmapMB,cmapHB
size =100
# ax1.imshow(cut[0],cmap=cmap1)
# ax1.imshow(cut[1],cmap=cmap2)
# ax1.imshow(cut[2],cmap=cmap3)
"""
ax1.imshow(cut[0],interpolation='nearest',cmap=cmap1)
ax1.imshow(cut[1],interpolation='nearest',cmap=cmap2)
ax1.imshow(cut[2],interpolation='nearest',cmap=cmap3)
"""
mapper1 = matplotlib.cm.ScalarMappable(cmap=cmap1)
mapper2 = matplotlib.cm.ScalarMappable(cmap=cmap2)
mapper3 = matplotlib.cm.ScalarMappable(cmap=cmap3)
c1= np.where(cut[0])
colors1 = mapper1.to_rgba(cut[0][c1])
c2= np.where(cut[1])
colors2 = mapper2.to_rgba(cut[1][c2])
c3= np.where(cut[2])
colors3 = mapper3.to_rgba(cut[2][c3])
ax1.set_aspect('auto')
ax1.set_xlim([-1,16])
ax1.scatter(c1[0],c1[1],c=colors1,s=size)
ax1.scatter(c2[0],c2[1],c=colors2,s=size)
ax1.scatter(c3[0],c3[1],c=colors3, s=size)
#plt.savefig('unsinnfig/t%d'% rate)
def plotSlices(time, dorsnr, rosnr, rosnr2, plotmethod='circle',save=True, dataPath=StaticDataPath):
# fug = plt.figure(figsize=(8, 6))
# gs = gridspec.GridSpec(1, 2, width_ratios=[3, 1])
# axDors = fug.add_subplot(gs[0])
# axRos = fug.add_subplot(gs[1])
plt.close("all")
fug = plt.figure(figsize = [7.5, 8])
fag = plt.figure(figsize = [10, 14])
axDors = fug.add_subplot(1,1,1)
axRos = fag.add_subplot(1,2,1)
axRos2 = fag.add_subplot(1,2,2)
axDors.set_title("DV slice at \n x = %d µm, t = %1.1f " %(dorsnr*10/scale, time))
axDors.set_xlabel("y [µm]")
dxticks = np.arange(0,20,5)
axDors.xaxis.set_ticks(dxticks)
axDors.xaxis.set_ticklabels(['%d' %(x*10/scale) for x in dxticks])
dyticks = np.arange(0,65,10)
axDors.yaxis.set_ticks(dyticks)
axDors.yaxis.set_ticklabels(['%d' %(y*10/scale) for y in dyticks])
axDors.set_ylabel("z [µm]")
axRos.set_title("RC slice at \n z = %d µm, t = %1.1f " %(rosnr*10/scale, time))
rxticks = dxticks
axRos.xaxis.set_ticks(rxticks)
axRos.xaxis.set_ticklabels(['%d' %(x*10/scale) for x in rxticks])
ryticks = np.arange(0,65,10)
axRos.yaxis.set_ticks(ryticks)
axRos.yaxis.set_ticklabels(['%d' %(y*10/scale) for y in ryticks])
axRos.set_xlabel("y [µm]")
axRos.set_ylabel("x [µm]")
axRos2.set_title("RC slice at \n y = %d µm, t = %1.1f " %(rosnr*10/scale, time))
r2xticks = np.arange(0,65,10)
axRos2.xaxis.set_ticks(r2xticks)
axRos2.xaxis.set_ticklabels(['%d' %(x*10/scale) for x in r2xticks])
r2yticks = np.arange(0,65,10)
axRos2.yaxis.set_ticks(r2yticks)
axRos2.yaxis.set_ticklabels(['%d' %(y*10/scale) for y in r2yticks])
axRos2.set_xlabel("z [µm]")
axRos2.set_ylabel("x [µm]")
dataDors = getCut('dorso', t= time, s=dorsnr, dataPath = dataPath)
dataRos = getCut('rostro', t= time, s=rosnr, dataPath = dataPath)
dataRos2 = getCut('rostro2', t= time, s=rosnr2,dataPath = dataPath)
for axtype in ['rostro','dorso']:
if axtype == 'dorso':
cmap1,cmap2,cmap3 = cmapP,cmapO,cmapN
size = 500
ax = axDors
cut =dataDors
if axtype == 'rostro':
cmap1,cmap2,cmap3 = cmapFB,cmapMB,cmapHB
size =100
ax=axRos
ax2=axRos2
cut= dataRos
cut2=dataRos2
if plotmethod == 'circle':
mapper1 = matplotlib.cm.ScalarMappable(cmap=cmap1)
mapper2 = matplotlib.cm.ScalarMappable(cmap=cmap2)
mapper3 = matplotlib.cm.ScalarMappable(cmap=cmap3)
c1= np.where(cut[0])
colors1 = mapper1.to_rgba(cut[0][c1])
c2= np.where(cut[1])
colors2 = mapper2.to_rgba(cut[1][c2])
c3= np.where(cut[2])
colors3 = mapper3.to_rgba(cut[2][c3])
ax.set_aspect('auto')
#ax.set_xlim([-1,16])
ax.scatter(c1[0],c1[1],c=colors1,s=size)
ax.scatter(c2[0],c2[1],c=colors2,s=size)
ax.scatter(c3[0],c3[1],c=colors3, s=size)
if plotmethod == 'square':
# ax1.imshow(cut[0],cmap=cmap1)
# ax1.imshow(cut[1],cmap=cmap2)
# ax1.imshow(cut[2],cmap=cmap3)
if axtype == 'rostro':
ax.imshow(cut[0][:-1,:-1],interpolation='nearest',cmap=cmap1,origin = 'lower')
ax.imshow(cut[1][:-1,:-1],interpolation='nearest',cmap=cmap2,origin = 'lower')
ax.imshow(cut[2][:-1,:-1],interpolation='nearest',cmap=cmap3,origin = 'lower')
ax2.imshow(ndimage.rotate(cut2[0][:-1,:-1],-90)[:,:-1],interpolation='nearest',cmap=cmap1,origin = 'lower')
ax2.imshow(ndimage.rotate(cut2[1][:-1,:-1],-90)[:,:-1],interpolation='nearest',cmap=cmap2,origin = 'lower')
ax2.imshow(ndimage.rotate(cut2[2][:-1,:-1],-90)[:,:-1],interpolation='nearest',cmap=cmap3,origin = 'lower')
# rcut0 = ndimage.rotate(cut[0], 90)
# rcut1 = ndimage.rotate(cut[1], 90)
# rcut2 = ndimage.rotate(cut[2], 90)
# ax.imshow(rcut0,interpolation='nearest',cmap=cmap1)
# ax.imshow(rcut1,interpolation='nearest',cmap=cmap2)
# ax.imshow(rcut2,interpolation='nearest',cmap=cmap3)
if axtype == 'dorso':
rcut0 = ndimage.rotate(cut[0], -90)
rcut1 = ndimage.rotate(cut[1], -90)
rcut2 = ndimage.rotate(cut[2], -90)
ax.imshow(rcut0[:-1,1:],interpolation='nearest',cmap=cmap1,origin = 'lower')
ax.imshow(rcut1[:-1,1:],interpolation='nearest',cmap=cmap2,origin = 'lower')
ax.imshow(rcut2[:-1,1:],interpolation='nearest',cmap=cmap3,origin = 'lower')
if save ==True:
fug.savefig(dataPath + '/allPictures/T%1.1f_DV%d.png' %(time,dorsnr) )
fag.savefig(dataPath + '/allPictures/T%1.1f_RC%d_%d.png' %(time,rosnr,rosnr2) )
def plotSliceMix(plotFrom, time, dorsnr, rosnr, rosnr2,save=True):
    """Plot gene combinations with a different colour for each combination."""
    dataPath = plotFrom
wntDir = plotFrom + '/Wnt/'
shhDir = plotFrom + '/Shh/'
rostroDir = plotFrom + '/rostro/'
dorsoDir = plotFrom + '/dorso/'
mixDir = plotFrom + '/Mix/'
baseLevels = np.load(plotFrom + '/BaseLevels.npy')
allDir = plotFrom + '/allPictures/'
pFile = dorsoDir + 'T%1.1f' %time + '_P'
oFile = dorsoDir + 'T%1.1f' %time + '_O'
nFile = dorsoDir + 'T%1.1f' %time + '_N'
dcFile = dorsoDir + 'T%1.1f' %time + '_dComp.npy'
pArray =np.load(pFile +'.npy')
oArray =np.load(oFile +'.npy')
nArray =np.load(nFile +'.npy')
if os.path.isfile(dcFile):
dComp = np.load(dcFile)
else:
dComp = compare([pArray,oArray,nArray])
np.save(dcFile,dComp)
fbFile = rostroDir + 'T%1.1f' %time + '_FB'
mbFile = rostroDir + 'T%1.1f' %time + '_MB'
hbFile = rostroDir + 'T%1.1f' %time + '_HB'
rcFile = rostroDir + 'T%1.1f' %time + '_rComp.npy'
fbArray =np.load(fbFile +'.npy')
mbArray =np.load(mbFile +'.npy')
hbArray =np.load(hbFile +'.npy')
if os.path.isfile(rcFile):
rComp = np.load(rcFile)
else:
rComp = compare([fbArray,mbArray,hbArray])
np.save(rcFile,rComp)
dimX = len(rComp[0])
dimY = len(rComp[0][0])
dimZ = len(rComp[0][0][0])
mixArray = np.zeros((len(colmix.colours),dimX,dimY,dimZ))
i=0
for pon in dComp:
for fbmbhb in rComp:
an = np.transpose(np.nonzero(pon))
bn = np.transpose(np.nonzero(fbmbhb))
anl = an.tolist()
bnl = bn.tolist()
incommon = set(str(x) for x in anl) & set(str(y) for y in bnl)
incommon = np.asarray([ast.literal_eval(i) for i in incommon])
for coord in incommon:
#print(coord)
mixArray[i][coord[0]][coord[1]][coord[2]] = 1
i+=1
mixArray[mixArray==0] = np.nan
#plt.close("all")
fug = plt.figure(figsize = [7.5, 8])
fag = plt.figure(figsize = [10, 14])
axDors = fug.add_subplot(1,1,1)
axRos = fag.add_subplot(1,2,1)
axRos2 = fag.add_subplot(1,2,2)
axDors.set_title("DV slice at \n x = %d µm, t = %1.1f " %(dorsnr*10/scale, time))
axDors.set_xlabel("y [µm]")
dxticks = np.arange(0,20,5)
axDors.xaxis.set_ticks(dxticks)
axDors.xaxis.set_ticklabels(['%d' %(x*10/scale) for x in dxticks])
dyticks = np.arange(0,65,10)
axDors.yaxis.set_ticks(dyticks)
axDors.yaxis.set_ticklabels(['%d' %(y*10/scale) for y in dyticks])
axDors.set_ylabel("z [µm]")
axRos.set_title("RC slice at \n z = %d µm, t = %1.1f " %(rosnr*10/scale, time))
rxticks = dxticks
axRos.xaxis.set_ticks(rxticks)
axRos.xaxis.set_ticklabels(['%d' %(x*10/scale) for x in rxticks])
ryticks = np.arange(0,65,10)
axRos.yaxis.set_ticks(ryticks)
axRos.yaxis.set_ticklabels(['%d' %(y*10/scale) for y in ryticks])
axRos.set_xlabel("y [µm]")
axRos.set_ylabel("x [µm]")
axRos2.set_title("RC slice at \n y = %d µm, t = %1.1f " %(rosnr*10/scale, time))
r2xticks = np.arange(0,65,10)
axRos2.xaxis.set_ticks(r2xticks)
axRos2.xaxis.set_ticklabels(['%d' %(x*10/scale) for x in r2xticks])
r2yticks = np.arange(0,65,10)
axRos2.yaxis.set_ticks(r2yticks)
axRos2.yaxis.set_ticklabels(['%d' %(y*10/scale) for y in r2yticks])
axRos2.set_xlabel("z [µm]")
axRos2.set_ylabel("x [µm]")
for axtype in ['rostro','dorso']:
for i in range(len(mixArray)):
#for i in range(3):
colours = colmix.colours[i]
#colours2 = colmix.colours[i+1]
myCmap = matplotlib.colors.LinearSegmentedColormap.from_list('my_cmapP',[colours,colours],256)
#myCmap2 = matplotlib.colors.LinearSegmentedColormap.from_list('my_cmapP',['white',colours2],256)
print(i, colours)
if axtype == 'dorso':
size = 500
ax = axDors
arr = getMixCut(axtype,mixArray[i],s=dorsnr)
arr=(np.flip(np.transpose(arr),axis=1))[:-1,1:]
cut = np.ma.masked_where(np.isnan(arr),arr)
#cut= np.flip(cut)
ax.set_aspect('equal')
if axtype == 'rostro':
size =100
ax=axRos
ax2=axRos2
ax.set_aspect('equal')
ax2.set_aspect('equal')
arr= getMixCut('rostro',mixArray[i],s=rosnr)
arr2=getMixCut('rostro2',mixArray[i],s=rosnr2)
cut= np.ma.masked_where(np.isnan(arr),arr)
            cut2 = np.ma.masked_where(np.isnan(arr2), arr2)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Aug 9 17:02:59 2018
@author: bruce
compared with version 1.6.4
the update is from correlation coefficient
"""
import pandas as pd
import numpy as np
from scipy import fftpack
from scipy import signal
import matplotlib.pyplot as plt
from matplotlib.colors import LinearSegmentedColormap
def correlation_matrix(corr_mx, cm_title):
from matplotlib import pyplot as plt
from matplotlib import cm as cm
fig = plt.figure()
ax1 = fig.add_subplot(111)
#cmap = cm.get_cmap('jet', 30)
cax = ax1.matshow(corr_mx, cmap='gray')
#cax = ax1.imshow(df.corr(), interpolation="nearest", cmap=cmap)
fig.colorbar(cax)
ax1.grid(False)
plt.title(cm_title)
#plt.title('cross correlation of test and retest')
ylabels=['T1','T2','T3','T4','T6','T7','T8','T9', 'T11', 'T12', 'T13', 'T14', 'T15', 'T16', 'T17', 'T18', 'T19', 'T20', 'T21', 'T22', 'T23', 'T25']
xlabels=['R1','R2','R3','R4','R6','R7','R8','R9', 'R11', 'R12', 'R13', 'R14', 'R15', 'R16', 'R17', 'R18', 'R19', 'R20', 'R21', 'R22', 'R23', 'R25']
ax1.set_xticks(np.arange(len(xlabels)))
ax1.set_yticks(np.arange(len(ylabels)))
ax1.set_xticklabels(xlabels,fontsize=6)
ax1.set_yticklabels(ylabels,fontsize=6)
# Add colorbar, make sure to specify tick locations to match desired ticklabels
#fig.colorbar(cax, ticks=[.75,.8,.85,.90,.95,1])
# show digit in matrix
corr_mx_array = np.asarray(corr_mx)
for i in range(22):
for j in range(22):
c = corr_mx_array[j,i]
ax1.text(i, j, round(c,2), va='center', ha='center')
plt.show()
def correlation_matrix_01(corr_mx, cm_title):
# find the maximum in each row
# input corr_mx is a dataframe
# need to convert it into a array first
#otherwise it is not working
temp = np.asarray(corr_mx)
output = (temp == temp.max(axis=1)[:,None]) # along rows
fig = plt.figure()
ax1 = fig.add_subplot(111)
#cmap = cm.get_cmap('jet', 30)
cs = ax1.matshow(output, cmap='gray')
#cax = ax1.imshow(df.corr(), interpolation="nearest", cmap=cmap)
fig.colorbar(cs)
ax1.grid(False)
plt.title(cm_title)
ylabels=['T1','T2','T3','T4','T6','T7','T8','T9', 'T11', 'T12', 'T13', 'T14', 'T15', 'T16', 'T17', 'T18', 'T19', 'T20', 'T21', 'T22', 'T23', 'T25']
xlabels=['R1','R2','R3','R4','R6','R7','R8','R9', 'R11', 'R12', 'R13', 'R14', 'R15', 'R16', 'R17', 'R18', 'R19', 'R20', 'R21', 'R22', 'R23', 'R25']
ax1.set_xticks(np.arange(len(xlabels)))
ax1.set_yticks(np.arange(len(ylabels)))
ax1.set_xticklabels(xlabels,fontsize=6)
ax1.set_yticklabels(ylabels,fontsize=6)
plt.show()
def correlation_matrix_rank(corr_mx, cm_title):
temp = corr_mx
#output = (temp == temp.max(axis=1)[:,None]) # along row
output = temp.rank(axis=1, ascending=False)
fig, ax1 = plt.subplots()
im1 = ax1.matshow(output, cmap=plt.cm.Wistia)
#cs = ax1.matshow(output)
fig.colorbar(im1)
ax1.grid(False)
ylabels=['T1','T2','T3','T4','T6','T7','T8','T9', 'T11', 'T12', 'T13', 'T14', 'T15', 'T16', 'T17', 'T18', 'T19', 'T20', 'T21', 'T22', 'T23', 'T25']
xlabels=['R1','R2','R3','R4','R6','R7','R8','R9', 'R11', 'R12', 'R13', 'R14', 'R15', 'R16', 'R17', 'R18', 'R19', 'R20', 'R21', 'R22', 'R23', 'R25']
ax1.set_xticks(np.arange(len(xlabels)))
ax1.set_yticks(np.arange(len(ylabels)))
ax1.set_xticklabels(xlabels,fontsize=6)
ax1.set_yticklabels(ylabels,fontsize=6)
plt.title(cm_title)
# show digit in matrix
output = np.asarray(output)
for i in range(22):
for j in range(22):
c = output[j,i]
ax1.text(i, j, int(c), va='center', ha='center')
plt.show()
def correlation_matrix_comb(corr_mx, cm_title):
fig, (ax2, ax3) = plt.subplots(1, 2)
ylabels=['T1','T2','T3','T4','T6','T7','T8','T9', 'T11', 'T12', 'T13', 'T14', 'T15', 'T16', 'T17', 'T18', 'T19', 'T20', 'T21', 'T22', 'T23', 'T25']
xlabels=['R1','R2','R3','R4','R6','R7','R8','R9', 'R11', 'R12', 'R13', 'R14', 'R15', 'R16', 'R17', 'R18', 'R19', 'R20', 'R21', 'R22', 'R23', 'R25']
'''
# graph 1 grayscale
im1 = ax1.matshow(corr_mx, cmap='gray')
# colorbar need numpy version 1.13.1
#fig.colorbar(im1, ax=ax1)
ax1.grid(False)
ax1.set_title(cm_title)
ax1.set_xticks(np.arange(len(xlabels)))
ax1.set_yticks(np.arange(len(ylabels)))
ax1.set_xticklabels(xlabels,fontsize=6)
ax1.set_yticklabels(ylabels,fontsize=6)
# show digit in matrix
corr_mx_array = np.asarray(corr_mx)
for i in range(22):
for j in range(22):
c = corr_mx_array[j,i]
ax1.text(i, j, round(c,2), va='center', ha='center')
'''
# graph 2 yellowscale
corr_mx_rank = corr_mx.rank(axis=1, ascending=False)
cmap_grey = LinearSegmentedColormap.from_list('mycmap', ['white', 'black'])
im2 = ax2.matshow(corr_mx, cmap='viridis')
# colorbar need numpy version 1.13.1
fig.colorbar(im2, ax=ax2)
ax2.grid(False)
ax2.set_title(cm_title)
ax2.set_xticks(np.arange(len(xlabels)))
ax2.set_yticks(np.arange(len(ylabels)))
ax2.set_xticklabels(xlabels,fontsize=6)
ax2.set_yticklabels(ylabels,fontsize=6)
# Add colorbar, make sure to specify tick locations to match desired ticklabels
# show digit in matrix
corr_mx_rank = np.asarray(corr_mx_rank)
for i in range(22):
for j in range(22):
c = corr_mx_rank[j,i]
ax2.text(i, j, int(c), va='center', ha='center')
# graph 3
# find the maximum in each row
# input corr_mx is a dataframe
# need to convert it into a array first
#otherwise it is not working
temp = np.asarray(corr_mx)
output = (temp == temp.max(axis=1)[:,None]) # along rows
im3 = ax3.matshow(output, cmap='gray')
# colorbar need numpy version 1.13.1
#fig.colorbar(im3, ax=ax3)
ax3.grid(False)
ax3.set_title(cm_title)
ax3.set_xticks(np.arange(len(xlabels)))
ax3.set_yticks(np.arange(len(ylabels)))
ax3.set_xticklabels(xlabels,fontsize=6)
ax3.set_yticklabels(ylabels,fontsize=6)
plt.show()
def correlation_matrix_tt_01(corr_mx, cm_title):
# find the maximum in each row
# input corr_mx is a dataframe
# need to convert it into a array first
#otherwise it is not working
temp = np.asarray(corr_mx)
output = (temp == temp.max(axis=1)[:,None]) # along rows
fig = plt.figure()
ax1 = fig.add_subplot(111)
#cmap = cm.get_cmap('jet', 30)
cax = ax1.matshow(output, cmap='gray')
#cax = ax1.imshow(df.corr(), interpolation="nearest", cmap=cmap)
fig.colorbar(cax)
ax1.grid(False)
plt.title(cm_title)
ylabels=['T1','T2','T3','T4','T6','T7','T8','T9', 'T11', 'T12', 'T13', 'T14', 'T15', 'T16', 'T17', 'T18', 'T19', 'T20', 'T21', 'T22', 'T23', 'T25']
xlabels=['T1','T2','T3','T4','T6','T7','T8','T9', 'T11', 'T12', 'T13', 'T14', 'T15', 'T16', 'T17', 'T18', 'T19', 'T20', 'T21', 'T22', 'T23', 'T25']
ax1.set_xticks(np.arange(len(xlabels)))
ax1.set_yticks(np.arange(len(ylabels)))
ax1.set_xticklabels(xlabels,fontsize=6)
ax1.set_yticklabels(ylabels,fontsize=6)
plt.show()
def correlation_matrix_rr_01(corr_mx, cm_title):
# find the maximum in each row
# input corr_mx is a dataframe
# need to convert it into a array first
#otherwise it is not working
temp = np.asarray(corr_mx)
output = (temp == temp.max(axis=1)[:,None]) # along rows
fig = plt.figure()
ax1 = fig.add_subplot(111)
#cmap = cm.get_cmap('jet', 30)
cax = ax1.matshow(output, cmap='gray')
#cax = ax1.imshow(df.corr(), interpolation="nearest", cmap=cmap)
fig.colorbar(cax)
ax1.grid(False)
plt.title(cm_title)
ylabels=['R1','R2','R3','R4','R6','R7','R8','R9', 'R11', 'R12', 'R13', 'R14', 'R15', 'R16', 'R17', 'R18', 'R19', 'R20', 'R21', 'R22', 'R23', 'R25']
xlabels=['R1','R2','R3','R4','R6','R7','R8','R9', 'R11', 'R12', 'R13', 'R14', 'R15', 'R16', 'R17', 'R18', 'R19', 'R20', 'R21', 'R22', 'R23', 'R25']
ax1.set_xticks(np.arange(len(xlabels)))
ax1.set_yticks(np.arange(len(ylabels)))
ax1.set_xticklabels(xlabels,fontsize=6)
ax1.set_yticklabels(ylabels,fontsize=6)
plt.show()
# shrink value for correlation matrix
# in order to use colormap -> 10 scale
def shrink_value_03_1(corr_in1):
corr_out1 = corr_in1.copy()
    # DataFrame.copy() must be used here; otherwise modifying the output would also modify the input
for i in range (22):
for j in range(22):
if corr_in1.iloc[i, j] < 0.3:
corr_out1.iloc[i, j] = 0.3
return corr_out1
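# Equivalent pandas one-liner (assuming the frame is the same 22x22 matrix the loop
# iterates over):  corr_out1 = corr_in1.clip(lower=0.3)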
def shrink_value_05_1(corr_in2):
corr_out2 = corr_in2.copy()
    # DataFrame.copy() must be used here; otherwise modifying the output would also modify the input
for i2 in range (22):
for j2 in range(22):
if corr_in2.iloc[i2, j2] < 0.5:
corr_out2.iloc[i2, j2] = 0.5
return corr_out2
# not used!!!!!!!!!!!!
# normalize the complex signal series
def normalize_complex_arr(a):
    a_oo = a - a.real.min() - 1j*a.imag.min()  # offset to the origin
return a_oo/np.abs(a_oo).max()
def improved_PCC(signal_in):
output_corr = pd.DataFrame()
for i in range(44):
row_pcc_notremovemean = []
for j in range(44):
sig_1 = signal_in.iloc[i, :]
sig_2 = signal_in.iloc[j, :]
            pcc_notremovemean = np.abs(np.sum(sig_1 * sig_2) / np.sqrt(np.sum(sig_1*sig_1) * np.sum(sig_2*sig_2)))
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Oct 2 10:08:42 2019
@author: amandaash
"""
import numpy as np
import numpy.linalg as lin
import matplotlib.pyplot as plt
def masses_string(W1, W2, L1, L2, L3, L):
#initial guess
sintheta1 = 1
sintheta2 = 1
sintheta3 = 1
costheta1 = 1
costheta2 = 1
costheta3 = 1
T1 = 1
T2 = 1
T3 = 1
f1 = (L1*costheta1)+(L2*costheta2)+(L3*costheta3)-L
f2 = (L1*sintheta1)+(L2*sintheta2)+(-L3*sintheta3)
f3 = (sintheta1**2)+(costheta1**2) -1
f4 = (sintheta2**2)+(costheta2**2) -1
f5 = (sintheta3**2)+(costheta3**2) -1
f6 = (-T1*costheta1) + (T2*costheta2)
f7 = (-T1*sintheta1) + (T2*sintheta2) + W1
f8 = (-T2*costheta2)+(T3*costheta3)
f9 = (-T2*sintheta2)-(T3*sintheta3)+W2
f_vector = np.array([[f1],[f2],[f3],[f4],[f5],[f6],[f7],[f8],[f9]])
jacobian = np.array([[0,0,0,0,L1,0,L2,0,L3],[0,0,0,L1,0,L2,0,-L3,0],\
[0,0,0,2*sintheta1,2*costheta1,0,0,0,0],\
[0,0,0,0,0,2*sintheta2,2*costheta2,0,0],\
[0,0,0,0,0,0,0,2*sintheta3,2*costheta3],\
[-costheta1,costheta2,0,0,-T1,0,T2,0,0],\
[-sintheta1,sintheta2,0,-T1,0,T2,0,0,0],\
[0,-costheta2,costheta3,0,0,0,-T2,0,T3],\
[0,-sintheta2,-sintheta3,0,0,-T2,0,-T3,0]])
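# ordering of the unknowns in dx and of the Jacobian columns:
# [T1, T2, T3, sin(theta1), cos(theta1), sin(theta2), cos(theta2), sin(theta3), cos(theta3)]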
N = 1000
for iteration in range(N):
dx = (-lin.inv(jacobian)).dot(f_vector)
dT1 = float(dx[0])
dT2 = float(dx[1])
dT3 = float(dx[2])
d_sintheta1 = float(dx[3])
d_costheta1 = float(dx[4])
d_sintheta2 = float(dx[5])
d_costheta2 = float(dx[6])
d_sintheta3 = float(dx[7])
d_costheta3 = float(dx[8])
T1 = T1 + dT1
T2 = T2 + dT2
T3 = T3 + dT3
sintheta1 = sintheta1 + d_sintheta1
costheta1 = costheta1 + d_costheta1
sintheta2 = sintheta2 + d_sintheta2
costheta2 = costheta2 + d_costheta2
sintheta3 = sintheta3 + d_sintheta3
costheta3 = costheta3 + d_costheta3
f1 = (L1*costheta1)+(L2*costheta2)+(L3*costheta3)-L
f2 = (L1*sintheta1)+(L2*sintheta2)+(-L3*sintheta3)
f3 = (sintheta1**2)+(costheta1**2) -1
f4 = (sintheta2**2)+(costheta2**2) -1
f5 = (sintheta3**2)+(costheta3**2) -1
f6 = (-T1*costheta1) + (T2*costheta2)
f7 = (-T1*sintheta1) + (T2*sintheta2) + W1
f8 = (-T2*costheta2)+(T3*costheta3)
f9 = (-T2*sintheta2)-(T3*sintheta3)+W2
f_vector = np.array([[f1],[f2],[f3],[f4],[f5],[f6],[f7],[f8],[f9]])
jacobian = np.array([[0,0,0,0,L1,0,L2,0,L3],[0,0,0,L1,0,L2,0,-L3,0],\
[0,0,0,2*sintheta1,2*costheta1,0,0,0,0],\
[0,0,0,0,0,2*sintheta2,2*costheta2,0,0],\
[0,0,0,0,0,0,0,2*sintheta3,2*costheta3],\
[-costheta1,costheta2,0,0,-T1,0,T2,0,0],\
[-sintheta1,sintheta2,0,-T1,0,T2,0,0,0],\
[0,-costheta2,costheta3,0,0,0,-T2,0,T3],\
[0,-sintheta2,-sintheta3,0,0,-T2,0,-T3,0]])
tolerance = 10**-6
if abs(dT1) <= tolerance and abs(dT2) <= tolerance and abs(dT3) <= tolerance\
and abs(d_sintheta1) <= tolerance and abs(d_costheta1) <= tolerance\
and abs(d_sintheta2) <= tolerance and abs(d_costheta2) <= tolerance\
and abs(d_sintheta3) <= tolerance and abs(d_costheta3) <= tolerance:
theta1 = np.arctan(sintheta1/costheta1)*(180/np.pi)
theta2 = np.arctan(sintheta2/costheta2)*(180/np.pi)
theta3 = np.arctan(sintheta3/costheta3)*(180/np.pi)
break
return T1, T2, T3, theta1, theta2, theta3
test = masses_string(10,20,3,4,4,8)
print(test)
weight1 = np.arange(1,100,1)
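# A hedged sketch (not part of the original file) of how the sweep started above
# might continue: solve the system for each W1 with the other parameters held fixed
# and plot the resulting tension T1. Convergence is not guaranteed for every weight,
# since masses_string restarts from the same crude initial guess each time.
T1_sweep = []
for w1 in weight1:
    T1_w, T2_w, T3_w, th1, th2, th3 = masses_string(w1, 20, 3, 4, 4, 8)
    T1_sweep.append(T1_w)
plt.plot(weight1, T1_sweep)
plt.xlabel('$W_1$')
plt.ylabel('$T_1$')
plt.show()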
# This module has been generated automatically from space group information
# obtained from the Computational Crystallography Toolbox
#
"""
Space groups
This module contains a list of all the 230 space groups that can occur in
a crystal. The variable space_groups contains a dictionary that maps
space group numbers and space group names to the corresponding space
group objects.
.. moduleauthor:: <NAME> <<EMAIL>>
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2013 The Mosaic Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file LICENSE.txt, distributed as part of this software.
#-----------------------------------------------------------------------------
import numpy as N
class SpaceGroup(object):
"""
Space group
All possible space group objects are created in this module. Other
modules should access these objects through the dictionary
space_groups rather than create their own space group objects.
"""
def __init__(self, number, symbol, transformations):
"""
:param number: the number assigned to the space group by
international convention
:type number: int
:param symbol: the Hermann-Mauguin space-group symbol as used
in PDB and mmCIF files
:type symbol: str
:param transformations: a list of space group transformations,
each consisting of a tuple of three
integer arrays (rot, tn, td), where
rot is the rotation matrix and tn/td
are the numerator and denominator of the
translation vector. The transformations
are defined in fractional coordinates.
:type transformations: list
"""
self.number = number
self.symbol = symbol
self.transformations = transformations
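# cache, per transformation, the transposed rotation matrix (used to map Miller
# indices) and the component-wise phase factor exp(-2j*pi*tn/td); both are used
# by symmetryEquivalentMillerIndices below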
self.transposed_rotations = N.array([N.transpose(t[0])
for t in transformations])
self.phase_factors = N.exp(N.array([(-2j*N.pi*t[1])/t[2]
for t in transformations]))
def __repr__(self):
return "SpaceGroup(%d, %s)" % (self.number, repr(self.symbol))
def __len__(self):
"""
:return: the number of space group transformations
:rtype: int
"""
return len(self.transformations)
def symmetryEquivalentMillerIndices(self, hkl):
"""
:param hkl: a set of Miller indices
:type hkl: Scientific.N.array_type
:return: a tuple (miller_indices, phase_factor) of two arrays
of length equal to the number of space group
transformations. miller_indices contains the Miller
indices of each reflection equivalent by symmetry to the
reflection hkl (including hkl itself as the first element).
phase_factor contains the phase factors that must be applied
to the structure factor of reflection hkl to obtain the
structure factor of the symmetry equivalent reflection.
:rtype: tuple
"""
hkls = N.dot(self.transposed_rotations, hkl)
p = N.multiply.reduce(self.phase_factors**hkl, -1)
return hkls, p
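# Example usage (an illustrative sketch; it assumes the space_groups dictionary
# below has been fully populated by the rest of this module):
#
#     sg = space_groups['P 1 21/c 1']                       # space group No. 14
#     hkls, phases = sg.symmetryEquivalentMillerIndices(N.array([1, 2, 3]))
#     # hkls has shape (len(sg), 3); phases has shape (len(sg),)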
space_groups = {}
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(1, 'P 1', transformations)
space_groups[1] = sg
space_groups['P 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(2, 'P -1', transformations)
space_groups[2] = sg
space_groups['P -1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(3, 'P 1 2 1', transformations)
space_groups[3] = sg
space_groups['P 1 2 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(4, 'P 1 21 1', transformations)
space_groups[4] = sg
space_groups['P 1 21 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(5, 'C 1 2 1', transformations)
space_groups[5] = sg
space_groups['C 1 2 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(6, 'P 1 m 1', transformations)
space_groups[6] = sg
space_groups['P 1 m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(7, 'P 1 c 1', transformations)
space_groups[7] = sg
space_groups['P 1 c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(8, 'C 1 m 1', transformations)
space_groups[8] = sg
space_groups['C 1 m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(9, 'C 1 c 1', transformations)
space_groups[9] = sg
space_groups['C 1 c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(10, 'P 1 2/m 1', transformations)
space_groups[10] = sg
space_groups['P 1 2/m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(11, 'P 1 21/m 1', transformations)
space_groups[11] = sg
space_groups['P 1 21/m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(12, 'C 1 2/m 1', transformations)
space_groups[12] = sg
space_groups['C 1 2/m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(13, 'P 1 2/c 1', transformations)
space_groups[13] = sg
space_groups['P 1 2/c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(14, 'P 1 21/c 1', transformations)
space_groups[14] = sg
space_groups['P 1 21/c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(15, 'C 1 2/c 1', transformations)
space_groups[15] = sg
space_groups['C 1 2/c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(16, 'P 2 2 2', transformations)
space_groups[16] = sg
space_groups['P 2 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(17, 'P 2 2 21', transformations)
space_groups[17] = sg
space_groups['P 2 2 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(18, 'P 21 21 2', transformations)
space_groups[18] = sg
space_groups['P 21 21 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(19, 'P 21 21 21', transformations)
space_groups[19] = sg
space_groups['P 21 21 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(20, 'C 2 2 21', transformations)
space_groups[20] = sg
space_groups['C 2 2 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(21, 'C 2 2 2', transformations)
space_groups[21] = sg
space_groups['C 2 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(22, 'F 2 2 2', transformations)
space_groups[22] = sg
space_groups['F 2 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(23, 'I 2 2 2', transformations)
space_groups[23] = sg
space_groups['I 2 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(24, 'I 21 21 21', transformations)
space_groups[24] = sg
space_groups['I 21 21 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(25, 'P m m 2', transformations)
space_groups[25] = sg
space_groups['P m m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(26, 'P m c 21', transformations)
space_groups[26] = sg
space_groups['P m c 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(27, 'P c c 2', transformations)
space_groups[27] = sg
space_groups['P c c 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(28, 'P m a 2', transformations)
space_groups[28] = sg
space_groups['P m a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(29, 'P c a 21', transformations)
space_groups[29] = sg
space_groups['P c a 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(30, 'P n c 2', transformations)
space_groups[30] = sg
space_groups['P n c 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(31, 'P m n 21', transformations)
space_groups[31] = sg
space_groups['P m n 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(32, 'P b a 2', transformations)
space_groups[32] = sg
space_groups['P b a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(33, 'P n a 21', transformations)
space_groups[33] = sg
space_groups['P n a 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(34, 'P n n 2', transformations)
space_groups[34] = sg
space_groups['P n n 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(35, 'C m m 2', transformations)
space_groups[35] = sg
space_groups['C m m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(36, 'C m c 21', transformations)
space_groups[36] = sg
space_groups['C m c 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(37, 'C c c 2', transformations)
space_groups[37] = sg
space_groups['C c c 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(38, 'A m m 2', transformations)
space_groups[38] = sg
space_groups['A m m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(39, 'A b m 2', transformations)
space_groups[39] = sg
space_groups['A b m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(40, 'A m a 2', transformations)
space_groups[40] = sg
space_groups['A m a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(41, 'A b a 2', transformations)
space_groups[41] = sg
space_groups['A b a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(42, 'F m m 2', transformations)
space_groups[42] = sg
space_groups['F m m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(43, 'F d d 2', transformations)
space_groups[43] = sg
space_groups['F d d 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(44, 'I m m 2', transformations)
space_groups[44] = sg
space_groups['I m m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(45, 'I b a 2', transformations)
space_groups[45] = sg
space_groups['I b a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(46, 'I m a 2', transformations)
space_groups[46] = sg
space_groups['I m a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(47, 'P m m m', transformations)
space_groups[47] = sg
space_groups['P m m m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(48, 'P n n n :2', transformations)
space_groups[48] = sg
space_groups['P n n n :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(49, 'P c c m', transformations)
space_groups[49] = sg
space_groups['P c c m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(50, 'P b a n :2', transformations)
space_groups[50] = sg
space_groups['P b a n :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(51, 'P m m a', transformations)
space_groups[51] = sg
space_groups['P m m a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(52, 'P n n a', transformations)
space_groups[52] = sg
space_groups['P n n a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(53, 'P m n a', transformations)
space_groups[53] = sg
space_groups['P m n a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(54, 'P c c a', transformations)
space_groups[54] = sg
space_groups['P c c a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(55, 'P b a m', transformations)
space_groups[55] = sg
space_groups['P b a m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(56, 'P c c n', transformations)
space_groups[56] = sg
space_groups['P c c n'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(57, 'P b c m', transformations)
space_groups[57] = sg
space_groups['P b c m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(58, 'P n n m', transformations)
space_groups[58] = sg
space_groups['P n n m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(59, 'P m m n :2', transformations)
space_groups[59] = sg
space_groups['P m m n :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(60, 'P b c n', transformations)
space_groups[60] = sg
space_groups['P b c n'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(61, 'P b c a', transformations)
space_groups[61] = sg
space_groups['P b c a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(62, 'P n m a', transformations)
space_groups[62] = sg
space_groups['P n m a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(63, 'C m c m', transformations)
space_groups[63] = sg
space_groups['C m c m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(64, 'C m c a', transformations)
space_groups[64] = sg
space_groups['C m c a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(65, 'C m m m', transformations)
space_groups[65] = sg
space_groups['C m m m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(66, 'C c c m', transformations)
space_groups[66] = sg
space_groups['C c c m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(67, 'C m m a', transformations)
space_groups[67] = sg
space_groups['C m m a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(68, 'C c c a :2', transformations)
space_groups[68] = sg
space_groups['C c c a :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(69, 'F m m m', transformations)
space_groups[69] = sg
space_groups['F m m m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,3,3])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,0,3])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,1,1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(70, 'F d d d :2', transformations)
space_groups[70] = sg
space_groups['F d d d :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(71, 'I m m m', transformations)
space_groups[71] = sg
space_groups['I m m m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(72, 'I b a m', transformations)
space_groups[72] = sg
space_groups['I b a m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(73, 'I b c a', transformations)
space_groups[73] = sg
space_groups['I b c a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(74, 'I m m a', transformations)
space_groups[74] = sg
space_groups['I m m a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(75, 'P 4', transformations)
space_groups[75] = sg
space_groups['P 4'] = sg
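# Illustrative usage sketch (an assumption, not part of the generated data):
# the transformations list built above for 'P 4' can expand one fractional
# position into its symmetry-equivalent images, reduced into the unit cell
# with a mod-1 step, using the example helper defined earlier.
#
#     point = N.array([0.1, 0.2, 0.3])
#     images = [_apply_operation_example(rot, num, den, point) % 1.0
#               for rot, num, den in transformations]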
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,3])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(76, 'P 41', transformations)
space_groups[76] = sg
space_groups['P 41'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(77, 'P 42', transformations)
space_groups[77] = sg
space_groups['P 42'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,3])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(78, 'P 43', transformations)
space_groups[78] = sg
space_groups['P 43'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(79, 'I 4', transformations)
space_groups[79] = sg
space_groups['I 4'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(80, 'I 41', transformations)
space_groups[80] = sg
space_groups['I 41'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(81, 'P -4', transformations)
space_groups[81] = sg
space_groups['P -4'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(82, 'I -4', transformations)
space_groups[82] = sg
space_groups['I -4'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(83, 'P 4/m', transformations)
space_groups[83] = sg
space_groups['P 4/m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(84, 'P 42/m', transformations)
space_groups[84] = sg
space_groups['P 42/m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(85, 'P 4/n :2', transformations)
space_groups[85] = sg
space_groups['P 4/n :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(86, 'P 42/n :2', transformations)
space_groups[86] = sg
space_groups['P 42/n :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(87, 'I 4/m', transformations)
space_groups[87] = sg
space_groups['I 4/m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-3,-3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,5,5])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(88, 'I 41/a :2', transformations)
space_groups[88] = sg
space_groups['I 41/a :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(89, 'P 4 2 2', transformations)
space_groups[89] = sg
space_groups['P 4 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(90, 'P 4 21 2', transformations)
space_groups[90] = sg
space_groups['P 4 21 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,3])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,3])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(91, 'P 41 2 2', transformations)
space_groups[91] = sg
space_groups['P 41 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(92, 'P 41 21 2', transformations)
space_groups[92] = sg
space_groups['P 41 21 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(93, 'P 42 2 2', transformations)
space_groups[93] = sg
space_groups['P 42 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(94, 'P 42 21 2', transformations)
space_groups[94] = sg
space_groups['P 42 21 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,3])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,3])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(95, 'P 43 2 2', transformations)
space_groups[95] = sg
space_groups['P 43 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(96, 'P 43 21 2', transformations)
space_groups[96] = sg
space_groups['P 43 21 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(97, 'I 4 2 2', transformations)
space_groups[97] = sg
space_groups['I 4 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(98, 'I 41 2 2', transformations)
space_groups[98] = sg
space_groups['I 41 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(99, 'P 4 m m', transformations)
space_groups[99] = sg
space_groups['P 4 m m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(100, 'P 4 b m', transformations)
space_groups[100] = sg
space_groups['P 4 b m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(101, 'P 42 c m', transformations)
space_groups[101] = sg
space_groups['P 42 c m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(102, 'P 42 n m', transformations)
space_groups[102] = sg
space_groups['P 42 n m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(103, 'P 4 c c', transformations)
space_groups[103] = sg
space_groups['P 4 c c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(104, 'P 4 n c', transformations)
space_groups[104] = sg
space_groups['P 4 n c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(105, 'P 42 m c', transformations)
space_groups[105] = sg
space_groups['P 42 m c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(106, 'P 42 b c', transformations)
space_groups[106] = sg
space_groups['P 42 b c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(107, 'I 4 m m', transformations)
space_groups[107] = sg
space_groups['I 4 m m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(108, 'I 4 c m', transformations)
space_groups[108] = sg
space_groups['I 4 c m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(109, 'I 41 m d', transformations)
space_groups[109] = sg
space_groups['I 41 m d'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(110, 'I 41 c d', transformations)
space_groups[110] = sg
space_groups['I 41 c d'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(111, 'P -4 2 m', transformations)
space_groups[111] = sg
space_groups['P -4 2 m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(112, 'P -4 2 c', transformations)
space_groups[112] = sg
space_groups['P -4 2 c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(113, 'P -4 21 m', transformations)
space_groups[113] = sg
space_groups['P -4 21 m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(114, 'P -4 21 c', transformations)
space_groups[114] = sg
space_groups['P -4 21 c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(115, 'P -4 m 2', transformations)
space_groups[115] = sg
space_groups['P -4 m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(116, 'P -4 c 2', transformations)
space_groups[116] = sg
space_groups['P -4 c 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(117, 'P -4 b 2', transformations)
space_groups[117] = sg
space_groups['P -4 b 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(118, 'P -4 n 2', transformations)
space_groups[118] = sg
space_groups['P -4 n 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(119, 'I -4 m 2', transformations)
space_groups[119] = sg
space_groups['I -4 m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(120, 'I -4 c 2', transformations)
space_groups[120] = sg
space_groups['I -4 c 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(121, 'I -4 2 m', transformations)
space_groups[121] = sg
space_groups['I -4 2 m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(122, 'I -4 2 d', transformations)
space_groups[122] = sg
space_groups['I -4 2 d'] = sg
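# Illustrative helper (an assumption, not part of the original table): each transformation stored
# above is a (rotation matrix, translation numerator, translation denominator) triple, so applying
# it to a fractional coordinate presumably means rot.dot(x) + trans_num/trans_den.
def _apply_symmetry_op(rot, trans_num, trans_den, frac_coord):
    """Apply one stored symmetry operation to a fractional coordinate (illustrative sketch)."""
    return N.dot(rot, frac_coord) + trans_num.astype(float) / trans_den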
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
def getCynByAxis(radius = 1, heightStart = 0, heightEnd = 5, \
offset = [0, 0, 0], devision = 20, mainAxis = 'z'):
    '''Return meshgrid coordinate arrays for a cylinder surface of the given radius between
    heightStart and heightEnd, offset by `offset`, with its main axis along `mainAxis`.'''
import numpy as np
mainAxis = mainAxis.lower()
theta = np.linspace(0, 2*np.pi, devision)
cx = np.array([radius * np.cos(theta)])
cz = np.array([heightStart, heightEnd])
cx, cz = np.meshgrid(cx, cz)
    cy = np.array([radius * np.sin(theta)])
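    # Assumed minimal completion of this helper (not the original implementation): mesh cy against
    # the height range, order the grids for the requested main axis, and apply the offset.
    cy, _ = np.meshgrid(cy, np.array([heightStart, heightEnd]))
    if mainAxis == 'z':
        x, y, z = cx, cy, cz
    elif mainAxis == 'y':
        x, y, z = cy, cz, cx
    else:
        x, y, z = cz, cx, cy
    return x + offset[0], y + offset[1], z + offset[2]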
from __future__ import division, print_function, absolute_import
import os
import traceback
import scipy.misc as misc
import matplotlib.pyplot as plt
import numpy as np
import glob
import pandas as pd
import random
from PIL import Image, ImageOps
def get_data_A1A4(data_path, split_load):
# Getting images (x data)
imgname_train_A1 = np.array([glob.glob(data_path+'/CVPPP2017_LCC_training/TrainingSplits/A1'+str(h)+'/*.png') for h in split_load[0]])
imgname_train_A4 = np.array([glob.glob(data_path+'/CVPPP2017_LCC_training/TrainingSplits/A4'+str(h)+'/*.png') for h in split_load[0]])
imgname_val_A1 = np.array([glob.glob(data_path+'/CVPPP2017_LCC_training/TrainingSplits/A1'+str(split_load[1])+'/*.png')])
imgname_val_A4 = np.array([glob.glob(data_path+'/CVPPP2017_LCC_training/TrainingSplits/A4'+str(split_load[1])+'/*.png')])
imgname_test_A1 = np.array([glob.glob(data_path+'/CVPPP2017_LCC_training/TrainingSplits/A1'+str(split_load[2])+'/*.png')])
imgname_test_A4 = np.array([glob.glob(data_path+'/CVPPP2017_LCC_training/TrainingSplits/A4'+str(split_load[2])+'/*.png')])
filelist_train_A1 = list(np.sort(imgname_train_A1.flat)[1::2])
filelist_train_A4 = list(np.sort(imgname_train_A4.flat)[1::2])
filelist_train_A1_fg = list(np.sort(imgname_train_A1.flat)[0::2])
filelist_train_A4_fg = list(np.sort(imgname_train_A4.flat)[0::2])
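    # Presumably each split directory holds paired <name>_fg.png / <name>_rgb.png files, so after
    # sorting, even indices ([0::2]) are the foreground masks and odd indices ([1::2]) the RGB
    # images; the negative slices below keep only the bare file name and the set identifier from
    # each path (the exact offsets assume the A1/A4 file-name lengths).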
filelist_train_A1_img = np.array([np.array(filelist_train_A1[h][-16:]) for h in range(0,len(filelist_train_A1))])
filelist_train_A4_img = np.array([np.array(filelist_train_A4[h][-17:]) for h in range(0,len(filelist_train_A4))])
filelist_train_A1_set = np.array([np.array(filelist_train_A1[h][-20:-18]) for h in range(0,len(filelist_train_A1))])
filelist_train_A4_set = np.array([np.array(filelist_train_A4[h][-20:-18]) for h in range(0,len(filelist_train_A4))])
filelist_val_A1 = list(np.sort(imgname_val_A1.flat)[1::2])
filelist_val_A4 = list(np.sort(imgname_val_A4.flat)[1::2])
filelist_val_A1_fg = list(np.sort(imgname_val_A1.flat)[0::2])
filelist_val_A4_fg = list(np.sort(imgname_val_A4.flat)[0::2])
filelist_val_A1_img = np.array([np.array(filelist_val_A1[h][-16:]) for h in range(0,len(filelist_val_A1))])
filelist_val_A4_img = np.array([np.array(filelist_val_A4[h][-17:]) for h in range(0,len(filelist_val_A4))])
filelist_val_A1_set = np.array([np.array(filelist_val_A1[h][-20:-18]) for h in range(0,len(filelist_val_A1))])
filelist_val_A4_set = np.array([np.array(filelist_val_A4[h][-20:-18]) for h in range(0,len(filelist_val_A4))])
filelist_test_A1 = list(np.sort(imgname_test_A1.flat)[1::2])
filelist_test_A4 = list(np.sort(imgname_test_A4.flat)[1::2])
filelist_test_A1_fg = list(np.sort(imgname_test_A1.flat)[0::2])
filelist_test_A4_fg = list(np.sort(imgname_test_A4.flat)[0::2])
filelist_test_A1_img = np.array([np.array(filelist_test_A1[h][-16:]) for h in range(0,len(filelist_test_A1))])
    filelist_test_A4_img = np.array([np.array(filelist_test_A4[h][-17:]) for h in range(0,len(filelist_test_A4))])
import torch
import torch.nn as nn
import numpy as np
import time
import custom_paths
from pathlib import Path
import utils
import sys
class ParallelLinear(nn.Module):
def __init__(self, n_parallel, in_features, out_features, act=None, weight_factor=1.0, weight_init_gain=1.0,
bias_init_gain=0.0, bias_init_mode='normal'):
super().__init__()
self.act = act
self.weight = nn.Parameter(torch.Tensor(n_parallel, in_features, out_features))
self.bias = nn.Parameter(torch.Tensor(n_parallel, out_features))
with torch.no_grad():
# maybe need to rescale for mean field / mup?
# maybe use mean field in a form that doesn't require changing the lr?
unif_range = np.sqrt(3) * np.sqrt(in_features) * weight_factor * weight_init_gain
self.weight.normal_(0.0, weight_init_gain)
if bias_init_mode == 'normal':
self.bias.normal_(0.0, 1.0)
elif bias_init_mode == 'uniform':
self.bias.uniform_(-np.sqrt(3), np.sqrt(3))
elif bias_init_mode == 'pos-unif':
self.bias.uniform_(0, np.sqrt(3))
elif bias_init_mode == 'neg-unif':
self.bias.uniform_(-np.sqrt(3), 0)
elif bias_init_mode == 'kink-unif':
                self.bias.uniform_(-np.sqrt(3), np.sqrt(3))  # upper bound assumed; the original line is truncated here
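    def forward(self, x):
        # Assumed forward pass (the original is not shown here): apply the n_parallel linear maps
        # in one batched matmul; x is expected to have shape (n_parallel, batch, in_features).
        out = torch.matmul(x, self.weight) + self.bias.unsqueeze(-2)
        return self.act(out) if self.act is not None else out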
import torch
import torch.nn as nn
import torch.nn.functional as F
import math
import numpy as np
device = "cuda" if torch.cuda.is_available() else "cpu"
class PrunedLinear(nn.Module):
def __init__(self, linear_module):
assert isinstance(linear_module, torch.nn.Linear), "Input Module is not a valid linear operator!"
super(PrunedLinear, self).__init__()
self.in_features = linear_module.in_features
self.out_features = linear_module.out_features
self.linear = linear_module
#self.linear = nn.Linear(in_features, out_features)
self.mask = np.ones([self.out_features, self.in_features])
m = self.in_features
n = self.out_features
self.sparsity = 1.0
self.finetune = False
self.extracted = False
# Initailization
#self.linear.weight.data.normal_(0, math.sqrt(2. / (m+n)))
def forward(self, x):
if not self.finetune:
device = torch.device('cuda')#, torch.distributed.get_rank())
self.gl_loss = self.compute_group_lasso_v2(device=device)
out = self.linear(x)
#out = quant8(out, None) # last layer should NOT be quantized
return out
def prune_by_percentage(self, q=5.0):
"""
        Pruning the weight parameters by threshold.
:param q: pruning percentile. 'q' percent of the least
significant weight parameters will be pruned.
"""
# get bounds
max = torch.max(torch.abs(self.linear.weight.data))
min = torch.min(torch.abs(self.linear.weight.data))
# calculate cutoff
cutoff = ((max - min) * (q / 100.0)) + min
# generate mask
self.mask = torch.abs(self.linear.weight.data) > cutoff
# prune the weights
self.linear.weight.data = self.linear.weight.float() * self.mask.float()
# calculate sparsity
self.sparsity = self.linear.weight.data.numel() - self.linear.weight.data.nonzero().size(0)
def prune_by_std(self, s=0.25):
"""
Pruning by a factor of the standard deviation value.
        :param s: (scalar) factor of the standard deviation value.
            Weight magnitudes below np.std(weight)*s
            will be pruned.
"""
# generate mask
self.mask = torch.abs(self.linear.weight.data) >= (torch.std(self.linear.weight.data)*s)
# prune the weights
self.linear.weight.data = self.linear.weight.data.float() * self.mask.float()
# calculate sparsity
self.sparsity = self.linear.weight.data.numel() - self.linear.weight.data.nonzero().size(0)
#print("WEIGHTS: ",self.linear.weight.data)
#print("MASK: ",self.mask)
def prune_towards_dilation(self):
# do nothing for the linear layers
mask = torch.tensor([True])
self.mask = torch.tensor(mask.repeat(self.out_features, self.in_features)).cuda()
def prune_towards_asym_dilation(self):
# do nothing for linear layers
mask = torch.tensor([True])
self.mask = torch.tensor(mask.repeat(self.out_features, self.in_features)).cuda()
def prune_structured_interfilter(self, q):
# get bounds
max = torch.max(torch.abs(self.linear.weight.data))
min = torch.min(torch.abs(self.linear.weight.data))
# calculate cutoff
cutoff = ((max - min) * (q / 100.0)) + min
# generate mask
means = torch.abs(self.linear.weight.data).mean(axis=(0))
mask = torch.tensor(torch.abs(means) > cutoff)
self.mask = torch.tensor(mask.repeat(self.out_features, 1)).cuda()
# prune the weights
self.linear.weight.data = self.linear.weight.float() * self.mask.float()
# calculate sparsity
self.sparsity = self.linear.weight.data.numel() - self.linear.weight.data.nonzero().size(0)
def prune_chunk(self, chunk_size = 32, q = 0.75):
last_chunk = self.out_features % chunk_size
n_chunks = self.out_features // chunk_size + (last_chunk != 0)
linear_mat = self.linear.weight.data
mask = torch.full(linear_mat.shape, True, dtype=bool).cuda()
cutoff = torch.std(linear_mat)*q
for chunk_idx in range(n_chunks):
if chunk_idx == n_chunks - 1 and last_chunk != 0:
current_chunk = linear_mat[chunk_idx * chunk_size:, :]
l1_norm = torch.sum(torch.abs(current_chunk), dim=0) / last_chunk
next_mask = (l1_norm > cutoff).repeat(last_chunk, 1)
mask[chunk_idx * chunk_size:, :] = torch.logical_and(mask[chunk_idx * chunk_size:, :], next_mask)
else:
current_chunk = linear_mat[chunk_idx * chunk_size:(chunk_idx + 1) * chunk_size, :]
l1_norm = torch.sum(torch.abs(current_chunk), dim=0) / chunk_size
next_mask = (l1_norm > cutoff).repeat(chunk_size, 1)
mask[chunk_idx * chunk_size:(chunk_idx + 1) * chunk_size, :] = torch.logical_and(mask[chunk_idx * chunk_size:(chunk_idx + 1) * chunk_size, :], next_mask)
self.mask = mask
# prune the weights
self.linear.weight.data = self.linear.weight.float() * self.mask.float()
# calculate sparsity
self.sparsity = self.linear.weight.data.numel() - self.linear.weight.data.nonzero().size(0)
def prune_cascade_l1(self, chunk_size = 32, q = 0.75):
last_chunk = self.out_features % chunk_size
n_chunks = self.out_features // chunk_size + (last_chunk != 0)
linear_mat = self.linear.weight.data
mask = torch.full(linear_mat.shape, True, dtype=bool).cuda()
cutoff = torch.std(linear_mat)*q
cutoff = cutoff * (1.0 / (n_chunks*(n_chunks+1)/2))
for chunk_idx in range(n_chunks):
current_cascade = linear_mat[chunk_idx * chunk_size:, :]
l1_norm = torch.sum(torch.abs(current_cascade), dim=0) / (self.out_features - (chunk_idx * chunk_size))
# scale norm
l1_norm = l1_norm * ((n_chunks - chunk_idx) / (n_chunks*(n_chunks+1)/2))
next_mask = (l1_norm > cutoff).repeat((self.out_features - (chunk_idx * chunk_size)), 1)
mask[chunk_idx * chunk_size:, :] = torch.logical_and(mask[chunk_idx * chunk_size:, :], next_mask)
# PRUNE FILTER CHUNK
#if (chunk_idx + 1) * chunk_size > self.out_features:
# end = self.out_features
#else:
# end = (chunk_idx + 1) * chunk_size
#current_chunk = linear_mat[chunk_idx * chunk_size:end, :]
#l1_norm = torch.sum(torch.abs(current_chunk)) / ((end - (chunk_idx * chunk_size)) * self.in_features)
#next_mask = (l1_norm > cutoff).repeat((end - (chunk_idx * chunk_size)), self.in_features)
#mask[chunk_idx * chunk_size:end, :] = torch.logical_and(mask[chunk_idx * chunk_size:end, :], next_mask)
self.mask = mask
# prune the weights
self.linear.weight.data = self.linear.weight.float() * self.mask.float()
# calculate sparsity
self.sparsity = self.linear.weight.data.numel() - self.linear.weight.data.nonzero().size(0)
def prune_filter_chunk(self, chunk_size = 32, q = 0.75):
"""
last_chunk = self.out_features % chunk_size
n_chunks = self.out_features // chunk_size + (last_chunk != 0)
linear_mat = self.linear.weight.data
mask = self.mask
cutoff = torch.std(linear_mat)*q
for chunk_idx in range(n_chunks):
if (chunk_idx + 1) * chunk_size > self.out_features:
end = self.out_features
else:
end = (chunk_idx + 1) * chunk_size
current_chunk = linear_mat[chunk_idx * chunk_size:end, :]
l1_norm = torch.sum(torch.abs(current_chunk)) / ((end - (chunk_idx * chunk_size)) * self.in_features)
next_mask = (l1_norm > cutoff).repeat((end - (chunk_idx * chunk_size)), self.in_features)
mask[chunk_idx * chunk_size:end, :] = torch.logical_and(mask[chunk_idx * chunk_size:end, :], next_mask)
self.mask = mask
# prune the weights
self.linear.weight.data = self.linear.weight.float() * self.mask.float()
# calculate sparsity
self.sparsity = self.linear.weight.data.numel() - self.linear.weight.data.nonzero().size(0)
"""
pass
def prune_SSL(self, q, dim=None):
linear_mat = self.linear.weight.data
mask = torch.full(linear_mat.shape, True, dtype=bool).cuda()
cutoff = torch.std(linear_mat)*q
l1_norm = torch.sum(torch.abs(linear_mat), dim=dim) / self.out_features
if dim==0:
next_mask = (l1_norm > cutoff).repeat(self.out_features, 1)
elif dim==1:
next_mask = (l1_norm > cutoff).repeat(self.in_features, 1)
next_mask = next_mask.reshape(mask.shape)
else:
next_mask = l1_norm > cutoff
try:
mask = torch.logical_and(mask, next_mask)
except:
print(linear_mat.shape)
print(l1_norm.shape)
print(next_mask.shape)
print(mask.shape)
exit(1)
self.mask = mask
# prune the weights
self.linear.weight.data = self.linear.weight.float() * self.mask.float()
# calculate sparsity
self.sparsity = self.linear.weight.data.numel() - self.linear.weight.data.nonzero().size(0)
# Group Lasso for v1 chunk pruning
def compute_group_lasso_v1(self, chunk_size = 32):
layer_loss = torch.zeros(1).cuda()
last_chunk = self.out_features % chunk_size
n_chunks = self.out_features // chunk_size + (last_chunk != 0)
linear_mat = self.linear.weight.view((self.out_features, -1))
for chunk_idx in range(n_chunks):
if chunk_idx == n_chunks - 1 and last_chunk != 0:
current_chunk = linear_mat[chunk_idx * chunk_size:, :]
l2_norm = torch.sqrt(torch.sum(current_chunk ** 2, dim=0) / last_chunk)
else:
current_chunk = linear_mat[chunk_idx * chunk_size:(chunk_idx + 1) * chunk_size, :]
l2_norm = torch.sqrt(torch.sum(current_chunk ** 2, dim=0) / chunk_size)
chunk_loss = torch.sum(torch.abs(l2_norm))
layer_loss += chunk_loss
return layer_loss
# original CSP unoptimized
def compute_group_lasso_v2_backup(self, chunk_size = 32, device=None):
layer_loss = torch.zeros(1).cuda()
last_chunk = self.out_features % chunk_size
n_chunks = self.out_features // chunk_size + (last_chunk != 0)
linear_mat = self.linear.weight.view((self.out_features, -1))
for chunk_idx in range(n_chunks-1, -1, -1):
current_cascade = linear_mat[chunk_idx * chunk_size:, :]
l2_norm = torch.sqrt(torch.sum(current_cascade ** 2, dim=0) / (self.out_features - (chunk_idx * chunk_size)))
# use triangular number to scale norm
l2_norm = l2_norm * ((n_chunks - chunk_idx) / (n_chunks*(n_chunks+1)/2))
chunk_loss = torch.sum(torch.abs(l2_norm))
layer_loss += chunk_loss
return layer_loss
# cascading bounded sparsity - attempt 1
def compute_group_lasso_v2(self, chunk_size = 32, device=None):
last_chunk = self.out_features % chunk_size
n_chunks = self.out_features // chunk_size + (last_chunk != 0)
linear_mat = self.linear.weight.view((self.out_features, -1))
layer_loss = torch.zeros(n_chunks).to(device)
chunk_ids = torch.arange(n_chunks-1, -1, -1).to(device)
scaling_factor = ((n_chunks - chunk_ids) / (n_chunks*(n_chunks+1)/2))
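        # Note: scaling_factor is [1, 2, ..., n_chunks] / T with T = n_chunks*(n_chunks+1)/2 (a
        # triangular number), so the element-wise product below weights the full-matrix cascade
        # (chunk_idx 0) least and the last, smallest cascade most -- the opposite ordering to
        # compute_group_lasso_v2_backup above; whether that reversal is intended is unclear.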
# #print(linear_mat.shape)
# linear_mat = nn.functional.pad(linear_mat, pad=( 0, 0, 0, chunk_size - last_chunk))
# #print(linear_mat.shape)
# #Linear Mat: chunk_size * n_chunks, other_dim
# linear_mat = linear_mat.reshape(chunk_size, n_chunks, -1).sum(dim=0)
# #print(linear_mat.shape)
# #Linear Mat: n_chunks, other_dim
# linear_mat = linear_mat.fliplr().pow(2).cumsum(dim=0).div((self.out_features - chunk_ids * chunk_size).reshape(-1, 1)).abs().sum(dim=1)
for chunk_idx in range(n_chunks-1, -1, -1):
current_cascade = linear_mat[chunk_idx * chunk_size:, :]
l2_norm = torch.norm(current_cascade, p=2, dim=0)#torch.sqrt(torch.sum(current_cascade ** 2, dim=0) / (self.out_features - (chunk_idx * chunk_size)))
# use triangular number to scale norm
#l2_norm = l2_norm * ((n_chunks - chunk_idx) / (n_chunks*(n_chunks+1)/2))
layer_loss[chunk_idx] = l2_norm.abs().sum()
return torch.sum(layer_loss * scaling_factor)
def compute_SSL(self, dim=1):
layer_loss = torch.zeros(1).cuda()
conv_mat = self.linear.weight.view((self.out_features, -1))
l2_norm = torch.sqrt(torch.sum(conv_mat ** 2, dim=dim) / self.out_features)
layer_loss += torch.sum(torch.abs(l2_norm))
return layer_loss
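# Illustrative usage sketch (an assumption, not part of the original training code): after a
# forward pass with finetune disabled, each pruned layer caches its group-lasso term in gl_loss,
# which can then be summed into the task loss.
def _collect_group_lasso_loss(model, coeff=1e-4):
    """Sum the cached gl_loss of every pruned layer (illustrative; coeff is a made-up weight)."""
    total = torch.zeros(1, device=next(model.parameters()).device)
    for module in model.modules():
        if isinstance(module, (PrunedLinear, PrunedConv)):
            total = total + module.gl_loss
    return coeff * total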
class PrunedConv(nn.Module):
def __init__(self, conv2d_module):
super(PrunedConv, self).__init__()
assert isinstance(conv2d_module, torch.nn.Conv2d), "Input Module is not a valid conv operator!"
self.in_channels = conv2d_module.in_channels
self.out_channels = conv2d_module.out_channels
self.kernel_size = conv2d_module.kernel_size
self.stride = conv2d_module.stride
self.padding = conv2d_module.padding
self.dilation = conv2d_module.dilation
self.bias = conv2d_module.bias
self.conv = conv2d_module
self.finetune = False
self.extracted = False
#self.conv = nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding, bias=bias, dilation=dilation)
# Expand and Transpose to match the dimension
        self.mask = np.ones([self.out_channels, self.in_channels, self.kernel_size[0], self.kernel_size[1]])
# Initialization
n = self.kernel_size[0] * self.kernel_size[1] * self.out_channels
m = self.kernel_size[0] * self.kernel_size[1] * self.in_channels
#self.conv.weight.data.normal_(0, math.sqrt(2. / (n+m) ))
self.sparsity = 1.0
def forward(self, x):
if not self.finetune:
            # Compute Group Lasso at forward
device = torch.device('cuda')#, torch.distributed.get_rank())
self.gl_loss = self.compute_group_lasso_v2(device=device)
out = self.conv(x)
#out = quant8(out, None)
return out
def prune_by_percentage(self, q=5.0):
"""
        Pruning the weight parameters by threshold.
:param q: pruning percentile. 'q' percent of the least
significant weight parameters will be pruned.
"""
# get bounds
max = torch.max(torch.abs(self.conv.weight.data))
min = torch.min(torch.abs(self.conv.weight.data))
# calculate cutoff
cutoff = ((max - min) * (q / 100.0)) + min
# generate mask
self.mask = torch.abs(self.conv.weight.data) > cutoff
# prune the weights
self.conv.weight.data = self.conv.weight.float() * self.mask.float()
# calculate sparsity
self.sparsity = self.conv.weight.data.numel() - self.conv.weight.data.nonzero().size(0)
def prune_by_std(self, q=0.25):
"""
Pruning by a factor of the standard deviation value.
        :param q: (scalar) factor of the standard deviation value.
            Weight magnitudes below np.std(weight)*q
            will be pruned.
"""
# generate mask
self.mask = torch.abs(self.conv.weight.data) >= (torch.std(self.conv.weight.data)*q)
# prune the weights
self.conv.weight.data = self.conv.weight.data.float() * self.mask.float()
# calculate sparsity
self.sparsity = self.conv.weight.data.numel() - self.conv.weight.data.nonzero().size(0)
def prune_towards_dilation(self):
# generate mask
if self.kernel_size[0] == 5:
mask = torch.tensor([[True, False, True, False, True],
[False, False, False, False, False],
[True, False, True, False, True],
[False, False, False, False, False],
[True, False, True, False, True]])
else:
mask = torch.tensor([[True, True, True],
[True, True, True],
[True, True, True]])
self.mask = torch.tensor(mask.repeat(self.out_channels, self.in_channels, 1, 1)).cuda()
# prune the weights
self.conv.weight.data = self.conv.weight.data.float() * self.mask.float()
# calculate sparsity
self.sparsity = self.conv.weight.data.numel() - self.conv.weight.data.nonzero().size(0)
def prune_towards_asym_dilation(self):
# generate mask
if self.kernel_size[0] == 5:
# compute kernel-normalized magnitudes of each element in each kernel across filters
means = torch.abs(self.conv.weight.data).mean(axis=(2,3)).cpu()
scaled = np.array([[[[0]*self.kernel_size[0]]*self.kernel_size[1]]*self.in_channels]*self.out_channels)
weight_data = self.conv.weight.data.cpu()
for out_channel in range(self.out_channels):
for in_channel in range(self.in_channels):
                    scaled[out_channel][in_channel] = weight_data[out_channel][in_channel] / means[out_channel][in_channel]  # right-hand side assumed (per-kernel normalisation); the original line is truncated here
import sys
import unittest
import copy
import numpy as np
from scipy.linalg import block_diag
import pyinduct as pi
import pyinduct.hyperbolic.feedforward as hff
import pyinduct.parabolic as parabolic
import pyinduct.simulation as sim
from pyinduct.tests import show_plots
import pyqtgraph as pg
class SimpleInput(sim.SimulationInput):
"""
the simplest input we can imagine
"""
def __init__(self):
super().__init__("SimpleInput")
def _calc_output(self, **kwargs):
return 0
class MonotonousInput(sim.SimulationInput):
"""
an input that ramps up
"""
def __init__(self):
super().__init__("MonotonousInput")
def _calc_output(self, **kwargs):
t = kwargs["time"]
extra_data = np.sin(t)
if np.isclose(t % 2, 0):
extra_data = np.nan
return dict(output=kwargs["time"], extra_data=extra_data)
class CorrectInput(sim.SimulationInput):
"""
a diligent input
"""
def __init__(self, output, limits=(0, 1), der_order=0):
super().__init__(self)
self.out = np.ones(der_order + 1) * output
self.t_min, self.t_max = limits
def _calc_output(self, **kwargs):
if "time" not in kwargs:
raise ValueError("mandatory key not found!")
if "weights" not in kwargs:
raise ValueError("mandatory key not found!")
if "weight_lbl" not in kwargs:
raise ValueError("mandatory key not found!")
return dict(output=self.out)
class AlternatingInput(sim.SimulationInput):
"""
a simple alternating input, composed of smooth transitions
"""
def _calc_output(self, **kwargs):
t = kwargs["time"] % 2
if t < 1:
res = self.tr_up(t)
else:
res = self.tr_down(t)
return dict(output=res - .5)
def __init__(self):
super().__init__(self)
self.tr_up = pi.SmoothTransition(states=(0, 1),
interval=(0, 1),
method="poly")
self.tr_down = pi.SmoothTransition(states=(1, 0),
interval=(1, 2),
method="poly")
class SimulationInputTest(unittest.TestCase):
def setUp(self):
pass
def test_abstract_funcs(self):
# raise type error since abstract method is not implemented
self.assertRaises(TypeError, sim.SimulationInput)
# method implemented, should work
u = SimpleInput()
def test_call_arguments(self):
a = np.eye(2, 2)
b = np.array([[0], [1]])
u = CorrectInput(output=1, limits=(0, 1))
ic = np.zeros((2, 1))
ss = sim.StateSpace({1: a}, {0: {1: b}}, input_handle=u)
# if caller provides correct kwargs no exception should be raised
res = sim.simulate_state_space(ss, ic, pi.Domain((0, 1), num=10))
def test_storage(self):
a = np.eye(2, 2)
b = np.array([[0], [1]])
u = MonotonousInput()
ic = np.zeros((2, 1))
ss = sim.StateSpace(a, b, input_handle=u)
# run simulation to fill the internal storage
domain = pi.Domain((0, 10), num=11)
bigger_domain = pi.Domain((-1, 11), num=13)
res = sim.simulate_state_space(ss, ic, domain)
# don't return any entries that aren't there
self.assertRaises(KeyError, u.get_results, domain, "Unknown Entry")
# default key is "output"
ed = u.get_results(domain)
ed_explicit = u.get_results(domain, result_key="output")
self.assertTrue(np.array_equal(ed, ed_explicit))
# return an np.ndarray as default
self.assertIsInstance(ed, np.ndarray)
# return EvalData if corresponding flag is set
self.assertIsInstance(u.get_results(domain, as_eval_data=True),
pi.EvalData)
# if data has to be extrapolated, just repeat the last values
res = u.get_results(bigger_domain)
self.assertEqual(res[0], res[1])
self.assertEqual(res[-2], res[-1])
# nan values in the data storage should be ignored
res = u.get_results(bigger_domain, result_key="extra_data")
# storage contains values
self.assertTrue(u._time_storage)
self.assertTrue(u._value_storage)
# clear it
u.clear_cache()
# storage should be empty
self.assertFalse(u._time_storage)
self.assertFalse(u._value_storage)
# double clearing should work
u.clear_cache()
class CanonicalFormTest(unittest.TestCase):
def setUp(self):
self.cf = sim.CanonicalForm()
self.u = SimpleInput()
def test_add_to(self):
a = np.eye(5)
self.cf.add_to(dict(name="E", order=0, exponent=1), a)
self.assertTrue(np.array_equal(self.cf.matrices["E"][0][1], a))
self.cf.add_to(dict(name="E", order=0, exponent=1), 5 * a)
self.assertTrue(np.array_equal(self.cf.matrices["E"][0][1], 6 * a))
b = np.eye(10)
self.assertRaises(ValueError,
self.cf.add_to,
dict(name="E", order=0, exponent=1), b)
self.cf.add_to(dict(name="E", order=2, exponent=1), b)
self.assertTrue(np.array_equal(self.cf.matrices["E"][2][1], b))
f = np.atleast_2d(np.array(range(5))).T
self.assertRaises(ValueError,
self.cf.add_to,
dict(name="E", order=0, exponent=1), f)
self.cf.add_to(dict(name="f"), f)
self.assertTrue(np.array_equal(self.cf.matrices["f"], f))
# try to add something with derivative or exponent to f: value should
# end up in f
self.cf.add_to(dict(name="f"), f)
self.assertTrue(np.array_equal(self.cf.matrices["f"], 2 * f))
c = np.atleast_2d(np.array(range(5))).T
# that one should be easy
self.cf.add_to(dict(name="G", order=0, exponent=1), c, column=0)
self.assertTrue(np.array_equal(self.cf.matrices["G"][0][1], c))
        # here G01 has to be expanded
self.cf.add_to(dict(name="G", order=0, exponent=1), c, column=1)
self.assertTrue(np.array_equal(self.cf.matrices["G"][0][1],
np.hstack((c, c))))
        # here G01 has to be expanded again
self.cf.add_to(dict(name="G", order=0, exponent=1), c, column=3)
self.assertTrue(np.array_equal(self.cf.matrices["G"][0][1],
np.hstack((c, c, np.zeros_like(c), c))))
# input derivatives can occur
self.cf.add_to(dict(name="G", order=1, exponent=1), c, column=0)
self.assertTrue(np.array_equal(self.cf.matrices["G"][1][1], c))
# expansion should still work
self.cf.add_to(dict(name="G", order=1, exponent=1), c, column=1)
self.assertTrue(np.array_equal(self.cf.matrices["G"][1][1],
np.hstack((c, c))))
class ParseTest(unittest.TestCase):
def setUp(self):
# scalars
self.scalars = pi.Scalars(np.vstack(list(range(3))))
# callbacks
self.u = pi.ConstantTrajectory(7)
u1 = CorrectInput(output=1)
u2 = CorrectInput(output=2)
self.u_vec = pi.SimulationInputVector([u1, u2])
self.u_dt = CorrectInput(output=1, der_order=1)
u1_dt = CorrectInput(output=1, der_order=1)
u2_dt = CorrectInput(output=2, der_order=1)
self.u_vec_dt = pi.SimulationInputVector([u1_dt, u2_dt])
# inputs
self.input = pi.Input(self.u)
self.vec_input_1 = pi.Input(self.u_vec, index=0)
self.vec_input_2 = pi.Input(self.u_vec, index=1)
self.input_dt = pi.Input(self.u_dt, order=1)
self.vec_input_dt_1 = pi.Input(self.u_vec_dt, index=0, order=1)
self.vec_input_dt_2 = pi.Input(self.u_vec_dt, index=1, order=1)
# scale function
def heavyside(z):
if z < 0.5:
return 0
elif z == 0.5:
return .5
else:
return 1
base = pi.Base(pi.Function(heavyside))
pi.register_base("heavyside_base", base)
# distributed base
nodes = pi.Domain((0, 1), num=3)
self.distributed_base = pi.LagrangeFirstOrder.cure_interval(nodes)
pi.register_base("distributed_base", self.distributed_base)
fractions = [pi.ComposedFunctionVector(f, s) for f, s in
zip(self.distributed_base, nodes)]
self.composed_base = pi.Base(fractions)
pi.register_base("composed_base", self.composed_base)
# lumped base
self.lumped_base = pi.Base([pi.ConstantFunction(1)])
pi.register_base("lumped_base", self.lumped_base)
# Test Functions
self.test_funcs = pi.TestFunction("distributed_base")
self.test_funcs_at0 = self.test_funcs(0)
self.test_funcs_at1 = self.test_funcs(1)
self.test_funcs_dz = self.test_funcs.derive(1)
self.test_funcs_dz_at1 = self.test_funcs_dz(1)
self.comp_test_funcs = pi.TestFunction("composed_base")
self.comp_test_funcs_at0 = self.comp_test_funcs(0)
self.comp_test_funcs_at1 = self.comp_test_funcs(1)
self.comp_test_funcs_dz = self.comp_test_funcs.derive(1)
self.comp_test_funcs_dz_at1 = self.comp_test_funcs_dz(1)
# Scalar Functions
self.scalar_func = pi.ScalarFunction("heavyside_base")
# Distributed / Field Variables
self.field_var = pi.FieldVariable("distributed_base")
self.field_var_at1 = self.field_var(1)
self.field_var_dz = self.field_var.derive(spat_order=1)
self.field_var_dz_at1 = self.field_var_dz(1)
self.field_var_ddt = self.field_var.derive(temp_order=2)
self.field_var_ddt_at0 = self.field_var_ddt(0)
self.field_var_ddt_at1 = self.field_var_ddt(1)
self.comp_field_var = pi.FieldVariable("composed_base")
self.comp_field_var_at1 = self.comp_field_var(1)
self.comp_field_var_dz = self.comp_field_var.derive(spat_order=1)
self.odd_weight_field_var = pi.FieldVariable(
"distributed_base", weight_label="special_weights")
# Field variable 2
self.lumped_var = pi.FieldVariable("lumped_base")
# ---------------------------------------------------------------------
# Construction of Equation Terms
# ---------------------------------------------------------------------
# inputs
self.input_term1 = pi.ScalarTerm(pi.Product(self.test_funcs_at1,
self.input))
self.input_term1_swapped = pi.ScalarTerm(pi.Product(self.input,
self.test_funcs_at1)
)
self.input_term2 = pi.ScalarTerm(pi.Product(self.test_funcs_dz_at1,
self.input))
self.input_term3 = pi.IntegralTerm(pi.Product(self.test_funcs,
self.input),
limits=(0, 1))
self.input_term3_swapped = pi.IntegralTerm(pi.Product(self.input,
self.test_funcs),
limits=(0, 1))
self.input_term3_scaled = pi.IntegralTerm(
pi.Product(pi.Product(self.scalar_func, self.test_funcs),
self.input),
limits=(0, 1))
self.input_term3_scaled_first_half = pi.IntegralTerm(
pi.Product(pi.Product(self.scalar_func, self.test_funcs),
self.input),
limits=(0, .5))
self.input_term3_scaled_second_half = pi.IntegralTerm(
pi.Product(pi.Product(self.scalar_func, self.test_funcs),
self.input),
limits=(.5, 1))
self.input_term_dt = pi.IntegralTerm(pi.Product(self.test_funcs,
self.input_dt),
limits=(0, 1))
self.input_term_vectorial1 = pi.ScalarTerm(
pi.Product(self.test_funcs_at0, self.vec_input_1))
self.input_term_vectorial2 = pi.ScalarTerm(
pi.Product(self.test_funcs_at1, self.vec_input_2))
self.input_term_vectorial_dt1 = pi.ScalarTerm(
pi.Product(self.test_funcs_at0, self.vec_input_dt_1))
self.input_term_vectorial_dt2 = pi.ScalarTerm(
pi.Product(self.test_funcs_at1, self.vec_input_dt_2))
# pure test function terms
self.func_term = pi.ScalarTerm(self.test_funcs_at1)
self.func_term_int = pi.IntegralTerm(pi.Product(self.test_funcs,
self.test_funcs),
limits=(0, 1))
self.comp_func_term = pi.ScalarTerm(self.comp_test_funcs_at1)
self.comp_func_term_int = pi.IntegralTerm(
pi.Product(self.comp_test_funcs, self.comp_test_funcs),
limits=(0, 1))
# pure field variable terms
self.field_term_at1 = pi.ScalarTerm(self.field_var_at1)
self.field_term_dz_at1 = pi.ScalarTerm(self.field_var_dz_at1)
self.field_term_ddt_at1 = pi.ScalarTerm(self.field_var_ddt_at1)
self.field_int = pi.IntegralTerm(self.field_var, limits=(0, 1))
self.field_int_half = pi.IntegralTerm(self.field_var, limits=(0, .5))
self.field_dz_int = pi.IntegralTerm(self.field_var_dz, (0, 1))
self.field_ddt_int = pi.IntegralTerm(self.field_var_ddt, (0, 1))
self.comp_field_term_at1 = pi.ScalarTerm(self.comp_field_var_at1)
self.comp_field_int = pi.IntegralTerm(self.comp_field_var,
limits=(0, 1))
self.comp_field_dz_int = pi.IntegralTerm(self.comp_field_var,
limits=(0, 1))
# products
self.prod_term_fs_at1 = pi.ScalarTerm(
pi.Product(self.field_var_at1, self.scalars))
self.prod_int_fs = pi.IntegralTerm(pi.Product(self.field_var,
self.scalars),
(0, 1))
self.prod_int_f_f = pi.IntegralTerm(pi.Product(self.field_var,
self.test_funcs),
(0, 1))
self.prod_int_f_f_swapped = pi.IntegralTerm(pi.Product(self.test_funcs,
self.field_var),
(0, 1))
self.prod_int_f_at1_f = pi.IntegralTerm(
pi.Product(self.field_var_at1, self.test_funcs), (0, 1))
self.prod_int_f_f_at1 = pi.IntegralTerm(
pi.Product(self.field_var, self.test_funcs_at1), (0, 1))
self.prod_term_f_at1_f_at1 = pi.ScalarTerm(
pi.Product(self.field_var_at1, self.test_funcs_at1))
self.prod_int_fddt_f = pi.IntegralTerm(
pi.Product(self.field_var_ddt, self.test_funcs), (0, 1))
self.prod_term_fddt_at0_f_at0 = pi.ScalarTerm(
pi.Product(self.field_var_ddt_at0, self.test_funcs_at0))
self.prod_term_f_at1_dphi_at1 = pi.ScalarTerm(
pi.Product(self.field_var_at1, self.test_funcs_dz_at1))
self.temp_int = pi.IntegralTerm(pi.Product(self.field_var_ddt,
self.test_funcs),
limits=(0, 1))
self.spat_int = pi.IntegralTerm(pi.Product(self.field_var_dz,
self.test_funcs_dz),
limits=(0, 1))
self.spat_int_asymmetric = pi.IntegralTerm(pi.Product(self.field_var_dz,
self.test_funcs),
limits=(0, 1))
self.prod_term_tf_at0_lv_at0 = pi.ScalarTerm(
pi.Product(self.test_funcs(0), self.lumped_var(0)))
self.prod_term_tf_at0_lv_at0_swapped = pi.ScalarTerm(
pi.Product(self.lumped_var(0), self.test_funcs(0)))
self.prod_int_sf_fv = pi.IntegralTerm(pi.Product(self.scalar_func,
self.field_var),
limits=(0, 1))
self.prod_int_sf_fv_swapped = pi.IntegralTerm(
pi.Product(self.field_var, self.scalar_func),
limits=(0, 1))
self.alternating_weights_term = pi.IntegralTerm(
self.odd_weight_field_var,
limits=(0, 1))
def test_Input_term(self):
terms = sim.parse_weak_formulation(
sim.WeakFormulation(self.input_term2, name="test"),
finalize=False).get_static_terms()
self.assertFalse(np.iscomplexobj(terms["G"][0][1]))
np.testing.assert_array_almost_equal(terms["G"][0][1],
np.array([[0], [-2], [2]]))
terms = sim.parse_weak_formulation(
sim.WeakFormulation(self.input_term3, name="test"),
finalize=False).get_static_terms()
self.assertFalse(np.iscomplexobj(terms["G"][0][1]))
np.testing.assert_array_almost_equal(terms["G"][0][1],
np.array([[.25], [.5], [.25]]))
terms = sim.parse_weak_formulation(
sim.WeakFormulation(self.input_term3_swapped, name="test"),
finalize=False).get_static_terms()
self.assertFalse(np.iscomplexobj(terms["G"][0][1]))
np.testing.assert_array_almost_equal(terms["G"][0][1],
np.array([[.25], [.5], [.25]]))
terms = sim.parse_weak_formulation(
sim.WeakFormulation(self.input_term3_scaled, name="test"),
finalize=False).get_static_terms()
self.assertFalse(np.iscomplexobj(terms["G"][0][1]))
np.testing.assert_array_almost_equal(terms["G"][0][1],
np.array([[.0], [.25], [.25]]))
terms_fh = sim.parse_weak_formulation(
sim.WeakFormulation(self.input_term3_scaled_first_half, name="test"),
finalize=False).get_static_terms()
        self.assertFalse(np.iscomplexobj(terms_fh["G"][0][1]))
np.testing.assert_array_almost_equal(terms_fh["G"][0][1],
np.array([[.0], [.0], [.0]]))
terms_sh = sim.parse_weak_formulation(
sim.WeakFormulation(self.input_term3_scaled_second_half, name="test"),
finalize=False).get_static_terms()
        self.assertFalse(np.iscomplexobj(terms_sh["G"][0][1]))
np.testing.assert_array_almost_equal(terms_sh["G"][0][1],
np.array([[.0], [.25], [.25]]))
# vectorial inputs
terms = sim.parse_weak_formulation(sim.WeakFormulation(
[self.input_term_vectorial1, self.input_term_vectorial2],
name="test"),
finalize=False).get_static_terms()
self.assertFalse(np.iscomplexobj(terms["G"][0][1]))
np.testing.assert_array_almost_equal(terms["G"][0][1],
np.array([[1, 0],
[0, 0],
[0, 1]]))
# time derivatives of inputs
terms = sim.parse_weak_formulation(sim.WeakFormulation(
self.input_term_dt,
name="test"),
finalize=False).get_static_terms()
self.assertFalse(np.iscomplexobj(terms["G"][1][1]))
np.testing.assert_array_almost_equal(terms["G"][1][1],
np.array([[.25], [.5], [.25]]))
# time derivative of vectorial inputs
terms = sim.parse_weak_formulation(sim.WeakFormulation(
[self.input_term_vectorial_dt1, self.input_term_vectorial_dt2],
name="test"),
finalize=False).get_static_terms()
self.assertFalse(np.iscomplexobj(terms["G"][1][1]))
np.testing.assert_array_almost_equal(terms["G"][1][1],
np.array([[1, 0],
[0, 0],
[0, 1]]))
def test_TestFunction_term(self):
terms = sim.parse_weak_formulation(
sim.WeakFormulation(self.func_term, name="test"),
finalize=False).get_static_terms()
self.assertFalse(np.iscomplexobj(terms["f"]))
np.testing.assert_array_almost_equal(terms["f"],
np.array([[0], [0], [1]]))
terms = sim.parse_weak_formulation(
sim.WeakFormulation(self.func_term_int, name="test"),
finalize=False).get_static_terms()
self.assertFalse(np.iscomplexobj(terms["f"]))
np.testing.assert_array_almost_equal(terms["f"],
np.array([[1 / 6],
[1 / 3],
[1 / 6]]))
if 0:
# composed
terms = sim.parse_weak_formulation(
sim.WeakFormulation(self.comp_func_term, name="test"),
finalize=False).get_static_terms()
self.assertFalse(np.iscomplexobj(terms["f"]))
np.testing.assert_array_almost_equal(terms["f"],
np.array([[0, 0],
[0, .5],
[1, 1]]))
terms = sim.parse_weak_formulation(
sim.WeakFormulation(self.comp_func_term_int, name="test"),
finalize=False).get_static_terms()
self.assertFalse(np.iscomplexobj(terms["f"]))
np.testing.assert_array_almost_equal(terms["f"],
np.array([[1 / 6 + 0],
[1 / 3 + .25],
[1 / 6 + 1]]))
def test_FieldVariable_term(self):
terms = sim.parse_weak_formulation(
sim.WeakFormulation(self.field_term_at1, name="test"),
finalize=False).get_dynamic_terms()["distributed_base"]
self.assertFalse(np.iscomplexobj(terms["E"][0][1]))
np.testing.assert_array_almost_equal(terms["E"][0][1],
np.array([[0, 0, 1]]))
terms = sim.parse_weak_formulation(
sim.WeakFormulation(self.field_term_ddt_at1, name="test"),
finalize=False).get_dynamic_terms()["distributed_base"]
self.assertFalse(np.iscomplexobj(terms["E"][2][1]))
np.testing.assert_array_almost_equal(terms["E"][2][1],
np.array([[0, 0, 1]]))
terms = sim.parse_weak_formulation(
sim.WeakFormulation(self.field_term_dz_at1, name="test"),
finalize=False).get_dynamic_terms()["distributed_base"]
self.assertFalse(np.iscomplexobj(terms["E"][0][1]))
np.testing.assert_array_almost_equal(terms["E"][0][1],
np.array([[0, -2, 2]]))
terms = sim.parse_weak_formulation(
sim.WeakFormulation(self.field_int, name="test"),
finalize=False).get_dynamic_terms()["distributed_base"]
self.assertFalse(np.iscomplexobj(terms["E"][0][1]))
np.testing.assert_array_almost_equal(terms["E"][0][1],
np.array([[.25, .5, .25]]))
terms = sim.parse_weak_formulation(
sim.WeakFormulation(self.field_int_half, name="test"),
finalize=False).get_dynamic_terms()["distributed_base"]
self.assertFalse(np.iscomplexobj(terms["E"][0][1]))
np.testing.assert_array_almost_equal(terms["E"][0][1],
np.array([[.25, .25, 0]]))
terms = sim.parse_weak_formulation(
sim.WeakFormulation(self.field_dz_int, name="test"),
finalize=False).get_dynamic_terms()["distributed_base"]
self.assertFalse(np.iscomplexobj(terms["E"][0][1]))
np.testing.assert_array_almost_equal(terms["E"][0][1],
np.array([[-1, 0, 1]]))
terms = sim.parse_weak_formulation(
sim.WeakFormulation(self.field_ddt_int, name="test"),
finalize=False).get_dynamic_terms()["distributed_base"]
self.assertFalse(np.iscomplexobj(terms["E"][2][1]))
np.testing.assert_array_almost_equal(terms["E"][2][1],
np.array([[.25, .5, .25]]))
# composed
# terms = sim.parse_weak_formulation(
# sim.WeakFormulation(self.comp_field_term_at1, name="test"),
# finalize=False).get_dynamic_terms()["composed_base"]
# self.assertFalse(np.iscomplexobj(terms["E"][0][1]))
# np.testing.assert_array_almost_equal(terms["E"][0][1],
# np.array([[1, 0], [0, .5], [0, 1]]))
# terms = sim.parse_weak_formulation(
# sim.WeakFormulation(self.comp_field_int, name="test"),
# finalize=False).get_dynamic_terms()["composed_base"]
# self.assertFalse(np.iscomplexobj(terms["E"][0][1]))
# np.testing.assert_array_almost_equal(terms["E"][0][1],
# np.array([[[.25, 0],
# [.5, .5],
# [.25, 1]]]))
def test_Product_term(self):
        # TODO: add test functionality that automatically checks whether each
        # case is also valid for swapped arguments
terms = sim.parse_weak_formulation(
sim.WeakFormulation(self.prod_term_fs_at1, name="test"),
finalize=False).get_dynamic_terms()["distributed_base"]
self.assertFalse( | np.iscomplexobj(terms["E"][0][1]) | numpy.iscomplexobj |
from spreco.common import utils, ops
from spreco.model.ncsn import ncsn
import argparse
import os
import numpy as np
import tqdm
from functools import partial
import tensorflow.compat.v1 as tf
tf.disable_eager_execution()
def main(config_path):
config = utils.load_config(config_path)
model_config = utils.load_config(config['model_folder']+'/config.yaml')
model_path = os.path.join(config['model_folder'], config['model_name'])
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = config['gpu_id']
np.random.seed(model_config['seed'])
def get_mask(shape, o, center=True):
"""
0 odd
1 even
cemter set the central two lines
"""
mask = np.zeros(shape)
if o == 0:
mask[0::2, ...] = 1
if o == 1:
mask[1::2, ...] = 1
if center:
mask[127:128, ...] = 1
return mask
def prepare_simu(kspace_path, o, center, shift):
"""to simulate single coil acquire kspace"""
kspace = np.squeeze(np.load(kspace_path)['kspace'])
nx, ny, _ = kspace.shape
coilsen = np.squeeze(utils.bart(1, 'caldir 40', kspace[np.newaxis, ...]))
img_shape = [nx, ny]
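        # Combine the per-coil images with the conjugate coil sensitivities into a
        # single coil-combined image (not a root-sum-of-squares despite the name).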
std_coils = ops.mifft2(kspace, img_shape)
rss = np.sum(np.multiply(std_coils, np.squeeze(np.conj(coilsen))), axis=2)
rss = np.roll(rss, shift=shift, axis=0)
utils.writecfl('/scratch/gluo/zero_filled', rss)
ksp = ops.mfft2(rss, img_shape)
mask = get_mask(ksp.shape, o, center)
kspx2 = ksp*mask #x,y -> (0,1)
x_ = ops.mifft2(kspx2, img_shape)
def A_cart(img, mask, shape, axis=(0,1), center=False):
kspace = ops.mfft2(img, shape, axis=axis, center=center)
kspace = np.multiply(kspace, mask)
return kspace
def AT_cart(kspace, mask, shape, axis=(0,1), center=False):
"""
adjoint cartesian AT
"""
img = ops.mifft2(kspace*mask, shape, axis=axis, center=center)
return img
def AHA(img, mask, shape, axis=(0,1), center=False):
tmp = A_cart(img, mask, shape, axis, center)
ret = AT_cart(tmp, mask, shape, axis=axis, center=center)
return ret
params1 = {'mask': mask[np.newaxis, ...], 'shape': img_shape, 'axis': (1,2)}
AHA = partial(AHA, **params1)
params2 = {'mask': mask, 'shape': img_shape, 'axis': (0,1)}
A = partial(A_cart, **params2)
return x_[np.newaxis, ...], rss, ksp, mask, AHA, A
def get_grad_logp():
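        # Build the NCSN score network graph, restore the trained weights and
        # return a closure that evaluates the score (grad log p) of a batch at a
        # given noise-level index.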
batch_size = None
x = tf.placeholder(tf.float32, shape=[batch_size]+model_config['input_shape'])
h = tf.placeholder(tf.int32, shape=[batch_size])
ins_ncsn = ncsn(model_config)
grad_op = ins_ncsn.net.forward(x, h)
saver = tf.train.Saver()
sess = tf.Session()
sess.run(tf.global_variables_initializer())
saver.restore(sess, model_path)
sigmas = sess.run(ins_ncsn.sigmas)
def grad_logp(x_in, label):
return sess.run(grad_op, {x:x_in, h:label})
return grad_logp, sigmas
def ancestral_sampler(zero_filled, AHA, grad_logp, sigmas, shape, lamb=5, nr_samples=10, n_steps_each=50, burn_in=False, burn_step=0):
images = []
scalar = np.max(abs(zero_filled))
zero_filled = zero_filled/scalar
nx, ny = shape
if burn_in:
burn_flag = True
x_mod = np.random.rand(1,nx,ny,2)
else:
x_mod = np.random.rand(nr_samples,nx,ny,2)
for i in tqdm.tqdm(range(len(sigmas)-1), desc='reconing'):
sigma = sigmas[i]
adj_sigma = sigmas[i+1]
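            # Step size and matching noise std of the ancestral sampling update
            # when moving from noise level sigma down to adj_sigma.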
tau = (sigma ** 2 - adj_sigma ** 2)
std = np.sqrt(adj_sigma ** 2 * tau / (sigma ** 2))
if burn_in and i<burn_step:
z = np.random.randn(1, nx, ny, 2) * sigma
labels = [np.int32(i)]
else:
z = np.random.randn(nr_samples, nx, ny, 2) * sigma
labels = np.array([i]*nr_samples, dtype=np.int32)
noise_x_ = utils.cplx2float(zero_filled + AHA(utils.float2cplx(z)))
for _ in range(n_steps_each):
if burn_in and i > burn_step-1 and burn_flag:
print("burned")
x_mod = np.squeeze(np.array([x_mod for _ in range(nr_samples)]))
burn_flag = False
                # x_{k+1} <- x_k + tau*score - lamb*std*AHA(x_k + tau*score) + lamb*std*noise_x_ + noise, where score = grad_logp(x_k)
score = grad_logp(x_mod, labels)
grad_data_fidelity = AHA(utils.float2cplx(x_mod+tau*score))
grad_data_fidelity = utils.cplx2float(grad_data_fidelity)
noise = np.random.randn(*x_mod.shape)*std
x_mod = x_mod + tau*score - std*lamb*grad_data_fidelity + lamb*std*noise_x_ + noise
images.append(x_mod)
return images, scalar
def run_recon():
x_, rss, kspace, mask, AHA, A = prepare_simu(config['ksp_path'], config['o'], config['center'], config['shift'])
grad_logp, sigmas = get_grad_logp()
params ={
'lamb': config['lamb'],
'nr_samples': config['nr_samples'],
'burn_in': config['burn_in'],
'burn_step': config['burn_step'],
'n_steps_each': config['n_steps_each']
}
images, scalar = ancestral_sampler(x_, AHA, grad_logp, sigmas, x_.shape[1:], **params)
return x_, images, rss, kspace, mask, A, scalar
zero_filled, images, rss, kspace, mask, A, scalar = run_recon()
if config['burn_in']:
images = np.array(images[config['burn_step']*config['n_steps_each']:])
else:
images = | np.array(images) | numpy.array |
# stdlib
from typing import Any
# third party
import numpy as np
import pytest
import torch
# syft absolute
from syft.core.tensor.passthrough import PassthroughTensor
from syft.core.tensor.util import implements
def test_data_child() -> None:
data = np.array([1, 2, 3], dtype=np.int32)
tensor = PassthroughTensor(child=data)
assert (tensor._data_child == data).all()
def test_len() -> None:
data_list = [1.5, 3, True, "Thanos"]
data_array = np.array([[1, 2], [3, 4]], dtype=np.int32)
for i in data_list:
        if isinstance(i, (float, int, bool)):
data = np.array([i])
tensor = PassthroughTensor(child=data)
assert tensor.__len__() == 1
else:
data = np.array([i])
tensor = PassthroughTensor(child=data)
assert len(tensor) == 1
tensor = PassthroughTensor(child=data_array)
assert len(tensor) == 2
def test_shape() -> None:
data_list = [1.5, 3, True, "Thanos"]
data_array = np.array([[1, 2], [3, 4]], dtype=np.int32)
for i in data_list:
        if isinstance(i, (float, int, bool)):
data = np.array([i])
tensor = PassthroughTensor(child=data)
assert tensor.shape == (1,)
else:
data = np.array([i])
tensor = PassthroughTensor(child=data)
assert tensor.shape == (1,)
tensor = PassthroughTensor(child=data_array)
assert tensor.shape == (2, 2)
def test_dtype() -> None:
data = np.array([1, 2, 3], dtype=np.int32)
tensor = PassthroughTensor(child=data)
assert tensor.dtype == np.int32
def test__and__() -> None:
data_a = np.array([True, False, False])
data_b = np.array([True, True, False])
data_c = np.array([False, False])
tensor_a = PassthroughTensor(child=data_a)
tensor_b = PassthroughTensor(child=data_b)
tensor_c = PassthroughTensor(child=data_c)
result_a = data_a & data_b
result_b = tensor_a & tensor_b
result_c = tensor_a & data_b
assert (result_a == data_a).all()
assert result_b == tensor_a
assert result_b != tensor_b
assert result_c == tensor_a
assert result_c != tensor_b
with pytest.raises(
ValueError, match="operands could not be broadcast together with shapes"
):
tensor_b & tensor_c
def test__rand__() -> None:
data_a = np.array([True, False, True])
data_b = torch.tensor([False, False, True])
data_c = np.array([False, False])
tensor_a = PassthroughTensor(child=data_a)
tensor_b = PassthroughTensor(child=data_b)
tensor_c = PassthroughTensor(child=data_c)
expected = tensor_b
result_a = tensor_a.__rand__(tensor_b)
result_b = tensor_a.__rand__(data_b)
assert result_a == expected
assert result_b == expected
with pytest.raises(
ValueError, match="operands could not be broadcast together with shapes"
):
tensor_c.__rand__(tensor_b)
def test__abs__() -> None:
data = np.array([1, -1, -2], dtype=np.int32)
expected = np.array([1, 1, 2], dtype=np.int32)
tensor_a = PassthroughTensor(child=data)
tensor_b = PassthroughTensor(child=expected)
assert tensor_a.__abs__() == tensor_b
def test__add__() -> None:
data_a = np.array([1, -1, -2], dtype=np.int32)
data_b = np.array([1, 1, 3], dtype=np.int32)
expected = np.array([2, 0, 1], dtype=np.int32)
tensor_a = PassthroughTensor(child=data_a)
tensor_b = PassthroughTensor(child=data_b)
tensor_c = PassthroughTensor(child=expected)
result_a = tensor_a.__add__(tensor_b)
result_b = tensor_a.__add__(data_b)
assert result_a == tensor_c
assert result_b == tensor_c
def test__radd__() -> None:
data_a = np.array([1, -1, -2], dtype=np.int32)
data_b = torch.tensor([1, 1, 3], dtype=torch.int32)
expected = torch.tensor([2, 0, 1], dtype=torch.int32)
tensor_a = PassthroughTensor(child=data_a)
tensor_b = PassthroughTensor(child=data_b)
tensor_c = PassthroughTensor(child=expected)
result_a = tensor_a.__radd__(tensor_b)
result_b = tensor_a.__radd__(data_b)
assert result_a == tensor_c
assert result_b == tensor_c
def test__sub__() -> None:
data_a = np.array([1, -1, -2], dtype=np.int32)
data_b = np.array([1, 1, 3], dtype=np.int32)
expected = np.array([0, -2, -5], dtype=np.int32)
tensor_a = PassthroughTensor(child=data_a)
tensor_b = PassthroughTensor(child=data_b)
tensor_c = PassthroughTensor(child=expected)
result_a = tensor_a.__sub__(tensor_b)
result_b = tensor_a.__sub__(data_b)
assert result_a == tensor_c
assert result_b == tensor_c
def test__rsub__() -> None:
data_a = np.array([1, -1, -2], dtype=np.int32)
data_b = torch.tensor([1, 1, 3], dtype=torch.int32)
expected = torch.tensor([0, -2, -5], dtype=torch.int32)
tensor_a = PassthroughTensor(child=data_a)
tensor_b = PassthroughTensor(child=data_b)
tensor_c = PassthroughTensor(child=expected)
result_a = tensor_b.__rsub__(tensor_a)
result_b = tensor_b.__rsub__(data_a)
assert result_a == tensor_c
assert result_b == tensor_c
def test__gt__() -> None:
data_a = np.array([1, 2, 3], dtype=np.int32)
data_b = np.array([0, 3, 3], dtype=np.int32)
expected = np.array([True, False, False])
tensor_a = PassthroughTensor(child=data_a)
tensor_b = PassthroughTensor(child=data_b)
tensor_c = PassthroughTensor(child=expected)
result_a = tensor_a.__gt__(tensor_b)
result_b = tensor_a.__gt__(data_b)
assert result_a == tensor_c
assert result_b == tensor_c
def test__ge__() -> None:
data_a = np.array([1, 2, 3], dtype=np.int32)
data_b = np.array([0, 3, 3], dtype=np.int32)
expected = np.array([True, False, True])
tensor_a = PassthroughTensor(child=data_a)
tensor_b = PassthroughTensor(child=data_b)
tensor_c = PassthroughTensor(child=expected)
result_a = tensor_a.__ge__(tensor_b)
result_b = tensor_a.__ge__(data_b)
assert result_a == tensor_c
assert result_b == tensor_c
def test__lt__() -> None:
data_a = np.array([1, 2, 3], dtype=np.int32)
data_b = np.array([2, 1, 3], dtype=np.int32)
expected = np.array([True, False, False])
tensor_a = PassthroughTensor(child=data_a)
tensor_b = PassthroughTensor(child=data_b)
tensor_c = PassthroughTensor(child=expected)
result_a = tensor_a.__lt__(tensor_b)
result_b = tensor_a.__lt__(data_b)
assert result_a == tensor_c
assert result_b == tensor_c
def test__le__() -> None:
data_a = np.array([1, 2, 3], dtype=np.int32)
data_b = np.array([0, 3, 3], dtype=np.int32)
expected = np.array([False, True, True])
tensor_a = PassthroughTensor(child=data_a)
tensor_b = PassthroughTensor(child=data_b)
tensor_c = PassthroughTensor(child=expected)
result_a = tensor_a.__le__(tensor_b)
result_b = tensor_a.__le__(data_b)
assert result_a == tensor_c
assert result_b == tensor_c
def test__ne__() -> None:
data_a = np.array([0, 1, 2], dtype=np.int32)
data_b = np.zeros((3,), dtype=np.int32)
tensor_a = PassthroughTensor(child=data_a)
tensor_b = PassthroughTensor(child=data_b)
result_a = tensor_a.__ne__(tensor_b)
result_b = tensor_b.__ne__(data_a)
assert all(result_a.child) is False
assert all(result_b.child) is False
def test__eq__() -> None:
data_a = np.array([0, 1, 2], dtype=np.int32)
data_b = np.zeros((3,), dtype=np.int32)
tensor_a = PassthroughTensor(child=data_a)
tensor_b = PassthroughTensor(child=data_b)
result_a = tensor_a.__eq__(tensor_b)
result_b = tensor_a.__eq__(data_a)
assert all(result_a.child) is False
assert all(result_b.child) is True
def test__floordiv__() -> None:
data_a = np.array([1, 2, 3], dtype=np.int32)
data_b = np.array([1, 5, 4], dtype=np.int32)
expected = np.array([1, 2, 0], dtype=np.int32)
tensor_a = PassthroughTensor(child=data_a)
tensor_b = PassthroughTensor(child=data_b)
tensor_c = PassthroughTensor(child=expected)
result_a = tensor_a.__floordiv__(tensor_b)
result_b = tensor_a.__floordiv__(data_b)
assert result_a == tensor_c
assert result_b == tensor_c
def test__rfloordiv__() -> None:
data_a = np.array([1, 2, 3], dtype=np.int32)
data_b = torch.tensor([1, 5, 4], dtype=torch.int32)
expected = torch.tensor([1, 2, 0], dtype=torch.int32)
tensor_a = PassthroughTensor(child=data_a)
tensor_b = PassthroughTensor(child=data_b)
tensor_c = PassthroughTensor(child=expected)
result_a = tensor_a.__rfloordiv__(tensor_b)
result_b = tensor_a.__rfloordiv__(data_b)
assert result_a == tensor_c
assert result_b == tensor_c
def test__lshift__() -> None:
data_a = np.array([1, 2, -1], dtype=np.int32)
data_b = np.array([0, 2, 1], dtype=np.int32)
expected = np.array([0, 8, -2], dtype=np.int32)
tensor_a = PassthroughTensor(child=data_a)
tensor_b = PassthroughTensor(child=data_b)
tensor_c = PassthroughTensor(child=expected)
result_a = tensor_a.__lshift__(tensor_b)
result_b = tensor_a.__lshift__(data_b)
assert result_a == tensor_c
assert result_b == tensor_c
def test__rlshift__() -> None:
data_a = np.array([1, 2, -1], dtype=np.int32)
data_b = torch.tensor([0, 2, 1], dtype=torch.int32)
expected = torch.tensor([0, 8, -2], dtype=torch.int32)
tensor_a = PassthroughTensor(child=data_a)
tensor_b = PassthroughTensor(child=data_b)
tensor_c = PassthroughTensor(child=expected)
result_a = tensor_a.__rlshift__(tensor_b)
result_b = tensor_a.__rlshift__(data_b)
assert result_a == tensor_c
assert result_b == tensor_c
def test__rshift__() -> None:
data_a = np.array([1, 2, -1], dtype=np.int32)
data_b = np.array([2, 1, 1], dtype=np.int32)
expected = np.array([0, 1, -1], dtype=np.int32)
tensor_a = PassthroughTensor(child=data_a)
tensor_b = PassthroughTensor(child=data_b)
tensor_c = PassthroughTensor(child=expected)
result_a = tensor_a.__rshift__(tensor_b)
result_b = tensor_a.__rshift__(data_b)
assert result_a == tensor_c
assert result_b == tensor_c
def test__rrshift__() -> None:
data_a = np.array([1, 2, -1], dtype=np.int32)
data_b = torch.tensor([0, 2, 1], dtype=torch.int32)
expected = torch.tensor([0, 8, -1], dtype=torch.int32)
tensor_a = PassthroughTensor(child=data_a)
tensor_b = PassthroughTensor(child=data_b)
tensor_c = PassthroughTensor(child=expected)
result_a = tensor_a.__rrshift__(tensor_b)
result_b = tensor_a.__rrshift__(data_b)
assert result_a == tensor_c
assert result_b == tensor_c
def test__pow__() -> None:
data_a = np.array([1, 2, -1], dtype=np.int32)
data_b = np.array([0, 2, 1], dtype=np.int32)
expected = np.array([1, 4, -1], dtype=np.int32)
tensor_a = PassthroughTensor(child=data_a)
tensor_b = PassthroughTensor(child=data_b)
tensor_c = PassthroughTensor(child=expected)
result_a = tensor_a.__pow__(tensor_b)
result_b = tensor_a.__pow__(data_b)
assert result_a == tensor_c
assert result_b == tensor_c
def test__rpow__() -> None:
data_a = np.array([1, 2, 1], dtype=np.int32)
data_b = torch.tensor([0, 2, -1], dtype=torch.int32)
expected = torch.tensor([0, 4, -1], dtype=torch.int32)
tensor_a = PassthroughTensor(child=data_a)
tensor_b = PassthroughTensor(child=data_b)
tensor_c = PassthroughTensor(child=expected)
result_a = tensor_a.__rpow__(tensor_b)
result_b = tensor_a.__rpow__(data_b)
assert result_a == tensor_c
assert result_b == tensor_c
def test__divmod__() -> None:
data_a = np.array([1, 2, 3], dtype=np.int32)
data_b = | np.array([1, 5, 4], dtype=np.int32) | numpy.array |
import os
import numpy as np
from scipy.stats import mode
from tensorflow.keras.models import Model
from tensorflow.keras.models import Sequential
from tensorflow.keras.models import load_model
from tensorflow.keras.layers import Concatenate
from tensorflow.keras.models import clone_model
# Only trainable for a specific architecture
class SingularEnsemble:
def __init__(self, layers : list, classes : int, n_estimators : int = 50, voting : str = "hard",
sampling : bool = False, bootstrap : bool = True, verbose : int = 1, save_to = False):
self.org = layers
self.n_estimators = n_estimators
self.voting = voting
self.models = []
self.sampling = sampling
self.bootstrap = bootstrap
self.verbose = verbose
self.mainModel = None
self.classes = classes
        # Keep None when no save path is given so aggregate() can skip saving.
        self.path = save_to if save_to else None
#-----------------------------------------------------------------------------------------------------------------------------------------|
# Train the model by giving the parameters as a dictionary
def fit(self, params : dict) -> None:
if self.sampling:
new_data = self.sample(params["x"], params["y"])
for i in range(1, self.n_estimators+1):
if i % self.verbose == 0 or i == 1:
os.system("cls")
print(f"Fitting model number {i}. . .")
if self.sampling:
subset = next(new_data)
params["x"] = subset["x"]
params["y"] = subset["y"]
model = self.create(self.org, i)
self.compile_model(model)
model.fit(**params)
self.models.append(model)
#-----------------------------------------------------------------------------------------------------------------------------------------|
# Compile parameter unpacker
def compile_model(self, model):
return model.compile(**self.compParams)
#-----------------------------------------------------------------------------------------------------------------------------------------|
# Compile the model by the parameters as a dictionary
def compile(self, params : dict) -> None:
self.compParams = params
#-----------------------------------------------------------------------------------------------------------------------------------------|
# Perform bagging and pasting
def sample(self, X, y):
data = np.concatenate((X, y), axis=1)
np.random.shuffle(data)
num_of_subsets = int(len(data) * 0.63)
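        # ~63% of the rows per subset, roughly the expected fraction of unique
        # samples (1 - 1/e ~ 0.632) in a bootstrap draw.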
for _ in range(self.n_estimators):
indices = np.random.choice(np.arange(X.shape[0]), num_of_subsets, replace = self.bootstrap)
new = data[indices]
yield {"x": new[:, :X.shape[1]], "y": new[:, X.shape[1]:]}
#-----------------------------------------------------------------------------------------------------------------------------------------|
# Changes layer names to avoid repeated layer names
def create(self, layers : list, pos : int):
model = Sequential()
for i, layer in enumerate(layers):
name = layer._name[:layer._name.index("_")+1]
layer._name = f"{name}{pos}-{i}"
model.add(layer)
return clone_model(model)
#-----------------------------------------------------------------------------------------------------------------------------------------|
# Connect all the individual models into one
def aggregate(self) -> None:
details = np.array([[model.inputs, model.output] for model in self.models])
inputs, outputs = details[:, 0], details[:, 1]
conn = Concatenate()(list(outputs))
self.mainModel = Model(list(inputs), conn)
        if self.path and os.path.exists(self.path):
            with open(self.path+"var.txt", "w") as f:
                f.write(f"{self.n_estimators} {self.voting} {self.classes}")
        if self.path:
            self.mainModel.save(self.path+"model.h5")
#-----------------------------------------------------------------------------------------------------------------------------------------|
# Make predictions
@staticmethod
def predict(model, sample : np.ndarray, extra : list) -> int:
predictions = []
preds = model.predict([sample for _ in range(extra[0])])
preds = np.hsplit(preds, extra[0])
pred = None
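        # Hard voting: majority vote over the per-model argmax predictions.
        # Soft voting: sum the per-model probability vectors, then take argmax.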
if extra[1] == "hard":
for pred in preds:
predictions.append(np.argmax(pred))
pred = mode(np.array(predictions)).mode[0]
elif extra[1] == "soft":
preds = np.sum(np.array(preds), axis=0)
pred = np.argmax(preds)
return pred
#-----------------------------------------------------------------------------------------------------------------------------------------|
# Model evaluation
@staticmethod
def evaluate(model, X_test : np.ndarray, y_test : np.ndarray, extra : list, batch_size : int) -> list:
losses, accs = [], []
to_bin = False
if y_test.ndim > 1:
to_bin = True
for i in range(0, len(X_test), batch_size):
batch = X_test[i:i+batch_size]
batch_y = y_test[i:i+batch_size]
preds = np.array([model.predict([batch[i].reshape(-1, *X_test.shape[1:]) for j in range(extra[0])]) for i in range(len(batch))])
preds = preds.reshape(len(preds), extra[0], extra[2])
preds = np.argmax(np.sum(preds, axis=1), axis=1)
if to_bin:
batch_y = np.argmax(batch_y, axis=1)
error = batch_y - preds
acc = (len(batch_y) - | np.count_nonzero(error) | numpy.count_nonzero |
import tensorflow as tf
import numpy as np
import multiprocessing
import random
import os
from random import shuffle
import pandas as pd
import h5py
from scipy.integrate import simps
import warnings
from sklearn.metrics import r2_score
DEBUG = True
RUN = False
if DEBUG:
PROJECT_DIR = os.path.dirname(
os.path.dirname(
os.path.realpath(__file__)))
DATA_DIR = os.path.join(PROJECT_DIR, 'data')
TRAIN_DATA_FILE = os.path.join(DATA_DIR, 'train.h5').replace('models/', '')
from utils import mock_gym as kagglegym
else:
TRAIN_DATA_FILE = '../input/train.h5'
import kagglegym
RANDOM_SEED = 8888
np.random.seed(RANDOM_SEED)
tf.set_random_seed(RANDOM_SEED)
def r_score(y_true, y_pred, sample_weight=None, multioutput=None):
# SKL is not self-consistent. Filter out the many deprecation warnings.
with warnings.catch_warnings():
warnings.filterwarnings("ignore",category=DeprecationWarning)
r2 = r2_score(y_true, y_pred, sample_weight=sample_weight, multioutput=multioutput)
r = (np.sign(r2)*np.sqrt(np.abs(r2)))
if r <= -1:
return -1
else:
return r
class DataSet(object):
"""class for dataset processing"""
def __init__(self, path=TRAIN_DATA_FILE):
self.path = path
self.data_dict = self._get_data_dict()
self.df = self._get_df()
self.col_means = None
self.col_stds = None
self.cols = []
def _get_data_dict(self):
with h5py.File(self.path,'r') as hf:
train_hf = hf.get('train')
data_dict = { hf_key: np.array(train_hf.get(hf_key))
for hf_key in train_hf.keys()}
return data_dict
def _get_df(self):
with pd.HDFStore(self.path, "r") as train:
df = train.get("train")
return df
def __repr__(self):
sets = [ "{}: {}".format(key,data_set.shape)
for key, data_set in
                 self.data_dict.items()]
return "; ".join(sets)
def keys(self):
return self.data_dict.keys()
def get(self, key):
return self.data_dict.get(key, None)
def to_df(self):
return self.df
def get_batch(self, slice_index, batch_size, columns=None, random=False):
if random:
samples = self.df.sample(n=batch_size)
else:
num_samples = self.df.shape[0]
if (slice_index+1)*batch_size >= num_samples:
print("Slice is out of range. Taking last batch_size slice")
sample_range = (num_samples - batch_size, num_samples)
else:
sample_range = (slice_index*batch_size, (slice_index+1)*batch_size)
samples = self.df[sample_range[0] : sample_range[1]]
samples_matrix = np.array(samples.as_matrix(columns=columns)) if columns else np.array(samples.as_matrix())
return samples_matrix
def get_numpy_data(self):
df = self.df
# Let's limit the data for now
features = ['technical_20', 'technical_30']
meta = ['y', 'timestamp', 'id']
df = df[features+meta]
means = []
stds = []
# Assuming column order remains consistent throughout the class
for col in df.columns:
if col not in ['y', 'timestamp', 'index', 'id']:
data = df[col].dropna().as_matrix()
means.append(np.mean(data))
stds.append(np.std(data))
self.cols.append(col)
self.col_means = np.array(means)
self.col_stds = np.array(stds)
# Ensure values are sorted by time
df = df.sort_values(by=['id', 'timestamp'], ascending=True)
max_seq_len_raw = 1820
# Simply mean-fill missing values for now
df = df.fillna(df.mean())
ids = np.unique(df['id'].as_matrix())
examples = []
targets = []
weights = []
for id in ids:
slice = df[df.id == id]
num_timesteps = slice.shape[0]
#y = slice['y'].as_matrix()
# Pad df to max seq len
padded = slice.reset_index().reindex(range(max_seq_len_raw),
fill_value=0)
target = padded['y'].as_matrix()
padded.drop('y', axis=1, inplace=True)
padded.drop('timestamp', axis=1, inplace=True)
padded.drop('index', axis=1, inplace=True)
padded.drop('id', axis=1, inplace=True)
example = padded.as_matrix()
examples.append(example)
targets.append(target)
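            # Mask: 1 for real timesteps, 0 for the zero-padding added above.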
weight = [1]*num_timesteps + [0]*(max_seq_len_raw - num_timesteps)
weights.append(weight)
examples = np.array(examples)
targets = np.array(targets)
weights = np.array(weights)
# Normalize the data
#examples = (examples - self.col_means)/self.col_stds
# TODO: Supply these outside the function later: col_means, col_stds
return examples, targets, weights
def normalize(self, data):
return (data - self.col_means)/self.col_stds
def split_valid(self, examples, targets, weights, valid_split_ratio=0.5):
"""
Args:
valid_split_ratio: float range 0-1.; percentage of data reserved
for validation. Note that two validation sets are reserved: unique
ids are reserved entirely for validation, and, latter timesteps for
sequences used in training are also used in validation.
"""
num_ids = examples.shape[0]
valid_num = int(round(num_ids*valid_split_ratio))
examples_train_pre = examples[:-valid_num]
targets_train_pre = targets[:-valid_num]
weights_train_pre = weights[:-valid_num]
examples_valid = examples[-valid_num:]
targets_valid = targets[-valid_num:]
weights_valid = weights[-valid_num:]
examples_train = []
targets_train = []
weights_train = []
examples_train_valid = []
targets_train_valid = []
weights_train_valid = []
valid_len = 900 # Hardcoded for now
for arr1, arr2, arr3 in zip(examples_train_pre, targets_train_pre,
weights_train_pre):
examples_train.append(arr1[:-valid_len])
targets_train.append(arr2[:-valid_len])
weights_train.append(arr3[:-valid_len])
examples_train_valid.append(arr1[-valid_len:])
targets_train_valid.append(arr2[-valid_len:])
weights_train_valid.append(arr3[-valid_len:])
trainset = (np.array(examples_train), np.array(targets_train),
np.array(weights_train))
train_validset = (np.array(examples_train_valid),
np.array(targets_train_valid),
np.array(weights_train_valid))
validset = (examples_valid, targets_valid, weights_valid)
return trainset, train_validset, validset
def get_numpy_batch(self, dataset, batch_size, seq_len):
examples = []
targets = []
weights = []
#for _ in range(batch_size):
while len(targets) < batch_size:
# Sample a random id
idx = np.random.choice(range(dataset[0].shape[0]))
# Take random slice
max_seq_len = dataset[0][idx].shape[0]
assert max_seq_len >= seq_len
slice = np.random.choice(range(max_seq_len - seq_len))
# Let's just go with full length for now
w = dataset[2][idx][slice:slice+seq_len]
if np.sum(w) != len(w):
continue
examples.append(dataset[0][idx][slice:slice+seq_len])
targets.append(dataset[1][idx][slice:slice+seq_len])
weights.append(w)
return | np.array(examples) | numpy.array |
#!/usr/bin/env python
# Copyright 2014-2020 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''
Analytical nuclear gradients for 1-electron spin-free x2c method
Ref.
JCP 135, 084114 (2011); DOI:10.1063/1.3624397
'''
from functools import reduce
import numpy
import scipy.linalg
from pyscf import lib
from pyscf import gto
from pyscf.x2c import x2c
def hcore_grad_generator(x2cobj, mol=None):
'''nuclear gradients of 1-component X2c hcore Hamiltonian (spin-free part only)
'''
if mol is None: mol = x2cobj.mol
xmol, contr_coeff = x2cobj.get_xmol(mol)
if x2cobj.basis is not None:
s22 = xmol.intor_symmetric('int1e_ovlp')
s21 = gto.intor_cross('int1e_ovlp', xmol, mol)
contr_coeff = lib.cho_solve(s22, s21)
get_h1_xmol = gen_sf_hfw(xmol, x2cobj.approx)
def hcore_deriv(atm_id):
h1 = get_h1_xmol(atm_id)
if contr_coeff is not None:
h1 = lib.einsum('pi,xpq,qj->xij', contr_coeff, h1, contr_coeff)
return numpy.asarray(h1)
return hcore_deriv
def gen_sf_hfw(mol, approx='1E'):
approx = approx.upper()
c = lib.param.LIGHT_SPEED
h0, s0 = _get_h0_s0(mol)
e0, c0 = scipy.linalg.eigh(h0, s0)
aoslices = mol.aoslice_by_atom()
nao = mol.nao_nr()
if 'ATOM' in approx:
x0 = numpy.zeros((nao,nao))
for ia in range(mol.natm):
ish0, ish1, p0, p1 = aoslices[ia]
shls_slice = (ish0, ish1, ish0, ish1)
t1 = mol.intor('int1e_kin', shls_slice=shls_slice)
s1 = mol.intor('int1e_ovlp', shls_slice=shls_slice)
with mol.with_rinv_at_nucleus(ia):
z = -mol.atom_charge(ia)
v1 = z * mol.intor('int1e_rinv', shls_slice=shls_slice)
w1 = z * mol.intor('int1e_prinvp', shls_slice=shls_slice)
x0[p0:p1,p0:p1] = x2c._x2c1e_xmatrix(t1, v1, w1, s1, c)
else:
cl0 = c0[:nao,nao:]
cs0 = c0[nao:,nao:]
x0 = scipy.linalg.solve(cl0.T, cs0.T).T
s_nesc0 = s0[:nao,:nao] + reduce(numpy.dot, (x0.T, s0[nao:,nao:], x0))
R0 = x2c._get_r(s0[:nao,:nao], s_nesc0)
c_fw0 = numpy.vstack((R0, numpy.dot(x0, R0)))
h0_fw_half = numpy.dot(h0, c_fw0)
get_h1_etc = _gen_first_order_quantities(mol, e0, c0, x0, approx)
def hcore_deriv(ia):
h1_ao, s1_ao, e1, c1, x1, s_nesc1, R1, c_fw1 = get_h1_etc(ia)
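        # Product rule for C^T h C: dC^T (h C) + its transpose + C^T dh C.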
hfw1 = lib.einsum('xpi,pj->xij', c_fw1, h0_fw_half)
hfw1 = hfw1 + hfw1.transpose(0,2,1)
hfw1+= lib.einsum('pi,xpq,qj->xij', c_fw0, h1_ao, c_fw0)
return hfw1
return hcore_deriv
def _get_h0_s0(mol):
c = lib.param.LIGHT_SPEED
s = mol.intor_symmetric('int1e_ovlp')
t = mol.intor_symmetric('int1e_kin')
v = mol.intor_symmetric('int1e_nuc')
w = mol.intor_symmetric('int1e_pnucp')
nao = s.shape[0]
n2 = nao * 2
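    # Assemble the 2x2 block matrices of the spin-free modified Dirac equation:
    # h = [[V, T], [T, W/(4c^2) - T]],  m = [[S, 0], [0, T/(2c^2)]]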
h = numpy.zeros((n2,n2), dtype=v.dtype)
m = numpy.zeros((n2,n2), dtype=v.dtype)
h[:nao,:nao] = v
h[:nao,nao:] = t
h[nao:,:nao] = t
h[nao:,nao:] = w * (.25/c**2) - t
m[:nao,:nao] = s
m[nao:,nao:] = t * (.5/c**2)
return h, m
def _gen_h1_s1(mol):
c = lib.param.LIGHT_SPEED
s1 = mol.intor('int1e_ipovlp', comp=3)
t1 = mol.intor('int1e_ipkin', comp=3)
v1 = mol.intor('int1e_ipnuc', comp=3)
w1 = mol.intor('int1e_ippnucp', comp=3)
aoslices = mol.aoslice_by_atom()
nao = s1.shape[1]
n2 = nao * 2
def get_h1_s1(ia):
h1 = numpy.zeros((3,n2,n2), dtype=v1.dtype)
m1 = numpy.zeros((3,n2,n2), dtype=v1.dtype)
ish0, ish1, i0, i1 = aoslices[ia]
with mol.with_rinv_origin(mol.atom_coord(ia)):
z = mol.atom_charge(ia)
rinv1 = -z*mol.intor('int1e_iprinv', comp=3)
prinvp1 = -z*mol.intor('int1e_ipprinvp', comp=3)
rinv1 [:,i0:i1,:] -= v1[:,i0:i1]
prinvp1[:,i0:i1,:] -= w1[:,i0:i1]
for i in range(3):
s1cc = numpy.zeros((nao,nao))
t1cc = numpy.zeros((nao,nao))
s1cc[i0:i1,:] =-s1[i,i0:i1]
s1cc[:,i0:i1]-= s1[i,i0:i1].T
t1cc[i0:i1,:] =-t1[i,i0:i1]
t1cc[:,i0:i1]-= t1[i,i0:i1].T
v1cc = rinv1[i] + rinv1[i].T
w1cc = prinvp1[i] + prinvp1[i].T
h1[i,:nao,:nao] = v1cc
h1[i,:nao,nao:] = t1cc
h1[i,nao:,:nao] = t1cc
h1[i,nao:,nao:] = w1cc * (.25/c**2) - t1cc
m1[i,:nao,:nao] = s1cc
m1[i,nao:,nao:] = t1cc * (.5/c**2)
return h1, m1
return get_h1_s1
def _gen_first_order_quantities(mol, e0, c0, x0, approx='1E'):
c = lib.param.LIGHT_SPEED
nao = e0.size // 2
n2 = nao * 2
epq = e0[:,None] - e0
degen_mask = abs(epq) < 1e-7
epq[degen_mask] = 1e200
cl0 = c0[:nao,nao:]
cs0 = c0[nao:,nao:]
s0 = mol.intor('int1e_ovlp')
t0 = mol.intor('int1e_kin')
t0x0 = numpy.dot(t0, x0) * (.5/c**2)
s_nesc0 = s0[:nao,:nao] + numpy.dot(x0.T, t0x0)
w_s, v_s = scipy.linalg.eigh(s0)
w_sqrt = numpy.sqrt(w_s)
s_nesc0_vbas = reduce(numpy.dot, (v_s.T, s_nesc0, v_s))
R0_mid = numpy.einsum('i,ij,j->ij', 1./w_sqrt, s_nesc0_vbas, 1./w_sqrt)
wr0, vr0 = scipy.linalg.eigh(R0_mid)
wr0_sqrt = numpy.sqrt(wr0)
# R0 in v_s basis
R0 = numpy.dot(vr0/wr0_sqrt, vr0.T)
R0 *= w_sqrt
R0 /= w_sqrt[:,None]
# Transform R0 back
R0 = reduce(numpy.dot, (v_s, R0, v_s.T))
get_h1_s1 = _gen_h1_s1(mol)
def get_first_order(ia):
h1ao, s1ao = get_h1_s1(ia)
h1mo = lib.einsum('pi,xpq,qj->xij', c0.conj(), h1ao, c0)
s1mo = lib.einsum('pi,xpq,qj->xij', c0.conj(), s1ao, c0)
if 'ATOM' in approx:
e1 = c1_ao = x1 = None
s_nesc1 = lib.einsum('pi,xpq,qj->xij', x0, s1ao[:,nao:,nao:], x0)
s_nesc1+= s1ao[:,:nao,:nao]
else:
f1 = h1mo[:,:,nao:] - s1mo[:,:,nao:] * e0[nao:]
c1 = f1 / -epq[:,nao:]
e1 = f1[:,nao:]
e1[:,~degen_mask[nao:,nao:]] = 0
c1_ao = lib.einsum('pq,xqi->xpi', c0, c1)
cl1 = c1_ao[:,:nao]
cs1 = c1_ao[:,nao:]
tmp = cs1 - lib.einsum('pq,xqi->xpi', x0, cl1)
x1 = scipy.linalg.solve(cl0.T, tmp.reshape(-1,nao).T)
x1 = x1.T.reshape(3,nao,nao)
s_nesc1 = lib.einsum('xpi,pj->xij', x1, t0x0)
s_nesc1 = s_nesc1 + s_nesc1.transpose(0,2,1)
s_nesc1+= lib.einsum('pi,xpq,qj->xij', x0, s1ao[:,nao:,nao:], x0)
s_nesc1+= s1ao[:,:nao,:nao]
R1 = numpy.empty((3,nao,nao))
c_fw1 = | numpy.empty((3,n2,nao)) | numpy.empty |
# -*- coding: utf-8 -*-
#
import numpy
import pytest
import orthopy
import quadpy
from quadpy.sphere.helpers import cartesian_to_spherical
# Note
# ====
# Instead of testing exact integration against of all monomials of degree at
# most l, one can instead test exact integration of all _spherical harmonics_
# of degree at most l. While there are 2**l monomials, there are only l**2
# spherical harmonics.
@pytest.mark.parametrize(
"scheme", [quadpy.sphere.Lebedev("3a"), quadpy.sphere.Stroud("U3 14-1")]
)
def test_spherical_harmonic(scheme):
"""Assert the norm of the spherical harmonic
Y_1^1(phi, theta) = -1/2 sqrt(3/2/pi) * exp(i*phi) * sin(theta)
is indeed 1, i.e.,
int_0^2pi int_0^pi
Y_1^1(phi, theta) * conj(Y_1^1(phi, theta)) * sin(theta)
dphi dtheta = 1.
"""
def spherical_harmonic_11(azimuthal, polar):
# y00 = 1.0 / numpy.sqrt(4*numpy.pi)
y11 = (
-0.5
* numpy.sqrt(3.0 / 2.0 / numpy.pi)
* numpy.exp(1j * azimuthal)
* numpy.sin(polar)
)
return y11 * numpy.conjugate(y11)
val = quadpy.sphere.integrate_spherical(spherical_harmonic_11, rule=scheme)
assert abs(val - 1.0) < 1.0e-14
return
@pytest.mark.parametrize(
"scheme,tol",
[(quadpy.sphere.BazantOh(index), 1.0e-10) for index in ["9", "11", "13"]]
+ [
(quadpy.sphere.HeoXu(index), 1.0e-6)
for index in [
"13",
"15",
"17",
"19-1",
"19-2",
"21-1",
"21-2",
"21-3",
"21-4",
"21-5",
"21-6",
"23-1",
"23-2",
"23-3",
"25-1",
"25-2",
"27-1",
"27-2",
"27-3",
"29",
"31",
"33",
"35",
"37",
"39-1",
"39-2",
]
]
+ [(quadpy.sphere.FliegeMaier(index), 1.0e-6) for index in ["4", "9", "16", "25"]]
+ [
(quadpy.sphere.Lebedev(index), 1.0e-11)
for index in [
"3a",
"3b",
"3c",
"5",
"7",
"9",
"11",
"13",
"15",
"17",
"19",
"21",
"23",
"25",
"27",
"29",
"31",
"35",
"41",
"47",
"53",
"59",
"65",
"71",
"77",
"83",
"89",
"95",
"101",
"107",
"113",
"119",
# The highest degree formulas are too memory-intensive for circleci,
# and the tests are oom-killed. A workaround would be to not test the
# entire tree at once, but split it up.
# Check <https://stackoverflow.com/q/47474140/353337>.
# TODO reenable
# "125", "131"
]
]
+ [
(quadpy.sphere.Stroud(k), 1.0e-13)
for k in [
"U3 3-1",
"U3 5-1",
"U3 5-2",
"U3 5-3",
"U3 5-4",
"U3 5-5",
"U3 7-1",
"U3 7-2",
"U3 8-1",
"U3 9-1",
"U3 9-2",
"U3 9-3",
"U3 11-1",
"U3 11-3",
"U3 14-1",
]
]
+ [(quadpy.sphere.Stroud(k), 1.0e-12) for k in ["U3 11-2"]],
)
def test_scheme_cartesian(scheme, tol):
exact_val = numpy.zeros(scheme.degree + 1)
exact_val[0] = numpy.sqrt(4 * numpy.pi)
def sph_tree_cartesian(x):
flt = numpy.vectorize(float)
azimuthal, polar = cartesian_to_spherical(flt(x).T).T
return numpy.concatenate(
orthopy.sphere.tree_sph(
polar, azimuthal, scheme.degree + 1, standardization="quantum mechanic"
)
)
assert scheme.points.dtype == numpy.float64, scheme.name
assert scheme.weights.dtype == numpy.float64, scheme.name
vals = quadpy.sphere.integrate(
sph_tree_cartesian, center=numpy.array([0.0, 0.0, 0.0]), radius=1.0, rule=scheme
)
# The exact value is sqrt(4*pi) for the Y_0^0, and 0 otherwise.
err = vals
err[0] -= | numpy.sqrt(4.0 * numpy.pi) | numpy.sqrt |
"""Test calculation of implied timescales."""
import numpy as np
from ivac.linear import _vac_its as vac_its
from ivac.linear import _ivac_its as ivac_its
def vac_eval(sigmas, lag):
return np.exp(-sigmas * lag)
def ivac_eval(sigmas, minlag, maxlag, lagstep):
assert (maxlag - minlag) % lagstep == 0
lags = np.arange(minlag, maxlag + 1, lagstep)
return np.sum(np.exp(-np.outer(sigmas, lags)), axis=-1)
def test_vac_its():
lags = np.unique(np.rint(np.logspace(0, 4, 100)).astype(int))
sigmas = np.logspace(-4, 1, 100)
ref_its = 1.0 / sigmas
assert np.all(ref_its > 0.0)
assert not np.any(np.isnan(ref_its))
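    # With eigenvalues exp(-sigma * lag), the implied timescale -lag / log(eval)
    # equals 1 / sigma, so the recovered values should match ref_its wherever
    # they are defined.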
for lag in lags:
evals = vac_eval(sigmas, lag)
assert np.all(evals >= 0.0)
assert np.all(evals <= 1.0)
test_its = vac_its(evals, lag)
mask = np.logical_not(np.isnan(test_its))
assert np.allclose(test_its[mask], ref_its[mask])
assert np.all(np.isnan(test_its[np.logical_not(mask)]))
length = len(test_its[mask])
assert np.all(np.logical_not(np.isnan(test_its[:length])))
assert np.all(np.isnan(test_its[length:]))
def test_ivac_its():
sigmas = np.logspace(-4, 1, 100)
ref_its = 1.0 / sigmas
minlags = np.unique(np.rint(np.logspace(0, 4, 100)).astype(int))
lagsteps = np.unique(np.rint(np.logspace(0, 3, 100)).astype(int))
nlags = np.arange(100)
assert np.all(ref_its > 0.0)
assert not np.any(np.isnan(ref_its))
for _ in range(100):
minlag = np.random.choice(minlags)
lagstep = np.random.choice(lagsteps)
maxlag = minlag + lagstep * np.random.choice(nlags)
        numlags = (maxlag - minlag) // lagstep + 1
evals = ivac_eval(sigmas, minlag, maxlag, lagstep)
assert np.all(evals >= 0.0)
assert np.all(evals <= ((maxlag - minlag) // lagstep + 1))
test_its = ivac_its(evals, minlag, maxlag, lagstep)
mask = np.logical_not( | np.isnan(test_its) | numpy.isnan |
# --------------
# Importing header files
import numpy as np
import warnings
warnings.filterwarnings('ignore')
#New record
new_record = np.array([[50, 9, 4, 1, 0, 0, 40, 0]])
#Reading file
data = np.genfromtxt(path, delimiter=",", skip_header=1)
#Code starts here
census = np.concatenate((data, new_record), axis=0)
age = np.array([i[0] for i in census])
max_age = np.max(age)
min_age = | np.min(age) | numpy.min |
# Lint as: python3
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Precompute variables mapping inputs to outputs for complex functions."""
import collections
import copy
import functools
import itertools
import os
from typing import Any, Mapping, Optional, Sequence, Tuple
from absl import logging
import dataclasses
from dm_alchemy import io
from dm_alchemy.encode import precomputed_maps_pb2
from dm_alchemy.encode import precomputed_maps_proto_conversion
from dm_alchemy.ideal_observer import helpers
from dm_alchemy.types import graphs
from dm_alchemy.types import helpers as types_helpers
from dm_alchemy.types import stones_and_potions
import frozendict
import numpy as np
@dataclasses.dataclass
class PrecomputedMaps:
"""Functions to get the observations for different content types for each element."""
graphs_list: np.ndarray
graph_index_distr: np.ndarray
partial_graph_to_matching_graphs: np.ndarray
partial_graph_update: np.ndarray
stone_to_reward: np.ndarray
drop_reward: np.ndarray
partial_graph_index_to_possible_index: Mapping[int, int]
graphs_with_edge: np.ndarray
edge_exists: np.ndarray
stone_maps: np.ndarray
potion_maps: np.ndarray
possible_latent_dims: np.ndarray
poss_p_maps: np.ndarray
poss_s_maps: np.ndarray
react_result: np.ndarray
possible_latent_dirs: np.ndarray
partial_potion_map_update: np.ndarray
partial_stone_map_update: np.ndarray
potion_masks: np.ndarray
potion_to_pair: np.ndarray
perm_index_to_index: np.ndarray
index_to_perm_index: np.ndarray
missing_edge_no_change: np.ndarray
update_partial_graph_no_change: np.ndarray
partial_stone_map_to_stone_map: np.ndarray
no_effect_from_partial_chem: np.ndarray
def __deepcopy__(self, memo):
    # Don't deepcopy precomputed maps: it takes too long, uses too much memory,
    # and the contents never change after construction, so a single shared
    # instance is enough.
return copy.copy(self)
def save(self, folder):
"""Saves the precomputed maps to serialized protos in the folder passed in."""
precomputed_maps_proto_conversion.write_graph_array(
self.graphs_list, folder, 'graphs_list')
for int_array, name in [
(self.stone_to_reward, 'stone_to_reward'),
(self.drop_reward, 'drop_reward'),
(self.edge_exists, 'edge_exists'),
(self.stone_maps, 'stone_maps'),
(self.potion_maps, 'potion_maps'),
(self.react_result, 'react_result'),
(self.partial_stone_map_update, 'partial_stone_map_update'),
(self.potion_to_pair, 'potion_to_pair'),
(self.perm_index_to_index, 'perm_index_to_index'),
(self.index_to_perm_index, 'index_to_perm_index'),
(self.missing_edge_no_change, 'missing_edge_no_change'),
(self.update_partial_graph_no_change, 'update_partial_graph_no_change'),
(self.partial_stone_map_to_stone_map, 'partial_stone_map_to_stone_map'),
]:
precomputed_maps_proto_conversion.write_int_array(int_array, folder, name)
for int_array, name in [
(self.partial_graph_to_matching_graphs,
'partial_graph_to_matching_graphs'),
(self.graphs_with_edge, 'graphs_with_edge'),
(self.potion_masks, 'potion_masks'),
(self.no_effect_from_partial_chem, 'no_effect_from_partial_chem'),
]:
precomputed_maps_proto_conversion.write_bitfield_array(
int_array, folder, name)
precomputed_maps_proto_conversion.write_float_array(
self.graph_index_distr, folder, 'graph_index_distr')
for int_array, name in [
(self.possible_latent_dims, 'possible_latent_dims'),
(self.partial_graph_update, 'partial_graph_update'),
(self.poss_p_maps, 'poss_p_maps'),
(self.poss_s_maps, 'poss_s_maps'),
]:
precomputed_maps_proto_conversion.write_list_ints_array(
int_array, folder, name)
precomputed_maps_proto_conversion.write_possible_latent_dirs(
self.possible_latent_dirs, folder, 'possible_latent_dirs')
precomputed_maps_proto_conversion.write_partial_potion_map_update(
self.partial_potion_map_update, folder, 'partial_potion_map_update')
proto = precomputed_maps_pb2.PartialGraphIndexToPossibleIndex(
entries=self.partial_graph_index_to_possible_index)
io.write_proto(
os.path.join(folder, 'partial_graph_index_to_possible_index'),
proto.SerializeToString())
def _load_from_folder(folder):
"""Loads precomputed maps from serialized protos in the folder passed in."""
kwargs = {'graphs_list': precomputed_maps_proto_conversion.load_graph_array(
folder, 'graphs_list')}
for name in [
'stone_to_reward', 'drop_reward', 'edge_exists', 'stone_maps',
'potion_maps', 'react_result', 'partial_stone_map_update',
'potion_to_pair', 'perm_index_to_index', 'index_to_perm_index',
'missing_edge_no_change', 'update_partial_graph_no_change',
'partial_stone_map_to_stone_map']:
kwargs[name] = precomputed_maps_proto_conversion.load_int_array(
folder, name)
for name in [
'partial_graph_to_matching_graphs', 'graphs_with_edge', 'potion_masks',
'no_effect_from_partial_chem']:
kwargs[name] = precomputed_maps_proto_conversion.load_bitfield_array(
folder, name)
for name in [
'possible_latent_dims', 'poss_p_maps', 'poss_s_maps',
'partial_graph_update']:
kwargs[name] = precomputed_maps_proto_conversion.load_list_ints_array(
folder, name)
kwargs['graph_index_distr'] = (
precomputed_maps_proto_conversion.load_float_array(
folder, 'graph_index_distr'))
kwargs['possible_latent_dirs'] = (
precomputed_maps_proto_conversion.load_possible_latent_dirs(
folder, 'possible_latent_dirs'))
kwargs['partial_potion_map_update'] = (
precomputed_maps_proto_conversion.load_partial_potion_map_update(
folder, 'partial_potion_map_update'))
serialized = io.read_proto(os.path.join(
folder, 'partial_graph_index_to_possible_index'))
proto = precomputed_maps_pb2.PartialGraphIndexToPossibleIndex.FromString(
serialized)
kwargs['partial_graph_index_to_possible_index'] = proto.entries
return PrecomputedMaps(**kwargs)
# Alias these for readability
AlignedStone = stones_and_potions.AlignedStone
AlignedStoneIndex = stones_and_potions.AlignedStoneIndex
PerceivedPotion = stones_and_potions.PerceivedPotion
PerceivedPotionIndex = stones_and_potions.PerceivedPotionIndex
LatentStone = stones_and_potions.LatentStone
LatentPotion = stones_and_potions.LatentPotion
StoneMap = stones_and_potions.StoneMap
PotionMap = stones_and_potions.PotionMap
PartialStoneMap = stones_and_potions.PartialStoneMap
PartialPotionMap = stones_and_potions.PartialPotionMap
PartialGraph = graphs.PartialGraph
aligned_stone_from_index = stones_and_potions.aligned_stone_from_index
perceived_potion_from_index = stones_and_potions.perceived_potion_from_index
latent_stone_from_index = stones_and_potions.latent_stone_from_index
latent_potion_from_index = stones_and_potions.latent_potion_from_index
stone_map_from_index = stones_and_potions.stone_map_from_index
potion_map_from_index = stones_and_potions.potion_map_from_index
partial_stone_map_from_index = stones_and_potions.partial_stone_map_from_index
partial_potion_map_from_index = stones_and_potions.partial_potion_map_from_index
partial_graph_from_index = graphs.partial_graph_from_index
_SIMPLE_TYPE_COUNT = frozendict.frozendict({
'PotionMap': PotionMap.num_types,
'StoneMap': StoneMap.num_types,
'LatentPotion': LatentPotion.num_types,
'LatentStone': LatentStone.num_types,
'PerceivedPotion': PerceivedPotion.num_types,
'AlignedStone': AlignedStone.num_types,
'PartialPotionMap_dim': PartialPotionMap.num_axis_assignments,
'PartialPotionMap_dir': PartialPotionMap.num_dir_assignments,
'PartialStoneMap': PartialStoneMap.num_types,
'dim': stones_and_potions.get_num_axes(),
'dir': stones_and_potions.get_num_dirs(),
})
_SIMPLE_TYPE_RECONSTRUCTOR = frozendict.frozendict({
'StoneMap': stone_map_from_index,
'LatentPotion': latent_potion_from_index,
'LatentStone': latent_stone_from_index,
'PerceivedPotion': perceived_potion_from_index,
'AlignedStone': aligned_stone_from_index,
'PartialStoneMap': partial_stone_map_from_index,
'dim': lambda x: x,
'dir': stones_and_potions.index_to_dir,
})
# Dict of reconstructors for indices which are passed in additional data.
_INDICES_PASSED_IN_RECONSTRUCTOR = frozendict.frozendict({
'graph_important_edges':
lambda x: (latent_stone_from_index(x[0]), latent_stone_from_index(x[1]))
,
'possible_partial_graph_indices': partial_graph_from_index,
'nodes': lambda x: x
})
def partial_potion_map_part_index(data, dim=0, direction=0):
return partial_potion_map_from_index(
(dim, direction), data['index_to_perm_index'])
_RECONSTRUCTORS_REQUIRING_DATA = frozendict.frozendict({
'PotionMap':
lambda i, data: potion_map_from_index(i, data['index_to_perm_index']),
'PartialPotionMap_dim':
lambda i, data: partial_potion_map_part_index(data, dim=i),
'PartialPotionMap_dir':
lambda i, data: partial_potion_map_part_index(data, direction=i),
})
_TYPE_FROM_TUPLE_INDEX = frozendict.frozendict({
'PartialPotionMap': (
('PartialPotionMap_dim', 'PartialPotionMap_dir'),
lambda i, data: partial_potion_map_part_index(data, i[0], i[1])),
})
PRECOMPUTED_LEVEL_FILES_DIR = 'ideal_observer/data'
def _get_type_count(current_type: str, additional_data: Mapping[str, Any]):
if current_type in _SIMPLE_TYPE_COUNT:
return _SIMPLE_TYPE_COUNT[current_type]
return len(additional_data[current_type])
def _get_indices_and_reconstructor(current_type, additional_data):
"""For a given type gets valid indices and a method to reconstruct from index."""
if 'enumerated_' in current_type:
index_gen, reconstructor = _get_indices_and_reconstructor(
current_type.replace('enumerated_', ''), additional_data)
return enumerate(index_gen), lambda x: reconstructor(x[1])
if current_type in _SIMPLE_TYPE_RECONSTRUCTOR:
return (range(_SIMPLE_TYPE_COUNT[current_type]),
_SIMPLE_TYPE_RECONSTRUCTOR[current_type])
if current_type in _INDICES_PASSED_IN_RECONSTRUCTOR:
return (additional_data[current_type],
_INDICES_PASSED_IN_RECONSTRUCTOR[current_type])
if current_type in _RECONSTRUCTORS_REQUIRING_DATA:
return (range(_SIMPLE_TYPE_COUNT[current_type]),
functools.partial(_RECONSTRUCTORS_REQUIRING_DATA[current_type],
data=additional_data))
if current_type in _TYPE_FROM_TUPLE_INDEX:
sub_types, reconstructor = _TYPE_FROM_TUPLE_INDEX[current_type]
sub_indices = []
for sub_type in sub_types:
index_gen, _ = _get_indices_and_reconstructor(sub_type, additional_data)
sub_indices.append(index_gen)
return itertools.product(*sub_indices), functools.partial(
reconstructor, data=additional_data)
def _reconstructed_elements(
to_map: Mapping[str, str],
additional_data: Mapping[str, np.ndarray]):
"""Generator for map from indices to elements."""
# Get the valid indices and a reconstructor for each type in to_map, then
# iterate over the cartesian product of all the possibilities.
indices_and_reconstructors = [
_get_indices_and_reconstructor(current_type, additional_data)
for current_type in to_map.values()]
names = to_map.keys()
indices = [elt[0] for elt in indices_and_reconstructors]
reconstructors = [elt[1] for elt in indices_and_reconstructors]
reconstructed = []
# Indices may be generators and we iterate through twice so we must make a
# copy
for type_indices, reconstructor in zip(
copy.deepcopy(indices), reconstructors):
reconstructed.append([reconstructor(i) for i in type_indices])
for current_index, current_element in zip(
itertools.product(*indices), itertools.product(*reconstructed)):
# We have to make a copy of the element before returning it because if it is
# mutable and gets changed we don't want the change to be there for later
# iterations.
yield (collections.OrderedDict(
[(name, i) for name, i in zip(names, current_index)]),
collections.OrderedDict(
[(name, copy.deepcopy(e)) for name, e in zip(
names, current_element)]))
_RESULT_TYPE_TO_EMPTY_RESULT = {
# Use numpy object type to store python ints rather than numpy ints.
'int': lambda s: np.zeros(s, dtype=np.object),
# Create an array with an empty list at each entry.
'list': lambda s: np.frompyfunc(list, 0, 1)(np.empty(s, dtype=object)),
'tuple': lambda s: np.frompyfunc(tuple, 0, 1)(np.empty(s, dtype=object)),
}
def _empty_result(to_map, result_type, additional_data=None):
shape = []
for current_type in to_map:
if current_type in _TYPE_FROM_TUPLE_INDEX:
shape.extend([_get_type_count(sub_type, additional_data) for sub_type in
_TYPE_FROM_TUPLE_INDEX[current_type][0]])
else:
shape.append(_get_type_count(current_type, additional_data))
shape = tuple(shape)
return _RESULT_TYPE_TO_EMPTY_RESULT[result_type](shape)
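# A minimal sketch of what _empty_result produces (assuming the standard
# alchemy setting where get_num_axes() == 3 and get_num_dirs() == 2):
#   _empty_result(['dim', 'dir'], 'int')   -> object array of zeros, shape (3, 2)
#   _empty_result(['dim', 'dir'], 'list')  -> array of independent empty lists, shape (3, 2)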
LoopHelper = collections.namedtuple('LoopHelper', 'empty_result gen')
def _precompute_loop_helper(
to_map, result_type, additional_data=None, result_to_map=None):
"""Creates an empty results array and generator for indices and elements.
Args:
to_map: A list of types to map optionally with a name for the index and the
element associated with each type. If no name is provided the type name
itself will be used as the name. See functions below for example usages.
result_type: The type of each element in the result matrix.
additional_data: Additional data required to loop over the types passed in
and reconstruct the elements.
result_to_map: A list of types which index the result. If none is provided
then it is assumed to be the same as to_map.
Returns:
A LoopHelper type containing an empty numpy array and a generator which will
loop through all of the valid indices and elements.
"""
# Passing a name is optional - if no name is passed then use the type string.
to_map = collections.OrderedDict(
[elt if isinstance(elt, tuple) else (elt, elt) for elt in to_map])
if result_to_map is None:
result_to_map = to_map.values()
# Remove enumerated from result_to_map
result_to_map = [elt.replace('enumerated_', '') for elt in result_to_map]
empty_result = _empty_result(result_to_map, result_type, additional_data)
gen = functools.partial(_reconstructed_elements, to_map, additional_data)
return LoopHelper(empty_result, gen)
def get_partial_graph_update(
all_graphs, graph_important_edges,
possible_partial_graph_indices, partial_graph_index_to_possible_index
) -> np.ndarray:
"""Updates partial graph after seeing that edge exists."""
# Create an array to hold results with an empty list at each entry.
result, gen = _precompute_loop_helper(
['graph_important_edges', 'possible_partial_graph_indices'], 'list',
additional_data={
'graph_important_edges': graph_important_edges,
'possible_partial_graph_indices': possible_partial_graph_indices},
result_to_map=['LatentStone', 'LatentStone'])
for indices, elements in gen():
latent_stone_index, latent_result_index = indices['graph_important_edges']
latent_stone, latent_result = elements['graph_important_edges']
partial_graph = elements['possible_partial_graph_indices']
partial_graph.add_edge(latent_stone, latent_result, graphs.KNOWN_EDGE)
partial_graph.update(all_graphs)
poss_index = partial_graph_index_to_possible_index[partial_graph.index()]
result[latent_stone_index, latent_result_index].append(poss_index)
result[latent_result_index, latent_stone_index].append(poss_index)
return result
def get_partial_graph_to_matching_graphs(
all_graphs, possible_partial_graph_indices: np.ndarray) -> np.ndarray:
"""Gets list of graphs matching the partial graph."""
result, gen = _precompute_loop_helper(
['enumerated_possible_partial_graph_indices'], 'int',
additional_data={
'possible_partial_graph_indices': possible_partial_graph_indices})
for indices, elements in gen():
i, _ = indices['enumerated_possible_partial_graph_indices']
partial_graph = elements['enumerated_possible_partial_graph_indices']
matches = partial_graph.matching_graphs(all_graphs, return_indices=True)
result[i] = helpers.list_to_bitfield(matches)
return result
def get_graphs_with_edge(valid_graphs, index_to_perm_index) -> np.ndarray:
"""Array of bitfields of graphs which have the given edge given the maps."""
nodes = graphs.all_nodes_in_graph()
result, gen = _precompute_loop_helper(
['StoneMap', 'PotionMap', 'AlignedStone', 'PerceivedPotion'], 'int',
additional_data={'index_to_perm_index': index_to_perm_index})
for indices, elements in gen():
stone_map = elements['StoneMap']
potion_map = elements['PotionMap']
aligned_stone = elements['AlignedStone']
perceived_potion = elements['PerceivedPotion']
latent_potion = potion_map.apply(perceived_potion)
potion_in_stone_space = stone_map.apply_to_potion(latent_potion)
start_node = nodes.get_node_by_coords(list(aligned_stone.aligned_coords))
end_node_coords = copy.deepcopy(aligned_stone.aligned_coords)
end_node_coords[potion_in_stone_space.latent_dim] += (
2 * potion_in_stone_space.latent_dir)
end_node_coord = end_node_coords[potion_in_stone_space.latent_dim]
if end_node_coord < -1 or end_node_coord > 1:
# Not in any graph
result[tuple(indices.values())] = 0
continue
end_node = nodes.get_node_by_coords(list(end_node_coords))
poss_graphs = [i for i, g in enumerate(valid_graphs)
if g.edge_list.has_edge(start_node, end_node)]
graphs_bitfield = helpers.list_to_bitfield(poss_graphs)
result[tuple(indices.values())] = graphs_bitfield
return result
def get_edge_exists(possible_partial_graph_indices: np.ndarray) -> np.ndarray:
"""Checks if an edge exists given partial graph info."""
graph_nodes = graphs.all_nodes_in_graph()
result, gen = _precompute_loop_helper(
['enumerated_possible_partial_graph_indices', 'enumerated_nodes', 'dim'],
'int', additional_data={
'possible_partial_graph_indices': possible_partial_graph_indices,
'nodes': graph_nodes.nodes})
for indices, elements in gen():
i, _ = indices['enumerated_possible_partial_graph_indices']
partial_graph = elements['enumerated_possible_partial_graph_indices']
start_node_index, start_node = indices['enumerated_nodes']
dim = indices['dim']
start_coords = start_node.coords
end_coords = copy.deepcopy(start_coords)
end_coords[dim] = -start_coords[dim]
end_node_ = graph_nodes.get_node_by_coords(end_coords)
assert end_node_ is not None
end_node: graphs.Node = end_node_
result[i, start_node_index, dim] = partial_graph.known_adj_mat[
start_node_index, end_node.idx]
return result
def get_possible_partial_graph_indices(
graph_important_edges: Sequence[Tuple[int, int]],
graphs_list: Sequence[graphs.Graph]
) -> np.ndarray:
"""Calculates an exhaustive list of possible partial graphs.
This is smaller than the list of partial graphs we can represent because some
partial graphs are impossible. For example graphs which are known to be
disconnected.
It is important to use only the possible partial graphs because this makes it
practical to store maps over all possibilities.
Args:
graph_important_edges: List of the edges which may exist in a graph.
graphs_list: List of all valid graphs.
Returns:
The list of partial graph indices.
"""
def remaining_edges(g):
ret = []
for edge in graph_important_edges:
if g.known_adj_mat[edge] == types_helpers.UNKNOWN:
ret.append(edge)
return ret
# TODO(b/173785715): Start with what we can deduce from the graphs_list.
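# The loop below is a breadth-first search over partial graphs: starting from
# the fully-unknown partial graph, each remaining edge is set to absent (0) or
# present (1) in turn, consequences are propagated with update(), and any newly
# reachable partial graph index is queued for further expansion.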
to_expand = [PartialGraph().index()]
visited = {PartialGraph().index()}
while to_expand:
current_node = to_expand[0]
to_expand = to_expand[1:]
current_graph = partial_graph_from_index(current_node)
for e in remaining_edges(current_graph):
for val in [0, 1]:
new_partial = copy.deepcopy(current_graph)
new_partial.known_adj_mat[e] = val
new_partial.known_adj_mat[e[1], e[0]] = val
new_partial.update(graphs_list)
new_partial_index = new_partial.index()
if new_partial_index not in visited:
visited.add(new_partial_index)
to_expand.append(new_partial_index)
return np.array(list(sorted(visited)), dtype=np.object)
def get_poss_potion_maps_and_stone_maps(
perm_index_to_index: np.ndarray
) -> Tuple[np.ndarray, np.ndarray]:
"""Gets a list of potion and stone maps possible given an observation."""
poss_stone_maps, gen = _precompute_loop_helper(
[('stone', 'AlignedStone'), ('potion', 'PerceivedPotion'),
('result', 'AlignedStone')], 'list')
# In this function we get 2 results at the same time so make another.
poss_potion_maps = _empty_result(
['AlignedStone', 'PerceivedPotion', 'AlignedStone'], 'list')
for indices, elements in gen():
potion_maps, stone_maps = stones_and_potions.one_action_outcome(
elements['stone'], elements['potion'], elements['result'],
perm_index_to_index)
poss_potion_maps[tuple(indices.values())] = potion_maps
poss_stone_maps[tuple(indices.values())] = stone_maps
return poss_potion_maps, poss_stone_maps
def get_possible_latent_dims(
index_to_perm_index: np.ndarray
) -> np.ndarray:
"""Gets a list of possible latent dimensions given a potion and partial map."""
result, gen = _precompute_loop_helper(
['PerceivedPotion', 'PartialPotionMap_dim'], 'list',
additional_data={'index_to_perm_index': index_to_perm_index})
for indices, elements in gen():
partial_potion_map = elements['PartialPotionMap_dim']
perceived_potion = elements['PerceivedPotion']
result[tuple(indices.values())] = (
partial_potion_map.possible_latent_dims(perceived_potion))
return result
def get_react_result(
possible_partial_graph_indices: np.ndarray, edge_exists: np.ndarray,
drop_reward: np.ndarray
) -> np.ndarray:
"""Gets the resulting stone when applying a potion to a stone."""
result, gen = _precompute_loop_helper(
['AlignedStone', 'dim', 'dir',
'enumerated_possible_partial_graph_indices'], 'int',
additional_data={
'possible_partial_graph_indices': possible_partial_graph_indices})
for indices, elements in gen():
aligned_stone = elements['AlignedStone']
latent_dim = elements['dim']
latent_dir = elements['dir']
aligned_stone_index = indices['AlignedStone']
latent_dir_index = indices['dir']
partial_graph_index, _ = indices[
'enumerated_possible_partial_graph_indices']
# If we know the edge doesn't exist do not consider the possibility
# that the stone changes.
if edge_exists[
partial_graph_index, drop_reward[aligned_stone_index],
latent_dim] == graphs.NO_EDGE:
result[aligned_stone_index, latent_dim, latent_dir_index,
partial_graph_index] = helpers.IMPOSSIBLE
else:
result[aligned_stone_index, latent_dim, latent_dir_index,
partial_graph_index] = stones_and_potions.react(
aligned_stone, latent_dim, latent_dir).index()
return result
def get_possible_latent_dirs(index_to_perm_index: np.ndarray) -> np.ndarray:
"""Gets a list of possible latent dimensions given maps and stone and potion."""
result, gen = _precompute_loop_helper(
['PartialPotionMap_dir', 'PartialStoneMap', 'dim', 'PerceivedPotion',
'AlignedStone'], 'tuple',
additional_data={'index_to_perm_index': index_to_perm_index})
for indices, elements in gen():
partial_potion_map = elements['PartialPotionMap_dir']
partial_stone_map = elements['PartialStoneMap']
latent_dim = elements['dim']
perceived_potion = elements['PerceivedPotion']
aligned_stone = elements['AlignedStone']
latent_dirs_stone_dirs = (
stones_and_potions.possible_latent_dirs_and_stone_dirs(
perceived_potion, latent_dim, partial_potion_map,
partial_stone_map))
result[tuple(indices.values())] = (
stones_and_potions.latent_dirs_on_stone(
aligned_stone, latent_dim, partial_stone_map,
latent_dirs_stone_dirs))
return result
def get_partial_potion_map_update(
index_to_perm_index: np.ndarray, perm_index_to_index: np.ndarray
) -> np.ndarray:
"""Updates a partial potion map given an observation."""
result, gen = _precompute_loop_helper(
[('stone', 'AlignedStone'), ('potion', 'PerceivedPotion'),
('result', 'AlignedStone'), 'PartialPotionMap'], 'tuple',
additional_data={'index_to_perm_index': index_to_perm_index})
for indices, elements in gen():
stone_index = indices['stone']
potion_index = indices['potion']
result_index = indices['result']
partial_potion_map_index = indices['PartialPotionMap']
stone = elements['stone']
potion = elements['potion']
result_stone = elements['result']
partial_potion_map = elements['PartialPotionMap']
result[stone_index, potion_index, result_index, partial_potion_map_index[0],
partial_potion_map_index[1]] = (
stones_and_potions.update_partial_potion_map(
stone, potion, result_stone, partial_potion_map,
perm_index_to_index))
return result
def get_partial_stone_map_update() -> np.ndarray:
"""Updates a partial stone map given an observation."""
result, gen = _precompute_loop_helper(
[('stone', 'AlignedStone'), ('result', 'AlignedStone'),
'PartialStoneMap'], 'int')
for indices, elements in gen():
partial_stone_map = elements['PartialStoneMap']
result[tuple(indices.values())] = (
stones_and_potions.update_partial_stone_map(
elements['stone'], elements['result'], partial_stone_map))
return result
def get_missing_edge_no_change(
index_to_perm_index: np.ndarray,
graph_important_edges: Sequence[Tuple[int, int]]
) -> np.ndarray:
"""Gets which edge is missing given a potion has no effect."""
result, gen = _precompute_loop_helper(
['PartialStoneMap', 'PartialPotionMap', 'PerceivedPotion', 'LatentStone'],
'int', additional_data={'index_to_perm_index': index_to_perm_index})
for indices, elements in gen():
partial_potion_map = elements['PartialPotionMap']
partial_stone_map = elements['PartialStoneMap']
potion = elements['PerceivedPotion']
aligned_stone_coords = elements['LatentStone']
partial_stone_map_index = indices['PartialStoneMap']
partial_potion_map_index_0, partial_potion_map_index_1 = indices[
'PartialPotionMap']
potion_index = indices['PerceivedPotion']
stone_index = indices['LatentStone']
# If we can't map the potion into latent space we cannot tell which
# edge is missing.
if not partial_potion_map.can_map(potion):
result[partial_stone_map_index, partial_potion_map_index_0,
partial_potion_map_index_1, potion_index, stone_index] = -1
continue
# If we can't map the potion from latent space into stone perceptual
# space we cannot tell which edge is missing.
latent_potion = partial_potion_map.apply(potion)
if partial_stone_map.latent_pos_dir[
latent_potion.latent_dim] == types_helpers.UNKNOWN:
result[partial_stone_map_index, partial_potion_map_index_0,
partial_potion_map_index_1, potion_index, stone_index] = -1
continue
stone_space_potion = partial_stone_map.apply_to_potion(latent_potion)
# If the stone value on the dimension that the potion should change
# is the opposite of the potion direction then the stone should have
# changed and therefore we can eliminate graphs containing the edge.
if aligned_stone_coords.latent_coords[
stone_space_potion.latent_dim] == stone_space_potion.latent_dir:
result[partial_stone_map_index, partial_potion_map_index_0,
partial_potion_map_index_1, potion_index, stone_index] = -1
continue
# Set the result to be the index of the edge which shouldn't be
# there.
expected_end_coords = copy.deepcopy(aligned_stone_coords.latent_coords)
expected_end_coords[stone_space_potion.latent_dim] = -expected_end_coords[
stone_space_potion.latent_dim]
expected_end_index = stones_and_potions.LatentStone(
expected_end_coords).index()
missing_edge = -1
edge_start_end = sorted((stone_index, expected_end_index))
for edge_index, (i, j) in enumerate(graph_important_edges):
if sorted((i, j)) == edge_start_end:
missing_edge = edge_index
assert missing_edge != -1, 'Missing edge doesn\'t exist'
result[partial_stone_map_index, partial_potion_map_index_0,
partial_potion_map_index_1, potion_index, stone_index] = missing_edge
return result
def get_partial_stone_map_to_stone_map() -> np.ndarray:
"""If a partial stone map is fully known returns stone map otherwise -1."""
result, gen = _precompute_loop_helper(['PartialStoneMap'], 'int')
for indices, elements in gen():
index = indices['PartialStoneMap']
partial_stone_map = elements['PartialStoneMap']
stone_maps = partial_stone_map.fill_gaps()
if len(stone_maps) != 1:
result[index] = -1
else:
result[index] = stone_maps[0].index()
return result
def get_no_effect_from_partial_chem(
index_to_perm_index: np.ndarray
) -> np.ndarray:
"""Gets bit mask for potions known to take a stone out of the latent cube."""
result, gen = _precompute_loop_helper(
['StoneMap', 'PartialPotionMap'], 'int', additional_data={
'index_to_perm_index': index_to_perm_index})
for indices, elements in gen():
stone_map = elements['StoneMap']
stone_map_index = indices['StoneMap']
partial_potion_map = elements['PartialPotionMap']
partial_potion_map_index_0, partial_potion_map_index_1 = indices[
'PartialPotionMap']
# Go through perceived potion and perceived stone (without reward) and
# update if we know there will be no effect.
_, no_effect_gen = _precompute_loop_helper(
['PerceivedPotion', 'LatentStone'], 'int')
no_effect_result = 0
for no_effect_indices, no_effect_elements in no_effect_gen():
perceived_potion = no_effect_elements['PerceivedPotion']
aligned_stone_wo_reward = no_effect_elements['LatentStone']
latent_stone = stone_map.apply(AlignedStone(
0, aligned_stone_wo_reward.latent_coords))
# If we can map the perceived potion to latent space, do so and see if it
# has an effect.
if not partial_potion_map.can_map(perceived_potion):
continue
latent_potion = partial_potion_map.apply(perceived_potion)
if latent_potion.latent_dir == latent_stone.latent_coords[
latent_potion.latent_dim]:
no_effect_result |= 1 << (
(no_effect_indices['LatentStone'] * PerceivedPotion.num_types) +
no_effect_indices['PerceivedPotion'])
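# Each (latent stone, perceived potion) pair occupies one bit of the mask:
# bit index = latent_stone_index * PerceivedPotion.num_types + potion_index.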
result[stone_map_index, partial_potion_map_index_0,
partial_potion_map_index_1] = no_effect_result
return result
def get_update_partial_graph_no_change(
all_graphs, possible_partial_graph_indices: np.ndarray,
partial_graph_index_to_possible_index: Mapping[int, int],
graph_important_edges: Sequence[Tuple[int, int]]
) -> np.ndarray:
"""Given a missing edge updates the partial graph."""
graph_nodes = graphs.all_nodes_in_graph()
result, gen = _precompute_loop_helper(
['enumerated_possible_partial_graph_indices',
'enumerated_graph_important_edges'], 'int',
additional_data={
'possible_partial_graph_indices': possible_partial_graph_indices,
'graph_important_edges': graph_important_edges})
for indices, elements in gen():
edge_index, (start_node, end_node) = indices[
'enumerated_graph_important_edges']
poss_index, _ = indices[
'enumerated_possible_partial_graph_indices']
partial_graph = elements['enumerated_possible_partial_graph_indices']
start_stone = stones_and_potions.LatentStone(np.array(
graph_nodes.nodes[start_node].coords))
end_stone = stones_and_potions.LatentStone(np.array(
graph_nodes.nodes[end_node].coords))
partial_graph.add_edge(start_stone, end_stone, graphs.NO_EDGE)
partial_graph.update(all_graphs)
result[poss_index, edge_index] = partial_graph_index_to_possible_index[
partial_graph.index()]
return result
def get_perm_index_conversion() -> Tuple[np.ndarray, np.ndarray]:
"""Gets maps to convert between different indices representing permutations.
We make a map from an index computed by treating each entry in the permutation
as being between 0 and len(perm) - 1 (of which there are
len(perm) ^ len(perm)) to an index between 0 and len(perm)! - 1.
len(perm) is 3 so this is not large.
Returns:
A map from the index which treats entries as independent to the compact
index, and the inverse map.
"""
num_axes = stones_and_potions.get_num_axes()
# Use numpy object type to store python ints rather than numpy ints.
perm_index_to_index = np.array([-1 for _ in range(num_axes ** num_axes)],
dtype=np.object)
for i, perm in enumerate(itertools.permutations(range(num_axes))):
perm_index_to_index[np.ravel_multi_index(
tuple(perm), tuple(num_axes for _ in range(num_axes)))] = i
# Make the inverse map.
index_to_perm_index = np.array(
[int(np.ravel_multi_index(
tuple(perm), tuple(num_axes for _ in range(num_axes))))
for perm in itertools.permutations(range(3))], dtype=np.object)
return perm_index_to_index, index_to_perm_index
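# Worked example (a sketch for num_axes == 3): itertools.permutations(range(3))
# yields (0,1,2), (0,2,1), (1,0,2), (1,2,0), (2,0,1), (2,1,0), whose
# ravel_multi_index values over shape (3, 3, 3) are 5, 7, 11, 15, 19, 21.
# Hence index_to_perm_index == [5, 7, 11, 15, 19, 21] and, e.g.,
# perm_index_to_index[11] == 2, while unused slots remain -1.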
def constraints_to_filename(
constraints: Sequence[graphs.Constraint],
poss_stone_maps: Sequence[stones_and_potions.StoneMap],
poss_potion_maps: Sequence[stones_and_potions.PotionMap]
) -> str:
"""Converts a sequence of constraints and possible maps to a filename.
This removes characters like * and - and ensures a list with the same number
of constraints is the same length. Each constraint becomes 6 letters long with
S (i.e. star) substituted for *, N (i.e. negative) substituted for -1 and P
(i.e. positive) substituted for 1. Consecutive constraints are separated by a
/ and the sequence of constraints is lexicographically sorted to ensure that
two sequences of constraints which differ only in order are represented by the
same string.
Stone and potion maps are converted to indices and then represented as a
sequence of ranges, e.g. 0-7/1,8,16-24,32.
Args:
constraints: A sequence of graphs.Constraints.
poss_stone_maps: A sequence of possible stone maps.
poss_potion_maps: A sequence of possible potion maps.
Returns:
A string with each constraint and possible stone map and potion map.
"""
def constraint_to_str(constraint: graphs.Constraint) -> str:
remapping = {'*': 'S', '-1': 'N', '1': 'P'}
all_dims = []
for i, dim in enumerate(constraint):
constr = dim[:i] + dim[i + 1:]
all_dims.append(''.join([remapping[c] for c in constr]))
return ''.join(all_dims)
def seq_ints_to_str(seq: Sequence[int]) -> str:
"""Convert a sequence of ints to a string."""
ranges = []
start_i, prev_i = None, None
for i in seq:
if start_i is None:
start_i, prev_i = i, i
continue
if i != prev_i + 1:
ranges.append((start_i, prev_i))
start_i = i
prev_i = i
ranges.append((start_i, prev_i))
return ','.join(str(s) + ('' if e == s else '-' + str(e)) for s, e in
ranges)
perm_index_to_index, _ = get_perm_index_conversion()
return ('/'.join(sorted(constraint_to_str(c) for c in constraints)) + '/' +
seq_ints_to_str([s.index() for s in poss_stone_maps]) + '/' +
seq_ints_to_str([p.index(perm_index_to_index) for p in
poss_potion_maps]))
def load_from_level_name(level_name: str) -> Optional[PrecomputedMaps]:
"""Loads precomputed for the level name passed if it exists."""
# All levels are in alchemy and this is not included in the precomputed.pkl
# file paths so remove this from the level name if it is included.
if level_name.startswith('alchemy/'):
level_name = level_name.replace('alchemy/', '')
# Precomputed maps refer to the mapping between aligned stones and latent
# stones, so any rotation does not affect them and can be ignored.
# There are a few different ways of specifying rotation in the level name.
level_name = level_name.replace('rotation_and_', '')
level_name = level_name.replace('with_rotation', '')
level_name = level_name.replace('fixed_with', 'fixed')
level_name = level_name.replace('rotate_color_shape', '')
level_name = level_name.replace('rotate_color_size', '')
level_name = level_name.replace('rotate_size_shape', '')
precomputed_folder = os.path.join(PRECOMPUTED_LEVEL_FILES_DIR, level_name)
return _load_from_folder(precomputed_folder)
def get_precomputed_maps(
constraints: Optional[Sequence[graphs.Constraint]] = None,
poss_stone_maps: Optional[Sequence[stones_and_potions.StoneMap]] = None,
poss_potion_maps: Optional[Sequence[stones_and_potions.PotionMap]] = None,
) -> PrecomputedMaps:
"""Precomputes a set of maps to make running the ideal observer faster."""
# Constraints must be specified in stone perceptual space.
if constraints is None:
constraints = graphs.possible_constraints()
if poss_stone_maps is None:
poss_stone_maps = stones_and_potions.possible_stone_maps()
perm_index_to_index, index_to_perm_index = get_perm_index_conversion()
if poss_potion_maps is None:
poss_potion_maps = stones_and_potions.possible_potion_maps(
index_to_perm_index)
logging.info('Computing precomputed maps.')
# Everywhere below we use numpy object type to store python ints rather than
# numpy ints so that we get arbitrary precision which allows us to make
# bitfields easily.
stone_maps = np.array([s.index() for s in poss_stone_maps], dtype=np.object)
potion_maps = np.array([p.index(perm_index_to_index) for p in
poss_potion_maps], dtype=np.object)
# The graph distribution is an unordered mapping; we sort it so that debugging
# is easier and the extracted lists of graphs and their probabilities are
# consistent across runs.
graphs_distr = graphs.graph_distr(constraints)
graphs_distr_as_list = list(graphs_distr.items())
graphs_distr_constraints = [graphs.constraint_from_graph(k)
for k, _ in graphs_distr_as_list]
graphs_distr_num_constraints = graphs.get_num_constraints(
graphs_distr_constraints)
graphs_distr_sorted = sorted(zip(
graphs_distr_as_list, graphs_distr_num_constraints,
graphs_distr_constraints), key=lambda x: (x[2], str(x[1])))
graphs_list = np.frompyfunc(graphs.Graph, 2, 1)
import time
import numpy as np
import scipy.integrate
import scipy.linalg
import ross
from ross.units import Q_, check_units
from .abs_defect import Defect
from .integrate_solver import Integrator
__all__ = [
"Rubbing",
]
class Rubbing(Defect):
"""Contains a rubbing model for applications on finite element models of rotative machinery.
The reference coordinate system is: z-axis through the shaft center; x-axis and y-axis in the sensors' planes
Parameters
----------
dt : float
Time step.
tI : float
Initial time.
tF : float
Final time.
deltaRUB : float
Distance between the housing and shaft surface.
kRUB : float
Contact stiffness.
cRUB : float
Contact damping.
miRUB : float
Friction coefficient.
posRUB : int
Node where the rubbing is occurring.
speed : float, pint.Quantity
Operational speed of the machine. Default unit is rad/s.
unbalance_magnitude : array
Array with the unbalance magnitude. The unit is kg.m.
unbalance_phase : array
Array with the unbalance phase. The unit is rad.
torque : bool
Set it as True to consider the torque provided by the rubbing, by default False.
print_progress : bool
Set it True, to print the time iterations and the total time spent, by default False.
Returns
-------
A force to be applied on the shaft.
References
----------
.. [1] <NAME>., <NAME>., & <NAME>. (2002). Linear and Nonlinear Rotordynamics: A Modern Treatment with Applications, pp. 215-222.
Examples
--------
>>> from ross.defects.rubbing import rubbing_example
>>> probe1 = (14, 0)
>>> probe2 = (22, 0)
>>> response = rubbing_example()
>>> results = response.run_time_response()
>>> fig = response.plot_dfft(probe=[probe1, probe2], range_freq=[0, 100], yaxis_type="log")
>>> # fig.show()
"""
@check_units
def __init__(
self,
dt,
tI,
tF,
deltaRUB,
kRUB,
cRUB,
miRUB,
posRUB,
speed,
unbalance_magnitude,
unbalance_phase,
torque=False,
print_progress=False,
):
self.dt = dt
self.tI = tI
self.tF = tF
self.deltaRUB = deltaRUB
self.kRUB = kRUB
self.cRUB = cRUB
self.miRUB = miRUB
self.posRUB = posRUB
self.speed = speed
self.speedI = speed
self.speedF = speed
self.DoF = np.arange((self.posRUB * 6), (self.posRUB * 6 + 6))
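# Global degree-of-freedom indices of the rubbing node (the rotor model used
# here has 6 DoF per node, so node posRUB owns indices 6*posRUB .. 6*posRUB+5).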
self.torque = torque
self.unbalance_magnitude = unbalance_magnitude
self.unbalance_phase = unbalance_phase
self.print_progress = print_progress
if len(self.unbalance_magnitude) != len(self.unbalance_phase):
raise Exception(
"The unbalance magnitude vector and phase must have the same size!"
)
def run(self, rotor):
"""Calculates the shaft angular position and the unbalance forces at X / Y directions.
Parameters
----------
rotor : ross.Rotor Object
6 DoF rotor model.
"""
self.rotor = rotor
self.n_disk = len(self.rotor.disk_elements)
if self.n_disk != len(self.unbalance_magnitude):
raise Exception("The number of discs and unbalances must agree!")
self.ndof = rotor.ndof
self.iteration = 0
self.radius = rotor.df_shaft.iloc[self.posRUB].o_d / 2
self.ndofd = np.zeros(len(self.rotor.disk_elements))
for ii in range(self.n_disk):
self.ndofd[ii] = (self.rotor.disk_elements[ii].n) * 6
self.lambdat = 0.00001
# Faxial = 0
# TorqueI = 0
# TorqueF = 0
self.sA = (
self.speedI * np.exp(-self.lambdat * self.tF)
- self.speedF * np.exp(-self.lambdat * self.tI)
) / (np.exp(-self.lambdat * self.tF) - np.exp(-self.lambdat * self.tI))
self.sB = (self.speedF - self.speedI) / (
np.exp(-self.lambdat * self.tF) - np.exp(-self.lambdat * self.tI)
)
# sAT = (
# TorqueI * np.exp(-lambdat * self.tF) - TorqueF * np.exp(-lambdat * self.tI)
# ) / (np.exp(-lambdat * self.tF) - np.exp(-lambdat * self.tI))
# sBT = (TorqueF - TorqueI) / (
# np.exp(-lambdat * self.tF) - np.exp(-lambdat * self.tI)
# )
# self.SpeedV = sA + sB * np.exp(-lambdat * t)
# self.TorqueV = sAT + sBT * np.exp(-lambdat * t)
# self.AccelV = -lambdat * sB * np.exp(-lambdat * t)
# Determining the modal matrix
self.K = self.rotor.K(self.speed)
self.C = self.rotor.C(self.speed)
self.G = self.rotor.G()
self.M = self.rotor.M()
self.Kst = self.rotor.Kst()
V1, ModMat = scipy.linalg.eigh(
self.K,
self.M,
type=1,
turbo=False,
)
ModMat = ModMat[:, :12]
self.ModMat = ModMat
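# Modal reduction: only the first 12 mode shapes are kept, so the reduced
# state vector has 12 modal displacements plus 12 modal velocities (which is
# why y0 below has length 24).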
# Modal transformations
self.Mmodal = ((ModMat.T).dot(self.M)).dot(ModMat)
self.Cmodal = ((ModMat.T).dot(self.C)).dot(ModMat)
self.Gmodal = ((ModMat.T).dot(self.G)).dot(ModMat)
self.Kmodal = ((ModMat.T).dot(self.K)).dot(ModMat)
self.Kstmodal = ((ModMat.T).dot(self.Kst)).dot(ModMat)
y0 = np.zeros(24)
t_eval = np.arange(self.tI, self.tF + self.dt, self.dt)
# t_eval = np.arange(self.tI, self.tF, self.dt)
T = t_eval
self.angular_position = (
self.sA * T
- (self.sB / self.lambdat) * np.exp(-self.lambdat * T)
+ (self.sB / self.lambdat)
)
self.Omega = self.sA + self.sB * np.exp(-self.lambdat * T)
self.AccelV = -self.lambdat * self.sB * np.exp(-self.lambdat * T)
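# Consistency check (a sketch): with Omega(t) = sA + sB*exp(-lambdat*t), the
# angular position above is its time integral (zero at t = 0) and AccelV is
# its derivative, d(Omega)/dt = -lambdat*sB*exp(-lambdat*t).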
self.tetaUNB = np.zeros((len(self.unbalance_phase), len(self.angular_position)))
unbx = np.zeros(len(self.angular_position))
unby = np.zeros(len(self.angular_position))
FFunb = np.zeros((self.ndof, len(t_eval)))
self.forces_rub = np.zeros((self.ndof, len(t_eval)))
for ii in range(self.n_disk):
self.tetaUNB[ii, :] = (
self.angular_position + self.unbalance_phase[ii] + np.pi / 2
)
unbx = self.unbalance_magnitude[ii] * (self.AccelV) * (
np.cos(self.tetaUNB[ii, :])
) - self.unbalance_magnitude[ii] * ((self.Omega**2)) * (
np.sin(self.tetaUNB[ii, :])
)
unby = -self.unbalance_magnitude[ii] * (self.AccelV) * (
np.sin(self.tetaUNB[ii, :])
) - self.unbalance_magnitude[ii] * (self.Omega**2) * (
np.cos(self.tetaUNB[ii, :])
)
FFunb[int(self.ndofd[ii]), :] += unbx
FFunb[int(self.ndofd[ii] + 1), :] += unby
self.Funbmodal = (self.ModMat.T).dot(FFunb)
self.inv_Mmodal = np.linalg.pinv(self.Mmodal)
t1 = time.time()
x = Integrator(
self.tI,
y0,
self.tF,
self.dt,
self._equation_of_movement,
self.print_progress,
)
x = x.rk4()
t2 = time.time()
if self.print_progress:
print(f"Time spent: {t2-t1} s")
self.displacement = x[:12, :]
self.velocity = x[12:, :]
self.time_vector = t_eval
self.response = self.ModMat.dot(self.displacement)
def _equation_of_movement(self, T, Y, i):
"""Calculates the displacement and velocity using state-space representation in the modal domain.
Parameters
----------
T : float
Iteration time.
Y : array
Array of displacement and velocity, in the modal domain.
i : int
Iteration step.
Returns
-------
new_Y : array
Array of the new displacement and velocity, in the modal domain.
"""
positions = Y[:12]
velocity = Y[12:] # velocity in space state
positionsFis = self.ModMat.dot(positions)
velocityFis = self.ModMat.dot(velocity)
Frub, ft = self._rub(positionsFis, velocityFis, self.Omega[i])
self.forces_rub[:, i] = ft
ftmodal = (self.ModMat.T).dot(ft)
# proper equation of movement to be integrated in time
new_V_dot = (
ftmodal
+ self.Funbmodal[:, i]
- ((self.Cmodal + self.Gmodal * self.Omega[i])).dot(velocity)
- ((self.Kmodal + self.Kstmodal * self.AccelV[i]).dot(positions))
).dot(self.inv_Mmodal)
new_X_dot = velocity
new_Y = np.zeros(24)
new_Y[:12] = new_X_dot
new_Y[12:] = new_V_dot
return new_Y
def _rub(self, positionsFis, velocityFis, ang):
self.F_k = np.zeros(self.ndof)
self.F_c = np.zeros(self.ndof)
self.F_f = np.zeros(self.ndof)
self.y = np.concatenate((positionsFis, velocityFis))
ii = 0 + 6 * self.posRUB # rubbing position
self.radial_displ_node = np.sqrt(
self.y[ii] ** 2 + self.y[ii + 1] ** 2
) # radial displacement
self.radial_displ_vel_node = np.sqrt(
self.y[ii + self.ndof] ** 2 + self.y[ii + 1 + self.ndof] ** 2
) # velocity
self.phi_angle = np.arctan2(self.y[ii + 1], self.y[ii])
import sys, os
sys.path.append(os.path.join(os.path.dirname(__file__), "../"))
from linear_recurrent_net.layers import linear_surrogate_lstm, SRU, QRNN, Alg
import argparse
def plr_slr(bs_seq_len_list, alg):
"""Given a list of pairs (batch size, seq_len),
calculate the throughput of an LS-LSTM, an SRU, a QRNN(2),
and QRNN(10) using the parallel kernel as opposed to the serial
one"""
import tensorflow as tf
import numpy as np
import scipy.io.wavfile
from tensorflow.contrib import rnn
import math
import os
import sys
import time
import os
import random
throughput_list = []
#TODO:
#Make LS_LSTM with PLR
#Make SRU with PLR
#Make QRNN with PLR
#Make LS_LSTM with SLR
#Make SRU with SLR
#Make QRNN with SLR
for _, seq_len in bs_seq_len_list:  # batch size is recomputed from seq_len below
#First generate the LS-LSTM and work out the throughput
tf.reset_default_graph()
n_hidden = 256
n_classes = 2
n_steps = seq_len
batch_size = 65536 // seq_len
bs = batch_size
print("Batch size is {} and sequence length is {}".format(bs, seq_len))
n_input = 24
n_layers = 2
forget_gate_init = 1.0 # = 1/(n_in). We use uniform p(x)
#Training Parameters
sn = 1.0 / math.sqrt(n_hidden)
learning_rate = 0.001
training_iters = 5000000
x = tf.placeholder("float", [n_steps, batch_size, n_input])
y = tf.placeholder("float", [batch_size, n_classes])
tf.get_variable_scope().reuse == True
W1 = tf.get_variable('W1', initializer=
tf.random_normal([n_hidden, n_classes]), dtype='float')
b1 = tf.get_variable('b1', initializer=tf.zeros([n_classes]), dtype='float')
layer1 = linear_surrogate_lstm(x, n_hidden, alg=alg, name='ls-lstm')
outputs = linear_surrogate_lstm(layer1, n_hidden, alg=alg, name='ls-lstm2')
pred = tf.matmul(outputs[-1], W1) + b1
#Evaluate network, run adam and clip gradients
################################################################################
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=y))
optimizer_0 = tf.train.AdamOptimizer(learning_rate=learning_rate)
raw_gradients, variables = zip(*optimizer_0.compute_gradients(cost))
gradients = raw_gradients
optimizer = optimizer_0.apply_gradients(zip(gradients, variables))
init = tf.global_variables_initializer()
#Initialise the model and evaluate
step = 0
times = []
x_in = np.random.random((n_steps, batch_size, n_input))
y_in = np.random.random((batch_size, n_classes))
with tf.device("gpu:0"):
with tf.Session() as sess:
sess.run(init)
while step < 10:
out = sess.run(pred, feed_dict={x: x_in, y: y_in})
step += 1
if step != 0:
start = time.time()
out = sess.run(pred, feed_dict={x: x_in, y: y_in})
finish = time.time()
times.append(finish - start)
ls_lstm_tp = (bs * n_steps) / np.mean(times)
tf.reset_default_graph()
x = tf.placeholder("float", [n_steps, batch_size, n_input])
y = tf.placeholder("float", [batch_size, n_classes])
tf.get_variable_scope().reuse == True
W1 = tf.get_variable('W1', initializer=
tf.random_normal([n_hidden, n_classes]), dtype='float')
b1 = tf.get_variable('b1', initializer=tf.zeros([n_classes]), dtype='float')
layer1 = linear_surrogate_lstm(x, n_hidden, alg=Alg.SERIAL_BASELINE, name='ls-lstm')
output = linear_surrogate_lstm(layer1, n_hidden, alg=Alg.SERIAL_BASELINE, name='ls-lstm2')
pred = tf.matmul(output[-1], W1) + b1
#Evaluate network, run adam and clip gradients
################################################################################
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=y))
optimizer_0 = tf.train.AdamOptimizer(learning_rate=learning_rate)
raw_gradients, variables = zip(*optimizer_0.compute_gradients(cost))
gradients = raw_gradients
optimizer = optimizer_0.apply_gradients(zip(gradients, variables))
init = tf.global_variables_initializer()
#Initialise the model and evaluate
step = 0
times = []
x_in = np.random.random((n_steps, batch_size, n_input))
y_in = np.random.random((batch_size, n_classes))
with tf.device("gpu:0"):
with tf.Session() as sess:
sess.run(init)
while step < 10:
out = sess.run(pred, feed_dict={x: x_in, y: y_in})
step += 1
if step != 0:
start = time.time()
out = sess.run(pred, feed_dict={x: x_in, y: y_in})
finish = time.time()
times.append(finish - start)
s_ls_lstm_tp = (bs * n_steps) / np.mean(times)
# throughput_list.append([ls_lstm_tp, s_ls_lstm_tp])
# continue
tf.reset_default_graph()
x = tf.placeholder("float", [n_steps, batch_size, n_input])
y = tf.placeholder("float", [batch_size, n_classes])
tf.get_variable_scope().reuse == True
W1 = tf.get_variable('W1', initializer=
tf.random_normal([n_input, n_classes]), dtype='float')
b1 = tf.get_variable('b1', initializer=tf.zeros([n_classes]), dtype='float')
layer1 = SRU(x, alg=alg, name='SRU_1')
output = SRU(layer1, alg=alg, name='SRU_2')
pred = tf.matmul(output[-1], W1) + b1
#Evaluate network, run adam and clip gradients
################################################################################
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=y))
optimizer_0 = tf.train.AdamOptimizer(learning_rate=learning_rate)
raw_gradients, variables = zip(*optimizer_0.compute_gradients(cost))
gradients = raw_gradients
optimizer = optimizer_0.apply_gradients(zip(gradients, variables))
init = tf.global_variables_initializer()
#Initialise the model and evaluate
step = 0
times = []
x_in = np.random.random((n_steps, batch_size, n_input))
y_in = np.random.random((batch_size, n_classes))
with tf.device("gpu:0"):
with tf.Session() as sess:
sess.run(init)
while step < 10:
out = sess.run(pred, feed_dict={x: x_in, y: y_in})
step += 1
if step != 0:
start = time.time()
out = sess.run(pred, feed_dict={x: x_in, y: y_in})
finish = time.time()
times.append(finish - start)
sru_tp = (bs * n_steps) / np.mean(times)
# Serial SRU
tf.reset_default_graph()
x = tf.placeholder("float", [n_steps, batch_size, n_input])
y = tf.placeholder("float", [batch_size, n_classes])
tf.get_variable_scope().reuse == True
W1 = tf.get_variable('W1', initializer=
tf.random_normal([n_input, n_classes]), dtype='float')
b1 = tf.get_variable('b1', initializer=tf.zeros([n_classes]), dtype='float')
layer1 = SRU(x, alg=Alg.SERIAL_BASELINE, name='s_SRU_1')
output = SRU(layer1, alg=Alg.SERIAL_BASELINE, name='s_SRU_2')
pred = tf.matmul(output[-1], W1) + b1
#Evaluate network, run adam and clip gradients
################################################################################
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=y))
optimizer_0 = tf.train.AdamOptimizer(learning_rate=learning_rate)
raw_gradients, variables = zip(*optimizer_0.compute_gradients(cost))
gradients = raw_gradients
optimizer = optimizer_0.apply_gradients(zip(gradients, variables))
init = tf.global_variables_initializer()
#Initialise the model and evaluate
step = 0
times = []
x_in = np.random.random((n_steps, batch_size, n_input))
y_in = np.random.random((batch_size, n_classes))
with tf.device("gpu:0"):
with tf.Session() as sess:
sess.run(init)
while step < 10:
out = sess.run(pred, feed_dict={x: x_in, y: y_in})
step += 1
if step != 0:
start = time.time()
out = sess.run(pred, feed_dict={x: x_in, y: y_in})
finish = time.time()
times.append(finish - start)
s_sru_tp = (bs * n_steps) / np.mean(times)
########################################## QRNN 2
tf.reset_default_graph()
x = tf.placeholder("float", [n_steps, batch_size, n_input])
y = tf.placeholder("float", [batch_size, n_classes])
tf.get_variable_scope().reuse == True
W1 = tf.get_variable('W1', initializer=
tf.random_normal([n_input, n_classes]), dtype='float')
b1 = tf.get_variable('b1', initializer=tf.zeros([n_classes]), dtype='float')
layer1 = QRNN(x, 2, alg=alg, name='QRNN_1')
output = QRNN(layer1, 2, alg=alg, name='QRNN_2')
pred = tf.matmul(output[-1], W1) + b1
#Evaluate network, run adam and clip gradients
################################################################################
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=y))
optimizer_0 = tf.train.AdamOptimizer(learning_rate=learning_rate)
raw_gradients, variables = zip(*optimizer_0.compute_gradients(cost))
gradients = raw_gradients
optimizer = optimizer_0.apply_gradients(zip(gradients, variables))
init = tf.global_variables_initializer()
#Initialise the model and evaluate
step = 0
times = []
x_in = np.random.random((n_steps, batch_size, n_input))
from SCN import SCN, SCN_multi
from Fractal_generator import koch, binary_frac
import torch
from torch.autograd import Variable
import numpy as np
import matplotlib.pyplot as plt
import pickle
from scipy.stats import norm
import time
X = np.linspace(0.01, 0.99, 99)
X = torch.from_numpy(np.asarray(X, dtype=np.float32)).view(len(X), -1)
X = X.type(torch.FloatTensor)
Y = torch.from_numpy(np.asarray(np.absolute(np.linspace(0.01, 0.99, 99) - 0.2) *
((np.linspace(0.01, 0.99, 99)
import os
import sys
sys.path.insert(1, os.path.dirname(os.path.realpath(__file__)) + '/../')
from common import utils
import models
from common.log import log, LogLevel
from common.state import State
from common import cuda
from common import paths
import common.torch
import common.numpy
from training import train_classifier_adversarially
import torch
import numpy
import argparse
import math
if utils.display():
from common import plot
class TrainLearnedDecoderClassifierAdversarially(train_classifier_adversarially.TrainClassifierAdversarially):
"""
Train a classifier.
:param args: arguments
:type args: list
"""
def __init__(self, args=None):
"""
Initialize.
:param args: optional arguments if not to use sys.argv
:type args: [str]
"""
super(TrainLearnedDecoderClassifierAdversarially, self).__init__(args)
self.train_statistics = numpy.zeros((0, 11))
import random
import keras as K
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
from os import path
from copy import deepcopy
from datetime import datetime
from itertools import compress
from keras.engine.input_layer import Input
from keras.backend import variable as Kvar
from keras.backend import set_value as Kset
from generic.dummy import Noise as dumnz
from generic.noise import OrnsteinUhlenbeckActionNoise
class Agent:
def __init__(self, agent_type, state_size, action_size, valid_actions=None,
layer_units=[48, 32, 16, 8], batch_size=12, discount_factor=0.95, buffer_min_size=200,
buffer_max_size=2000, learning_rate=0.001, noise_process=None, outputdir=None):
# Set action space
self.valid_actions = valid_actions if valid_actions is not None else list(range(action_size))
self.batch_size = batch_size
self.discount_factor= discount_factor
self.memory = []
self.buffer_min_size= buffer_min_size
self.buffer_max_size= buffer_max_size
self.attr2save = ['state_size', 'action_size', 'model_name']
self.outputdir = outputdir
self.model = self.get_agent(agent_type, state_size, action_size, layer_units, learning_rate, discount_factor, noise_process)
def get_agent(self, agent_type, state_size, action_size, layer_units, learning_rate, discount_factor, noise_process):
if agent_type == 'DQN':
return DQN_agent(state_size, action_size, layer_units, learning_rate, discount_factor, self.outputdir)
elif agent_type == 'DDPG':
return DDPG_agent(state_size, action_size, actor_hidden=layer_units, critic_hidden=layer_units, noise_process=noise_process, outputdir=self.outputdir, discount_factor=discount_factor)
else:
print('Unrecognized agent type')
return
def remember(self, state, action, reward, next_state, done, next_valid_actions=None):
if next_valid_actions is None:
next_valid_actions = self.valid_actions
self.memory.append((state, action, reward, next_state, done, next_valid_actions))
def replay(self, log=False, learning_rate=1e-3):
Q_s_a = []
Qloss = []
if len(self.memory) > self.buffer_min_size:
batch = random.sample(self.memory, min(len(self.memory), self.batch_size))
state = np.concatenate([x[0] for x in batch])
action = np.reshape([x[1] for x in batch], [-1,1])
reward = np.reshape([x[2] for x in batch], [-1,1])
next_state = np.concatenate([x[3] for x in batch])
from tkinter import *
from tkinter import ttk
import tkinter.filedialog as filedialog
from tkinter import messagebox
from PIL import Image,ImageDraw,ImageFont
from PIL import ImageTk,ImageGrab
import cv2
from skimage import filters
#import rasterio
import matplotlib.pyplot as pyplt
#from matplotlib.figure import Figure
import numpy as np
import os
#import time
import csv
import scipy.linalg as la
from functools import partial
#import threading
#import sys
#import kplus
from sklearn.cluster import KMeans
import tkintercorestat
#import tkintercorestat_plot
import tkintercore
import cal_kernelsize
#import histograms
#import createBins
import axistest
#from multiprocessing import Pool
import lm_method
#import batchprocess
import sel_area
class img():
def __init__(self,size,bands):
self.size=size
self.bands=bands
import batchprocess
displayimg={'Origin':None,
'PCs':None,
'Color Deviation':None,
'ColorIndices':None,
'Output':None}
previewimg={'Color Deviation':None,
'ColorIndices':None}
#cluster=['LabOstu','NDI'] #,'Greenness','VEG','CIVE','MExG','NDVI','NGRDI','HEIGHT']
#cluster=['LabOstu','NDI','Greenness','VEG','CIVE','MExG','NDVI','NGRDI','HEIGHT','Band1','Band2','Band3']
cluster=['PAT_R','PAT_G','PAT_B',
'DIF_R','DIF_G','DIF_B',
'ROO_R','ROO_G','ROO_B',
'GLD_R','GLD_G','GLD_B',
'Band1','Band2','Band3']
colorbandtable=np.array([[255,0,0],[255,127,0],[255,255,0],[127,255,0],[0,255,255],[0,127,255],[0,0,255],[127,0,255],[75,0,130],[255,0,255]],'uint8')
#print('colortableshape',colortable.shape)
filenames=[]
Multiimage={}
Multigray={}
Multitype={}
Multiimagebands={}
Multigraybands={}
workbandarray={}
displaybandarray={}
originbandarray={}
colorindicearray={}
clusterdisplay={}
kernersizes={}
multi_results={}
outputimgdict={}
outputimgbands={}
outputsegbands={}
originsegbands={}
oldpcachoice=[]
multiselectitems=[]
coinbox_list=[]
pre_checkbox=[]
originpcabands={}
batch={'PCweight':[],
'PCsel':[],
'Kmeans':[],
'Kmeans_sel':[],
'Area_max':[],
'Area_min':[],
'shape_max':[],
'shape_min':[],
'nonzero':[]}
root=Tk()
root.title('GridFree v.1.1.0 ')
root.geometry("")
root.option_add('*tearoff',False)
emptymenu=Menu(root)
root.config(menu=emptymenu)
screenheight=root.winfo_screenheight()
screenwidth=root.winfo_screenwidth()
print('screenheight',screenheight,'screenwidth',screenwidth)
screenstd=min(screenheight-100,screenwidth-100,850)
coinsize=StringVar()
selarea=StringVar()
refvar=StringVar()
imgtypevar=StringVar()
edge=StringVar()
kmeans=IntVar()
pc_combine_up=DoubleVar()
pc_combine_down=IntVar()
filedropvar=StringVar()
displaybut_var=StringVar()
buttonvar=IntVar()
bandchoice={}
checkboxdict={}
#minipixelareaclass=0
coinbox=None
currentfilename=''
currentlabels=None
displaylabels=None
workingimg=None
displaypclabels=None
boundaryarea=None
outputbutton=None
font=None
reseglabels=None
coindict=None
## Functions
refarea=None
originlabels=None
originlabeldict=None
changekmeans=False
convband=None
reflabel=0
minflash=[]
dotflash=[]
labelplotmap={}
mappath=''
elesize=[]
labellist=[]
figdotlist={}
havecolorstrip=True
kmeanschanged=False
pcweightchanged=False
originbinaryimg=None
clusterchanged=False
originselarea=False
zoomoff=False
maxx=0
minx=0
bins=None
loccanvas=None
linelocs=[0,0,0,0]
maxy=0
miny=0
segmentratio=0
zoombox=[]
displayfea_l=0
displayfea_w=0
resizeshape=[]
previewshape=[]
pcbuttons=[]
pcbuttonsgroup=[]
def distance(p1,p2):
return np.sum((p1-p2)**2)
def findratio(originsize,objectsize):
oria=originsize[0]
orib=originsize[1]
obja=objectsize[0]
objb=objectsize[1]
if oria>obja or orib>objb:
ratio=round(max((oria/obja),(orib/objb)))
else:
ratio=round(min((obja/oria),(objb/orib)))
# if oria*orib>850 * 850:
if oria*orib>screenstd * screenstd:
if ratio<2:
ratio=2
return ratio
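# Example (a sketch; screenstd depends on the screen, at most 850): an image of
# 3000 x 2000 displayed in an 850 x 850 area gives
# findratio([3000, 2000], [850, 850]) == round(3000 / 850) == 4, i.e. the image
# is shown at 1/4 of its original resolution.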
def getkeys(dict):
return [*dict]
def deletezoom(event,widget):
print('leave widget')
if len(zoombox)>0:
for i in range(len(zoombox)):
#print('delete')
widget.delete(zoombox.pop(0))
widget.update()
def zoom(event,widget,img):
global zoombox
x=event.x
y=event.y
#print(x,y)
if len(zoombox)>1:
widget.delete(zoombox.pop(0))
#print('delete')
crop=img.crop((x-15,y-15,x+15,y+15))
w,h=crop.size
#print(w,h)
crop=crop.resize([w*3,h*3],resample=Image.BILINEAR)
w,h=crop.size
crop=ImageTk.PhotoImage(crop)
zoombox.append(widget.create_image(x+5,y-5,image=crop))
root.update_idletasks()
raise NameError
#time.sleep(0.1)
def changedisplay_pc(frame):
for widget in frame.winfo_children():
widget.pack_forget()
#widget.configure(image=displayimg[text])
#widget.image=displayimg[text]
#widget.pack()
w=displayimg['PCs']['Size'][1]
l=displayimg['PCs']['Size'][0]
widget.config(width=w,height=l)
widget.create_image(0,0,image=displayimg['PCs']['Image'],anchor=NW)
widget.pack()
widget.update()
def pcweightupdate(displayframe):
getPCs()
changedisplay_pc(displayframe)
def buttonpress(val,displayframe,buttonframe):
global buttonvar,pc_combine_up,kmeans
buttonvar.set(val)
kmeans.set(1)
pc_combine_up.set(0.5)
buttonchildren=buttonframe.winfo_children()
for child in buttonchildren:
child.config(highlightbackground='white')
print(buttonchildren[val])
buttonchild=buttonchildren[val]
buttonchild.config(highlightbackground='red')
print('press button ',buttonvar.get())
getPCs()
changedisplay_pc(displayframe)
# if kmeans.get()>1:
changekmeansbar('')
beforecluster('')
# changecluster('')
def PCbuttons(frame,displayframe):
#display pc buttons
# buttonvar=IntVar()
#buttonvar.set(0)
for widget in frame.winfo_children():
widget.pack_forget()
buttonframe=LabelFrame(frame)
buttonframe.pack()
for i in range(len(pcbuttons)):
butimg=pcbuttons[i]
but=Button(buttonframe,text='',image=butimg,compound=TOP,command=partial(buttonpress,i,displayframe,buttonframe))
if i==buttonvar.get():
but.config(highlightbackground='red')
row=int(i/3)
col=i%3
# print(row,col)
but.grid(row=int(i/3),column=col)
print('default button',buttonvar.get())
# change cluster,display
def displaypreview(text):
global figcanvas,resviewframe
for widget in resviewframe.winfo_children():
widget.pack_forget()
# previewframe=Canvas(frame,width=450,height=400,bg='white')
figcanvas.pack()
figcanvas.delete(ALL)
if text=='Color Deviation':
previewtext='ColorIndices'
if text=='ColorIndices':
previewtext='Color Deviation'
previewimage=previewimg[previewtext]['Image']
figcanvas.create_image(0,0,image=previewimage,anchor=NW)
figcanvas.update()
def switchevent(event,widget,img):
global zoomoff,zoomfnid_m,zoomfnid_l,zoombox
zoomoff= not zoomoff
if zoomoff==True:
widget.unbind('<Motion>',zoomfnid_m)
widget.unbind('<Leave>',zoomfnid_l)
if len(zoombox)>0:
for i in range(len(zoombox)):
widget.delete(zoombox.pop(0))
widget.update()
else:
zoomfnid_m=widget.bind('<Motion>',lambda event,arg=widget:zoom(event,arg,img))
zoomfnid_l=widget.bind('<Leave>',lambda event,arg=widget:deletezoom(event,arg))
def changedisplayimg(frame,text):
global displaybut_var,figcanvas,resviewframe,reflabel
displaybut_var.set(disbuttonoption[text])
for widget in frame.winfo_children():
widget.pack_forget()
#widget.configure(image=displayimg[text])
#widget.image=displayimg[text]
#widget.pack()
w=displayimg[text]['Size'][1]
l=displayimg[text]['Size'][0]
widget.config(width=w,height=l)
widget.create_image(0,0,image=displayimg[text]['Image'],anchor=NW)
widget.pack()
widget.update()
global rects,selareapos,app,delapp,delrects,delselarea,originselarea
global zoomfnid_m,zoomfnid_l
app=sel_area.Application(widget)
# delapp=sel_area.Application(widget)
if text=='Output':
try:
image=outputsegbands[currentfilename]['iter0']
displayfig()
except:
return
zoomfnid_m=widget.bind('<Motion>',lambda event,arg=widget:zoom(event,arg,image))
zoomfnid_l=widget.bind('<Leave>',lambda event,arg=widget:deletezoom(event,arg))
delrects=app.start(zoomfnid_m,zoomfnid_l)
widget.bind('<Double-Button-1>',lambda event,arg=widget:switchevent(event,arg,image))
print('delrects',delrects)
else:
reflabel=0
print('reflabel=',reflabel)
try:
delelareadim=app.getinfo(delrects[1])
if delelareadim!=[]:
delselarea=delelareadim
app.end()
except:
pass
if text=='Origin':
try:
image=originsegbands['Origin']
zoomfnid_m=widget.bind('<Motion>',lambda event,arg=widget:zoom(event,arg,image))
zoomfnid_l=widget.bind('<Leave>',lambda event,arg=widget:deletezoom(event,arg))
except:
return
widget.bind('<Double-Button-1>',lambda event,arg=widget:switchevent(event,arg,image))
for widget in resviewframe.winfo_children():
widget.pack_forget()
rects=app.start()
print(rects)
originselarea=True
else:
widget.unbind('<Motion>')
selareadim=app.getinfo(rects[1])
if selareadim!=[]:
selareapos=selareadim
app.end(rects)
if text=='PCs':
selareadim=app.getinfo(rects[1])
if selareadim!=[0,0,1,1] and selareadim!=[] and selareadim!=selareapos:
selareapos=selareadim
if selareapos!=[0,0,1,1] and originselarea==True:
#need to redo PCA
npfilter=np.zeros((displayimg['Origin']['Size'][0],displayimg['Origin']['Size'][1]))
filter=Image.fromarray(npfilter)
draw=ImageDraw.Draw(filter)
draw.ellipse(selareapos,fill='red')
filter=np.array(filter)
filter=np.divide(filter,np.max(filter))
filter=cv2.resize(filter,(displayfea_w,displayfea_l),interpolation=cv2.INTER_LINEAR)
partialsingleband(filter)
originselarea=False
pass
PCbuttons(resviewframe,frame)
pass
if text=='Color Deviation':
#displaypreview
displaypreview(text)
pass
if text=='ColorIndices':
#displaypreview
displaypreview(text)
pass
#print('change to '+text)
#time.sleep(1)
def updateresizeshape(shape,content):
shape.append(int(content))
return shape
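# generatedisplayimg: builds the Tk PhotoImage entries for every display view
# ('Origin', 'Output', 'PCs', 'ColorIndices', 'Color Deviation') plus the small preview
# images. The main view is scaled to fit roughly within screenstd x screenstd and the
# preview within 450 x 400; each try/except falls back to a black placeholder when the
# underlying data is not available yet, and the filename is stamped onto the Origin image.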
def generatedisplayimg(filename): # init display images
global resizeshape,previewshape
try:
# firstimg=Multiimagebands[filename]
#height,width=firstimg.size
# height,width,c=displaybandarray[filename]['LabOstu'].shape
bandsize=Multiimagebands[filename].size
if bandsize[0]*bandsize[1]>2000*2000:
ratio=findratio([bandsize[0],bandsize[1]],[2000,2000])
else:
ratio=1
height,width=bandsize[0]/ratio,bandsize[1]/ratio
# ratio=findratio([height,width],[850,850])
ratio=findratio([height,width],[screenstd,screenstd])
print('displayimg ratio',ratio)
resizeshape=[]
# if height*width<850*850:
if height*width<screenstd*screenstd:
#resize=cv2.resize(Multiimage[filename],(int(width*ratio),int(height*ratio)),interpolation=cv2.INTER_LINEAR)
updateresizeshape(resizeshape,width*ratio)
updateresizeshape(resizeshape,height*ratio)
# resizeshape.append(width*ratio)
# resizeshape.append(height*ratio)
if height>screenstd:
resizeshape=[]
ratio=round(height/screenstd)
# scale down (divide), as in the preview branch below, when one side exceeds the screen size
updateresizeshape(resizeshape,width/ratio)
updateresizeshape(resizeshape,height/ratio)
if width>screenstd:
resizeshape=[]
ratio=round(width/screenstd)
updateresizeshape(resizeshape,width/ratio)
updateresizeshape(resizeshape,height/ratio)
else:
#resize=cv2.resize(Multiimage[filename],(int(width/ratio),int(height/ratio)),interpolation=cv2.INTER_LINEAR)
updateresizeshape(resizeshape,width/ratio)
updateresizeshape(resizeshape,height/ratio)
ratio=findratio([height,width],[400,450])
previewshape=[]
if height*width<450*400:
#resize=cv2.resize(Multiimage[filename],(int(width*ratio),int(height*ratio)),interpolation=cv2.INTER_LINEAR)
updateresizeshape(previewshape,width*ratio)
updateresizeshape(previewshape,height*ratio)
if height>400:
previewshape=[]
ratio=round(height/400)
updateresizeshape(previewshape,width/ratio)
updateresizeshape(previewshape,height/ratio)
if width>450:
previewshape=[]
ratio=round(width/450)
updateresizeshape(previewshape,width/ratio)
updateresizeshape(previewshape,height/ratio)
else:
#resize=cv2.resize(Multiimage[filename],(int(width/ratio),int(height/ratio)),interpolation=cv2.INTER_LINEAR)
updateresizeshape(previewshape,width/ratio)
updateresizeshape(previewshape,height/ratio)
resize=cv2.resize(Multiimage[filename],(int(resizeshape[0]),int(resizeshape[1])),interpolation=cv2.INTER_LINEAR)
originimg=Image.fromarray(resize.astype('uint8'))
originsegbands.update({'Origin':originimg})
rgbimg=Image.fromarray(resize.astype('uint8'))
draw=ImageDraw.Draw(rgbimg)
suggsize=14
font=ImageFont.truetype('cmb10.ttf',size=suggsize)
content='\n File: '+filename
draw.text((10-1, 10+1), text=content, font=font, fill='white')
draw.text((10+1, 10+1), text=content, font=font, fill='white')
draw.text((10-1, 10-1), text=content, font=font, fill='white')
draw.text((10+1, 10-1), text=content, font=font, fill='white')
#draw.text((10,10),text=content,font=font,fill=(141,2,31,0))
draw.text((10,10),text=content,font=font,fill='black')
rgbimg=ImageTk.PhotoImage(rgbimg)
tempdict={}
tempdict.update({'Size':resize.shape})
tempdict.update({'Image':rgbimg})
except:
tempdict={}
tempimg=np.zeros((screenstd,screenstd))
tempdict.update({'Size':tempimg.shape})
tempdict.update({'Image':ImageTk.PhotoImage(Image.fromarray(tempimg.astype('uint8')))})
displayimg['Origin']=tempdict
#if height*width<850*850:
# resize=cv2.resize(Multigray[filename],(int(width*ratio),int(height*ratio)),interpolation=cv2.INTER_LINEAR)
#else:
#resize=cv2.resize(Multigray[filename],(int(width/ratio),int(height/ratio)),interpolation=cv2.INTER_LINEAR)
tempimg=np.zeros((screenstd,screenstd))
tempdict={}
try:
tempdict.update({'Size':resize.shape})
tempdict.update({'Image':ImageTk.PhotoImage(Image.fromarray(np.zeros((int(resizeshape[1]),int(resizeshape[0]))).astype('uint8')))})
except:
tempdict.update({'Size':tempimg.shape})
#if height*width<850*850:
# tempdict.update({'Image':ImageTk.PhotoImage(Image.fromarray(np.zeros((int(height*ratio),int(width*ratio))).astype('uint8')))})
#else:
# tempdict.update({'Image':ImageTk.PhotoImage(Image.fromarray(np.zeros((int(height/ratio),int(width/ratio))).astype('uint8')))})
# tempdict.update({'Image':ImageTk.PhotoImage(Image.fromarray(np.zeros((int(resizeshape[1]),int(resizeshape[0]))).astype('uint8')))})
tempdict.update({'Image':ImageTk.PhotoImage(Image.fromarray(tempimg.astype('uint8')))})
displayimg['Output']=tempdict
tempdict={}
try:
tempdict.update({'Size':resize.shape})
tempdict.update({'Image':ImageTk.PhotoImage(Image.fromarray(np.zeros((int(resizeshape[1]),int(resizeshape[0]))).astype('uint8')))})
except:
tempdict.update({'Size':tempimg.shape})
tempdict.update({'Image':ImageTk.PhotoImage(Image.fromarray(tempimg.astype('uint8')))})
displayimg['PCs']=tempdict
tempdict={}
temppreviewdict={}
temppreviewimg=np.zeros((450,400))
try:
tempband=np.zeros((displaybandarray[filename]['LabOstu'][:,:,0].shape))
# tempband=tempband+displaybandarray[filename]['LabOstu']
# ratio=findratio([tempband.shape[0],tempband.shape[1]],[850,850])
#if tempband.shape[0]*tempband.shape[1]<850*850:
# tempband=cv2.resize(ratio,(int(tempband.shape[1]*ratio),int(tempband.shape[0]*ratio)),interpolation=cv2.INTER_LINEAR)
#else:
# tempband=cv2.resize(ratio,(int(tempband.shape[1]/ratio),int(tempband.shape[0]/ratio)),interpolation=cv2.INTER_LINEAR)
tempband=cv2.resize(tempband,(int(resizeshape[0]),int(resizeshape[1])),interpolation=cv2.INTER_LINEAR)
tempdict.update({'Size':tempband.shape})
# tempband is a 2-D zero array here, so use it directly instead of indexing a third axis
tempdict.update({'Image':ImageTk.PhotoImage(Image.fromarray(tempband.astype('uint8')))})
temppreview=cv2.resize(tempband,(int(previewshape[0]),int(previewshape[1])),interpolation=cv2.INTER_LINEAR)
temppreview=Image.fromarray(temppreview.astype('uint8'))
temppreviewdict.update({'Size':previewshape})
temppreviewdict.update({'Image':ImageTk.PhotoImage(temppreview)})
# print('resizeshape',resizeshape)
#pyplt.imsave('displayimg.png',tempband[:,:,0])
#indimg=cv2.imread('displayimg.png')
except:
tempdict.update({'Size':tempimg.shape})
tempdict.update({'Image':ImageTk.PhotoImage(Image.fromarray(tempimg.astype('uint8')))})
temppreviewdict.update({'Size':temppreviewimg.shape})
temppreviewdict.update({'Image':ImageTk.PhotoImage(Image.fromarray(temppreviewimg.astype('uint8')))})
displayimg['ColorIndices']=tempdict
previewimg['ColorIndices']=temppreviewdict
#resize=cv2.resize(Multigray[filename],(int(resizeshape[0]),int(resizeshape[1])),interpolation=cv2.INTER_LINEAR)
#grayimg=ImageTk.PhotoImage(Image.fromarray(resize.astype('uint8')))
#tempdict={}
#tempdict.update({'Size':resize.shape})
#tempdict.update({'Image':grayimg})
tempdict={}
temppreviewdict={}
try:
colordeviate=np.zeros((tempband.shape[0],tempband.shape[1],3),'uint8')
kvar=int(kmeans.get())
for i in range(kvar):
locs=np.where(tempband==i)
colordeviate[locs]=colorbandtable[i,:]
# pyplt.imsave('colordeviation.png',colordeviate)
# # colordevimg=Image.fromarray(colordeviate.astype('uint8'))
# # colordevimg.save('colordeviation.png',"PNG")
# testcolor=Image.open('colordeviation.png')
print('colordeviation.png')
# colortempdict={}
colordeviate=cv2.resize(colordeviate,(int(resizeshape[0]),int(resizeshape[1])),interpolation=cv2.INTER_LINEAR)
tempdict.update({'Size':colordeviate.shape})
tempdict.update({'Image':ImageTk.PhotoImage(Image.fromarray(colordeviate.astype('uint8')))})
# colortempdict.update({'Size':colordeviate.shape})
# colortempdict.update({'Image':ImageTk.PhotoImage(Image.fromarray(colordeviate.astype('uint8')))})
# colortempdict.update({'Image':ImageTk.PhotoImage(testcolor)})
# tempdict={}
temppreview=cv2.resize(colordeviate,(int(previewshape[0]),int(previewshape[1])),interpolation=cv2.INTER_LINEAR)
temppreviewdict.update({'Size':temppreview.shape})
temppreviewdict.update({'Image':ImageTk.PhotoImage(Image.fromarray(temppreview[:,:,0].astype('uint8')))})
except:
tempdict.update({'Size':tempimg.shape})
tempdict.update({'Image':ImageTk.PhotoImage(Image.fromarray(tempimg.astype('uint8')))})
temppreviewdict.update({'Size':temppreviewimg.shape})
temppreviewdict.update({'Image':ImageTk.PhotoImage(Image.fromarray(temppreviewimg.astype('uint8')))})
# displayimg['Color Deviation']=colortempdict
displayimg['Color Deviation']=tempdict
previewimg['Color Deviation']=temppreviewdict
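# Open_File: reads one image with OpenCV, converts it to RGB and a grayscale version, and
# registers the per-band arrays in the Multi* dictionaries. Zero and NaN pixels are
# replaced with 1e-6 so the ratio-based color indices computed later do not divide by zero.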
def Open_File(filename): #add to multi-image,multi-gray #call band calculation
global Multiimage,Multigray,Multitype,Multiimagebands,Multigraybands,filenames
try:
Filersc=cv2.imread(filename,flags=cv2.IMREAD_ANYCOLOR)
ndim=np.ndim(Filersc)
if ndim==2:
height,width=np.shape(Filersc)
channel=1
Filersc=Filersc.reshape((height,width,channel))
else:
height,width,channel=np.shape(Filersc)
Filesize=(height,width)
print('filesize:',height,width)
if ndim==2:
# single-channel input: expand to 3 channels so the rest of the pipeline works
RGBfile=cv2.cvtColor(Filersc,cv2.COLOR_GRAY2RGB)
else:
RGBfile=cv2.cvtColor(Filersc,cv2.COLOR_BGR2RGB)
Multiimage.update({filename:RGBfile})
if ndim==2:
Grayfile=np.copy(Filersc)
else:
Grayfile=cv2.cvtColor(Filersc,cv2.COLOR_BGR2Lab)
Grayfile=cv2.cvtColor(Grayfile,cv2.COLOR_BGR2GRAY)
#Grayfile=cv2.GaussianBlur(Grayfile,(3,3),cv2.BORDER_DEFAULT)
#ostu=filters.threshold_otsu(Grayfile)
#Grayfile=Grayfile.astype('float32')
#Grayfile=Grayfile/ostu
Grayimg=img(Filesize,Grayfile)
RGBbands=np.zeros((channel,height,width))
for j in range(channel):
band=RGBfile[:,:,j]
band=np.where(band==0,1e-6,band)
nans=np.isnan(band)
band[nans]=1e-6
#ostu=filters.threshold_otsu(band)
#band=band/ostu
RGBbands[j,:,:]=band
RGBimg=img(Filesize,RGBbands)
tempdict={filename:RGBimg}
Multiimagebands.update(tempdict)
tempdict={filename:Grayfile}
Multigray.update(tempdict)
tempdict={filename:0}
Multitype.update(tempdict)
tempdict={filename:Grayimg}
Multigraybands.update(tempdict)
except:
messagebox.showerror('Invalid Image Format','Cannot open '+filename)
return False
filenames.append(filename)
return True
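# Open_Map: in batch mode this delegates to batchprocess; otherwise it asks for a CSV map
# file, collects the non-empty cells into labellist/elesize, and regenerates the counting
# output for the current file from the existing segmentation labels.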
def Open_Map():
if proc_mode[proc_name].get()=='1':
batchprocess.Open_batchfile()
return
global mappath,elesize,labellist
filepath=filedialog.askopenfilename()
if len(filepath)>0:
if 'csv' in filepath:
mappath=filepath
elesize=[]
labellist=[]
rows=[]
print('open map at: '+mappath)
with open(mappath,mode='r',encoding='utf-8-sig') as f:
csvreader=csv.reader(f)
for row in csvreader:
rows.append(row)
temprow=[]
for ele in row:
if ele!='':
temprow.append(ele)
elesize.append(len(temprow))
for i in range(len(rows)):
for j in range(len(rows[i])):
if rows[i][j]!='':
labellist.append(rows[i][j])
else:
messagebox.showerror('Invalid File',message='Please open a CSV format file as the map file.')
colortable=tkintercorestat.get_colortable(reseglabels)
tup=(reseglabels,[],colortable,{},currentfilename)
print(elesize)
mapdict,mapimage,smallset=showcounting(tup,True,True,True)
tempimgbands={}
tempimgdict={}
tempsmall={}
tempimgbands.update({'iter0':mapimage})
tempimgdict.update({'iter0':mapdict})
tempsmall.update({'iter0':smallset})
outputimgdict.update({currentfilename:tempimgdict})
outputimgbands.update({currentfilename:tempimgbands})
outputsegbands.update({currentfilename:tempsmall})
changeoutputimg(currentfilename,'1')
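# Open_Multifile: file-open entry point for interactive mode. Resets essentially all
# per-image global state, loads every selected file via Open_File, computes the display
# feature bands (singleband), runs an initial k-means classification, and re-enables the
# UI controls.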
def Open_Multifile():
global extractbutton,outputbutton
if proc_mode[proc_name].get()=='1':
batchprocess.Open_batchfolder()
extractbutton.config(state=NORMAL)
outputbutton.config(state=NORMAL)
return
# else:
# extractbutton.config(state=DISABLED)
global Multiimage,Multigray,Multitype,Multiimagebands,changefileframe,imageframe,Multigraybands,filenames
global changefiledrop,filedropvar,originbandarray,displaybandarray,clusterdisplay,currentfilename,resviewframe
global refsubframe,reseglabels,refbutton,figcanvas,loccanvas,originlabels,changekmeans,refarea
global originlabeldict,convband,panelA
global havecolorstrip
global colordicesband,oldpcachoice
global pccombinebar_up
global displaylabels,displaypclabels
global buttonvar
global colorindicearray
global selarea
MULTIFILES=filedialog.askopenfilenames()
root.update()
if len(MULTIFILES)>0:
Multiimage={}
Multigray={}
Multitype={}
Multiimagebands={}
Multigraybands={}
filenames=[]
originbandarray={}
colorindicearray={}
displaybandarray={}
clusterdisplay={}
oldpcachoice=[]
reseglabels=None
originlabels=None
originlabeldict=None
#changekmeans=True
convband=None
refvar.set('0')
kmeans.set('2')
panelA.delete(ALL)
panelA.unbind('<Button-1>')
panelA.unbind('<Shift-Button-1>')
refarea=None
havecolorstrip=False
displaypclabels=None
buttonvar.set(0)
# if 'NDI' in bandchoice:
# bandchoice['NDI'].set('1')
# if 'NDVI' in bandchoice:
# bandchoice['NDVI'].set('1')
refbutton.config(state=DISABLED)
# selareabutton.configure(state=DISABLED)
selarea.set('0')
figcanvas.delete(ALL)
#loccanvas=None
for widget in refsubframe.winfo_children():
widget.config(state=DISABLED)
#for widget in resviewframe.winfo_children():
# widget.config(state=DISABLED)
if outputbutton is not None:
outputbutton.config(state=DISABLED)
for i in range(len(MULTIFILES)):
if Open_File(MULTIFILES[i])==False:
return
generatedisplayimg(filenames[0])
changedisplayimg(imageframe,'Origin')
# imageframe.update()
# raise NameError
# yield
# thread=threading.Thread(target=singleband,args=(MULTIFILES[i],))
singleband(MULTIFILES[i])
# thread.start()
# thread.join()
for widget in changefileframe.winfo_children():
widget.pack_forget()
currentfilename=filenames[0]
# filedropvar.set(filenames[0])
# changefiledrop=OptionMenu(changefileframe,filedropvar,*filenames,command=partial(changeimage,imageframe))
# changefiledrop.pack()
#singleband(filenames[0])
generatedisplayimg(filenames[0])
# changedisplayimg(imageframe,'Origin')
getPCs()
if len(bandchoice)>0:
for i in range(len(cluster)):
bandchoice[cluster[i]].set('')
#changedisplayimg(imageframe,'Origin')
kmeans.set(1)
#reshapemodified_tif=np.zeros((displaybandarray[currentfilename]['LabOstu'].shape[0]*displaybandarray[currentfilename]['LabOstu'].shape[1],3))
#colordicesband=kmeansclassify(['LabOstu'],reshapemodified_tif)
displaylabels=kmeansclassify()
generateimgplant('')
changedisplayimg(imageframe,'Origin')
# if len(bandchoice)>0:
# bandchoice['LabOstu'].set('1')
global buttondisplay,pcaframe,kmeansbar
for widget in buttondisplay.winfo_children():
widget.config(state=NORMAL)
# for widget in pcaframe.winfo_children():
# for widget in pcselframe.winfo_children():
# widget.config(state=NORMAL)
extractbutton.config(state=NORMAL)
kmeansbar.state(["!disabled"])
pccombinebar_up.state(["!disabled"])
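# fillpartialbands / fillbands: helpers that write one band into a column of the flattened
# feature matrix. fillpartialbands only touches the rows where the selection filter is
# non-zero; fillbands also caches the resized band in the origin/display dictionaries and
# optionally applies a mask before flattening.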
def fillpartialbands(vector,vectorindex,band,filter_vector):
nonzero=np.where(filter_vector!=0)
vector[nonzero,vectorindex]=vector[nonzero,vectorindex]+band
def fillbands(originbands,displaybands,vector,vectorindex,name,band,filter=0):
tempdict={name:band}
if isinstance(filter,int):
if name not in originbands:
originbands.update(tempdict)
image=cv2.resize(band,(displayfea_w,displayfea_l),interpolation=cv2.INTER_LINEAR)
displaydict={name:image}
displaybands.update(displaydict)
fea_bands=image.reshape((displayfea_l*displayfea_w),1)[:,0]
vector[:,vectorindex]=vector[:,vectorindex]+fea_bands
else:
if name not in originbands:
originbands.update(tempdict)
image=cv2.resize(band,(displayfea_w,displayfea_l),interpolation=cv2.INTER_LINEAR)
image=np.multiply(image,filter)
displaydict={name:image}
displaybands.update(displaydict)
fea_bands=image.reshape((displayfea_l*displayfea_w),1)[:,0]
vector[:,vectorindex]=vector[:,vectorindex]+fea_bands
return
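# plot3d: debugging helper that scatter-plots the first three PC scores projected onto the
# three coordinate planes and saves the figure as '3dplot_PC.png'.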
def plot3d(pcas):
from mpl_toolkits.mplot3d import Axes3D # noqa: F401 unused import
import matplotlib.pyplot as plt
fig=plt.figure()
ax=fig.add_subplot(111,projection='3d')
x=pcas[:,0]
y=pcas[:,1]
z=pcas[:,2]*0+np.min(pcas[:,2])
ax.scatter(x,y,z,color='tab:purple')
x=pcas[:,0]*0+np.min(pcas[:,0])
y=pcas[:,1]
z=pcas[:,2]
ax.scatter(x,y,z,color='tab:pink')
x=pcas[:,0]
y=pcas[:,1]*0+np.max(pcas[:,1])
z=pcas[:,2]
ax.scatter(x,y,z,color='tab:olive')
ax.set_xlabel('Color Indices PC1')
ax.set_ylabel('Color Indices PC2')
ax.set_zlabel('Color Indices PC3')
# plt.show()
plt.savefig('3dplot_PC.png')
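# partialoneband: variant of partialsingleband for single-channel images. Band 0 is reused
# for R, G, B and all pseudo color-index slots, so no PCA is performed; the first 14
# columns of the raw feature matrix are reshaped into the display array and a single PC
# thumbnail button is created.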
def partialoneband(filter):
global displaybandarray,originpcabands
global pcbuttons
global nonzero_vector,partialpca
partialpca=True
bands=Multiimagebands[currentfilename].bands
channel,fea_l,fea_w=bands.shape
nonzero=np.where(filter!=0)
RGB_vector=np.zeros((displayfea_l*displayfea_w,3))
colorindex_vector=np.zeros((displayfea_l*displayfea_w,12))
filter_vector=filter.reshape((displayfea_l*displayfea_w),1)[:,0]
originbands={}
displays={}
Red=cv2.resize(bands[0,:,:],(displayfea_w,displayfea_l),interpolation=cv2.INTER_LINEAR)[nonzero]
Green=cv2.resize(bands[0,:,:],(displayfea_w,displayfea_l),interpolation=cv2.INTER_LINEAR)[nonzero]
# Red=cv2.adaptiveThreshold(Red,255,cv2.ADAPTIVE_THRESH_MEAN_C,cv2.THRESH_BINARY,11,2)
# Green=cv2.adaptiveThreshold(Green,255,cv2.ADAPTIVE_THRESH_GAUSSIAN_C,cv2.THRESH_BINARY,11,2)
Blue=cv2.resize(bands[0,:,:],(displayfea_w,displayfea_l),interpolation=cv2.INTER_LINEAR)[nonzero]
# Blue=cv2.threshold(Blue,0,255,cv2.THRESH_BINARY+cv2.THRESH_OTSU)
fillpartialbands(RGB_vector,0,Red,filter_vector)
fillpartialbands(RGB_vector,1,Green,filter_vector)
fillpartialbands(RGB_vector,2,Blue,filter_vector)
PAT_R=Red
PAT_G=Red
PAT_B=Red
ROO_R=Red
ROO_G=Red
ROO_B=Red
DIF_R=Red
DIF_G=Red
DIF_B=Red
GLD_R=Red
GLD_G=Red
GLD_B=Red
fillpartialbands(colorindex_vector,0,PAT_R,filter_vector)
fillpartialbands(colorindex_vector,1,PAT_G,filter_vector)
fillpartialbands(colorindex_vector,2,PAT_B,filter_vector)
fillpartialbands(colorindex_vector,3,ROO_R,filter_vector)
fillpartialbands(colorindex_vector,4,ROO_G,filter_vector)
fillpartialbands(colorindex_vector,5,ROO_B,filter_vector)
fillpartialbands(colorindex_vector,6,DIF_R,filter_vector)
fillpartialbands(colorindex_vector,7,DIF_G,filter_vector)
fillpartialbands(colorindex_vector,8,DIF_B,filter_vector)
fillpartialbands(colorindex_vector,9,GLD_R,filter_vector)
fillpartialbands(colorindex_vector,10,GLD_G,filter_vector)
fillpartialbands(colorindex_vector,11,GLD_B,filter_vector)
nonzero_vector=np.where(filter_vector!=0)
displayfea_vector=np.concatenate((RGB_vector,colorindex_vector),axis=1)
featurechannel=14
# np.savetxt('color-index.csv',displayfea_vector,delimiter=',',fmt='%10.5f')
# displayfea_vector=np.concatenate((RGB_vector,colorindex_vector),axis=1)
originpcabands.update({currentfilename:displayfea_vector})
pcabandsdisplay=displayfea_vector[:,:14]
pcabandsdisplay=pcabandsdisplay.reshape(displayfea_l,displayfea_w,featurechannel)
tempdictdisplay={'LabOstu':pcabandsdisplay}
displaybandarray.update({currentfilename:tempdictdisplay})
# originbandarray.update({currentfilename:originbands})
# Red=displays['Band1']
# Green=displays['Band2']
# Blue=displays['Band3']
# convimg=np.zeros((Red.shape[0],Red.shape[1],3))
# convimg[:,:,0]=Red
# convimg[:,:,1]=Green
# convimg[:,:,2]=Blue
# convimg=Image.fromarray(convimg.astype('uint8'))
# convimg.save('convimg.png','PNG')
pcbuttons=[]
need_w=int(450/3)
need_h=int(400/4)
for i in range(2,3):
band=np.copy(pcabandsdisplay[:,:,i])
# imgband=(band-band.min())*255/(band.max()-band.min())
imgband=np.copy(band)
pcimg=Image.fromarray(imgband.astype('uint8'),'L')
# pcimg.save('pc'+'_'+str(i)+'.png',"PNG")
pcimg.thumbnail((need_w,need_h),Image.LANCZOS)
# pcimg.save('pc'+'_'+str(i)+'.png',"PNG")
# ratio=max(displayfea_l/need_h,displayfea_w/need_w)
# print('origin band range',band.max(),band.min())
# # band,cache=tkintercorestat.pool_forward(band,{"f":int(ratio),"stride":int(ratio)})
# band=cv2.resize(band,(need_w,need_h),interpolation=cv2.INTER_LINEAR)
# bandrange=band.max()-band.min()
# print('band range',band.max(),band.min())
# band=(band-band.min())/bandrange*255
# print('button img range',band.max(),band.min())
# buttonimg=Image.fromarray(band.astype('uint8'),'L')
pcbuttons.append(ImageTk.PhotoImage(pcimg))
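# partialsingleband: recomputes the PCA features using only the pixels selected by
# `filter` (the elliptical mask drawn on the origin view); single-channel input is handed
# off to partialoneband. The RGB bands and the twelve color indices are clipped to the
# 1st/99th percentile, standardized over the masked pixels, and projected onto the
# eigenvectors of their correlation matrices: 9 color-index PCs plus 3 RGB PCs give the
# 12 display channels.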
def partialsingleband(filter):
global displaybandarray,originpcabands
global pcbuttons
global nonzero_vector,partialpca
partialpca=True
bands=Multiimagebands[currentfilename].bands
channel,fea_l,fea_w=bands.shape
nonzero=np.where(filter!=0)
RGB_vector=np.zeros((displayfea_l*displayfea_w,3))
colorindex_vector=np.zeros((displayfea_l*displayfea_w,12))
filter_vector=filter.reshape((displayfea_l*displayfea_w),1)[:,0]
originbands={}
displays={}
if channel==1:
# Red=cv2.resize(bands[0,:,:],(displayfea_w,displayfea_l),interpolation=cv2.INTER_LINEAR)[nonzero]
# Green=cv2.resize(bands[0,:,:],(displayfea_w,displayfea_l),interpolation=cv2.INTER_LINEAR)[nonzero]
# Blue=cv2.resize(bands[0,:,:],(displayfea_w,displayfea_l),interpolation=cv2.INTER_LINEAR)[nonzero]
# fillpartialbands(RGB_vector,0,Red,filter_vector)
# fillpartialbands(RGB_vector,1,Green,filter_vector)
# fillpartialbands(RGB_vector,2,Blue,filter_vector)
partialoneband(filter)
return
else:
Red=cv2.resize(bands[0,:,:],(displayfea_w,displayfea_l),interpolation=cv2.INTER_LINEAR)[nonzero]
Green=cv2.resize(bands[1,:,:],(displayfea_w,displayfea_l),interpolation=cv2.INTER_LINEAR)[nonzero]
Blue=cv2.resize(bands[2,:,:],(displayfea_w,displayfea_l),interpolation=cv2.INTER_LINEAR)[nonzero]
fillpartialbands(RGB_vector,0,Red,filter_vector)
fillpartialbands(RGB_vector,1,Green,filter_vector)
fillpartialbands(RGB_vector,2,Blue,filter_vector)
PAT_R=Red/(Red+Green)
PAT_G=Green/(Green+Blue)
PAT_B=Blue/(Blue+Red)
# add a small epsilon to the ratio denominators (as singleband does) to avoid division by zero
ROO_R=Red/(Green+1e-6)
ROO_G=Green/(Blue+1e-6)
ROO_B=Blue/(Red+1e-6)
DIF_R=2*Red-Green-Blue
DIF_G=2*Green-Blue-Red
DIF_B=2*Blue-Red-Green
GLD_R=Red/(np.multiply(np.power(Blue,0.618),np.power(Green,0.382))+1e-6)
GLD_G=Green/(np.multiply(np.power(Blue,0.618),np.power(Red,0.382))+1e-6)
GLD_B=Blue/(np.multiply(np.power(Green,0.618),np.power(Red,0.382))+1e-6)
fillpartialbands(colorindex_vector,0,PAT_R,filter_vector)
fillpartialbands(colorindex_vector,1,PAT_G,filter_vector)
fillpartialbands(colorindex_vector,2,PAT_B,filter_vector)
fillpartialbands(colorindex_vector,3,ROO_R,filter_vector)
fillpartialbands(colorindex_vector,4,ROO_G,filter_vector)
fillpartialbands(colorindex_vector,5,ROO_B,filter_vector)
fillpartialbands(colorindex_vector,6,DIF_R,filter_vector)
fillpartialbands(colorindex_vector,7,DIF_G,filter_vector)
fillpartialbands(colorindex_vector,8,DIF_B,filter_vector)
fillpartialbands(colorindex_vector,9,GLD_R,filter_vector)
fillpartialbands(colorindex_vector,10,GLD_G,filter_vector)
fillpartialbands(colorindex_vector,11,GLD_B,filter_vector)
for i in range(12):
perc=np.percentile(colorindex_vector[:,i],1)
print('perc',perc)
colorindex_vector[:,i]=np.where(colorindex_vector[:,i]<perc,perc,colorindex_vector[:,i])
perc=np.percentile(colorindex_vector[:,i],99)
print('perc',perc)
colorindex_vector[:,i]=np.where(colorindex_vector[:,i]>perc,perc,colorindex_vector[:,i])
for i in range(3):
perc=np.percentile(RGB_vector[:,i],1)
print('perc',perc)
RGB_vector[:,i]=np.where(RGB_vector[:,i]<perc,perc,RGB_vector[:,i])
perc=np.percentile(RGB_vector[:,i],99)
print('perc',perc)
RGB_vector[:,i]=np.where(RGB_vector[:,i]>perc,perc,RGB_vector[:,i])
nonzero_vector=np.where(filter_vector!=0)
rgb_M=np.mean(RGB_vector[nonzero_vector,:].T,axis=1)
colorindex_M=np.mean(colorindex_vector[nonzero_vector,:].T,axis=1)
print('rgb_M',rgb_M,'colorindex_M',colorindex_M)
rgb_C=RGB_vector[nonzero_vector,:][0]-rgb_M.T
colorindex_C=colorindex_vector[nonzero_vector,:][0]-colorindex_M.T
rgb_V=np.corrcoef(rgb_C.T)
color_V=np.corrcoef(colorindex_C.T)
nans=np.isnan(color_V)
color_V[nans]=1e-6
rgb_std=rgb_C/(np.std(RGB_vector[nonzero_vector,:].T,axis=1)).T
color_std=colorindex_C/(np.std(colorindex_vector[nonzero_vector,:].T,axis=1)).T
nans=np.isnan(color_std)
color_std[nans]=1e-6
rgb_eigval,rgb_eigvec=np.linalg.eig(rgb_V)
color_eigval,color_eigvec=np.linalg.eig(color_V)
print('rgb_eigvec',rgb_eigvec)
print('color_eigvec',color_eigvec)
featurechannel=12
pcabands=np.zeros((colorindex_vector.shape[0],featurechannel))
rgbbands=np.zeros((colorindex_vector.shape[0],3))
for i in range(0,9):
pcn=color_eigvec[:,i]
pcnbands=np.dot(color_std,pcn)
pcvar=np.var(pcnbands)
print('color index pc',i+1,'var=',pcvar)
pcabands[nonzero_vector,i]=pcabands[nonzero_vector,i]+pcnbands
for i in range(9,12):
pcn=rgb_eigvec[:,i-9]
pcnbands=np.dot(rgb_std,pcn)
pcvar=np.var(pcnbands)
print('rgb pc',i-9+1,'var=',pcvar)
pcabands[nonzero_vector,i]=pcabands[nonzero_vector,i]+pcnbands
rgbbands[nonzero_vector,i-9]=rgbbands[nonzero_vector,i-9]+pcnbands
# plot3d(pcabands)
# np.savetxt('rgb.csv',rgbbands,delimiter=',',fmt='%10.5f')
# pcabands[:,1]=np.copy(pcabands[:,1])
# pcabands[:,2]=pcabands[:,2]*0
# indexbands=np.zeros((colorindex_vector.shape[0],3))
# if i<5:
# indexbands[:,i-2]=indexbands[:,i-2]+pcnbands
for i in range(12):
perc=np.percentile(pcabands[:,i],1)
print('perc',perc)
pcabands[:,i]=np.where(pcabands[:,i]<perc,perc,pcabands[:,i])
perc=np.percentile(pcabands[:,i],99)
print('perc',perc)
pcabands[:,i]=np.where(pcabands[:,i]>perc,perc,pcabands[:,i])
'''save to csv'''
# indexbands[:,0]=indexbands[:,0]+pcabands[:,2]
# indexbands[:,1]=indexbands[:,1]+pcabands[:,3]
# indexbands[:,2]=indexbands[:,2]+pcabands[:,4]
# plot3d(indexbands)
# np.savetxt('pcs.csv',pcabands,delimiter=',',fmt='%10.5f')
displayfea_vector=np.concatenate((RGB_vector,colorindex_vector),axis=1)
# np.savetxt('color-index.csv',displayfea_vector,delimiter=',',fmt='%10.5f')
# displayfea_vector=np.concatenate((RGB_vector,colorindex_vector),axis=1)
originpcabands.update({currentfilename:displayfea_vector})
pcabandsdisplay=pcabands.reshape(displayfea_l,displayfea_w,featurechannel)
tempdictdisplay={'LabOstu':pcabandsdisplay}
displaybandarray.update({currentfilename:tempdictdisplay})
# originbandarray.update({currentfilename:originbands})
# Red=displays['Band1']
# Green=displays['Band2']
# Blue=displays['Band3']
# convimg=np.zeros((Red.shape[0],Red.shape[1],3))
# convimg[:,:,0]=Red
# convimg[:,:,1]=Green
# convimg[:,:,2]=Blue
# convimg=Image.fromarray(convimg.astype('uint8'))
# convimg.save('convimg.png','PNG')
pcbuttons=[]
need_w=int(450/3)
need_h=int(400/4)
for i in range(12):
band=np.copy(pcabandsdisplay[:,:,i])
imgband=(band-band.min())*255/(band.max()-band.min())
pcimg=Image.fromarray(imgband.astype('uint8'),'L')
# pcimg.save('pc'+'_'+str(i)+'.png',"PNG")
pcimg.thumbnail((need_w,need_h),Image.LANCZOS)
# pcimg.save('pc'+'_'+str(i)+'.png',"PNG")
# ratio=max(displayfea_l/need_h,displayfea_w/need_w)
# print('origin band range',band.max(),band.min())
# # band,cache=tkintercorestat.pool_forward(band,{"f":int(ratio),"stride":int(ratio)})
# band=cv2.resize(band,(need_w,need_h),interpolation=cv2.INTER_LINEAR)
# bandrange=band.max()-band.min()
# print('band range',band.max(),band.min())
# band=(band-band.min())/bandrange*255
# print('button img range',band.max(),band.min())
# buttonimg=Image.fromarray(band.astype('uint8'),'L')
pcbuttons.append(ImageTk.PhotoImage(pcimg))
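# oneband: full-image feature extraction for single-channel input. As in partialoneband,
# the only band is copied into every RGB and color-index slot, no PCA is run, and the
# first 14 feature columns are reshaped into the display array used by the rest of the GUI.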
def oneband(file):
global displaybandarray,originbandarray,originpcabands,displayfea_l,displayfea_w
global pcbuttons
global partialpca
partialpca=False
try:
bands=Multiimagebands[file].bands
except:
return
pcbuttons=[]
channel,fea_l,fea_w=bands.shape
print('bandsize',fea_l,fea_w)
if fea_l*fea_w>2000*2000:
ratio=findratio([fea_l,fea_w],[2000,2000])
else:
ratio=1
print('ratio',ratio)
originbands={}
displays={}
displaybands=cv2.resize(bands[0,:,:],(int(fea_w/ratio),int(fea_l/ratio)),interpolation=cv2.INTER_LINEAR)
displayfea_l,displayfea_w=displaybands.shape
RGB_vector=np.zeros((displayfea_l*displayfea_w,3))
colorindex_vector=np.zeros((displayfea_l*displayfea_w,12))
Red=bands[0,:,:].astype('uint8')
# _,Red=cv2.threshold(Red,0,255,cv2.THRESH_BINARY+cv2.THRESH_OTSU)
Green=bands[0,:,:].astype('uint8')
# _,Green=cv2.threshold(Green,0,255,cv2.THRESH_OTSU)
Blue=bands[0,:,:].astype('uint8')
# _,Blue=cv2.threshold(Blue,0,255,cv2.THRESH_BINARY+cv2.THRESH_OTSU)
fillbands(originbands,displays,RGB_vector,0,'Band1',Red)
fillbands(originbands,displays,RGB_vector,1,'Band2',Green)
fillbands(originbands,displays,RGB_vector,2,'Band3',Blue)
PAT_R=bands[0,:,:].astype('uint8')
# PAT_R=cv2.adaptiveThreshold(PAT_R,255,cv2.ADAPTIVE_THRESH_GAUSSIAN_C,cv2.THRESH_BINARY,11,2)
PAT_G=bands[0,:,:]
# PAT_G=cv2.adaptiveThreshold(PAT_G,255,cv2.ADAPTIVE_THRESH_MEAN_C,cv2.THRESH_BINARY,11,2)
PAT_B=bands[0,:,:]
ROO_R=bands[0,:,:]
ROO_G=bands[0,:,:]
ROO_B=bands[0,:,:]
DIF_R=bands[0,:,:]
DIF_G=bands[0,:,:]
DIF_B=bands[0,:,:]
GLD_R=bands[0,:,:]
GLD_G=bands[0,:,:]
GLD_B=bands[0,:,:]
fillbands(originbands,displays,colorindex_vector,0,'PAT_R',PAT_R)
fillbands(originbands,displays,colorindex_vector,1,'PAT_G',PAT_G)
fillbands(originbands,displays,colorindex_vector,2,'PAT_B',PAT_B)
fillbands(originbands,displays,colorindex_vector,3,'ROO_R',ROO_R)
fillbands(originbands,displays,colorindex_vector,4,'ROO_G',ROO_G)
fillbands(originbands,displays,colorindex_vector,5,'ROO_B',ROO_B)
fillbands(originbands,displays,colorindex_vector,6,'DIF_R',DIF_R)
fillbands(originbands,displays,colorindex_vector,7,'DIF_G',DIF_G)
fillbands(originbands,displays,colorindex_vector,8,'DIF_B',DIF_B)
fillbands(originbands,displays,colorindex_vector,9,'GLD_R',GLD_R)
fillbands(originbands,displays,colorindex_vector,10,'GLD_G',GLD_G)
fillbands(originbands,displays,colorindex_vector,11,'GLD_B',GLD_B)
displayfea_vector=np.concatenate((RGB_vector,colorindex_vector),axis=1)
# np.savetxt('color-index.csv',displayfea_vector,delimiter=',',fmt='%10.5f')
featurechannel=14
originpcabands.update({file:displayfea_vector})
# pcabandsdisplay=pcabands.reshape(displayfea_l,displayfea_w,featurechannel)
# pcabandsdisplay=np.concatenate((RGB_vector,colorindex_vector),axis=2)
pcabandsdisplay=displayfea_vector[:,:14]
pcabandsdisplay=pcabandsdisplay.reshape(displayfea_l,displayfea_w,featurechannel)
tempdictdisplay={'LabOstu':pcabandsdisplay}
displaybandarray.update({file:tempdictdisplay})
originbandarray.update({file:originbands})
# Red=displays['Band1']
# Green=displays['Band2']
# Blue=displays['Band3']
# convimg=np.zeros((Red.shape[0],Red.shape[1],3))
# convimg[:,:,0]=Red
# convimg[:,:,1]=Green
# convimg[:,:,2]=Blue
# convimg=Image.fromarray(convimg.astype('uint8'))
# convimg.save('convimg.png','PNG')
need_w=int(450/3)
need_h=int(400/4)
for i in range(2,3):
band=np.copy(pcabandsdisplay[:,:,i])
# band=np.copy(Red)
# imgband=(band-band.min())*255/(band.max()-band.min())
imgband=np.copy(band)
pcimg=Image.fromarray(imgband.astype('uint8'),'L')
# pcimg.save('pc'+'_'+str(i)+'.png',"PNG")
pcimg.thumbnail((need_w,need_h),Image.LANCZOS)
# pcimg.save('pc'+'_'+str(i)+'.png',"PNG")
# ratio=max(displayfea_l/need_h,displayfea_w/need_w)
# print('origin band range',band.max(),band.min())
# # band,cache=tkintercorestat.pool_forward(band,{"f":int(ratio),"stride":int(ratio)})
# band=cv2.resize(band,(need_w,need_h),interpolation=cv2.INTER_LINEAR)
# bandrange=band.max()-band.min()
# print('band range',band.max(),band.min())
# band=(band-band.min())/bandrange*255
# print('button img range',band.max(),band.min())
# buttonimg=Image.fromarray(band.astype('uint8'),'L')
pcbuttons.append(ImageTk.PhotoImage(pcimg))
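# singleband: full-image feature extraction for RGB input (single-channel files are handed
# off to oneband). It builds the 3 RGB columns and 12 color-index columns, clips them to
# the 1st/99th percentile, and replaces them with PCA scores computed from the correlation
# matrices (9 color-index PCs + 3 RGB PCs = 12 display channels). Thumbnails of the 12 PC
# images are kept in pcbuttons for the 'PCs' view.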
def singleband(file):
global displaybandarray,originbandarray,originpcabands,displayfea_l,displayfea_w
global pcbuttons
global partialpca
partialpca=False
try:
bands=Multiimagebands[file].bands
except:
return
pcbuttons=[]
channel,fea_l,fea_w=bands.shape
print('bandsize',fea_l,fea_w)
if fea_l*fea_w>2000*2000:
ratio=findratio([fea_l,fea_w],[2000,2000])
else:
ratio=1
print('ratio',ratio)
originbands={}
displays={}
displaybands=cv2.resize(bands[0,:,:],(int(fea_w/ratio),int(fea_l/ratio)),interpolation=cv2.INTER_LINEAR)
# displaybands=np.copy(bands[0,:,:])
displayfea_l,displayfea_w=displaybands.shape
# displayfea_l,displayfea_w=fea_l,fea_w
print(displayfea_l,displayfea_w)
RGB_vector=np.zeros((displayfea_l*displayfea_w,3))
colorindex_vector=np.zeros((displayfea_l*displayfea_w,12))
if channel==1:
# Red=bands[0,:,:]
# Green=bands[0,:,:]
# Blue=bands[0,:,:]
oneband(file)
return
else:
Red=bands[0,:,:]
Green=bands[1,:,:]
Blue=bands[2,:,:]
fillbands(originbands,displays,RGB_vector,0,'Band1',Red)
fillbands(originbands,displays,RGB_vector,1,'Band2',Green)
fillbands(originbands,displays,RGB_vector,2,'Band3',Blue)
# import matplotlib.pyplot as plt
# fig,axs=plt.subplots(1,3)
# for i in range(3):
# minpc2=np.min(RGB_vector[:,i])
# maxpc2=np.max(RGB_vector[:,i])
# print(minpc2,maxpc2)
# bins=range(int(minpc2),int(maxpc2),10)
# axs[i].hist(RGB_vector[:,i],bins,range=(minpc2,maxpc2))
# axs[i].set_title('RGBband_'+str(i+1))
# # plt.hist(pcabands[:,13],bins,range=(minpc2,maxpc2))
# plt.show()
# secondsmallest_R=np.partition(Red,1)[1][0]
# secondsmallest_G=np.partition(Green,1)[1][0]
# secondsmallest_B=np.partition(Blue,1)[1][0]
#
# Red=Red+secondsmallest_R
# Green=Green+secondsmallest_G
# Blue=Blue+secondsmallest_B
# Red=Red/255+1
# Green=Green/255+1
# Blue=Blue/255+1
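# Twelve simple color indices derived from the RGB bands (names as used throughout this
# script): PAT_* are normalized ratios such as R/(R+G), ROO_* are pairwise ratios such as
# R/G, DIF_* are excess-color differences such as 2R-G-B, and GLD_* divide one band by a
# weighted geometric mean of the other two (exponents 0.618/0.382). An epsilon term keeps
# most denominators non-zero.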
PAT_R=Red/(Red+Green)
PAT_G=Green/(Green+Blue)
PAT_B=Blue/(Blue+Red)
ROO_R=Red/(Green+1e-6)
ROO_G=Green/(Blue+1e-6)
ROO_B=Blue/(Red+1e-6)
DIF_R=2*Red-Green-Blue
DIF_G=2*Green-Blue-Red
DIF_B=2*Blue-Red-Green
GLD_R=Red/(np.multiply(np.power(Blue,0.618),np.power(Green,0.382))+1e-6)
GLD_G=Green/(np.multiply(np.power(Blue,0.618),np.power(Red,0.382))+1e-6)
GLD_B=Blue/(np.multiply(np.power(Green,0.618),np.power(Red,0.382))+1e-6)
fillbands(originbands,displays,colorindex_vector,0,'PAT_R',PAT_R)
fillbands(originbands,displays,colorindex_vector,1,'PAT_G',PAT_G)
fillbands(originbands,displays,colorindex_vector,2,'PAT_B',PAT_B)
fillbands(originbands,displays,colorindex_vector,3,'ROO_R',ROO_R)
fillbands(originbands,displays,colorindex_vector,4,'ROO_G',ROO_G)
fillbands(originbands,displays,colorindex_vector,5,'ROO_B',ROO_B)
fillbands(originbands,displays,colorindex_vector,6,'DIF_R',DIF_R)
fillbands(originbands,displays,colorindex_vector,7,'DIF_G',DIF_G)
fillbands(originbands,displays,colorindex_vector,8,'DIF_B',DIF_B)
fillbands(originbands,displays,colorindex_vector,9,'GLD_R',GLD_R)
fillbands(originbands,displays,colorindex_vector,10,'GLD_G',GLD_G)
fillbands(originbands,displays,colorindex_vector,11,'GLD_B',GLD_B)
# for i in [5,11]:
# colorindex_vector[:,i]=np.log10(colorindex_vector[:,i])
# perc=np.percentile(colorindex_vector[:,i],99)
# print('perc',perc)
# colorindex_vector[:,i]=np.where(colorindex_vector[:,i]>perc,perc,colorindex_vector[:,i])
#
# for i in [0,1,3,4,9,10]:
# colorindex_vector[:,i]=np.log10(colorindex_vector[:,i])
# perc=np.percentile(colorindex_vector[:,i],90)
# print('perc',perc)
# colorindex_vector[:,i]=np.where(colorindex_vector[:,i]>perc,perc,colorindex_vector[:,i])
# for i in [5,11]:
# colorindex_vector[:,i]=np.log10(colorindex_vector[:,i])
# perc=np.percentile(colorindex_vector[:,i],99)
# print('perc',perc)
# colorindex_vector[:,i]=np.where(colorindex_vector[:,i]>perc,perc,colorindex_vector[:,i])
#
# for i in [3,4,9,10]:
# colorindex_vector[:,i]=np.log10(colorindex_vector[:,i])
# perc=np.percentile(colorindex_vector[:,i],1)
# print('perc',perc)
# colorindex_vector[:,i]=np.where(colorindex_vector[:,i]<perc,perc,colorindex_vector[:,i])
# perc=np.percentile(colorindex_vector[:,i],99)
# print('perc',perc)
# colorindex_vector[:,i]=np.where(colorindex_vector[:,i]>perc,perc,colorindex_vector[:,i])
#
# for i in [0,1]:
# colorindex_vector[:,i]=np.log10(colorindex_vector[:,i])
# perc=np.percentile(colorindex_vector[:,i],2)
# print('perc',perc)
# colorindex_vector[:,i]=np.where(colorindex_vector[:,i]<perc,perc,colorindex_vector[:,i])
# for i in [0,1,3,4,9,10]:
# colorindex_vector[:,i]=np.log10(colorindex_vector[:,i])
for i in range(12):
perc=np.percentile(colorindex_vector[:,i],1)
print('perc',perc)
colorindex_vector[:,i]=np.where(colorindex_vector[:,i]<perc,perc,colorindex_vector[:,i])
perc=np.percentile(colorindex_vector[:,i],99)
print('perc',perc)
colorindex_vector[:,i]=np.where(colorindex_vector[:,i]>perc,perc,colorindex_vector[:,i])
for i in range(3):
perc=np.percentile(RGB_vector[:,i],1)
print('perc',perc)
RGB_vector[:,i]=np.where(RGB_vector[:,i]<perc,perc,RGB_vector[:,i])
perc=np.percentile(RGB_vector[:,i],99)
print('perc',perc)
RGB_vector[:,i]=np.where(RGB_vector[:,i]>perc,perc,RGB_vector[:,i])
# import matplotlib.pyplot as plt
# fig,axs=plt.subplots(4,3)
# for i in range(12):
# minpc2=np.min(colorindex_vector[:,i])
# maxpc2=np.max(colorindex_vector[:,i])
# print(minpc2,maxpc2)
# # bins=range(int(minpc2),int(maxpc2)+1,10)
# axs[int(i/3),i%3].hist(colorindex_vector[:,i],10,range=(minpc2,maxpc2))
# axs[int(i/3),i%3].set_title('Colorindex_'+str(i+1))
# # axs[i].hist(colorindex_vector[:,i],10,range=(minpc2,maxpc2))
# # axs[i].set_title('Colorindex_'+str(i+1))
# # plt.hist(pcabands[:,13],bins,range=(minpc2,maxpc2))
# plt.show()
rgb_M=np.mean(RGB_vector.T,axis=1)
colorindex_M=np.mean(colorindex_vector.T,axis=1)
print('rgb_M',rgb_M,'colorindex_M',colorindex_M)
rgb_C=RGB_vector-rgb_M
colorindex_C=colorindex_vector-colorindex_M
rgb_V=np.corrcoef(rgb_C.T)
color_V=np.corrcoef(colorindex_C.T)
nans=np.isnan(color_V)
color_V[nans]=1e-6
rgb_std=rgb_C/np.std(RGB_vector.T,axis=1)
color_std=colorindex_C/np.std(colorindex_vector.T,axis=1)
nans=np.isnan(color_std)
color_std[nans]=1e-6
rgb_eigval,rgb_eigvec=np.linalg.eig(rgb_V)
color_eigval,color_eigvec=np.linalg.eig(color_V)
print('rgb_eigvec',rgb_eigvec)
print('color_eigvec',color_eigvec)
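# Manual PCA: the standardized color-index matrix is projected onto the eigenvectors of
# its correlation matrix (first 9 components), and the standardized RGB matrix onto its
# own eigenvectors (3 components), giving the 12 channels stored in pcabands. This is
# roughly equivalent to running a correlation-based PCA separately on the color indices
# and on the RGB bands.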
featurechannel=12
pcabands=np.zeros((colorindex_vector.shape[0],featurechannel))
rgbbands=np.zeros((colorindex_vector.shape[0],3))
# plot3d(pcabands)
# np.savetxt('rgb.csv',rgbbands,delimiter=',',fmt='%10.5f')
# pcabands[:,1]=np.copy(pcabands[:,1])
# pcabands[:,2]=pcabands[:,2]*0
indexbands=np.zeros((colorindex_vector.shape[0],3))
# for i in range(3,featurechannel):
# csvpcabands=np.zeros((colorindex_vector.shape[0],15))
for i in range(0,9):
pcn=color_eigvec[:,i]
pcnbands=np.dot(color_std,pcn)
pcvar=np.var(pcnbands)
print('color index pc',i+1,'var=',pcvar)
pcabands[:,i]=pcabands[:,i]+pcnbands
# if i<5:
# indexbands[:,i-2]=indexbands[:,i-2]+pcnbands
for i in range(9,12):
pcn=rgb_eigvec[:,i-9]
pcnbands=np.dot(rgb_std,pcn)
pcvar=np.var(pcnbands)
print('rgb pc',i-9+1,'var=',pcvar)
pcabands[:,i]=pcabands[:,i]+pcnbands
rgbbands[:,i-9]=rgbbands[:,i-9]+pcnbands
# for i in range(0,12):
# pcn=color_eigvec[:,i]
# pcnbands=np.dot(color_std,pcn)
# pcvar=np.var(pcnbands)
# print('csv color index pc',i+1,'var=',pcvar)
# csvpcabands[:,i]=csvpcabands[:,i]+pcnbands
# for i in range(12,15):
# pcn=rgb_eigvec[:,i-12]
# pcnbands=np.dot(rgb_std,pcn)
# csvpcabands[:,i]=csvpcabands[:,i]+pcnbands
#
'''save to csv'''
# indexbands[:,0]=indexbands[:,0]+pcabands[:,2]
# indexbands[:,1]=indexbands[:,1]+pcabands[:,3]
# indexbands[:,2]=indexbands[:,2]+pcabands[:,4]
# plot3d(indexbands)
# np.savetxt('pcs.csv',pcabands,delimiter=',',fmt='%10.5f')
# minpc=np.min(pcabands)
#
# meanpc=np.mean(pcabands)
# stdpc=np.std(pcabands)
# print('meanpc',meanpc,'stdpc',stdpc)
# pcabands=pcabands-meanpc/stdpc
# import matplotlib.pyplot as plt
# minpc2=np.min(pcabands[:,13])
# maxpc2=np.max(pcabands[:,13])
# print(minpc2,maxpc2)
# bins=range(int(minpc2),int(maxpc2),10)
# plt.hist(pcabands[:,13],bins,range=(minpc2,maxpc2))
# plt.show()
# np.savetxt('pcs.csv',pcabands[:,3],delimiter=',',fmt='%10.5f')
for i in range(12):
perc=np.percentile(pcabands[:,i],1)