# Import required libraries
# Numerical library
import numpy
# Image/plot visualisation
from matplotlib import pyplot
# Library for reading CSV files
from pandas import read_csv
# One of the many sklearn utilities useful for cleaning the dataset
from sklearn.preprocessing import MinMaxScaler
# Keras model import
from keras.models import Sequential
# Dense is the "traditional" layer of simple neurons
from keras.layers import Dense, Input
# SimpleRNN is the simplest class of recurrent neural networks
from keras.layers import SimpleRNN
# LSTM is a class of recurrent neural networks more advanced than plain RNNs
from keras.layers import LSTM
from tensorflow import initializers
# Function used to generate the dataset from the series
def create_dataset(dados_da_serie):
# Lists of samples
entrada_rede_neural_X, saida_rede_neural_Y = [], []
# For each point in the series, the value at that point is the input (X) and
# the value at the next point is the target (Y): the network must predict the
# next value from the previous one.
for i in range(len(dados_da_serie) - 2):
entrada_rede_neural_X.append([dados_da_serie[i, 0]])
saida_rede_neural_Y.append(dados_da_serie[i + 1, 0])
return entrada_rede_neural_X, saida_rede_neural_Y
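# For example (illustrative values): for dados_da_serie = [[10], [20], [30], [40]]
# the function yields X = [[10], [20]] and Y = [20, 30], i.e. each input is
# paired with the value one step ahead.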
def visualizar_resultados(modelo):
pyplot.subplot(211)
pyplot.title('Network loss (training)')
# Plot the network's loss value at each epoch.
pyplot.plot(modelo.history['loss'], color='blue')
pyplot.subplot(212)
pyplot.title('Mean absolute error')
# Plot the network's mean absolute error at each epoch.
pyplot.plot(modelo.history['mean_absolute_error'], color='blue')
pyplot.show()
pyplot.close()
# Load the data from the table
dados_tabela = read_csv('tabela/tabela.csv', usecols=[1], engine='python')
conjunto_de_dados = dados_tabela.values
# Convert to float32
conjunto_de_dados = conjunto_de_dados.astype('float32')
# Split the data into two sets: training (60%) and test (40%)
tamanho_conjunto_treinamento = int(len(conjunto_de_dados) * 0.60)
tamanho_conjunto_testes = len(conjunto_de_dados) - tamanho_conjunto_treinamento
dados_para_treinamento = conjunto_de_dados[0:tamanho_conjunto_treinamento, :]
dados_para_testes = conjunto_de_dados[tamanho_conjunto_treinamento:len(conjunto_de_dados), :]
# Build the training and test datasets
conjunto_de_trainamento_entrada_rede, conjunto_treinamento_saida_rede = create_dataset(dados_para_treinamento)
conjunto_de_testes_entrada_rede, conjunto_de_testes_saida_rede = create_dataset(dados_para_testes)
# Convert the lists of samples to numpy arrays
conjunto_de_trainamento_entrada_rede = numpy.array(conjunto_de_trainamento_entrada_rede)
conjunto_treinamento_saida_rede = numpy.array(conjunto_treinamento_saida_rede)
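# A minimal sketch of how these pieces typically come together (the original
# file is truncated here). The names `entradas_treinamento` and `historico`,
# the layer sizes, and the epoch/batch settings below are illustrative
# assumptions, not the author's original code.
conjunto_de_testes_entrada_rede = numpy.array(conjunto_de_testes_entrada_rede)
conjunto_de_testes_saida_rede = numpy.array(conjunto_de_testes_saida_rede)
# Keras recurrent layers expect input shaped as [samples, time steps, features].
entradas_treinamento = conjunto_de_trainamento_entrada_rede.reshape(-1, 1, 1)
modelo = Sequential()
modelo.add(Input(shape=(1, 1)))
modelo.add(LSTM(4))  # SimpleRNN(4) would be the simpler recurrent alternative
modelo.add(Dense(1))
modelo.compile(loss='mean_squared_error', optimizer='adam',
               metrics=['mean_absolute_error'])
historico = modelo.fit(entradas_treinamento, conjunto_treinamento_saida_rede,
                       epochs=100, batch_size=1, verbose=2)
visualizar_resultados(historico)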
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Feb 27 13:21:28 2021
@author: <NAME>
"""
import keyboard
import time
import pickle
import torch
import random
import cv2
import numpy as np
import copy as scp
from PIL import Image
from matplotlib import style
from dataclasses import dataclass
from operator import add
from collections import deque
style.use("ggplot")
@dataclass
class filter_options:
''' Chapter 3.4.5 '''
circle_diameter : float = 250
phis : np.array = np.array([2*np.pi/3-np.pi/2, 4*np.pi/3-np.pi/2, 3*np.pi/2])
diameters : np.array = np.array([80, 80])
@dataclass
class slot:
position : np.array = np.array([0, 0])
index : tuple = (0,0)
is_used: bool = False
class Ruetris:
#colors for visualisation with cv
piece_colors = [
(254, 254, 254), #white for free space
(0, 0, 0), #black for deadzones
(255, 255, 0), #color one for block one
(147, 88, 254), #same for other pieces
(54, 175, 144),
(255, 0, 0),
(102, 217, 238),
(254, 151, 32),
(0, 0, 255)
]
#array with geometry of pieces as numpy arrays
''' Chapter 3.2 '''
pieces = [
np.array([[2, 2], #Smashboy
[2, 2]]),
np.array([[0, 3, 0], #Teewee
[3, 3, 3]]),
np.array([[0, 4, 4], #Rhode Island Z
[4, 4, 0]]),
np.array([[5, 5, 0], #Cleveland Z
[0, 5, 5]]),
np.array([[6, 6, 6, 6]]), #Hero
np.array([[0, 0, 7], #<NAME>
[7, 7, 7]]),
np.array([[8, 0, 0], #<NAME>
[8, 8, 8]])
]
#constructor
def __init__(self, opt):
self.table_dia = opt.table_dia
self.block_size = opt.block_size
self.state_report = opt.state_report
self.filter_opt = filter_options()
self.action_input = opt.action_method
self.rnd_pos = opt.rnd_pos
self.rnd_rot = opt.rnd_rot
self.reward_action_dict = opt.reward_action_dict
self.c1 = opt.c1
self.c2 = opt.c2
self.c3 = opt.c3
self.c4 = opt.c4
self.create_supervised_data = opt.create_supervised_data
#self.supervised_data_size = opt.supervised_data_size
self.cli_print = opt.print_state_2_cli
self.create_slot_list()
self.num_of_cells = int(np.sqrt(len(self.slot_list)))
self.centre_idx = (self.num_of_cells//2, self.num_of_cells//2)
self.update_slot_indizes() #now that we know matrix dimension
self.deadzoneFilter()
#extra_board for visualization of numbers
self.info_frame = np.ones((self.num_of_cells * self.block_size,
self.num_of_cells * int(self.block_size / 2), 3),
dtype=np.uint8) * np.array([62, 11, 78], dtype=np.uint8)
self.text_color = (255, 255, 255)
self.reset()
''' Kind of observation: Chapter 3.4.2.3 '''
if self.state_report=='reduced':
self.state_dim = 4**2+4
elif self.state_report=='full_bool':
self.state_dim = self.num_of_cells**2*2
elif self.state_report=='full_float':
self.state_dim = self.num_of_cells**2
''' Kind of interaction: Chapter 3.4.2.2 '''
self.action_dim = 9 if self.action_input=='controller' else 1
def create_slot_list(self):
self.slot_list = []
Finished = False
layer_num = 1
k = 1
idx = 0
self.slot_list.append(slot()) #centre
right = np.array([self.block_size, 0])
up = np.array([0, self.block_size])
left = np.array([-self.block_size, 0])
down = np.array([0, -self.block_size])
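# Slots are laid out in a square spiral around the centre slot: ring
# `layer_num` contributes 8 * layer_num new slots, and `k` tracks the side
# length of the current ring so the walk turns at the right moments.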
while not Finished:
for i in range(layer_num*8):
if i == 0:
pos = self.slot_list[idx].position + right
elif i // (k+1) == 0:
pos = self.slot_list[idx].position + up
elif i // (k+1) == 1:
pos = self.slot_list[idx].position + left
elif i // (k+1) == 2:
pos = self.slot_list[idx].position + down
elif i // (k+1) == 3:
pos = self.slot_list[idx].position + right
self.slot_list.append(slot(position=pos))
idx += 1
if np.linalg.norm(self.slot_list[idx].position)/np.sqrt(2) >= self.table_dia/2:
Finished = True
else:
k += 2
layer_num += 1
def update_slot_indizes(self):
Finished = False
layer_num = 1
k = 1
idx = 0
self.slot_list[idx].index = self.centre_idx
right = (0, 1)
up = (-1, 0)
left = (0, -1)
down = (1, 0)
while not Finished:
for i in range(layer_num*8):
if i == 0:
idx_tuple = tuple(map(add, self.slot_list[idx].index, right))
elif i // (k+1) == 0:
idx_tuple = tuple(map(add, self.slot_list[idx].index, up))
elif i // (k+1) == 1:
idx_tuple = tuple(map(add, self.slot_list[idx].index, left))
elif i // (k+1) == 2:
idx_tuple = tuple(map(add, self.slot_list[idx].index, down))
elif i // (k+1) == 3:
idx_tuple = tuple(map(add, self.slot_list[idx].index, right))
idx += 1
self.slot_list[idx].index = idx_tuple
if idx == len(self.slot_list)-1:
Finished = True
else:
k += 2
layer_num += 1
def create_playboard(self):
''' Figure 3.19 '''
self.playboard = np.zeros((self.num_of_cells, self.num_of_cells),
dtype=float) #empty out playboard
for slot in self.slot_list:
self.playboard[slot.index] = slot.is_used
def create_pieceboard(self, piece, piece_pos):
''' Figure 3.20 '''
pieceboard = np.zeros((self.num_of_cells,
self.num_of_cells), dtype=float)
for i,row in enumerate(piece):
for j,entry in enumerate(row):
pieceboard[i + piece_pos["i"]][j + piece_pos["j"]] =\
1.0 if bool(entry) else 0.0
return pieceboard
def reset(self):
self.create_playboard()
self.piece, self.piece_pos = self.new_piece()
self.score = 0
self.wasted_places = 0
self.holes = self.get_holes(self.playboard)
self.parts_on_board = 0
self.gameover = False
return self.get_state(self.playboard, self.piece, self.piece_pos)
def get_state(self, playboard, piece, piece_pos, cli_print=False):
''' Kind of observation: Chapter 3.4.2.3 '''
if self.state_report == 'full_bool':
pieceboard = self.create_pieceboard(piece, piece_pos)
flat_state = np.append(playboard.flatten(), pieceboard.flatten(),
axis=0)
if cli_print:
print(playboard)
print(pieceboard)
return torch.FloatTensor(flat_state)
elif self.state_report == 'full_float':
if cli_print:
print(self.get_full_float_state(playboard, piece, piece_pos))
print('\n')
return torch.FloatTensor(self.get_full_float_state(\
playboard, piece, piece_pos).flatten())
elif self.state_report == 'reduced':
distances_vec = self.calc_distances(playboard, piece, piece_pos)
if cli_print:
print(distances_vec)
return torch.FloatTensor(np.array(distances_vec).flatten())
def get_state_no_piece(self, playboard):
''' Kind of observation, but with no piece: Chapter 3.4.2.3 '''
if self.state_report == 'full_bool':
pieceboard = np.zeros(playboard.shape) #no information about piece
flat_state = np.append(playboard.flatten(), pieceboard.flatten(),
axis=0)
return torch.FloatTensor(flat_state)
elif self.state_report == 'full_float':
return torch.FloatTensor(playboard.flatten())
elif self.state_report == 'reduced':
flat_state = np.zeros(20) #and no information for reduced
return torch.FloatTensor(flat_state)
def get_full_float_state(self, playboard, piece, piece_pos):
''' Figure 3.21 '''
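# Cell encoding of float_board (implied by the conditions below):
# 0.0 free, 1.0 occupied, 0.5 piece over a free cell, -1.0 piece
# overlapping an occupied cell (collision).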
float_board = scp.copy(playboard)
for i,row in enumerate(piece):
for j,entry in enumerate(row):
condition_A = bool(playboard[i + piece_pos["i"]][j + piece_pos["j"]])
condition_B = bool(entry)
if condition_B and condition_A:
float_board[i + piece_pos["i"]][j + piece_pos["j"]] = -1.0
elif condition_B and not condition_A:
float_board[i + piece_pos["i"]][j + piece_pos["j"]] = 0.5
return float_board
def get_state_action_dict(self):
''' All possible storage positions: Figure 3.24 '''
state_action_dict = {}
piece = scp.copy(self.piece)
if self.piece_idx == 0: #smashboy
num_rotations = 1
elif self.piece_idx == 2 or self.piece_idx == 3 or self.piece_idx == 4:
num_rotations = 2
else:
num_rotations = 4
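# Rotational symmetry of the pieces: the square (index 0) has one distinct
# orientation, the two Z pieces and the I piece (indices 2-4) have two, and
# the remaining pieces have four.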
for rot in range(num_rotations):
valid_i = self.num_of_cells - len(piece)
valid_j = self.num_of_cells - len(piece[0])
for x in range((valid_i+1)*(valid_j+1)):
i = x // (valid_j+1)
j = x % (valid_j+1)
pos = {'i': i, 'j': j}
out_of_boundary, overlapping = self.check_action(pos, piece)
if not out_of_boundary and not overlapping:
state_action_dict[(i, j, rot)] = \
self.get_state(self.playboard, piece, pos) if not \
self.reward_action_dict else self.action_place_dummy(self.playboard, piece, pos)
_, piece = self.rot_action('right', piece)
return state_action_dict
def action_place_dummy(self, board, piece, pos):
''' Calculation of rewards without changing state of environment.
This is for the 'Benchmark-Agent' Chapter 3.4.2.5 '''
score_inc = 0
next_board = scp.deepcopy(board)
for i,row in enumerate(piece):
for j,entry in enumerate(row):
if bool(entry):
index = (i+pos['i'], j+pos['j'])
next_board[index] = 1
distances_vec = self.calc_distances(board, piece, pos)
n_neighbours = distances_vec[0:16].count(0)
x_i_norm = sum(distances_vec[16:])/4
current_wasted_places = self.calc_wasted_places(next_board)
n_wasted_places = current_wasted_places - self.wasted_places
score_inc += self.eval_measurements(x_i_norm, n_wasted_places,
n_neighbours)
return score_inc
def calc_distances(self, playboard, piece, piece_pos):
''' Chapter 3.4.2.1 and Figure 3.12 '''
distances_vec = []
base_vec = []
for i,row in enumerate(piece):
for j,entry in enumerate(row):
if bool(entry):
distances_vec.append(self.count4directions(i,j,
playboard, piece_pos))
base_vec.append(self.distance2base(i,j, piece_pos))
flat_distances = [distance for sublist in distances_vec for distance in sublist]
return flat_distances + base_vec
def count4directions(self, i, j, playboard, piece_pos):
''' Chapter 3.4.2.3, Figure 3.22 and 3.23 '''
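# For every piece cell, walk outwards in four directions until the board edge
# or the first cell of opposite occupancy; distances are normalised by the
# board size and negated when the start cell is already occupied.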
start_idx = (i + piece_pos['i'], j + piece_pos['j'])
directions = [(1,0), (0,1), (-1,0), (0,-1)] if playboard[start_idx] \
else [(-1,0), (0,-1), (1,0), (0,1)]
looking_for = not playboard[start_idx]
distances = []
for direction in directions:
distance = 1 if playboard[start_idx] else 0
idx_tuple = tuple(map(add, start_idx, direction))
while True:
is_i_min = not idx_tuple[0]>=0
is_j_min = not idx_tuple[1]>=0
is_i_max = not idx_tuple[0]<self.num_of_cells
is_j_max = not idx_tuple[1]<self.num_of_cells
condition = is_i_min or is_j_min or is_i_max or is_j_max
if condition or playboard[idx_tuple]==looking_for:
c = self.num_of_cells
distances.append(-distance/c if playboard[start_idx] \
else distance/c)
break
else:
distance += 1
idx_tuple = tuple(map(add, idx_tuple, direction))
return distances
def distance2base(self, i, j, piece_pos): # Manhattan or squared distance would speed things up
''' Equation 3.1 '''
target_idx = (i + piece_pos['i'], j + piece_pos['j'])
for slot in self.slot_list:
if slot.index == target_idx:
return np.linalg.norm(slot.position)
from itertools import product
import numpy as np
from numpy.linalg import norm
from numpy.testing import (assert_, assert_allclose,
assert_equal, suppress_warnings)
from pytest import raises as assert_raises
from scipy.sparse import issparse, lil_matrix
from scipy.sparse.linalg import aslinearoperator
from scipy.optimize import least_squares
from scipy.optimize._lsq.least_squares import IMPLEMENTED_LOSSES
from scipy.optimize._lsq.common import EPS, make_strictly_feasible
def fun_trivial(x, a=0):
return (x - a)**2 + 5.0
def jac_trivial(x, a=0.0):
return 2 * (x - a)
def fun_2d_trivial(x):
return np.array([x[0], x[1]])
def jac_2d_trivial(x):
return np.identity(2)
def fun_rosenbrock(x):
return np.array([10 * (x[1] - x[0]**2), (1 - x[0])])
def jac_rosenbrock(x):
return np.array([
[-20 * x[0], 10],
[-1, 0]
])
def jac_rosenbrock_bad_dim(x):
return np.array([
[-20 * x[0], 10],
[-1, 0],
[0.0, 0.0]
])
def fun_rosenbrock_cropped(x):
return fun_rosenbrock(x)[0]
def jac_rosenbrock_cropped(x):
return jac_rosenbrock(x)[0]
# When x is 1-D array, return is 2-D array.
def fun_wrong_dimensions(x):
return np.array([x, x**2, x**3])
def jac_wrong_dimensions(x, a=0.0):
return np.atleast_3d(jac_trivial(x, a=a))
def fun_bvp(x):
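# Residuals of a discretised 2-D boundary-value problem: the five-point
# Laplacian of u (with zero boundary) plus the cubic term x**3.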
n = int(np.sqrt(x.shape[0]))
u = np.zeros((n + 2, n + 2))
x = x.reshape((n, n))
u[1:-1, 1:-1] = x
y = u[:-2, 1:-1] + u[2:, 1:-1] + u[1:-1, :-2] + u[1:-1, 2:] - 4 * x + x**3
return y.ravel()
class BroydenTridiagonal:
def __init__(self, n=100, mode='sparse'):
np.random.seed(0)
self.n = n
self.x0 = -np.ones(n)
self.lb = np.linspace(-2, -1.5, n)
self.ub = np.linspace(-0.8, 0.0, n)
self.lb += 0.1 * np.random.randn(n)
self.ub += 0.1 * np.random.randn(n)
self.x0 += 0.1 * np.random.randn(n)
self.x0 = make_strictly_feasible(self.x0, self.lb, self.ub)
if mode == 'sparse':
self.sparsity = lil_matrix((n, n), dtype=int)
i = np.arange(n)
self.sparsity[i, i] = 1
i = np.arange(1, n)
self.sparsity[i, i - 1] = 1
i = np.arange(n - 1)
self.sparsity[i, i + 1] = 1
self.jac = self._jac
elif mode == 'operator':
self.jac = lambda x: aslinearoperator(self._jac(x))
elif mode == 'dense':
self.sparsity = None
self.jac = lambda x: self._jac(x).toarray()
else:
assert_(False)
def fun(self, x):
f = (3 - x) * x + 1
f[1:] -= x[:-1]
f[:-1] -= 2 * x[1:]
return f
def _jac(self, x):
J = lil_matrix((self.n, self.n))
i = np.arange(self.n)
J[i, i] = 3 - 2 * x
i = np.arange(1, self.n)
J[i, i - 1] = -1
i = np.arange(self.n - 1)
J[i, i + 1] = -2
return J
class ExponentialFittingProblem:
"""Provide data and function for exponential fitting in the form
y = a + exp(b * x) + noise."""
def __init__(self, a, b, noise, n_outliers=1, x_range=(-1, 1),
n_points=11, random_seed=None):
np.random.seed(random_seed)
self.m = n_points
self.n = 2
self.p0 = np.zeros(2)
self.x = np.linspace(x_range[0], x_range[1], n_points)
self.y = a + np.exp(b * self.x)
self.y += noise * np.random.randn(self.m)
outliers = np.random.randint(0, self.m, n_outliers)
self.y[outliers] += 50 * noise * np.random.rand(n_outliers)
self.p_opt = np.array([a, b])
def fun(self, p):
return p[0] + np.exp(p[1] * self.x) - self.y
def jac(self, p):
J = np.empty((self.m, self.n))
J[:, 0] = 1
J[:, 1] = self.x * np.exp(p[1] * self.x)
return J
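# Usage sketch (illustrative, not part of the test suite): fit the noisy
# exponential with a robust loss and compare against the known optimum.
#   prob = ExponentialFittingProblem(a=1.0, b=2.0, noise=0.1, random_seed=0)
#   res = least_squares(prob.fun, prob.p0, jac=prob.jac, loss='soft_l1',
#                       f_scale=0.1)
#   print(res.x, prob.p_opt)  # res.x should be close to p_opt despite outliers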
def cubic_soft_l1(z):
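# rho[0] is the loss value, rho[1] its first and rho[2] its second derivative
# with respect to z = f**2, matching least_squares' callable-loss convention
# of returning an array of shape (3, m).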
rho = np.empty((3, z.size))
t = 1 + z
rho[0] = 3 * (t**(1/3) - 1)
rho[1] = t ** (-2/3)
rho[2] = -2/3 * t**(-5/3)
return rho
LOSSES = list(IMPLEMENTED_LOSSES.keys()) + [cubic_soft_l1]
class BaseMixin:
def test_basic(self):
# Test that the basic calling sequence works.
res = least_squares(fun_trivial, 2., method=self.method)
assert_allclose(res.x, 0, atol=1e-4)
assert_allclose(res.fun, fun_trivial(res.x))
def test_args_kwargs(self):
# Test that args and kwargs are passed correctly to the functions.
a = 3.0
for jac in ['2-point', '3-point', 'cs', jac_trivial]:
with suppress_warnings() as sup:
sup.filter(UserWarning,
"jac='(3-point|cs)' works equivalently to '2-point' for method='lm'")
res = least_squares(fun_trivial, 2.0, jac, args=(a,),
method=self.method)
res1 = least_squares(fun_trivial, 2.0, jac, kwargs={'a': a},
method=self.method)
assert_allclose(res.x, a, rtol=1e-4)
assert_allclose(res1.x, a, rtol=1e-4)
assert_raises(TypeError, least_squares, fun_trivial, 2.0,
args=(3, 4,), method=self.method)
assert_raises(TypeError, least_squares, fun_trivial, 2.0,
kwargs={'kaboom': 3}, method=self.method)
def test_jac_options(self):
for jac in ['2-point', '3-point', 'cs', jac_trivial]:
with suppress_warnings() as sup:
sup.filter(UserWarning,
"jac='(3-point|cs)' works equivalently to '2-point' for method='lm'")
res = least_squares(fun_trivial, 2.0, jac, method=self.method)
assert_allclose(res.x, 0, atol=1e-4)
assert_raises(ValueError, least_squares, fun_trivial, 2.0, jac='oops',
method=self.method)
def test_nfev_options(self):
for max_nfev in [None, 20]:
res = least_squares(fun_trivial, 2.0, max_nfev=max_nfev,
method=self.method)
assert_allclose(res.x, 0, atol=1e-4)
def test_x_scale_options(self):
for x_scale in [1.0, np.array([0.5]), 'jac']:
res = least_squares(fun_trivial, 2.0, x_scale=x_scale)
assert_allclose(res.x, 0)
assert_raises(ValueError, least_squares, fun_trivial,
2.0, x_scale='auto', method=self.method)
assert_raises(ValueError, least_squares, fun_trivial,
2.0, x_scale=-1.0, method=self.method)
assert_raises(ValueError, least_squares, fun_trivial,
2.0, x_scale=None, method=self.method)
assert_raises(ValueError, least_squares, fun_trivial,
2.0, x_scale=1.0+2.0j, method=self.method)
def test_diff_step(self):
# res1 and res2 should be equivalent.
# res2 and res3 should be different.
res1 = least_squares(fun_trivial, 2.0, diff_step=1e-1,
method=self.method)
res2 = least_squares(fun_trivial, 2.0, diff_step=-1e-1,
method=self.method)
res3 = least_squares(fun_trivial, 2.0,
diff_step=None, method=self.method)
assert_allclose(res1.x, 0, atol=1e-4)
assert_allclose(res2.x, 0, atol=1e-4)
assert_allclose(res3.x, 0, atol=1e-4)
assert_equal(res1.x, res2.x)
assert_equal(res1.nfev, res2.nfev)
def test_incorrect_options_usage(self):
assert_raises(TypeError, least_squares, fun_trivial, 2.0,
method=self.method, options={'no_such_option': 100})
assert_raises(TypeError, least_squares, fun_trivial, 2.0,
method=self.method, options={'max_nfev': 100})
def test_full_result(self):
# MINPACK doesn't work very well with factor=100 on this problem,
# thus using low 'atol'.
res = least_squares(fun_trivial, 2.0, method=self.method)
assert_allclose(res.x, 0, atol=1e-4)
assert_allclose(res.cost, 12.5)
assert_allclose(res.fun, 5)
assert_allclose(res.jac, 0, atol=1e-4)
assert_allclose(res.grad, 0, atol=1e-2)
assert_allclose(res.optimality, 0, atol=1e-2)
assert_equal(res.active_mask, 0)
if self.method == 'lm':
assert_(res.nfev < 30)
assert_(res.njev is None)
else:
assert_(res.nfev < 10)
assert_(res.njev < 10)
assert_(res.status > 0)
assert_(res.success)
def test_full_result_single_fev(self):
# MINPACK checks the number of nfev after the iteration,
# so it's hard to tell what it is going to compute.
if self.method == 'lm':
return
res = least_squares(fun_trivial, 2.0, method=self.method,
max_nfev=1)
assert_equal(res.x, np.array([2]))
assert_equal(res.cost, 40.5)
assert_equal(res.fun, np.array([9]))
assert_equal(res.jac, np.array([[4]]))
assert_equal(res.grad, np.array([36]))
assert_equal(res.optimality, 36)
assert_equal(res.active_mask, np.array([0]))
assert_equal(res.nfev, 1)
assert_equal(res.njev, 1)
assert_equal(res.status, 0)
assert_equal(res.success, 0)
def test_rosenbrock(self):
x0 = [-2, 1]
x_opt = [1, 1]
for jac, x_scale, tr_solver in product(
['2-point', '3-point', 'cs', jac_rosenbrock],
[1.0, np.array([1.0, 0.2]), 'jac'],
['exact', 'lsmr']):
with suppress_warnings() as sup:
sup.filter(UserWarning,
"jac='(3-point|cs)' works equivalently to '2-point' for method='lm'")
res = least_squares(fun_rosenbrock, x0, jac, x_scale=x_scale,
tr_solver=tr_solver, method=self.method)
assert_allclose(res.x, x_opt)
def test_rosenbrock_cropped(self):
x0 = [-2, 1]
if self.method == 'lm':
assert_raises(ValueError, least_squares, fun_rosenbrock_cropped,
x0, method='lm')
else:
for jac, x_scale, tr_solver in product(
['2-point', '3-point', 'cs', jac_rosenbrock_cropped],
[1.0, np.array([1.0, 0.2]), 'jac'],
['exact', 'lsmr']):
res = least_squares(
fun_rosenbrock_cropped, x0, jac, x_scale=x_scale,
tr_solver=tr_solver, method=self.method)
assert_allclose(res.cost, 0, atol=1e-14)
def test_fun_wrong_dimensions(self):
assert_raises(ValueError, least_squares, fun_wrong_dimensions,
2.0, method=self.method)
def test_jac_wrong_dimensions(self):
assert_raises(ValueError, least_squares, fun_trivial,
2.0, jac_wrong_dimensions, method=self.method)
def test_fun_and_jac_inconsistent_dimensions(self):
x0 = [1, 2]
assert_raises(ValueError, least_squares, fun_rosenbrock, x0,
jac_rosenbrock_bad_dim, method=self.method)
def test_x0_multidimensional(self):
x0 = np.ones(4).reshape(2, 2)
assert_raises(ValueError, least_squares, fun_trivial, x0,
method=self.method)
def test_x0_complex_scalar(self):
x0 = 2.0 + 0.0*1j
assert_raises(ValueError, least_squares, fun_trivial, x0,
method=self.method)
def test_x0_complex_array(self):
x0 = [1.0, 2.0 + 0.0*1j]
assert_raises(ValueError, least_squares, fun_trivial, x0,
method=self.method)
def test_bvp(self):
# This test was introduced with fix #5556. It turned out that
# dogbox solver had a bug with trust-region radius update, which
# could block its progress and create an infinite loop. And this
# discrete boundary value problem is the one which triggers it.
n = 10
x0 = np.ones(n**2)
if self.method == 'lm':
max_nfev = 5000 # To account for Jacobian estimation.
else:
max_nfev = 100
res = least_squares(fun_bvp, x0, ftol=1e-2, method=self.method,
max_nfev=max_nfev)
assert_(res.nfev < max_nfev)
assert_(res.cost < 0.5)
def test_error_raised_when_all_tolerances_below_eps(self):
# Test that all 0 tolerances are not allowed.
assert_raises(ValueError, least_squares, fun_trivial, 2.0,
method=self.method, ftol=None, xtol=None, gtol=None)
def test_convergence_with_only_one_tolerance_enabled(self):
if self.method == 'lm':
return # should not do test
x0 = [-2, 1]
x_opt = [1, 1]
for ftol, xtol, gtol in [(1e-8, None, None),
(None, 1e-8, None),
(None, None, 1e-8)]:
res = least_squares(fun_rosenbrock, x0, jac=jac_rosenbrock,
ftol=ftol, gtol=gtol, xtol=xtol,
method=self.method)
assert_allclose(res.x, x_opt)
class BoundsMixin:
def test_inconsistent(self):
assert_raises(ValueError, least_squares, fun_trivial, 2.0,
bounds=(10.0, 0.0), method=self.method)
def test_infeasible(self):
assert_raises(ValueError, least_squares, fun_trivial, 2.0,
bounds=(3., 4), method=self.method)
def test_wrong_number(self):
assert_raises(ValueError, least_squares, fun_trivial, 2.,
bounds=(1., 2, 3), method=self.method)
def test_inconsistent_shape(self):
assert_raises(ValueError, least_squares, fun_trivial, 2.0,
bounds=(1.0, [2.0, 3.0]), method=self.method)
# 1-D array won't be broadcast
assert_raises(ValueError, least_squares, fun_rosenbrock, [1.0, 2.0],
bounds=([0.0], [3.0, 4.0]), method=self.method)
def test_in_bounds(self):
for jac in ['2-point', '3-point', 'cs', jac_trivial]:
res = least_squares(fun_trivial, 2.0, jac=jac,
bounds=(-1.0, 3.0), method=self.method)
assert_allclose(res.x, 0.0, atol=1e-4)
assert_equal(res.active_mask, [0])
assert_(-1 <= res.x <= 3)
res = least_squares(fun_trivial, 2.0, jac=jac,
bounds=(0.5, 3.0), method=self.method)
assert_allclose(res.x, 0.5, atol=1e-4)
assert_equal(res.active_mask, [-1])
assert_(0.5 <= res.x <= 3)
def test_bounds_shape(self):
for jac in ['2-point', '3-point', 'cs', jac_2d_trivial]:
x0 = [1.0, 1.0]
res = least_squares(fun_2d_trivial, x0, jac=jac)
assert_allclose(res.x, [0.0, 0.0])
res = least_squares(fun_2d_trivial, x0, jac=jac,
bounds=(0.5, [2.0, 2.0]), method=self.method)
assert_allclose(res.x, [0.5, 0.5])
res = least_squares(fun_2d_trivial, x0, jac=jac,
bounds=([0.3, 0.2], 3.0), method=self.method)
assert_allclose(res.x, [0.3, 0.2])
res = least_squares(
fun_2d_trivial, x0, jac=jac, bounds=([-1, 0.5], [1.0, 3.0]),
method=self.method)
assert_allclose(res.x, [0.0, 0.5], atol=1e-5)
def test_rosenbrock_bounds(self):
x0_1 = np.array([-2.0, 1.0])
x0_2 = np.array([2.0, 2.0])
x0_3 = np.array([-2.0, 2.0])
x0_4 = np.array([0.0, 2.0])
x0_5 = np.array([-1.2, 1.0])
problems = [
(x0_1, ([-np.inf, -1.5], np.inf)),
(x0_2, ([-np.inf, 1.5], np.inf)),
(x0_3, ([-np.inf, 1.5], np.inf)),
(x0_4, ([-np.inf, 1.5], [1.0, np.inf])),
(x0_2, ([1.0, 1.5], [3.0, 3.0])),
(x0_5, ([-50.0, 0.0], [0.5, 100]))
]
for x0, bounds in problems:
for jac, x_scale, tr_solver in product(
['2-point', '3-point', 'cs', jac_rosenbrock],
[1.0, [1.0, 0.5], 'jac'],
['exact', 'lsmr']):
res = least_squares(fun_rosenbrock, x0, jac, bounds,
x_scale=x_scale, tr_solver=tr_solver,
method=self.method)
assert_allclose(res.optimality, 0.0, atol=1e-5)
class SparseMixin:
def test_exact_tr_solver(self):
p = BroydenTridiagonal()
assert_raises(ValueError, least_squares, p.fun, p.x0, p.jac,
tr_solver='exact', method=self.method)
assert_raises(ValueError, least_squares, p.fun, p.x0,
tr_solver='exact', jac_sparsity=p.sparsity,
method=self.method)
def test_equivalence(self):
sparse = BroydenTridiagonal(mode='sparse')
dense = BroydenTridiagonal(mode='dense')
res_sparse = least_squares(
sparse.fun, sparse.x0, jac=sparse.jac,
method=self.method)
res_dense = least_squares(
dense.fun, dense.x0, jac=sparse.jac,
method=self.method)
assert_equal(res_sparse.nfev, res_dense.nfev)
assert_allclose(res_sparse.x, res_dense.x, atol=1e-20)
assert_allclose(res_sparse.cost, 0, atol=1e-20)
assert_allclose(res_dense.cost, 0, atol=1e-20)
def test_tr_options(self):
p = BroydenTridiagonal()
res = least_squares(p.fun, p.x0, p.jac, method=self.method,
tr_options={'btol': 1e-10})
assert_allclose(res.cost, 0, atol=1e-20)
def test_wrong_parameters(self):
p = BroydenTridiagonal()
assert_raises(ValueError, least_squares, p.fun, p.x0, p.jac,
tr_solver='best', method=self.method)
assert_raises(TypeError, least_squares, p.fun, p.x0, p.jac,
tr_solver='lsmr', tr_options={'tol': 1e-10})
def test_solver_selection(self):
sparse = BroydenTridiagonal(mode='sparse')
dense = BroydenTridiagonal(mode='dense')
res_sparse = least_squares(sparse.fun, sparse.x0, jac=sparse.jac,
method=self.method)
res_dense = least_squares(dense.fun, dense.x0, jac=dense.jac,
method=self.method)
assert_allclose(res_sparse.cost, 0, atol=1e-20)
assert_allclose(res_dense.cost, 0, atol=1e-20)
assert_(issparse(res_sparse.jac))
assert_(isinstance(res_dense.jac, np.ndarray))
def test_numerical_jac(self):
p = BroydenTridiagonal()
for jac in ['2-point', '3-point', 'cs']:
res_dense = least_squares(p.fun, p.x0, jac, method=self.method)
res_sparse = least_squares(
p.fun, p.x0, jac,method=self.method,
jac_sparsity=p.sparsity)
assert_equal(res_dense.nfev, res_sparse.nfev)
assert_allclose(res_dense.x, res_sparse.x, atol=1e-20)
assert_allclose(res_dense.cost, 0, atol=1e-20)
assert_allclose(res_sparse.cost, 0, atol=1e-20)
def test_with_bounds(self):
p = BroydenTridiagonal()
for jac, jac_sparsity in product(
[p.jac, '2-point', '3-point', 'cs'], [None, p.sparsity]):
res_1 = least_squares(
p.fun, p.x0, jac, bounds=(p.lb, np.inf),
method=self.method,jac_sparsity=jac_sparsity)
res_2 = least_squares(
p.fun, p.x0, jac, bounds=(-np.inf, p.ub),
method=self.method, jac_sparsity=jac_sparsity)
res_3 = least_squares(
p.fun, p.x0, jac, bounds=(p.lb, p.ub),
method=self.method, jac_sparsity=jac_sparsity)
assert_allclose(res_1.optimality, 0, atol=1e-10)
assert_allclose(res_2.optimality, 0, atol=1e-10)
assert_allclose(res_3.optimality, 0, atol=1e-10)
def test_wrong_jac_sparsity(self):
p = BroydenTridiagonal()
sparsity = p.sparsity[:-1]
assert_raises(ValueError, least_squares, p.fun, p.x0,
jac_sparsity=sparsity, method=self.method)
def test_linear_operator(self):
p = BroydenTridiagonal(mode='operator')
res = least_squares(p.fun, p.x0, p.jac, method=self.method)
assert_allclose(res.cost, 0.0, atol=1e-20)
assert_raises(ValueError, least_squares, p.fun, p.x0, p.jac,
method=self.method, tr_solver='exact')
def test_x_scale_jac_scale(self):
p = BroydenTridiagonal()
res = least_squares(p.fun, p.x0, p.jac, method=self.method,
x_scale='jac')
assert_allclose(res.cost, 0.0, atol=1e-20)
p = BroydenTridiagonal(mode='operator')
assert_raises(ValueError, least_squares, p.fun, p.x0, p.jac,
method=self.method, x_scale='jac')
class LossFunctionMixin:
def test_options(self):
for loss in LOSSES:
res = least_squares(fun_trivial, 2.0, loss=loss,
method=self.method)
assert_allclose(res.x, 0, atol=1e-15)
assert_raises(ValueError, least_squares, fun_trivial, 2.0,
loss='hinge', method=self.method)
def test_fun(self):
# Test that res.fun is actual residuals, and not modified by loss
# function stuff.
for loss in LOSSES:
res = least_squares(fun_trivial, 2.0, loss=loss,
method=self.method)
assert_equal(res.fun, fun_trivial(res.x))
def test_grad(self):
# Test that res.grad is true gradient of loss function at the
# solution. Use max_nfev = 1, to avoid reaching minimum.
x = np.array([2.0]) # res.x will be this.
res = least_squares(fun_trivial, x, jac_trivial, loss='linear',
max_nfev=1, method=self.method)
assert_equal(res.grad, 2 * x * (x**2 + 5))
res = least_squares(fun_trivial, x, jac_trivial, loss='huber',
max_nfev=1, method=self.method)
assert_equal(res.grad, 2 * x)
res = least_squares(fun_trivial, x, jac_trivial, loss='soft_l1',
max_nfev=1, method=self.method)
assert_allclose(res.grad,
2 * x * (x**2 + 5) / (1 + (x**2 + 5)**2)**0.5)
res = least_squares(fun_trivial, x, jac_trivial, loss='cauchy',
max_nfev=1, method=self.method)
assert_allclose(res.grad, 2 * x * (x**2 + 5) / (1 + (x**2 + 5)**2))
res = least_squares(fun_trivial, x, jac_trivial, loss='arctan',
max_nfev=1, method=self.method)
assert_allclose(res.grad, 2 * x * (x**2 + 5) / (1 + (x**2 + 5)**4))
res = least_squares(fun_trivial, x, jac_trivial, loss=cubic_soft_l1,
max_nfev=1, method=self.method)
assert_allclose(res.grad,
2 * x * (x**2 + 5) / (1 + (x**2 + 5)**2)**(2/3))
def test_jac(self):
# Test that res.jac.T.dot(res.jac) gives Gauss-Newton approximation
# of Hessian. This approximation is computed by doubly differentiating
# the cost function and dropping the part containing second derivative
# of f. For a scalar function it is computed as
# H = (rho' + 2 * rho'' * f**2) * f'**2, if the expression inside the
# brackets is less than EPS it is replaced by EPS. Here, we check
# against the root of H.
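# For example, for 'soft_l1': rho' = (1 + f**2)**-0.5 and
# rho'' = -0.5 * (1 + f**2)**-1.5, so H**0.5 = f' * (1 + f**2)**-0.75,
# which is exactly what the 'soft_l1' assertion below checks.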
x = 2.0 # res.x will be this.
f = x**2 + 5 # res.fun will be this.
res = least_squares(fun_trivial, x, jac_trivial, loss='linear',
max_nfev=1, method=self.method)
assert_equal(res.jac, 2 * x)
# For `huber` loss the Jacobian correction is identically zero in the
# outlier region; in such cases it is modified to equal EPS**0.5.
res = least_squares(fun_trivial, x, jac_trivial, loss='huber',
max_nfev=1, method=self.method)
assert_equal(res.jac, 2 * x * EPS**0.5)
# Now, let's apply `f_scale` to turn the residual into an inlier.
# The loss function becomes linear.
res = least_squares(fun_trivial, x, jac_trivial, loss='huber',
f_scale=10, max_nfev=1)
assert_equal(res.jac, 2 * x)
# 'soft_l1' always gives a positive scaling.
res = least_squares(fun_trivial, x, jac_trivial, loss='soft_l1',
max_nfev=1, method=self.method)
assert_allclose(res.jac, 2 * x * (1 + f**2)**-0.75)
# For 'cauchy' the correction term turns out to be negative, and it is
# replaced by EPS**0.5.
res = least_squares(fun_trivial, x, jac_trivial, loss='cauchy',
max_nfev=1, method=self.method)
assert_allclose(res.jac, 2 * x * EPS**0.5)
# Now use scaling to turn the residual into an inlier.
res = least_squares(fun_trivial, x, jac_trivial, loss='cauchy',
f_scale=10, max_nfev=1, method=self.method)
fs = f / 10
assert_allclose(res.jac, 2 * x * (1 - fs**2)**0.5 / (1 + fs**2))
# 'arctan' gives an outlier.
res = least_squares(fun_trivial, x, jac_trivial, loss='arctan',
max_nfev=1, method=self.method)
assert_allclose(res.jac, 2 * x * EPS**0.5)
# Turn to inlier.
res = least_squares(fun_trivial, x, jac_trivial, loss='arctan',
f_scale=20.0, max_nfev=1, method=self.method)
fs = f / 20
assert_allclose(res.jac, 2 * x * (1 - 3 * fs**4)**0.5 / (1 + fs**4))
# cubic_soft_l1 will give an outlier.
res = least_squares(fun_trivial, x, jac_trivial, loss=cubic_soft_l1,
max_nfev=1)
assert_allclose(res.jac, 2 * x * EPS**0.5)
# Turn to inlier.
res = least_squares(fun_trivial, x, jac_trivial,
loss=cubic_soft_l1, f_scale=6, max_nfev=1)
fs = f / 6
assert_allclose(res.jac,
2 * x * (1 - fs**2 / 3)**0.5 * (1 + fs**2)**(-5/6))
def test_robustness(self):
for noise in [0.1, 1.0]:
p = ExponentialFittingProblem(1, 0.1, noise, random_seed=0)
for jac in ['2-point', '3-point', 'cs', p.jac]:
res_lsq = least_squares(p.fun, p.p0, jac=jac,
method=self.method)
assert_allclose(res_lsq.optimality, 0, atol=1e-2)
for loss in LOSSES:
if loss == 'linear':
continue
res_robust = least_squares(
p.fun, p.p0, jac=jac, loss=loss, f_scale=noise,
method=self.method)
assert_allclose(res_robust.optimality, 0, atol=1e-2)
assert_(norm(res_robust.x - p.p_opt) <
norm(res_lsq.x - p.p_opt))
class TestDogbox(BaseMixin, BoundsMixin, SparseMixin, LossFunctionMixin):
method = 'dogbox'
class TestTRF(BaseMixin, BoundsMixin, SparseMixin, LossFunctionMixin):
method = 'trf'
def test_lsmr_regularization(self):
p = BroydenTridiagonal()
for regularize in [True, False]:
res = least_squares(p.fun, p.x0, p.jac, method='trf',
tr_options={'regularize': regularize})
assert_allclose(res.cost, 0, atol=1e-20)
class TestLM(BaseMixin):
method = 'lm'
def test_bounds_not_supported(self):
assert_raises(ValueError, least_squares, fun_trivial,
2.0, bounds=(-3.0, 3.0), method='lm')
def test_m_less_n_not_supported(self):
x0 = [-2, 1]
assert_raises(ValueError, least_squares, fun_rosenbrock_cropped, x0,
method='lm')
def test_sparse_not_supported(self):
p = BroydenTridiagonal()
assert_raises(ValueError, least_squares, p.fun, p.x0, p.jac,
method='lm')
def test_jac_sparsity_not_supported(self):
assert_raises(ValueError, least_squares, fun_trivial, 2.0,
jac_sparsity=[1], method='lm')
def test_LinearOperator_not_supported(self):
p = BroydenTridiagonal(mode="operator")
assert_raises(ValueError, least_squares, p.fun, p.x0, p.jac,
method='lm')
def test_loss(self):
res = least_squares(fun_trivial, 2.0, loss='linear', method='lm')
assert_allclose(res.x, 0.0, atol=1e-4)
assert_raises(ValueError, least_squares, fun_trivial, 2.0,
method='lm', loss='huber')
def test_basic():
# test that 'method' arg is really optional
res = least_squares(fun_trivial, 2.0)
assert_allclose(res.x, 0, atol=1e-10)
def test_small_tolerances_for_lm():
for ftol, xtol, gtol in [(None, 1e-13, 1e-13),
(1e-13, None, 1e-13),
(1e-13, 1e-13, None)]:
assert_raises(ValueError, least_squares, fun_trivial, 2.0, xtol=xtol,
ftol=ftol, gtol=gtol, method='lm')
def test_fp32_gh12991():
# checks that smaller FP sizes can be used in least_squares
# this is the minimum working example reported for gh12991
np.random.seed(1)
x = np.linspace(0, 1, 100).astype("float32")
y = np.random.random(100).astype("float32")
"""
Module: Potential
This module implements subclasses of Potential that formulate a potential as a function in N dimensions.
This module contains all available potentials.
"""
import numpy as np
import sympy as sp
from ensembler.util import ensemblerTypes as t
from ensembler.util.ensemblerTypes import Number, Union, Iterable
# Base Classes
from ensembler.potentials._basicPotentials import _potentialNDCls
class harmonicOscillatorPotential(_potentialNDCls):
"""
ND harmonic oscillator potential
"""
name: str = "harmonicOscilator"
nDimensions: int = sp.symbols("nDimensions")
position: sp.Matrix = sp.Matrix([sp.symbols("r")])
r_shift: sp.Matrix = sp.Matrix([sp.symbols("r_shift")])
Voff: sp.Matrix = sp.Matrix([sp.symbols("V_off")])
k: sp.Matrix = sp.Matrix([sp.symbols("k")])
V_dim = 0.5 * k * (position - r_shift) ** 2 + Voff
i = sp.Symbol("i")
V_functional = sp.Sum(V_dim[i, 0], (i, 0, nDimensions))
def __init__(self, k: np.array = np.array([1.0, 1.0, 1.0]), r_shift: np.array = np.array([0.0, 0.0, 0.0]),
Voff: np.array = np.array([0.0, 0.0, 0.0]), nDimensions: int = 3):
"""
__init__
Constructs a harmonic oscillator with a dimensionality defined at runtime.
Parameters
----------
k: List[float], optional
force constants, one per dimension, defaults to [1.0, 1.0, 1.0]
r_shift: List[float], optional
shift of the minimum along each dimension, defaults to [0.0, 0.0, 0.0]
Voff: List[float], optional
constant energy offset per dimension, defaults to [0.0, 0.0, 0.0]
nDimensions: int, optional
dimensionality of the harmonic oscillator object. default: 3
"""
self.constants = {self.nDimensions:nDimensions}
self.constants.update({"k_" + str(j): k[j] for j in range(self.constants[self.nDimensions])})
self.constants.update({"r_shift" + str(j): r_shift[j] for j in range(self.constants[self.nDimensions])})
self.constants.update({"V_off_" + str(j): Voff[j] for j in range(self.constants[self.nDimensions])})
super().__init__(nDimensions=nDimensions)
def _initialize_functions(self):
"""
Build up the N-dimensional symbolic definitions.
"""
# Parameters
nDimensions= self.constants[self.nDimensions]
self.position = sp.Matrix([sp.symbols("r_" + str(i)) for i in range(nDimensions)])
self.r_shift = sp.Matrix([sp.symbols("r_shift" + str(i)) for i in range(nDimensions)])
self.V_off = sp.Matrix([sp.symbols("V_off_" + str(i)) for i in range(nDimensions)])
self.k = sp.Matrix([sp.symbols("k_" + str(i)) for i in range(nDimensions)])
# Function
self.V_dim = 0.5 * sp.matrix_multiply_elementwise(self.k, (
(self.position - self.r_shift).applyfunc(lambda x: x ** 2))) # +self.Voff
self.V_functional = sp.Sum(self.V_dim[self.i, 0], (self.i, 0, self.nDimensions - 1))
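# Usage sketch (illustrative assumption; the values are arbitrary):
#   V = harmonicOscillatorPotential(k=np.array([1.0, 2.0]),
#                                   r_shift=np.array([0.0, 1.0]),
#                                   Voff=np.array([0.0, 0.0]), nDimensions=2)
# builds V(r) = sum_i 0.5 * k_i * (r_i - r_shift_i)**2 symbolically.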
class envelopedPotential(_potentialNDCls):
"""
This implementation of exponential coupling for EDS is a more numerically
robust and flexible implementation; it allows N states. The computation of
energies and their derivatives is therefore not symbolic.
Here, N states are coupled by the log-sum-exp, resulting in a new reference
state $V_R$:
$V_R = -\frac{1}{\beta s} \ln\left(\sum_i^N e^{-\beta s_i (V_i - E^R_i)}\right)$
This potential coupling is, for example, used in EDS.
"""
name = "Enveloping Potential"
T, kb, position = sp.symbols("T kb r")
beta = 1 / (kb * T)
Vis = sp.Matrix(["V_i"])
Eoffis = sp.Matrix(["Eoff_i"])
sis = sp.Matrix(["s_i"])
i, nStates = sp.symbols("i N")
V_functional = -1 / (beta * sis[0, 0]) * sp.log(
sp.Sum(sp.exp(-beta * sis[i, 0] * (Vis[i, 0] - Eoffis[i, 0])), (i, 0, nStates)))
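# Numeric intuition (illustrative values): with two states V_1 = 0, V_2 = 4,
# s = 1, Eoff = 0 and beta = 1, V_R = -ln(exp(0) + exp(-4)) ≈ -0.018, i.e. the
# reference state tracks the lower of the two states and is smoothed near
# their crossing; decreasing s flattens the barrier further.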
def __init__(self, V_is: t.List[_potentialNDCls] = (
harmonicOscillatorPotential(nDimensions=2), harmonicOscillatorPotential(r_shift=[3,3], nDimensions=2)),
s: float = 1.0, eoff: t.List[float] = None, T: float = 1, kb: float = 1):
"""
__init__
This function constructs an enveloped potential, enveloping all given states.
Parameters
----------
V_is: List[_potentialNDCls], optional
The states (potential classes) to be enveloped (default: [harmonicOscillatorPotential(), harmonicOscillatorPotential(r_shift=[3, 3])])
s: float, optional
the smoothing parameter, lowering the barriers between the states
eoff: List[float], optional
the energy offsets of the individual states in the reference potential. These can be used to allow more uniform sampling. (default: set all to 0)
T: float, optional
the temperature of the reference state (default: 1 = T)
kb: float, optional
the Boltzmann constant (default: 1 = kb)
"""
self.constants = {self.T: T, self.kb: kb}
nStates = len(V_is)
self._Eoff_i = [0 for x in range(nStates)]
self._s = [0 for x in range(nStates)]
self._V_is = [0 for x in range(nStates)]
# for calculate implementations
self.V_is = V_is
self.s_i = s
self.Eoff_i = eoff
super().__init__(nDimensions=V_is[0].constants[V_is[0].nDimensions], nStates=len(V_is))
def _initialize_functions(self):
"""
build the symbolic functionality.
"""
# for sympy Sympy Updates - Check!:
self.statePotentials = {"state_" + str(j): self.V_is[j] for j in range(self.constants[self.nStates])}
Eoffis = {"Eoff_" + str(i): self.Eoff_i[i] for i in range(self.constants[self.nStates])}
sis = {"s_" + str(i): self.s_i[i] for i in range(self.constants[self.nStates])}
keys = zip(sorted(self.statePotentials.keys()), sorted(Eoffis.keys()), sorted(sis.keys()))
self.states = sp.Matrix([sp.symbols(l) * (sp.symbols(j) - sp.symbols(k)) for j, k, l in keys])
self.constants.update({**{state: value.V for state, value in self.statePotentials.items()}, **Eoffis, **sis})
self.V_functional = -1 / (self.beta * self.sis[0, 0]) * sp.log(
sp.Sum(sp.exp(-self.beta * self.states[self.i, 0]), (self.i, 0, self.nStates - 1)))
self._update_functions()
# also make sure that states are up to work:
[V._update_functions() for V in self.V_is]
if (all([self.s_i[0] == s for s in self.s_i[1:]])):
self.ene = self._calculate_energies_singlePos_overwrite_oneS
else:
self.ene = self._calculate_energies_singlePos_overwrite_multiS
self.force = self._calculate_dvdpos_singlePos_overwrite
@property
def V_is(self) -> t.List[_potentialNDCls]:
"""
V_is are the state potential classes enveloped by the reference state.
Returns
-------
V_is: t.List[_potentialNDCls]
"""
return self._V_is
@V_is.setter
def V_is(self, V_is: t.List[_potentialNDCls]):
if (isinstance(V_is, Iterable) and all([isinstance(Vi, _potentialNDCls) for Vi in V_is])):
self._V_is = V_is
self.constants.update({self.nStates: len(V_is)})
else:
raise IOError("Please give the enveloped potential for V_is only 1D-Potential classes in a list.")
def set_Eoff(self, Eoff: Union[Number, Iterable[Number]]):
"""
This function is setting the Energy offsets of the states enveloped by the reference state.
Parameters
----------
Eoff: Union[Number, Iterable[Number]]
"""
self.Eoff_i = Eoff
@property
def Eoff(self) -> t.List[Number]:
"""
The Energy offsets are used to bias the single states in the reference potential by a constant offset.
Therefore each state of the enveloping potential has its own energy offset.
Returns
-------
Eoff:t.List[Number]
"""
return self.Eoff_i
@Eoff.setter
def Eoff(self, Eoff: Union[Number, Iterable[Number], None]):
self.Eoff_i = Eoff
@property
def Eoff_i(self) -> t.List[Number]:
"""
The Energy offsets are used to bias the single states in the reference potential by a constant offset.
Therefore each state of the enveloping potential has its own energy offset.
Returns
-------
Eoff:t.List[Number]
"""
return self._Eoff_i
@Eoff_i.setter
def Eoff_i(self, Eoff: Union[Number, Iterable[Number], None]):
if (isinstance(Eoff, type(None))):
self._Eoff_i = [0.0 for state in range(self.constants[self.nStates])]
Eoffis = {"Eoff_" + str(i): self.Eoff_i[i] for i in range(self.constants[self.nStates])}
self.constants.update({**Eoffis})
elif (len(Eoff) == self.constants[self.nStates]):
self._Eoff_i = Eoff
Eoffis = {"Eoff_" + str(i): self.Eoff_i[i] for i in range(self.constants[self.nStates])}
self.constants.update({**Eoffis})
else:
raise IOError(
"Energy offset Vector and state potentials don't have the same length!\n states in Eoff " + str(
len(Eoff)) + "\t states in Vi" + str(len(self.V_is)))
def set_s(self, s: Union[Number, Iterable[Number]]):
"""
set_s
is a function used to set the smoothing parameter.
Parameters
----------
s:Union[Number, Iterable[Number]]
Returns
-------
"""
self.s_i = s
@property
def s(self) -> t.List[Number]:
return self.s_i
@s.setter
def s(self, s: Union[Number, Iterable[Number]]):
self.s_i = s
@property
def s_i(self) -> t.List[Number]:
return self._s
@s_i.setter
def s_i(self, s: Union[Number, Iterable[Number]]):
if (isinstance(s, Number)):
self._s = [s for x in range(self.constants[self.nStates])]
sis = {"s_" + str(i): self.s_i[i] for i in range(self.constants[self.nStates])}
self.constants.update({**sis})
elif (len(s) == self.constants[self.nStates]):
self._s = s
sis = {"s_" + str(i): self.s_i[i] for i in range(self.constants[self.nStates])}
self.constants.update({**sis})
else:
raise IOError("s Vector/Number and state potentials don't have the same length!\n states in s " + str(
len(s)) + "\t states in Vi" + str(len(self.V_is)))
self._update_functions()
def _calculate_energies_singlePos_overwrite_multiS(self, positions) -> np.array:
sum_prefactors, _ = self._logsumexp_calc_gromos(positions)
beta = self.constants[self.T] * self.constants[self.kb] # kT - *self.constants[self.T]
Vr = (-1 / (beta)) * sum_prefactors
return np.squeeze(Vr)
def _calculate_energies_singlePos_overwrite_oneS(self, positions) -> np.array:
sum_prefactors, _ = self._logsumexp_calc(positions)
beta = self.constants[self.T] * self.constants[self.kb]
Vr = (-1 / (beta * self.s_i[0])) * sum_prefactors
return np.squeeze(Vr)
def _calculate_dvdpos_singlePos_overwrite(self, positions: (t.Iterable[float])) -> np.array:
"""
Parameters
----------
positions
Returns
-------
"""
positions = np.array(positions, ndmin=2)
# print("Pos: ", position)
V_R_part, V_Is_ene = self._logsumexp_calc_gromos(positions)
V_R_part = np.array(V_R_part, ndmin=2).T
# print("V_R_part: ", V_R_part.shape, V_R_part)
# print("V_I_ene: ",V_Is_ene.shape, V_Is_ene)
V_Is_dhdpos = np.array([-statePot.force(positions) for statePot in self.V_is], ndmin=1).T
# print("V_I_force: ",V_Is_dhdpos.shape, V_Is_dhdpos)
adapt = np.concatenate([V_R_part for s in range(self.constants[self.nStates])], axis=1)
# print("ADAPT: ",adapt.shape, adapt)
scaling = np.exp(V_Is_ene - adapt)
# print("scaling: ", scaling.shape, scaling)
dVdpos_state = np.multiply(scaling,
V_Is_dhdpos) # np.array([(ene/V_R_part) * force for ene, force in zip(V_Is_ene, V_Is_dhdpos)])
# print("state_contributions: ",dVdpos_state.shape, dVdpos_state)
dVdpos = np.sum(dVdpos_state, axis=1)
# print("forces: ",dVdpos.shape, dVdpos)
return np.squeeze(dVdpos)
def _logsumexp_calc(self, position):
prefactors = []
beta = self.constants[self.T] * self.constants[self.kb]
for state in range(self.constants[self.nStates]):
prefactor = np.array(-beta * self.s_i[state] * (self.V_is[state].ene(position) - self.Eoff_i[state]),
ndmin=1).T
prefactors.append(prefactor)
prefactors = np.array(prefactors, ndmin=2).T
from scipy.special import logsumexp
# print("Prefactors", prefactors)
sum_prefactors = logsumexp(prefactors, axis=1)
# print("logexpsum: ", np.squeeze(sum_prefactors))
return np.squeeze(sum_prefactors), np.array(prefactors, ndmin=2).T
def _logsumexp_calc_gromos(self, position):
"""
code from gromos:
Parameters
----------
position
Returns
-------
"""
prefactors = []
beta = self.constants[self.T] * self.constants[self.kb] # kT - *self.constants[self.T]
partA = np.array(-beta * self.s_i[0] * (self.V_is[0].ene(position) - self.Eoff_i[0]), ndmin=1)
partB = np.array(-beta * self.s_i[1] * (self.V_is[1].ene(position) - self.Eoff_i[1]), ndmin=1)
partAB = np.array([partA, partB]).T
log_prefac = 1 + np.exp(np.min(partAB, axis=1) - np.max(partAB, axis=1))
# The source is truncated here; the following completion is an assumption,
# sketching the standard numerically stable two-state log-sum-exp.
sum_prefactors = np.max(partAB, axis=1) + np.log(log_prefac)
return np.squeeze(sum_prefactors), partAB.T
""" Tests for the model. """
import unittest
import sys
from numpy.testing import assert_array_almost_equal, assert_array_equal
import numpy as np
from numpy import random
from pyhacrf import Hacrf
from pyhacrf.state_machine import GeneralStateMachine, DefaultStateMachine
from pyhacrf.pyhacrf import _GeneralModel, _AdjacentModel
from pyhacrf import StringPairFeatureExtractor
TEST_PRECISION = 3
class TestHacrf(unittest.TestCase):
def test_initialize_parameters(self):
start_states = [0]
transitions = [(0, 0, (1, 1)),
(0, 1, (0, 1)),
(0, 0, (1, 0))]
states_to_classes = {0: 'a'}
state_machine = GeneralStateMachine(start_states=start_states,
transitions=transitions,
states_to_classes=states_to_classes)
n_features = 3
actual_parameters = Hacrf._initialize_parameters(state_machine, n_features)
expected_parameter_shape = (5, 3)
self.assertEqual(actual_parameters.shape, expected_parameter_shape)
def test_fit_predict(self):
incorrect = ['helloooo', 'freshh', 'ffb', 'h0me', 'wonderin', 'relaionship', 'hubby', 'krazii', 'mite', 'tropic']
correct = ['hello', 'fresh', 'facebook', 'home', 'wondering', 'relationship', 'husband', 'crazy', 'might', 'topic']
training = zip(incorrect, correct)
fe = StringPairFeatureExtractor(match=True, numeric=True)
xf = fe.fit_transform(training)
model = Hacrf()
model.fit(xf, [0, 0, 0, 0, 0, 1, 1, 1, 1, 1])
expected_parameters = np.array([[-10.76945326, 144.03414923, 0.],
[31.84369748, -106.41885651, 0.],
[-52.08919467, 4.56943665, 0.],
[31.01495044, -13.0593297, 0.],
[49.77302218, -6.42566204, 0.],
[-28.69877796, 24.47127009, 0.],
[-85.34524911, 21.87370646, 0.],
[106.41949333, 6.18587125, 0.]])
print(model.parameters)
assert_array_almost_equal(model.parameters, expected_parameters,
decimal=TEST_PRECISION)
expected_probas = np.array([[1.00000000e+000, 3.51235685e-039],
[1.00000000e+000, 4.79716208e-039],
[1.00000000e+000, 2.82744641e-139],
[1.00000000e+000, 6.49580729e-012],
[9.99933798e-001, 6.62022561e-005],
[8.78935957e-005, 9.99912106e-001],
[4.84538335e-009, 9.99999995e-001],
[1.25170233e-250, 1.00000000e+000],
[2.46673086e-010, 1.00000000e+000],
[1.03521293e-033, 1.00000000e+000]])
actual_predict_probas = model.predict_proba(xf)
print(actual_predict_probas)
assert_array_almost_equal(actual_predict_probas, expected_probas,
decimal=TEST_PRECISION)
expected_predictions = np.array([0, 0, 0, 0, 0, 1, 1, 1, 1, 1])
actual_predictions = model.predict(xf)
assert_array_almost_equal(actual_predictions, expected_predictions,
decimal=TEST_PRECISION)
def test_fit_predict_regularized(self):
incorrect = ['helloooo', 'freshh', 'ffb', 'h0me', 'wonderin', 'relaionship', 'hubby', 'krazii', 'mite', 'tropic']
correct = ['hello', 'fresh', 'facebook', 'home', 'wondering', 'relationship', 'husband', 'crazy', 'might', 'topic']
training = zip(incorrect, correct)
fe = StringPairFeatureExtractor(match=True, numeric=True)
xf = fe.fit_transform(training)
model = Hacrf(l2_regularization=10.0)
model.fit(xf, [0, 0, 0, 0, 0, 1, 1, 1, 1, 1])
print(model.parameters)
expected_parameters = np.array([[-0.0569188, 0.07413339, 0.],
[0.00187709, -0.06377866, 0.],
[-0.01908823, 0.00586189, 0.],
[0.01721114, -0.00636556, 0.],
[0.01578279, 0.0078614, 0.],
[-0.0139057, -0.00862948, 0.],
[-0.00623241, 0.02937325, 0.],
[0.00810951, -0.01774676, 0.]])
assert_array_almost_equal(model.parameters, expected_parameters,
decimal=TEST_PRECISION)
expected_probas = np.array([[0.5227226, 0.4772774],
[0.52568993, 0.47431007],
[0.4547091, 0.5452909],
[0.51179222, 0.48820778],
[0.46347576, 0.53652424],
[0.45710098, 0.54289902],
[0.46159657, 0.53840343],
[0.42997978, 0.57002022],
[0.47419724, 0.52580276],
[0.50797852, 0.49202148]])
actual_predict_probas = model.predict_proba(xf)
print(actual_predict_probas)
assert_array_almost_equal(actual_predict_probas, expected_probas,
decimal=TEST_PRECISION)
expected_predictions = np.array([0, 0, 1, 0, 1, 1, 1, 1, 1, 0])
actual_predictions = model.predict(xf)
assert_array_almost_equal(actual_predictions, expected_predictions,
decimal=TEST_PRECISION)
class TestGeneralModel(unittest.TestCase):
def test_build_lattice(self):
n_states = 4 # Because 3 is the max
start_states = [0, 1]
transitions = [(0, 0, (1, 1)),
(0, 1, (0, 1)),
(0, 0, (1, 0)),
(0, 3, lambda i, j, k: (0, 2))]
states_to_classes = {0: 0, 1: 1, 3: 3}
state_machine = GeneralStateMachine(start_states, transitions, states_to_classes)
x = np.zeros((2, 3, 9))
# # ________
# 1. . . # 1 0 - 10 - 31
# # | /_______
# 0. . . # 0 10 -- 1 3
# 0 1 2 # 0 1 2
#
# 1(0, 1), 3(0, 2), 1(1, 1), 1(0, 0) should be pruned because they represent partial alignments.
# Only nodes that are reachable by stepping back from (1, 2) must be included in the lattice.
actual_lattice = state_machine.build_lattice(x)
expected_lattice = np.array([(0, 0, 0, 1, 0, 0, 2 + n_states),
(0, 0, 0, 1, 1, 0, 0 + n_states),
(1, 0, 0, 1, 2, 3, 3 + n_states),
(1, 1, 0, 1, 2, 1, 1 + n_states)])
assert_array_equal(actual_lattice, expected_lattice)
def test_build_lattice_jumps(self):
n_states = 2 # Because 1 is the max
start_states = [0, 1]
transitions = [(0, 0, (1, 1)),
(0, 1, (0, 2)),
(0, 0, (1, 0))]
states_to_classes = {0: 0, 1: 1}
state_machine = GeneralStateMachine(start_states, transitions, states_to_classes)
x = np.zeros((2, 3, 9))
# # ________
# 1. . . # 1 0 . 1
# # | _______
# 0. . . # 0 10 / . 1
# 0 1 2 # 0 1 2
#
# 1(0, 2) should be pruned because it represents a partial alignment.
# Only nodes that are reachable by stepping back from (1, 2) must be included in the lattice.
actual_lattice = state_machine.build_lattice(x)
expected_lattice = np.array([(0, 0, 0, 1, 0, 0, 2 + n_states),
(1, 0, 0, 1, 2, 1, 1 + n_states)])
assert_array_equal(actual_lattice, expected_lattice)
def test_forward_single(self):
start_states = [0, 1]
transitions = [(0, 0, (1, 1)),
(0, 1, (0, 1)),
(0, 0, (1, 0)),
(0, 2, lambda i, j, k: (0, 2))]
states_to_classes = {0: 'a', 1: 'a', 2: 'b'} # Dummy
state_machine = GeneralStateMachine(start_states, transitions, states_to_classes)
parameters = np.array(range(-7, 7), dtype='float64').reshape((7, 2))
# parameters =
# 0([[-7, -6],
# 1 [-5, -4],
# 2 [-3, -2],
# 3 [-1, 0],
# 4 [ 1, 2],
# 5 [ 3, 4],
# 6 [ 5, 6]])
x = np.array([[[0, 1],
[1, 0],
[2, 1]],
[[0, 1],
[1, 0],
[1, 0]]], dtype=np.float64)
y = 'a'
# Expected lattice:
# # ________
# 1. . . # 1 0 __0 - 21
# # | /
# 0. . . # 0 0
# 0 1 2 # 0 1 2
expected_alpha = {
(0, 0, 0): np.exp(-6),
(0, 0, 0, 1, 0, 0, 5): np.exp(-6) * np.exp(4),
(0, 0, 0, 1, 1, 0, 3): np.exp(-6) * np.exp(-1),
(1, 0, 0): np.exp(-6),
import unittest
import numpy as np
from .storage import SequenceStorage as ExperienceReplay, SequenceSampler, BatchSequenceStorage, LambdaSampler, PlusOneSampler, merge_batches
class SequenceStorageTest(unittest.TestCase):
def assertNumpyArrayEqual(self, a1, a2, msg = 'Arrays must be equal'):
if not np.array_equal(a1, a2):
self.fail(msg=f"{a1} != {a2} : " + msg)
def testShouldStoreAll(self):
replay = ExperienceReplay(4, samplers = (SequenceSampler(2),))
replay.insert(1, 0, 0.0, False)
replay.insert(5, 0, 0.0, False)
replay.insert(2, 0, 0.0, False)
replay.insert(4, 0, 0.0, False)
replay.insert(6, 0, 0.0, False)
replay.insert(7, 0, 0.0, False)
self.assertEqual(replay[0][0], 2)
self.assertEqual(replay[1][0], 4)
self.assertEqual(replay[2][0], 6)
self.assertEqual(replay[3][0], 7)
def testNegativeIndex(self):
replay = ExperienceReplay(4, samplers = (SequenceSampler(2),))
replay.insert(1, 0, 0.0, False)
replay.insert(5, 0, 0.0, False)
replay.insert(2, 0, 0.0, False)
replay.insert(4, 0, 0.0, False)
replay.insert(6, 0, 0.0, False)
replay.insert(7, 0, 0.0, False)
self.assertEqual(replay[-4][0], 2)
self.assertEqual(replay[-3][0], 4)
self.assertEqual(replay[-2][0], 6)
self.assertEqual(replay[-1][0], 7)
def testLength(self):
replay = ExperienceReplay(4, samplers = (SequenceSampler(2),))
self.assertEqual(len(replay), 0)
replay.insert(1, 0, 0.0, False)
self.assertEqual(len(replay), 1)
replay.insert(2, 0, 0.0, False)
self.assertEqual(len(replay), 2)
replay.insert(4, 0, 0.0, False)
self.assertEqual(len(replay), 3)
replay.insert(6, 0, 0.0, False)
self.assertEqual(len(replay), 4)
replay.insert(7, 0, 0.0, False)
self.assertEqual(len(replay), 4)
def testSamplerStats(self):
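# LambdaSampler(2, predicate) marks a position as sampleable when the
# predicate holds for its 2-step window; the predicate used throughout these
# tests checks that the most recently stored observation is even.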
replay = ExperienceReplay(4, samplers = (LambdaSampler(2, lambda _, get: get(-1)[0] % 2 == 0),))
replay.insert(1, 0, 0.0, False)
replay.insert(2, 0, 0.0, False)
replay.insert(4, 0, 0.0, False)
replay.insert(6, 0, 0.0, False)
replay.insert(7, 0, 0.0, False)
self.assertEqual(replay.selector_lengths[0], 2)
def testSamplerStatsRemove(self):
replay = ExperienceReplay(4, samplers = (LambdaSampler(2, lambda _, get: get(-1)[0] % 2 == 0),))
replay.insert(6, 0, 0.0, False)
self.assertNumpyArrayEqual(replay.selector_data[:, 0], [False, False, False, False])
replay.insert(2, 0, 0.0, False)
self.assertNumpyArrayEqual(replay.selector_data[:, 0], [False, True, False, False])
replay.insert(4, 0, 0.0, False)
self.assertNumpyArrayEqual(replay.selector_data[:, 0], [False, True, True, False])
replay.insert(6, 0, 0.0, False)
self.assertNumpyArrayEqual(replay.selector_data[:, 0], [False, True, True, True])
replay.insert(7, 0, 0.0, False)
self.assertEqual(replay.selector_lengths[0], 2)
self.assertNumpyArrayEqual(replay.selector_data[:, 0], [False, False, True, True])
def testSamplingWithEpisodeEnd(self):
        np.random.seed(1)
replay = ExperienceReplay(4, samplers = (LambdaSampler(2, lambda _, get: get(-1)[0] % 2 == 0),))
replay.insert(6, 0, 0.0, False)
replay.insert(2, 0, 0.0, True)
replay.insert(4, 0, 0.0, False)
replay.insert(6, 0, 0.0, True)
replay.insert(7, 0, 0.0, False)
wasSampled = set()
wasFirst = set()
for _ in range(100):
batch = replay.sample(0)
wasSampled.add(batch[0][1])
wasFirst.add(batch[0][0])
self.assertEqual(batch[0].shape[0], 2)
self.assertSetEqual(wasFirst, set([4]))
self.assertSetEqual(wasSampled, set([6]))
def testResampling(self):
        np.random.seed(1)
replay = ExperienceReplay(4, samplers = (LambdaSampler(2, lambda _, get: get(-1)[0] % 2 == 0),))
replay.insert(6, 0, 0.0, False)
replay.insert(2, 0, 0.0, False)
replay.insert(4, 0, 0.0, False)
replay.insert(6, 0, 0.0, False)
replay.insert(7, 0, 0.0, False)
toBeSampled = set([4, 6])
wasSampled = set()
wasFirst = set()
for _ in range(100):
batch = replay.sample(0)
wasSampled.add(batch[0][1])
wasFirst.add(batch[0][0])
self.assertEqual(batch[0].shape[0], 2)
self.assertEqual(len(toBeSampled - wasSampled), 0, 'something was not sampled')
self.assertEqual(len(wasSampled), len(toBeSampled), 'something was not supposed to be sampled')
self.assertSetEqual(wasFirst, set([2,4]))
def testPlusOneSampling(self):
        np.random.seed(1)
replay = ExperienceReplay(4, samplers = (PlusOneSampler(2),))
replay.insert(6, 0, 0.0, False)
replay.insert(2, 0, 0.0, True)
replay.insert(4, 0, 0.0, False)
replay.insert(6, 0, 0.0, True)
replay.insert(7, 0, 0.0, False)
wasSampled = set()
wasFirst = set()
for _ in range(100):
batch = replay.sample(0)
wasSampled.add(batch[0][-1])
wasFirst.add(batch[0][0])
self.assertEqual(batch[0].shape[0], 3)
self.assertSetEqual(wasFirst, set([4]))
self.assertSetEqual(wasSampled, set([7]))
def testPlusOneResampling(self):
        np.random.seed(1)
replay = ExperienceReplay(4, samplers = (PlusOneSampler(2),))
replay.insert(6, 0, 0.0, False)
replay.insert(2, 0, 0.0, True)
replay.insert(4, 0, 0.0, False)
replay.insert(6, 0, 0.0, False)
replay.insert(7, 0, 0.0, False)
wasSampled = set()
wasFirst = set()
for _ in range(100):
batch = replay.sample(0)
wasSampled.add(batch[0][-1])
wasFirst.add(batch[0][0])
self.assertEqual(batch[0].shape[0], 3)
self.assertSetEqual(wasFirst, set([4]))
self.assertSetEqual(wasSampled, set([7]))
def testPlusOneShortMemory(self):
        np.random.seed(1)
replay = ExperienceReplay(4, samplers = (PlusOneSampler(2),))
replay.insert(1, 0, 0.0, False)
replay.insert(2, 0, 0.0, True)
for _ in range(100):
batch = replay.sample(0)
self.assertIsNone(batch)
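# ---------------------------------------------------------------------------
# Minimal illustrative sketch (not part of the tested module): the samplers
# above maintain, per sampler, a boolean mask of buffer positions where a
# valid sequence ends. The helper below reproduces that bookkeeping under
# simplified assumptions (ring buffer, one predicate over the newest item);
# the real SequenceStorage / LambdaSampler implementation may differ.
def _sampler_mask_sketch(values, capacity=4, seq_len=2,
                         predicate=lambda v: v % 2 == 0):
    buffer = [None] * capacity
    valid = np.zeros(capacity, dtype=bool)  # True where a sequence may end
    write = 0
    count = 0
    for v in values:
        buffer[write] = v
        # Overwriting a slot breaks every stored sequence that spanned it:
        # the slot itself and the seq_len - 1 positions ending just after it.
        for k in range(seq_len):
            valid[(write + k) % capacity] = False
        count = min(count + 1, capacity)
        if count >= seq_len and predicate(v):
            valid[write] = True  # the new item may end a fresh valid sequence
        write = (write + 1) % capacity
    return valid
# e.g. _sampler_mask_sketch([6, 2, 4, 6, 7]) -> [False, False, True, True],
# matching the selector_data checked in testSamplerStatsRemove above.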
class BatchSequenceStorageTest(unittest.TestCase):
def testStore(self):
replay = BatchSequenceStorage(2, 4, samplers = [SequenceSampler(2)])
replay.insert(np.array([1,2]), np.array([1,1]), np.array([1.0, 1.0]), np.array([False, False]))
replay.insert(np.array([3,4]), np.array([1,1]), np.array([1.0, 1.0]), np.array([False, False]))
replay.insert(np.array([5,6]), np.array([1,1]), np.array([1.0, 1.0]), np.array([False, False]))
replay.insert(np.array([7,8]), np.array([1,1]), np.array([1.0, 1.0]), np.array([False, False]))
def testSampleShape(self):
replay = BatchSequenceStorage(2, 4, samplers = [SequenceSampler(2)])
        replay.insert(np.array([1,2]), np.array([1,1]), np.array([1.0, 1.0]), np.array([False, False]))
#!/usr/bin/python
# -*- coding: utf-8 -*-
# # PyKOALA: KOALA data processing and analysis
# by <NAME> and <NAME>
# Extra work by <NAME> (MQ PACE student)
# Plus Taylah and Matt (sky subtraction)
from __future__ import absolute_import, division, print_function
from past.utils import old_div
version = "Version 0.72 - 13th February 2020"
import copy
import os.path as pth
import sys
from astropy.convolution import Gaussian2DKernel, interpolate_replace_nans
from astropy.io import fits
from astropy.wcs import WCS
import matplotlib.pyplot as plt
import matplotlib.colors as colors
import numpy as np
from scipy import interpolate
from scipy.ndimage.interpolation import shift
import scipy.signal as sig
from .constants import C, PARSEC as pc
from .utils.cube_alignment import offset_between_cubes, compare_cubes, align_n_cubes
from .utils.flux import search_peaks, fluxes, dfluxes, substract_given_gaussian
from .utils.io import read_table, save_rss_fits, save_fits_file
from .utils.moffat import fit_Moffat
from .utils.plots import (
plot_redshift_peaks, plot_weights_for_getting_smooth_spectrum,
plot_correction_in_fibre_p_fibre, plot_suspicious_fibres_graph, plot_skyline_5578,
plot_offset_between_cubes, plot_response, plot_telluric_correction, plot_plot
)
from .utils.sky_spectrum import scale_sky_spectrum, median_filter
from .utils.spectrum_tools import rebin_spec_shift, smooth_spectrum
from .utils.utils import (
FitsExt, FitsFibresIFUIndex, coord_range, median_absolute_deviation,
)
from ._version import get_versions
__version__ = get_versions()["version"]
del get_versions
# -----------------------------------------------------------------------------
# Define constants
# -----------------------------------------------------------------------------
DATA_PATH = pth.join(pth.dirname(__file__), "data")
# -----------------------------------------------------------------------------
# Define COLOUR scales
# -----------------------------------------------------------------------------
fuego_color_map = colors.LinearSegmentedColormap.from_list(
"fuego",
(
(0.25, 0, 0),
(0.5, 0, 0),
(1, 0, 0),
(1, 0.5, 0),
(1, 0.75, 0),
(1, 1, 0),
(1, 1, 1),
),
N=256,
gamma=1.0,
)
fuego_color_map.set_bad("lightgray")
plt.register_cmap(cmap=fuego_color_map)
projo = [0.25, 0.5, 1, 1.0, 1.00, 1, 1]
pverde = [0.00, 0.0, 0, 0.5, 0.75, 1, 1]
pazul = [0.00, 0.0, 0, 0.0, 0.00, 0, 1]
# -----------------------------------------------------------------------------
# RSS CLASS
# -----------------------------------------------------------------------------
class RSS(object):
"""
Collection of row-stacked spectra (RSS).
Attributes
----------
wavelength: np.array(float)
Wavelength, in Angstroms.
intensity: np.array(float)
Intensity :math:`I_\lambda` per unit wavelength.
variance: np.array(float)
Variance :math:`\sigma^2_\lambda` per unit wavelength
(note the square in the definition of the variance).
"""
# -----------------------------------------------------------------------------
def __init__(self):
self.description = "Undefined row-stacked spectra (RSS)"
self.n_spectra = 0
self.n_wave = 0
self.wavelength = np.zeros((0))
self.intensity = np.zeros((0, 0))
self.intensity_corrected = self.intensity
self.variance = np.zeros_like(self.intensity)
self.RA_centre_deg = 0.0
self.DEC_centre_deg = 0.0
self.offset_RA_arcsec = np.zeros((0))
self.offset_DEC_arcsec = np.zeros_like(self.offset_RA_arcsec)
self.ALIGNED_RA_centre_deg = 0.0 # Added by ANGEL, 6 Sep
self.ALIGNED_DEC_centre_deg = 0.0 # Added by ANGEL, 6 Sep
self.relative_throughput = np.ones((0)) # Added by ANGEL, 16 Sep
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
def compute_integrated_fibre(
self,
list_spectra="all",
valid_wave_min=0,
valid_wave_max=0,
min_value=0.1,
plot=False,
title=" - Integrated values",
warnings=True,
text="...",
correct_negative_sky=False,
):
"""
Compute the integrated flux of a fibre in a particular range, valid_wave_min to valid_wave_max.
Parameters
----------
        list_spectra: list or str (default "all")
            list with the fibre indices for computing the integrated value;
            if "all", all fibres are used
        valid_wave_min, valid_wave_max : float
            the integrated flux value will be computed in the range [valid_wave_min, valid_wave_max]
            (default = 0; if both are 0 we use [self.valid_wave_min, self.valid_wave_max])
        min_value: float (default 0.1)
            For fibres with negative integrated flux, the integrated value is set to min_value
            (only when correct_negative_sky = False)
        plot : Boolean (default = False)
            If True, plot the map of integrated values
        title : string
            Title for the plot
        text: string
            A bit of extra text for the progress message
        warnings : Boolean (default = True)
            Write warnings, e.g. when the integrated flux is negative
        correct_negative_sky : Boolean (default = False)
            Corrects negative values making 0 the integrated flux of the lowest fibre
Example
----------
integrated_fibre_6500_6600 = star1r.compute_integrated_fibre(valid_wave_min=6500, valid_wave_max=6600,
title = " - [6500,6600]", plot = True)
"""
print("\n Computing integrated fibre values {}".format(text))
if list_spectra == "all":
list_spectra = list(range(self.n_spectra))
if valid_wave_min == 0:
valid_wave_min = self.valid_wave_min
if valid_wave_max == 0:
valid_wave_max = self.valid_wave_max
self.integrated_fibre = np.zeros(self.n_spectra)
region = np.where(
(self.wavelength > valid_wave_min) & (self.wavelength < valid_wave_max)
)
waves_in_region = len(region[0])
n_negative_fibres = 0
negative_fibres = []
for i in range(self.n_spectra):
self.integrated_fibre[i] = np.nansum(self.intensity_corrected[i, region])
if self.integrated_fibre[i] < 0:
if warnings:
print(
" WARNING: The integrated flux in fibre {:4} is negative, flux/wave = {:10.2f}, (probably sky), CHECK !".format(
i, self.integrated_fibre[i]/waves_in_region
))
n_negative_fibres = n_negative_fibres + 1
# self.integrated_fibre[i] = min_value
negative_fibres.append(i)
if len(negative_fibres) != 0:
print("\n> Number of fibres with integrated flux < 0 : {:4}, that is the {:5.2f} % of the total !".format(
n_negative_fibres, n_negative_fibres * 100.0 / self.n_spectra
))
negative_fibres_sorted = []
integrated_intensity_sorted = np.argsort(
self.integrated_fibre/waves_in_region
)
for fibre_ in range(n_negative_fibres):
negative_fibres_sorted.append(integrated_intensity_sorted[fibre_])
# print "\n> Checking results using",n_negative_fibres,"fibres with the lowest integrated intensity"
# print " which are :",negative_fibres_sorted
if correct_negative_sky:
min_sky_value = self.integrated_fibre[negative_fibres_sorted[0]]
min_sky_value_per_wave = min_sky_value/waves_in_region
print(
"\n> Correcting negative values making 0 the integrated flux of the lowest fibre, which is {:4} with {:10.2f} counts/wave".format(
negative_fibres_sorted[0], min_sky_value_per_wave
))
# print self.integrated_fibre[negative_fibres_sorted[0]]
self.integrated_fibre = self.integrated_fibre - min_sky_value
for i in range(self.n_spectra):
self.intensity_corrected[i] = (
self.intensity_corrected[i] - min_sky_value_per_wave
)
else:
print(
"\n> Adopting integrated flux = {:5.2f} for all fibres with negative integrated flux (for presentation purposes)".format(
min_value
))
for i in negative_fibres_sorted:
self.integrated_fibre[i] = min_value
# for i in range(self.n_spectra):
# if self.integrated_fibre[i] < 0:
# if warnings: print " WARNING: The integrated flux in fibre {:4} STILL is negative, flux/wave = {:10.2f}, (probably sky), CHECK !".format(i,self.integrated_fibre[i]/waves_in_region)
if plot:
# print"\n Plotting map with integrated values:"
self.RSS_map(
self.integrated_fibre,
norm=colors.PowerNorm(gamma=1.0 / 4.0),
title=title,
)
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
def identify_el(
self,
high_fibres=10,
brightest_line="Ha",
cut=1.5,
fibre=0,
broad=1.0,
verbose=True,
plot=True,
):
"""
Identify fibres with highest intensity (high_fibres=10).
Add all in a single spectrum.
Identify emission features.
These emission features should be those expected in all the cube!
        Also, choosing fibre=number, it identifies the emission lines in that particular fibre.
        Parameters
        ----------
        high_fibres: integer (default 10)
            use the high_fibres highest-intensity fibres for the identification
        brightest_line : string (default "Ha")
            name of the emission line that is expected to be the brightest in the integrated spectrum
        cut: float (default 1.5)
            The peak has to have a flux ratio higher than cut to be considered an emission line
        fibre: integer (default 0)
            If fibre is given, it identifies emission lines in the given fibre
        broad: float (default 1.0)
            Expected broadening (FWHM) of the emission lines
        verbose : boolean (default = True)
            Write results
        plot : boolean (default = True)
            Plot results
Example
----------
self.el=self.identify_el(high_fibres=10, brightest_line = "Ha",
cut=2., verbose=True, plot=True, fibre=0, broad=1.5)
"""
if fibre == 0:
integrated_intensity_sorted = np.argsort(self.integrated_fibre)
region = []
for fibre in range(high_fibres):
region.append(integrated_intensity_sorted[-1 - fibre])
if verbose:
print("\n> Identifying emission lines using the {} fibres with the highest integrated intensity".format(high_fibres))
print(" which are : {}".format(region))
combined_high_spectrum = np.nansum(self.intensity_corrected[region], axis=0)
else:
combined_high_spectrum = self.intensity_corrected[fibre]
if verbose:
print("\n> Identifying emission lines in fibre {}".format(fibre))
# Search peaks
peaks, peaks_name, peaks_rest, continuum_limits = search_peaks(
self.wavelength,
combined_high_spectrum,
plot=plot,
cut=cut,
brightest_line=brightest_line,
verbose=False,
)
p_peaks_l = []
p_peaks_fwhm = []
        # Do Gaussian fit and provide center & FWHM (flux could also be included, but not yet since no absolute flux calibration has been done)
if verbose:
print("\n Emission lines identified:")
for eline in range(len(peaks)):
lowlow = continuum_limits[0][eline]
lowhigh = continuum_limits[1][eline]
highlow = continuum_limits[2][eline]
highhigh = continuum_limits[3][eline]
resultado = fluxes(
self.wavelength,
combined_high_spectrum,
peaks[eline],
verbose=False,
broad=broad,
lowlow=lowlow,
lowhigh=lowhigh,
highlow=highlow,
highhigh=highhigh,
plot=plot,
fcal=False,
)
p_peaks_l.append(resultado[1])
p_peaks_fwhm.append(resultado[5])
if verbose:
print(" {:3}. {:7s} {:8.2f} centered at {:8.2f} and FWHM = {:6.2f}".format(
eline + 1,
peaks_name[eline],
peaks_rest[eline],
p_peaks_l[eline],
p_peaks_fwhm[eline],
))
return [peaks_name, peaks_rest, p_peaks_l, p_peaks_fwhm]
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
def correct_high_cosmics_and_defects(
self,
step=50,
correct_high_cosmics=False,
        fibre_p=0,  # if fibre_p = fibre, plots the corrections in that fibre
        remove_5578=False,
clip_high=100,
warnings=False,
plot=True,
plot_suspicious_fibres=True,
verbose=False,
fig_size=12,
):
"""
Task for correcting high cosmics and CCD defects using median values of nearby pixels.
2dFdr corrects for (the majority) of the cosmic rays, usually correct_high_cosmics = False.
ANGEL COMMENT: Check, probably can be improved using MATT median running + plotting outside
Parameters
----------
        correct_high_cosmics: boolean (default = False)
            If False, correct ONLY CCD defects
        fibre_p: integer (default = 0)
            Plots the corrections in fibre fibre_p
        remove_5578: boolean (default = False)
            Removes skyline 5578 (blue spectrum) using Gaussian fit
            AND CHECK: This also MODIFIES the throughput correction correcting for flux_5578_medfilt /median_flux_5578_medfilt
        step: integer (default = 50)
            Number of points for calculating median value
        clip_high : float (default = 100)
            Minimum value of flux/median in a pixel to be considered as a cosmic
            if s[wave] > clip_high*fit_median[wave] -> IT IS A COSMIC
        verbose: boolean (default = False)
            Write results
        warnings: boolean (default = False)
            Write warnings
        plot: boolean (default = True)
            Plot results
        plot_suspicious_fibres: boolean (default = True)
            Plots fibre(s) that could have a cosmic left (but it could be OK)
            IF self.integrated_fibre[fibre]/median_running[fibre] > max_value -> SUSPICIOUS FIBRE
Example
----------
self.correct_high_cosmics_and_defects(correct_high_cosmics=False, step=40, remove_5578 = True,
clip_high=120, plot_suspicious_fibres=True, warnings=True, verbose=False, plot=True)
"""
print("\n> Correcting for high cosmics and CCD defects...")
wave_min = self.valid_wave_min # CHECK ALL OF THIS...
wave_max = self.valid_wave_max
wlm = self.wavelength
if correct_high_cosmics == False:
print(" Only CCD defects (nan and negative values) are considered.")
else:
print(" Using clip_high = {} for high cosmics".format(clip_high))
print(" IMPORTANT: Be sure that any emission or sky line is fainter than clip_high/continuum !! ")
flux_5578 = [] # For correcting sky line 5578 if requested
if wave_min < 5578 and remove_5578:
print(" Sky line 5578 will be removed using a Gaussian fit...")
integrated_fibre_uncorrected = self.integrated_fibre
print(" ")
output_every_few = np.sqrt(self.n_spectra) + 1
next_output = -1
max_ratio_list = []
for fibre in range(self.n_spectra):
if fibre > next_output:
sys.stdout.write("\b" * 30)
sys.stdout.write(
" Cleaning... {:5.2f}% completed".format(
fibre * 100.0 / self.n_spectra
)
)
sys.stdout.flush()
next_output = fibre + output_every_few
s = self.intensity_corrected[fibre]
running_wave = []
running_step_median = []
            cuts = self.n_wave // step  # integer number of median windows across the spectrum
for cut in range(cuts):
if cut == 0:
next_wave = wave_min
else:
                    next_wave = (wlm[cut * step] + wlm[(cut + 1) * step]) / 2
if next_wave < wave_max:
running_wave.append(next_wave)
# print("SEARCHFORME1", step, running_wave[cut])
region = np.where(
                        (wlm > running_wave[cut] - step // 2)  # step/2 need not be an integer,
                        & (wlm < running_wave[cut] + step // 2)  # but integer windows keep the cuts uniform
)
# print('SEARCHFORME3', region)
running_step_median.append(
np.nanmedian(self.intensity_corrected[fibre, region])
)
running_wave.append(wave_max)
region = np.where((wlm > wave_max - step) & (wlm < wave_max))
running_step_median.append(
np.nanmedian(self.intensity_corrected[fibre, region])
)
for i in range(len(running_step_median)):
                if np.isnan(running_step_median[i]):
                    if i < 10:
                        running_step_median[i] = np.nanmedian(running_step_median[0:9])
                    else:
                        running_step_median[i] = np.nanmedian(
                            running_step_median[-9:-1]
                        )
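            # np.polyfit returns coefficients from the highest to the lowest degree,
            # hence the a7x..a0x unpacking for the 7th-degree continuum fit below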
a7x, a6x, a5x, a4x, a3x, a2x, a1x, a0x = np.polyfit(
running_wave, running_step_median, 7
)
fit_median = (
a0x
+ a1x * wlm
+ a2x * wlm ** 2
+ a3x * wlm ** 3
+ a4x * wlm ** 4
+ a5x * wlm ** 5
+ a6x * wlm ** 6
+ a7x * wlm ** 7
)
if fibre == fibre_p:
espectro_old = copy.copy(self.intensity_corrected[fibre, :])
espectro_fit_median = fit_median
for wave in range(self.n_wave): # (1,self.n_wave-3):
                if s[wave] < 0:
                    s[wave] = fit_median[wave]  # Replace negative values with the median fit
                if np.isnan(s[wave]):
                    s[wave] = fit_median[wave]  # Replace nan with the median fit
if (
correct_high_cosmics and fit_median[wave] > 0
): # NEW 15 Feb 2019, v7.1 2dFdr takes well cosmic rays
if s[wave] > clip_high * fit_median[wave]:
if verbose:
print(" "
"CLIPPING HIGH = {} in fibre {} w = {} value= {} v/median= {}".format(clip_high, fibre, wlm[wave], s[wave], s[wave]/fit_median[wave])) # " median=",fit_median[wave]
s[wave] = fit_median[wave]
if fibre == fibre_p:
espectro_new = copy.copy(s)
max_ratio_list.append(np.nanmax(s/fit_median))
self.intensity_corrected[fibre, :] = s
# Removing Skyline 5578 using Gaussian fit if requested
if wave_min < 5578 and remove_5578:
resultado = fluxes(
wlm, s, 5578, plot=False, verbose=False
) # fmin=-5.0E-17, fmax=2.0E-16,
# resultado = [rms_cont, fit[0], fit_error[0], gaussian_flux, gaussian_flux_error, fwhm, fwhm_error, flux, flux_error, ew, ew_error, spectrum ]
self.intensity_corrected[fibre] = resultado[11]
flux_5578.append(resultado[3])
sys.stdout.write("\b" * 30)
sys.stdout.write(" Cleaning... 100.00 completed")
sys.stdout.flush()
max_ratio = np.nanmax(max_ratio_list)
print("\n Maximum value found of flux/continuum = {}".format(max_ratio))
if correct_high_cosmics:
print(" Recommended value for clip_high = {} , here we used {}".format(int(max_ratio + 1), clip_high))
# Plot correction in fibre p_fibre
if fibre_p > 0:
plot_correction_in_fibre_p_fibre(fig_size,
wlm,
espectro_old,
espectro_fit_median,
espectro_new,
fibre_p,
clip_high)
# print" "
if correct_high_cosmics == False:
text = "for spectra corrected for defects..."
title = " - Throughput + CCD defects corrected"
else:
text = "for spectra corrected for high cosmics and defects..."
title = " - Throughput + high-C & D corrected"
self.compute_integrated_fibre(
valid_wave_min=wave_min,
valid_wave_max=wave_max,
text=text,
plot=plot,
title=title,
)
if plot:
print(" Plotting integrated fibre values before and after correcting for high cosmics and CCD defects:\n")
plt.figure(figsize=(fig_size, fig_size / 2.5))
plt.plot(integrated_fibre_uncorrected, "r", label="Uncorrected", alpha=0.5)
plt.ylabel("Integrated Flux")
plt.xlabel("Fibre")
plt.ylim(
[np.nanmin(self.integrated_fibre), np.nanmax(self.integrated_fibre)]
)
plt.title(self.description)
# Check if integrated value is high
median_running = []
step_f = 10
max_value = 2.0 # For stars this is not accurate, as i/m might be between 5 and 100 in the fibres with the star
skip = 0
suspicious_fibres = []
for fibre in range(self.n_spectra):
if fibre < step_f:
median_value = np.nanmedian(
                        self.integrated_fibre[0:step_f]
)
skip = 1
if fibre > self.n_spectra - step_f:
median_value = np.nanmedian(
                        self.integrated_fibre[-1 - step_f: -1]
)
skip = 1
if skip == 0:
median_value = np.nanmedian(
self.integrated_fibre[
                            fibre - step_f // 2: fibre + step_f // 2
]
)
median_running.append(median_value)
if self.integrated_fibre[fibre]/median_running[fibre] > max_value:
print(" Fibre {} has a integrated/median ratio of {} -> Might be a cosmic left!".format(fibre, self.integrated_fibre[fibre]/median_running[fibre]))
                    label = str(fibre)
plt.axvline(x=fibre, color="k", linestyle="--")
plt.text(fibre, self.integrated_fibre[fibre] / 2.0, label)
suspicious_fibres.append(fibre)
skip = 0
plt.plot(self.integrated_fibre, label="Corrected", alpha=0.6)
plt.plot(median_running, "k", label="Median", alpha=0.6)
plt.legend(frameon=False, loc=1, ncol=3)
plt.minorticks_on()
#plt.show()
#plt.close()
            if plot_suspicious_fibres and len(suspicious_fibres) > 0:
# Plotting suspicious fibres..
figures = plot_suspicious_fibres_graph(
self,
suspicious_fibres,
fig_size,
wave_min,
wave_max,
intensity_corrected_fiber=self.intensity_corrected)
if remove_5578 and wave_min < 5578:
print(" Skyline 5578 has been removed. Checking throughput correction...")
            flux_5578_medfilt = sig.medfilt(flux_5578, 5)
median_flux_5578_medfilt = np.nanmedian(flux_5578_medfilt)
extra_throughput_correction = flux_5578_medfilt/median_flux_5578_medfilt
# plt.plot(extra_throughput_correction)
# plt.show()
# plt.close()
if plot:
fig = plot_skyline_5578(fig_size, flux_5578, flux_5578_medfilt)
print(" Variations in throughput between {} and {} ".format(
np.nanmin(extra_throughput_correction), np.nanmax(extra_throughput_correction)
))
print(" Applying this extra throughtput correction to all fibres...")
for i in range(self.n_spectra):
self.intensity_corrected[i, :] = (
self.intensity_corrected[i, :]/extra_throughput_correction[i]
)
self.relative_throughput = (
self.relative_throughput * extra_throughput_correction
)
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
def clean_sky_residuals(
self,
extra_w=1.3,
step=25,
dclip=3.0,
wave_min=0,
wave_max=0,
verbose=False,
plot=False,
fig_size=12,
fibre=0,
):
"""
        This task HAS TO BE USED WITH EXTREME CARE
        as it has not been properly tested!!!
        It CAN DELETE REAL (faint) ABSORPTION/EMISSION features in spectra!!!
        Use the "1dfit" option for getting a better sky subtraction.
        ANGEL is keeping this here just in case it is eventually useful...
        Parameters
        ----------
        extra_w : float (default = 1.3)
            Factor applied to the FWHM to widen the excluded window around each emission line
        step : integer (default = 25)
            Number of points used for the smoothed median spectrum
        dclip : float (default = 3.0)
            Clipping threshold, in units of the median absolute deviation from the smoothed spectrum
        wave_min, wave_max : float
            Valid wavelength range (default = 0; if 0, self.valid_wave_min / self.valid_wave_max are used)
        verbose : boolean (default = False)
            Write results
        plot : boolean (default = False)
            Plot results
        fig_size : float (default = 12)
            Size of the figures
        fibre : integer (default = 0)
            If given, only this fibre is checked and corrected (fibre = 0 corrects all fibres)
Returns
-------
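        Example
        ----------
        # Illustrative call (check a single fibre first; fibre = 0 corrects all fibres):
        self.clean_sky_residuals(fibre=100, step=25, dclip=3.0, plot=True)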
"""
# verbose=True
wlm = self.wavelength
if wave_min == 0:
wave_min = self.valid_wave_min
if wave_max == 0:
wave_max = self.valid_wave_max
# Exclude ranges with emission lines if needed
exclude_ranges_low = []
exclude_ranges_high = []
exclude_ranges_low_ = []
exclude_ranges_high_ = []
if self.el[1][0] != 0:
# print " Emission lines identified in the combined spectrum:"
for el in range(len(self.el[0])):
# print " {:3}. - {:7s} {:8.2f} centered at {:8.2f} and FWHM = {:6.2f}".format(el+1,self.el[0][el],self.el[1][el],self.el[2][el],self.el[3][el])
if (
self.el[0][el] == "Ha" or self.el[1][el] == 6583.41
): # Extra extend for Ha and [N II] 6583
extra = extra_w * 1.6
else:
extra = extra_w
exclude_ranges_low_.append(
self.el[2][el] - self.el[3][el] * extra
) # center-1.3*FWHM/2
exclude_ranges_high_.append(
self.el[2][el] + self.el[3][el] * extra
) # center+1.3*FWHM/2
# print self.el[0][el],self.el[1][el],self.el[2][el],self.el[3][el],exclude_ranges_low[el],exclude_ranges_high[el],extra
# Check overlapping ranges
skip_next = 0
for i in range(len(exclude_ranges_low_) - 1):
if skip_next == 0:
if exclude_ranges_high_[i] > exclude_ranges_low_[i + 1]:
# Ranges overlap, now check if next range also overlaps
if i + 2 < len(exclude_ranges_low_):
if exclude_ranges_high_[i + 1] > exclude_ranges_low_[i + 2]:
exclude_ranges_low.append(exclude_ranges_low_[i])
exclude_ranges_high.append(exclude_ranges_high_[i + 2])
skip_next = 2
if verbose:
print("Double overlap {} {}".format(exclude_ranges_low[-1], exclude_ranges_high[-1]))
else:
exclude_ranges_low.append(exclude_ranges_low_[i])
exclude_ranges_high.append(exclude_ranges_high_[i + 1])
skip_next = 1
if verbose:
print("Overlap {} {}".format(exclude_ranges_low[-1], exclude_ranges_high[-1]))
else:
exclude_ranges_low.append(exclude_ranges_low_[i])
exclude_ranges_high.append(exclude_ranges_high_[i])
if verbose:
print("Overlap {} {}".format(exclude_ranges_low[-1], exclude_ranges_high[-1]))
else:
if skip_next == 1:
skip_next = 0
if skip_next == 2:
skip_next = 1
if verbose:
print(exclude_ranges_low_[i], exclude_ranges_high_[i], skip_next)
if skip_next == 0:
exclude_ranges_low.append(exclude_ranges_low_[-1])
exclude_ranges_high.append(exclude_ranges_high_[-1])
if verbose:
print(exclude_ranges_low_[-1], exclude_ranges_high_[-1], skip_next)
# print "\n> Cleaning sky residuals in range [",wave_min,",",wave_max,"] avoiding emission lines... "
print("\n> Cleaning sky residuals avoiding emission lines... ")
if verbose:
print(" Excluded ranges using emission line parameters:")
for i in range(len(exclude_ranges_low_)):
print(exclude_ranges_low_[i], exclude_ranges_high_[i])
print(" Excluded ranges considering overlaps: ")
for i in range(len(exclude_ranges_low)):
print(exclude_ranges_low[i], exclude_ranges_high[i])
print(" ")
else:
exclude_ranges_low.append(20000.0)
exclude_ranges_high.append(30000.0)
print("\n> Cleaning sky residuals...")
say_status = 0
if fibre != 0:
f_i = fibre
f_f = fibre + 1
print(" Checking fibre {} (only this fibre is corrected, use fibre = 0 for all)...".format(fibre))
plot = True
else:
f_i = 0
f_f = self.n_spectra
for fibre in range(f_i, f_f): # (self.n_spectra):
if fibre == say_status:
print(" Checking fibre {} ...".format(fibre))
say_status = say_status + 100
s = self.intensity_corrected[fibre]
fit_median = smooth_spectrum(
wlm,
s,
step=step,
wave_min=wave_min,
wave_max=wave_max,
weight_fit_median=1.0,
plot=False,
)
old = []
if plot:
for i in range(len(s)):
old.append(s[i])
disp = s - fit_median
dispersion = np.nanmedian(np.abs(disp))
rango = 0
imprimir = 1
for i in range(len(wlm) - 1):
            # if wlm[i] > wave_min and wlm[i] < wave_max :  # CLEAN ONLY IN VALID WAVELENGTHS
if (
wlm[i] >= exclude_ranges_low[rango]
and wlm[i] <= exclude_ranges_high[rango]
):
if verbose == True and imprimir == 1:
print(" Excluding range [ {} , {} ] as it has an emission line".format(
exclude_ranges_low[rango], exclude_ranges_high[rango]))
if imprimir == 1:
imprimir = 0
# print " Checking ", wlm[i]," NOT CORRECTED ",s[i], s[i]-fit_median[i]
else:
                    if np.isnan(s[i]):
                        s[i] = fit_median[i]  # Replace nan with the smoothed median value
if (
disp[i] > dispersion * dclip
and disp[i + 1] < -dispersion * dclip
):
s[i] = fit_median[i]
                        s[i + 1] = fit_median[i + 1]  # P-Cygni-like structures
if verbose:
print(" Found P-Cygni-like feature in {}".format(wlm[i]))
if disp[i] > dispersion * dclip or disp[i] < -dispersion * dclip:
s[i] = fit_median[i]
if verbose:
print(" Clipping feature in {}".format(wlm[i]))
if wlm[i] > exclude_ranges_high[rango] and imprimir == 0:
if verbose:
print(" Checked {} End range {} {} {}".format(
wlm[i], rango,
exclude_ranges_low[rango],
exclude_ranges_high[rango]
)
)
rango = rango + 1
imprimir = 1
if rango == len(exclude_ranges_low):
rango = len(exclude_ranges_low) - 1
# print " Checking ", wlm[i]," CORRECTED IF NEEDED",s[i], s[i]-fit_median[i]
# if plot:
# for i in range(6):
# plt.figure(figsize=(fig_size, fig_size/2.5))
# plt.plot(wlm,old-fit_median, "r-", alpha=0.4)
# plt.plot(wlm,fit_median-fit_median,"g-", alpha=0.5)
# plt.axhline(y=dispersion*dclip, color="g", alpha=0.5)
# plt.axhline(y=-dispersion*dclip, color="g", alpha=0.5)
# plt.plot(wlm,s-fit_median, "b-", alpha=0.7)
#
# for exclude in range(len(exclude_ranges_low)):
# plt.axvspan(exclude_ranges_low[exclude], exclude_ranges_high[exclude], facecolor='g', alpha=0.15,zorder=3)
#
# plt.ylim(-100,200)
# if i == 0: plt.xlim(wlm[0]-10,wlm[-1]+10)
# if i == 1: plt.xlim(wlm[0],6500) # THIS IS FOR 1000R
# if i == 2: plt.xlim(6500,6700)
# if i == 3: plt.xlim(6700,7000)
# if i == 4: plt.xlim(7000,7300)
# if i == 5: plt.xlim(7300,wlm[-1])
# plt.minorticks_on()
# plt.xlabel("Wavelength [$\AA$]")
# plt.ylabel("Flux / continuum")
# plt.show()
# plt.close()
if plot:
for i in range(6):
plt.figure(figsize=(fig_size, fig_size / 2.5))
plt.plot(wlm, old, "r-", alpha=0.4)
plt.plot(wlm, fit_median, "g-", alpha=0.5)
# plt.axhline(y=dispersion*dclip, color="g", alpha=0.5)
# plt.axhline(y=-dispersion*dclip, color="g", alpha=0.5)
plt.plot(wlm, s, "b-", alpha=0.7)
for exclude in range(len(exclude_ranges_low)):
plt.axvspan(
exclude_ranges_low[exclude],
exclude_ranges_high[exclude],
facecolor="g",
alpha=0.15,
zorder=3,
)
plt.ylim(-300, 300)
if i == 0:
plt.xlim(wlm[0] - 10, wlm[-1] + 10)
if i == 1:
plt.xlim(wlm[0], 6500) # THIS IS FOR 1000R
if i == 2:
plt.xlim(6500, 6700)
if i == 3:
plt.xlim(6700, 7000)
if i == 4:
plt.xlim(7000, 7300)
if i == 5:
plt.xlim(7300, wlm[-1])
plt.minorticks_on()
plt.xlabel("Wavelength [$\AA$]")
plt.ylabel("Flux / continuum")
# plt.show()
# plt.close()
self.intensity_corrected[fibre, :] = s
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
def fit_and_substract_sky_spectrum(
self,
sky,
w=1000,
spectra=1000,
# If rebin == True, it fits all wavelengths to be at the same wavelengths that SKY spectrum...
rebin=False,
brightest_line="Ha",
brightest_line_wavelength=6563.0,
maxima_sigma=3.0,
ymin=-50,
ymax=1000,
wmin=0,
wmax=0,
auto_scale_sky=False,
warnings=False,
verbose=False,
plot=False,
fig_size=12,
fibre=0,
):
"""
        Given a 1D sky spectrum, this task fits the
        sky lines of each spectrum individually and subtracts the sky.
        Needs the observed wavelength (brightest_line_wavelength) of the brightest emission line (brightest_line).
        w is the wavelength array;
        spectra is the 2D array of spectra.
Parameters
----------
        sky : np.array
            1D sky spectrum to fit and subtract
        w : np.array
            wavelength array (default = 1000, meaning self.wavelength is used)
        spectra : np.array
            2D array of spectra (default = 1000, meaning self.intensity_corrected is used)
        rebin : boolean (default = False)
            if True, rebins each spectrum to the wavelengths of the sky spectrum
        brightest_line : string (default "Ha")
            name of the brightest emission line in the object
        brightest_line_wavelength : float (default 6563.)
            observed wavelength of the brightest emission line
        maxima_sigma : float (default 3.0)
            maximum Gaussian sigma accepted for a good sky-line fit
        ymin, ymax, wmin, wmax : float
            flux and wavelength limits for the plots
        auto_scale_sky : boolean (default = False)
            scale the sky spectrum by the median object/sky line-flux ratio before subtraction
        warnings, verbose, plot : boolean
            write warnings / write results / plot results
        fig_size : float (default = 12)
            size of the figures
        fibre : integer (default = 0)
            if given, only this fibre is checked and corrected (fibre = 0 corrects all fibres)
Returns
-------
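        Example
        ----------
        # Illustrative call, assuming self.sky_emission holds the 1D sky spectrum and
        # that the brightest line (here Ha) was measured at 6640 A (hypothetical value):
        self.fit_and_substract_sky_spectrum(self.sky_emission, brightest_line="Ha",
                                            brightest_line_wavelength=6640.0)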
"""
if brightest_line_wavelength == 6563:
print("\n\n> WARNING: This is going to FAIL as the wavelength of the brightest emission line has not been included !!!")
print(" USING brightest_line_wavelength = 6563 as default ...\n\n")
brightest_line_wavelength_rest = 6562.82
if brightest_line == "O3" or brightest_line == "O3b":
brightest_line_wavelength_rest = 5006.84
if brightest_line == "Hb" or brightest_line == "hb":
brightest_line_wavelength_rest = 4861.33
print(" Using {:3} at rest wavelength {:6.2f} identified by the user at {:6.2f} to avoid fitting emission lines...".format(
brightest_line, brightest_line_wavelength_rest, brightest_line_wavelength
))
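        # Estimate the redshift from the observed vs. rest-frame wavelength of the brightest line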
redshift = brightest_line_wavelength/brightest_line_wavelength_rest - 1.0
if w == 1000:
w = self.wavelength
if spectra == 1000:
spectra = copy.deepcopy(self.intensity_corrected)
if wmin == 0:
wmin = w[0]
if wmax == 0:
wmax = w[-1]
# Read file with sky emission lines
sky_lines_file = "sky_lines.dat"
(
sl_center,
sl_name,
sl_fnl,
sl_lowlow,
sl_lowhigh,
sl_highlow,
sl_highhigh,
sl_lmin,
sl_lmax,
) = read_table(sky_lines_file, ["f", "s", "f", "f", "f", "f", "f", "f", "f"])
number_sl = len(sl_center)
# MOST IMPORTANT EMISSION LINES IN RED
# 6300.30 [OI] -0.263 30.0 15.0 20.0 40.0
# 6312.10 [SIII] -0.264 30.0 18.0 5.0 20.0
# 6363.78 [OI] -0.271 20.0 4.0 5.0 30.0
# 6548.03 [NII] -0.296 45.0 15.0 55.0 75.0
# 6562.82 Ha -0.298 50.0 25.0 35.0 60.0
# 6583.41 [NII] -0.300 62.0 42.0 7.0 35.0
# 6678.15 HeI -0.313 20.0 6.0 6.0 20.0
# 6716.47 [SII] -0.318 40.0 15.0 22.0 45.0
# 6730.85 [SII] -0.320 50.0 30.0 7.0 35.0
# 7065.28 HeI -0.364 30.0 7.0 7.0 30.0
# 7135.78 [ArIII] -0.374 25.0 6.0 6.0 25.0
# 7318.39 [OII] -0.398 30.0 6.0 20.0 45.0
# 7329.66 [OII] -0.400 40.0 16.0 10.0 35.0
# 7751.10 [ArIII] -0.455 30.0 15.0 15.0 30.0
# 9068.90 [S-III] -0.594 30.0 15.0 15.0 30.0
el_list_no_z = [
6300.3,
6312.10,
6363.78,
6548.03,
6562.82,
6583.41,
6678.15,
6716.47,
6730.85,
7065.28,
7135.78,
7318.39,
7329.66,
7751.1,
9068.9,
]
el_list = (redshift + 1) * np.array(el_list_no_z)
# [OI] [SIII] [OI] Ha+[NII] HeI [SII] HeI [ArIII] [OII] [ArIII] [SIII]
el_low_list_no_z = [
6296.3,
6308.1,
6359.8,
6544.0,
6674.2,
6712.5,
7061.3,
7131.8,
7314.4,
7747.1,
9063.9,
]
el_high_list_no_z = [
6304.3,
6316.1,
6367.8,
6590.0,
6682.2,
6736.9,
7069.3,
7139.8,
7333.7,
7755.1,
9073.9,
]
el_low_list = (redshift + 1) * np.array(el_low_list_no_z)
el_high_list = (redshift + 1) * np.array(el_high_list_no_z)
# Double Skylines
dsky1 = [
6257.82,
6465.34,
6828.22,
6969.70,
7239.41,
7295.81,
7711.50,
7750.56,
7853.391,
7913.57,
7773.00,
7870.05,
8280.94,
8344.613,
9152.2,
9092.7,
9216.5,
8827.112,
8761.2,
0,
] # 8760.6, 0]#
dsky2 = [
6265.50,
6470.91,
6832.70,
6978.45,
7244.43,
7303.92,
7715.50,
7759.89,
7860.662,
7921.02,
7780.43,
7879.96,
8288.34,
8352.78,
9160.9,
9102.8,
9224.8,
8836.27,
8767.7,
0,
] # 8767.2, 0] #
say_status = 0
# plot=True
# verbose = True
# warnings = True
self.wavelength_offset_per_fibre = []
self.sky_auto_scale = []
if fibre != 0:
f_i = fibre
f_f = fibre + 1
print(" Checking fibre {} (only this fibre is corrected, use fibre = 0 for all)...".format(fibre))
plot = True
verbose = True
warnings = True
else:
f_i = 0
f_f = self.n_spectra
for fibre in range(f_i, f_f): # (self.n_spectra):
if fibre == say_status:
print(" Checking fibre {:4} ... ({:6.2f} % completed) ...".format(
fibre,
fibre * 100.0 / self.n_spectra
)
)
say_status = say_status + 20
# Gaussian fits to the sky spectrum
sl_gaussian_flux = []
sl_gaussian_sigma = []
sl_gauss_center = []
skip_sl_fit = [] # True emission line, False no emission line
j_lines = 0
el_low = el_low_list[j_lines]
el_high = el_high_list[j_lines]
sky_sl_gaussian_fitted = copy.deepcopy(sky)
di = 0
if verbose:
print("\n> Performing Gaussian fitting to sky lines in sky spectrum...")
for i in range(number_sl):
if sl_center[i] > el_high:
while sl_center[i] > el_high:
j_lines = j_lines + 1
if j_lines < len(el_low_list) - 1:
el_low = el_low_list[j_lines]
el_high = el_high_list[j_lines]
# print "Change to range ",el_low,el_high
else:
el_low = w[-1] + 1
el_high = w[-1] + 2
if sl_fnl[i] == 0:
plot_fit = False
else:
plot_fit = True
if sl_center[i] == dsky1[di]:
warnings_ = False
if sl_fnl[i] == 1:
warnings_ = True
if verbose:
print(" Line {} blended with {}".format(sl_center[i], dsky2[di]))
resultado = dfluxes(
w,
sky_sl_gaussian_fitted,
sl_center[i],
dsky2[di],
lowlow=sl_lowlow[i],
lowhigh=sl_lowhigh[i],
highlow=sl_highlow[i],
highhigh=sl_highhigh[i],
lmin=sl_lmin[i],
lmax=sl_lmax[i],
fmin=0,
fmax=0,
broad1=2.1 * 2.355,
broad2=2.1 * 2.355,
plot=plot_fit,
verbose=False,
plot_sus=False,
fcal=False,
warnings=warnings_,
                    )  # broad1/broad2 are FWHM values; for a Gaussian, FWHM = 2.355 * sigma
di = di + 1
else:
resultado = fluxes(
w,
sky_sl_gaussian_fitted,
sl_center[i],
lowlow=sl_lowlow[i],
lowhigh=sl_lowhigh[i],
highlow=sl_highlow[i],
highhigh=sl_highhigh[i],
lmin=sl_lmin[i],
lmax=sl_lmax[i],
fmin=0,
fmax=0,
broad=2.1 * 2.355,
plot=plot_fit,
verbose=False,
plot_sus=False,
fcal=False,
warnings=warnings,
                    )  # broad is the FWHM; for a Gaussian, FWHM = 2.355 * sigma
sl_gaussian_flux.append(resultado[3])
sky_sl_gaussian_fitted = resultado[11]
sl_gauss_center.append(resultado[1])
sl_gaussian_sigma.append(resultado[5] / 2.355)
if el_low < sl_center[i] < el_high:
if verbose:
print(" SKY line {} in EMISSION LINE !".format(sl_center[i]))
skip_sl_fit.append(True)
else:
skip_sl_fit.append(False)
# print " Fitted wavelength for sky line ",sl_center[i]," : ",resultado[1]," ",resultado[5]
if plot_fit:
if verbose:
print(" Fitted wavelength for sky line {} : {} sigma = {}".format(
sl_center[i], sl_gauss_center[i], sl_gaussian_sigma[i])
)
wmin = sl_lmin[i]
wmax = sl_lmax[i]
# Gaussian fit to object spectrum
object_sl_gaussian_flux = []
object_sl_gaussian_sigma = []
ratio_object_sky_sl_gaussian = []
dif_center_obj_sky = []
spec = spectra[fibre]
object_sl_gaussian_fitted = copy.deepcopy(spec)
object_sl_gaussian_center = []
di = 0
if verbose:
print("\n> Performing Gaussian fitting to sky lines in fibre {} of object data...".format(fibre))
for i in range(number_sl):
if sl_fnl[i] == 0:
plot_fit = False
else:
plot_fit = True
if skip_sl_fit[i]:
if verbose:
print(" SKIPPING SKY LINE {} as located within the range of an emission line!".format(
sl_center[i]))
object_sl_gaussian_flux.append(
float("nan")
) # The value of the SKY SPECTRUM
object_sl_gaussian_center.append(float("nan"))
object_sl_gaussian_sigma.append(float("nan"))
dif_center_obj_sky.append(float("nan"))
else:
if sl_center[i] == dsky1[di]:
warnings_ = False
if sl_fnl[i] == 1:
warnings_ = True
if verbose:
print(" Line {} blended with {}".format(sl_center[i], dsky2[di]))
resultado = dfluxes(
w,
object_sl_gaussian_fitted,
sl_center[i],
dsky2[di],
lowlow=sl_lowlow[i],
lowhigh=sl_lowhigh[i],
highlow=sl_highlow[i],
highhigh=sl_highhigh[i],
lmin=sl_lmin[i],
lmax=sl_lmax[i],
fmin=0,
fmax=0,
broad1=sl_gaussian_sigma[i] * 2.355,
broad2=sl_gaussian_sigma[i] * 2.355,
plot=plot_fit,
verbose=False,
plot_sus=False,
fcal=False,
warnings=warnings_,
)
di = di + 1
if (
resultado[3] > 0
and resultado[5] / 2.355 < maxima_sigma
and resultado[13] > 0
and resultado[14] / 2.355 < maxima_sigma
): # and resultado[5] < maxima_sigma: # -100000.: #0:
use_sigma = resultado[5] / 2.355
object_sl_gaussian_flux.append(resultado[3])
object_sl_gaussian_fitted = resultado[11]
object_sl_gaussian_center.append(resultado[1])
object_sl_gaussian_sigma.append(use_sigma)
dif_center_obj_sky.append(
object_sl_gaussian_center[i] - sl_gauss_center[i]
)
else:
if verbose:
print(" Bad fit for {}! ignoring it...".format(sl_center[i]))
object_sl_gaussian_flux.append(float("nan"))
object_sl_gaussian_center.append(float("nan"))
object_sl_gaussian_sigma.append(float("nan"))
dif_center_obj_sky.append(float("nan"))
skip_sl_fit[i] = True # We don't substract this fit
else:
resultado = fluxes(
w,
object_sl_gaussian_fitted,
sl_center[i],
lowlow=sl_lowlow[i],
lowhigh=sl_lowhigh[i],
highlow=sl_highlow[i],
highhigh=sl_highhigh[i],
lmin=sl_lmin[i],
lmax=sl_lmax[i],
fmin=0,
fmax=0,
broad=sl_gaussian_sigma[i] * 2.355,
plot=plot_fit,
verbose=False,
plot_sus=False,
fcal=False,
warnings=warnings,
                        )  # broad is the FWHM; for a Gaussian, FWHM = 2.355 * sigma
# print sl_center[i],sl_gaussian_sigma[i], resultado[5]/2.355, maxima_sigma
if (
resultado[3] > 0 and resultado[5] / 2.355 < maxima_sigma
): # and resultado[5] < maxima_sigma: # -100000.: #0:
object_sl_gaussian_flux.append(resultado[3])
object_sl_gaussian_fitted = resultado[11]
object_sl_gaussian_center.append(resultado[1])
object_sl_gaussian_sigma.append(resultado[5] / 2.355)
dif_center_obj_sky.append(
object_sl_gaussian_center[i] - sl_gauss_center[i]
)
else:
if verbose:
print(" Bad fit for {}! ignoring it...".format(sl_center[i]))
object_sl_gaussian_flux.append(float("nan"))
object_sl_gaussian_center.append(float("nan"))
object_sl_gaussian_sigma.append(float("nan"))
dif_center_obj_sky.append(float("nan"))
skip_sl_fit[i] = True # We don't substract this fit
ratio_object_sky_sl_gaussian.append(
old_div(object_sl_gaussian_flux[i], sl_gaussian_flux[i])
) # TODO: to remove once sky_line_fitting is active and we can do 1Dfit
# Scale sky lines that are located in emission lines or provided negative values in fit
# reference_sl = 1 # Position in the file! Position 1 is sky line 6363.4
# sl_ref_ratio = sl_gaussian_flux/sl_gaussian_flux[reference_sl]
if verbose:
print("\n> Correcting skylines for which we couldn't get a Gaussian fit...\n")
for i in range(number_sl):
                if skip_sl_fit[i]:
                    # Use known center and sigma of the sky line and search for the peak
                    gauss_fix = sl_gaussian_sigma[i]
                    # Check if the centers of previous sky lines show a small wavelength offset
                    small_center_correction = np.nanmedian(dif_center_obj_sky[0:i])
if verbose:
print("- Small correction of center wavelength of sky line {} : {}".format(
sl_center[i], small_center_correction))
object_sl_gaussian_fitted = substract_given_gaussian(
w,
object_sl_gaussian_fitted,
sl_center[i] + small_center_correction,
peak=0,
sigma=gauss_fix,
flux=0,
search_peak=True,
lowlow=sl_lowlow[i],
lowhigh=sl_lowhigh[i],
highlow=sl_highlow[i],
highhigh=sl_highhigh[i],
lmin=sl_lmin[i],
lmax=sl_lmax[i],
plot=False,
verbose=verbose,
)
                    # Subtract the second Gaussian if needed!
for di in range(len(dsky1) - 1):
if sl_center[i] == dsky1[di]:
if verbose:
print(" This was a double sky line, also substracting {} at {}".format(
dsky2[di], np.array(dsky2[di]) + small_center_correction))
object_sl_gaussian_fitted = substract_given_gaussian(
w,
object_sl_gaussian_fitted,
np.array(dsky2[di]) + small_center_correction,
peak=0,
sigma=gauss_fix,
flux=0,
search_peak=True,
lowlow=sl_lowlow[i],
lowhigh=sl_lowhigh[i],
highlow=sl_highlow[i],
highhigh=sl_highhigh[i],
lmin=sl_lmin[i],
lmax=sl_lmax[i],
plot=False,
verbose=verbose,
)
# wmin,wmax = 6100,6500
# ymin,ymax= -100,400
#
# wmin,wmax = 6350,6700
# wmin,wmax = 7100,7700
# wmin,wmax = 7600,8200
# wmin,wmax = 8200,8500
# wmin,wmax = 7350,7500
# wmin,wmax=6100, 8500 #7800, 8000#6820, 6850 #6700,7000 #6300,6450#7500
# wmin,wmax = 8700,9300
# ymax=800
if plot:
plt.figure(figsize=(11, 4))
plt.plot(w, spec, "y", alpha=0.7, label="Object")
plt.plot(
w,
object_sl_gaussian_fitted,
"k",
alpha=0.5,
label="Obj - sky fitted",
)
plt.plot(w, sky_sl_gaussian_fitted, "r", alpha=0.5, label="Sky fitted")
plt.plot(w, spec - sky, "g", alpha=0.5, label="Obj - sky")
plt.plot(
w,
object_sl_gaussian_fitted - sky_sl_gaussian_fitted,
"b",
alpha=0.9,
label="Obj - sky fitted - rest sky",
)
plt.xlim(wmin, wmax)
plt.ylim(ymin, ymax)
ptitle = "Fibre " + np.str(fibre) # +" with rms = "+np.str(rms[i])
plt.title(ptitle)
plt.xlabel("Wavelength [$\AA$]")
plt.ylabel("Flux [counts]")
plt.legend(frameon=True, loc=2, ncol=5)
plt.minorticks_on()
for i in range(len(el_list)):
plt.axvline(x=el_list[i], color="k", linestyle="--", alpha=0.5)
for i in range(number_sl):
if sl_fnl[i] == 1:
plt.axvline(
x=sl_center[i], color="brown", linestyle="-", alpha=1
)
else:
plt.axvline(
x=sl_center[i], color="y", linestyle="--", alpha=0.6
)
for i in range(len(dsky2) - 1):
plt.axvline(x=dsky2[i], color="orange", linestyle="--", alpha=0.6)
# plt.show()
# plt.close()
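            # Median wavelength offset between the object and sky line centers,
            # used below for the optional rebinning of the object spectrum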
offset = np.nanmedian(
np.array(object_sl_gaussian_center) - np.array(sl_gauss_center)
)
if verbose:
# reference_sl = 1 # Position in the file!
# sl_ref_ratio = sl_gaussian_flux/sl_gaussian_flux[reference_sl]
# print "\n n line fsky fspec fspec/fsky l_obj-l_sky fsky/6363.4 sigma_sky sigma_fspec"
# #print "\n n c_object c_sky c_obj-c_sky"
# for i in range(number_sl):
# if skip_sl_fit[i] == False: print "{:2} {:6.1f} {:8.2f} {:8.2f} {:7.4f} {:5.2f} {:6.3f} {:6.3f} {:6.3f}" .format(i+1,sl_center[i],sl_gaussian_flux[i],object_sl_gaussian_flux[i],ratio_object_sky_sl_gaussian[i],object_sl_gaussian_center[i]-sl_gauss_center[i],sl_ref_ratio[i],sl_gaussian_sigma[i],object_sl_gaussian_sigma[i])
# #if skip_sl_fit[i] == False: print "{:2} {:9.3f} {:9.3f} {:9.3f}".format(i+1, object_sl_gaussian_center[i], sl_gauss_center[i], dif_center_obj_sky[i])
#
print("\n> Median center offset between OBJ and SKY : {} A\n> Median gauss for the OBJECT {} A".format(offset, np.nanmedian(object_sl_gaussian_sigma)))
print("> Median flux OBJECT / SKY = {}".format(np.nanmedian(ratio_object_sky_sl_gaussian)))
self.wavelength_offset_per_fibre.append(offset)
# plt.plot(object_sl_gaussian_center, ratio_object_sky_sl_gaussian, "r+")
if auto_scale_sky:
if verbose:
print("\n> As requested, using this value to scale sky spectrum before substraction... ")
auto_scale = np.nanmedian(ratio_object_sky_sl_gaussian)
self.sky_auto_scale.append(np.nanmedian(ratio_object_sky_sl_gaussian))
# self.sky_emission = auto_scale * self.sky_emission
else:
auto_scale = 1.0
self.sky_auto_scale.append(1.0)
if rebin:
if verbose:
print("\n> Rebinning the spectrum of fibre {} to match sky spectrum...".format(fibre))
f = object_sl_gaussian_fitted
f_new = rebin_spec_shift(w, f, offset)
else:
f_new = object_sl_gaussian_fitted
self.intensity_corrected[fibre] = (
f_new - auto_scale * sky_sl_gaussian_fitted
)
# check offset center wavelength
# good_sl_center=[]
# good_sl_center_dif=[]
# plt.figure(figsize=(14, 4))
# for i in range(number_sl):
# if skip_sl_fit[i] == False:
# plt.plot(sl_center[i],dif_center_obj_sky[i],"g+", alpha=0.7, label="Object")
# good_sl_center.append(sl_center[i])
# good_sl_center_dif.append(dif_center_obj_sky[i])
#
# a1x,a0x = np.polyfit(good_sl_center, good_sl_center_dif, 1)
# fx = a0x + a1x*w
# #print a0x, a1x
# offset = np.nanmedian(good_sl_center_dif)
# print "median =",offset
# plt.plot(w,fx,"b", alpha=0.7, label="Fit")
# plt.axhline(y=offset, color='r', linestyle='--')
# plt.xlim(6100,9300)
# #plt.ylim(ymin,ymax)
# ptitle = "Fibre "+np.str(fibre)#+" with rms = "+np.str(rms[i])
# plt.title(ptitle)
# plt.xlabel("Wavelength [$\AA$]")
# plt.ylabel("c_obj - c_sky")
# #plt.legend(frameon=True, loc=2, ncol=4)
# plt.minorticks_on()
# plt.show()
# plt.close()
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
def do_extinction_curve(
self, observatory_file=pth.join(DATA_PATH, "ssoextinct.dat"), plot=True
):
"""
Parameters
----------
        observatory_file : string
            File with the observatory extinction curve (wavelength, extinction in magnitudes per unit airmass)
        plot : boolean (default = True)
            Plot the extinction curve and the derived flux correction
Returns
-------
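        Example
        ----------
        # Illustrative: with airmass = 1.2 and an extinction of 0.15 mag/airmass at a
        # given wavelength, the flux correction there is 10**(0.4*1.2*0.15) ~ 1.18
        self.do_extinction_curve(plot=True)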
"""
print("\n> Computing extinction at given airmass...")
# Read data
data_observatory = np.loadtxt(observatory_file, unpack=True)
extinction_curve_wavelengths = data_observatory[0]
extinction_curve = data_observatory[1]
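        # Convert extinction in magnitudes per unit airmass into a multiplicative
        # flux correction: F_corrected = F_observed * 10**(0.4 * airmass * A(lambda))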
extinction_corrected_airmass = 10 ** (0.4 * self.airmass * extinction_curve)
# Make fit
tck = interpolate.splrep(
extinction_curve_wavelengths, extinction_corrected_airmass, s=0
)
self.extinction_correction = interpolate.splev(self.wavelength, tck, der=0)
# Plot
if plot:
plt.figure(figsize=(10, 5))
plt.plot(extinction_curve_wavelengths, extinction_corrected_airmass, "+")
plt.xlim(np.min(self.wavelength), np.max(self.wavelength))
cinco_por_ciento = 0.05 * (
np.max(self.extinction_correction) - np.min(self.extinction_correction)
)
plt.ylim(
np.min(self.extinction_correction) - cinco_por_ciento,
np.max(self.extinction_correction) + cinco_por_ciento,
)
plt.plot(self.wavelength, self.extinction_correction, "g")
plt.minorticks_on()
plt.title("Correction for extinction using airmass = {}".format(self.airmass))
plt.ylabel("Flux correction")
plt.xlabel("Wavelength [$\AA$]")
# plt.show()
# plt.close()
# Correct for extinction at given airmass
print(" Airmass = {}".format(self.airmass))
print(" Observatory file with extinction curve : {}".format(observatory_file))
for i in range(self.n_spectra):
self.intensity_corrected[i, :] = (
self.intensity_corrected[i, :] * self.extinction_correction
)
print(" Intensities corrected for extinction stored in self.intensity_corrected !")
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
def find_sky_emission(
self,
intensidad=[0, 0],
plot=True,
n_sky=200,
sky_fibres=[1000],
sky_wave_min=0,
sky_wave_max=0,
norm=colors.LogNorm(),
):
"""
Parameters
----------
        intensidad : np.array (default [0, 0])
            Intensity array to use; if left at the default, self.intensity_corrected is used
        plot : boolean (default = True)
            Plot the sky spaxel map, the combined sky spectrum and the corrected median fluxes
        n_sky : integer (default = 200)
            Number of lowest-flux fibres used to build the sky spectrum
        sky_fibres : list (default [1000])
            If a list of fibre indices is provided, it is used instead of the n_sky lowest fibres
        sky_wave_min, sky_wave_max : float
            Wavelength range used to rank fibres (default = 0; if 0, self.valid_wave_min / self.valid_wave_max are used)
        norm : matplotlib normalisation (default = colors.LogNorm())
            Colour normalisation used for the RSS map
Returns
-------
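        Example
        ----------
        # Illustrative: build the sky spectrum from the 100 lowest-flux fibres
        self.find_sky_emission(n_sky=100, plot=True)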
"""
if sky_wave_min == 0:
sky_wave_min = self.valid_wave_min
if sky_wave_max == 0:
sky_wave_max = self.valid_wave_max
if np.nanmedian(intensidad) == 0:
intensidad = self.intensity_corrected
ic = 1
else:
ic = 0
        if sky_fibres[0] == 1000:  # Default value: no list of sky fibres was provided
# sorted_by_flux = np.argsort(flux_ratio) ORIGINAL till 21 Jan 2019
# NEW 21 Jan 2019: Assuming cleaning of cosmics and CCD defects, we just use the spaxels with the LOWEST INTEGRATED VALUES
self.compute_integrated_fibre(
valid_wave_min=sky_wave_min, valid_wave_max=sky_wave_max, plot=False
)
sorted_by_flux = np.argsort(
self.integrated_fibre
) # (self.integrated_fibre)
print("\n> Identifying sky spaxels using the lowest integrated values in the [ {} , {}] range ...".format(sky_wave_min, sky_wave_max))
# if plot:
# # print "\n Plotting fluxes and flux ratio: "
# plt.figure(figsize=(10, 4))
# plt.plot(flux_ratio[sorted_by_flux], 'r-', label='flux ratio')
# plt.plot(flux_sky[sorted_by_flux], 'c-', label='flux sky')
# plt.plot(flux_object[sorted_by_flux], 'k-', label='flux object')
# plt.axvline(x=n_sky)
# plt.xlabel("Spaxel")
# plt.ylabel("Flux")
# plt.yscale('log')
# plt.legend(frameon=False, loc=4)
# plt.show()
# Angel routine: just take n lowest spaxels!
optimal_n = n_sky
print(" We use the lowest {} fibres for getting sky. Their positions are:".format(optimal_n))
# Compute sky spectrum and plot it
self.sky_fibres = sorted_by_flux[:optimal_n]
self.sky_emission = np.nanmedian(
intensidad[sorted_by_flux[:optimal_n]], axis=0
)
print(" List of fibres used for sky saved in self.sky_fibres")
else: # We provide a list with sky positions
print(" We use the list provided to get the sky spectrum")
print(" sky_fibres = {}".format(sky_fibres))
self.sky_fibres = np.array(sky_fibres)
self.sky_emission = np.nanmedian(intensidad[self.sky_fibres], axis=0)
if plot:
self.RSS_map(
self.integrated_fibre, None, self.sky_fibres, title=" - Sky Spaxels"
) # flux_ratio
# print " Combined sky spectrum:"
plt.figure(figsize=(10, 4))
plt.plot(self.wavelength, self.sky_emission, "c-", label="sky")
plt.yscale("log")
plt.ylabel("FLux")
plt.xlabel("Wavelength [$\AA$]")
plt.xlim(self.wavelength[0] - 10, self.wavelength[-1] + 10)
plt.axvline(x=self.valid_wave_min, color="k", linestyle="--")
plt.axvline(x=self.valid_wave_max, color="k", linestyle="--")
plt.ylim([np.nanmin(intensidad), np.nanmax(intensidad)])
plt.minorticks_on()
plt.title("{} - Combined Sky Spectrum".format(self.description))
plt.legend(frameon=False)
# plt.show()
# plt.close()
# Substract sky in all intensities
self.intensity_sky_corrected = np.zeros_like(self.intensity)
for i in range(self.n_spectra):
if ic == 1:
self.intensity_corrected[i, :] = (
self.intensity_corrected[i, :] - self.sky_emission
)
if ic == 0:
self.intensity_sky_corrected[i, :] = (
self.intensity_corrected[i, :] - self.sky_emission
)
last_sky_fibre = self.sky_fibres[-1]
# Plot median value of fibre vs. fibre
if plot:
median_sky_corrected = np.zeros(self.n_spectra)
for i in range(self.n_spectra):
if ic == 1:
median_sky_corrected[i] = np.nanmedian(
self.intensity_corrected[i, :], axis=0
)
if ic == 0:
median_sky_corrected[i] = np.nanmedian(
self.intensity_sky_corrected[i, :], axis=0
)
plt.figure(figsize=(10, 4))
plt.plot(median_sky_corrected)
plt.plot(
[0, 1000],
[
median_sky_corrected[last_sky_fibre],
median_sky_corrected[last_sky_fibre],
],
"r",
)
plt.minorticks_on()
plt.ylabel("Median Flux")
plt.xlabel("Fibre")
plt.yscale("log")
plt.ylim([np.nanmin(median_sky_corrected), np.nanmax(median_sky_corrected)])
plt.title(self.description)
plt.legend(frameon=False)
# plt.show()
# plt.close()
print(" Sky spectrum obtained and stored in self.sky_emission !! ")
print(" Intensities corrected for sky emission and stored in self.intensity_corrected !")
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
def find_relative_throughput(
self,
ymin=10000,
ymax=200000, # nskyflat=False,
kernel_sky_spectrum=5,
wave_min_scale=0,
wave_max_scale=0,
plot=True,
):
"""
Determine the relative transmission of each spectrum
using a skyflat.
Parameters
----------
        ymin, ymax : float
            Flux limits used in the spectra plots
        kernel_sky_spectrum : integer (default = 5)
            Kernel size of the median filter applied to the response sky spectrum
        wave_min_scale, wave_max_scale : float
            Wavelength range for the median values (default = 0; if 0, a range is chosen according to the grating)
        plot : boolean (default = True)
            Plot results
Returns
-------
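        Example
        ----------
        # Illustrative call on a skyflat RSS object (skyflat_red is a hypothetical name):
        skyflat_red.find_relative_throughput(wave_min_scale=6600, wave_max_scale=6800)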
"""
# These are for the normalized flat:
# fit_skyflat_degree=0, step=50, wave_min_flat=0, wave_max_flat=0):
print("\n> Using this skyflat to find relative throughput (a scale per fibre)...")
        # Check grating to choose the wavelength range for the median values
if wave_min_scale == 0 and wave_max_scale == 0:
if self.grating == "1000R":
wave_min_scale = 6600.0
wave_max_scale = 6800.0
print(" For 1000R, we use the median value in the [6600, 6800] range.")
if self.grating == "1500V":
wave_min_scale = 5100.0
wave_max_scale = 5300.0
print(" For 1500V, we use the median value in the [5100, 5300] range.")
if self.grating == "580V":
wave_min_scale = 4700.0
wave_max_scale = 4800.0
print(" For 580V, we use the median value in the [4700, 4800] range.")
if self.grating == "385R":
wave_min_scale = 6600.0
wave_max_scale = 6800.0
print(" For 385R, we use the median value in the [6600, 6800] range.")
# print(" For {}, we use the median value in the [{}, {}] range.".format(
# self.grating, wave_min_scale, wave_max_scale))
else:
if wave_min_scale == 0:
wave_min_scale = self.wavelength[0]
if wave_max_scale == 0:
wave_max_scale = self.wavelength[-1]
print(" As given by the user, we use the median value in the [{} , {}] range.".format(wave_min_scale, wave_max_scale))
median_region = np.zeros(self.n_spectra)
for i in range(self.n_spectra):
region = np.where(
(self.wavelength > wave_min_scale) & (self.wavelength < wave_max_scale)
)
median_region[i] = np.nanmedian(self.intensity[i, region])
median_value_skyflat = np.nanmedian(median_region)
self.relative_throughput = median_region/median_value_skyflat
print(" Median value of skyflat in the [ {} , {}] range = {}".format(wave_min_scale, wave_max_scale, median_value_skyflat))
print(" Individual fibre corrections: min = {} max = {}".format(np.nanmin(self.relative_throughput), np.nanmax(self.relative_throughput)))
if plot:
plt.figure(figsize=(10, 4))
x = list(range(self.n_spectra))
plt.plot(x, self.relative_throughput)
# plt.ylim(0.5,4)
plt.minorticks_on()
plt.xlabel("Fibre")
plt.ylabel("Throughput using scale")
plt.title("Throughput correction using scale")
# plt.show()
# plt.close()
# print "\n Plotting spectra WITHOUT considering throughput correction..."
plt.figure(figsize=(10, 4))
for i in range(self.n_spectra):
plt.plot(self.wavelength, self.intensity[i, ])
plt.xlabel("Wavelength [$\AA$]")
plt.title("Spectra WITHOUT considering any throughput correction")
plt.xlim(self.wavelength[0] - 10, self.wavelength[-1] + 10)
plt.ylim(ymin, ymax)
plt.minorticks_on()
# plt.show()
# plt.close()
# print " Plotting spectra CONSIDERING throughput correction..."
plt.figure(figsize=(10, 4))
for i in range(self.n_spectra):
# self.intensity_corrected[i,] = self.intensity[i,] * self.relative_throughput[i]
plot_this = self.intensity[i, ]/self.relative_throughput[i]
plt.plot(self.wavelength, plot_this)
plt.ylim(ymin, ymax)
plt.minorticks_on()
plt.xlabel("Wavelength [$\AA$]")
plt.title("Spectra CONSIDERING throughput correction (scale)")
plt.xlim(self.wavelength[0] - 10, self.wavelength[-1] + 10)
plt.axvline(x=wave_min_scale, color="k", linestyle="--")
plt.axvline(x=wave_max_scale, color="k", linestyle="--")
# plt.show()
# plt.close()
print("\n> Using median value of skyflat considering a median filter of {} ...".format(kernel_sky_spectrum)) # LUKE
median_sky_spectrum = np.nanmedian(self.intensity, axis=0)
self.response_sky_spectrum = np.zeros_like(self.intensity)
rms = np.zeros(self.n_spectra)
plot_fibres = [100, 500, 501, 900]
pf = 0
for i in range(self.n_spectra):
self.response_sky_spectrum[i] = (
(self.intensity[i]/self.relative_throughput[i])/median_sky_spectrum
)
filter_response_sky_spectrum = sig.medfilt(
self.response_sky_spectrum[i], kernel_size=kernel_sky_spectrum
)
rms[i] = np.nansum(
np.abs(self.response_sky_spectrum[i] - filter_response_sky_spectrum)
)/np.nansum(self.response_sky_spectrum[i])
if plot:
if i == plot_fibres[pf]:
plt.figure(figsize=(10, 4))
plt.plot(
self.wavelength,
self.response_sky_spectrum[i],
alpha=0.3,
label="Response Sky",
)
plt.plot(
self.wavelength,
filter_response_sky_spectrum,
alpha=0.7,
linestyle="--",
label="Filtered Response Sky",
)
plt.plot(
self.wavelength,
self.response_sky_spectrum[i]/filter_response_sky_spectrum,
alpha=1,
label="Normalized Skyflat",
)
plt.xlim(self.wavelength[0] - 50, self.wavelength[-1] + 50)
plt.ylim(0.95, 1.05)
ptitle = "Fibre {} with rms = {}".format(i, rms[i])
plt.title(ptitle)
plt.xlabel("Wavelength [$\AA$]")
plt.legend(frameon=False, loc=3, ncol=1)
# plt.show()
# plt.close()
if pf < len(plot_fibres) - 1:
pf = pf + 1
        print("  median rms = {}  min rms = {}  max rms = {}".format(np.nanmedian(rms), np.nanmin(rms), np.nanmax(rms)))
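        # The per-fibre rms above is a dimensionless, normalised absolute residual:
        #   rms[i] = sum(|response[i] - medfilt(response[i])|)/sum(response[i])
        # Fibres with unusually large rms deviate from a smooth throughput response
        # and may deserve individual inspection.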
# if plot:
# plt.figure(figsize=(10, 4))
# for i in range(self.n_spectra):
# #plt.plot(self.wavelength,self.intensity[i,]/median_sky_spectrum)
# plot_this = self.intensity[i,] / self.relative_throughput[i] /median_sky_spectrum
# plt.plot(self.wavelength, plot_this)
# plt.xlabel("Wavelength [$\AA$]")
# plt.title("Spectra CONSIDERING throughput correction (scale) / median sky spectrum")
# plt.xlim(self.wavelength[0]-10,self.wavelength[-1]+10)
# plt.ylim(0.7,1.3)
# plt.minorticks_on()
# plt.show()
# plt.close()
#
# plt.plot(self.wavelength, median_sky_spectrum, color='r',alpha=0.7)
# plt.plot(self.wavelength, filter_median_sky_spectrum, color='blue',alpha=0.7)
# plt.show()
# plt.close()
#
# plt.plot(self.wavelength, median_sky_spectrum/filter_median_sky_spectrum, color='r',alpha=0.7)
# plt.show()
# plt.close()
# for i in range(2):
# response_sky_spectrum_ = self.intensity[500+i,] / self.relative_throughput[500+i] /median_sky_spectrum
# filter_response_sky_spectrum = sig.medfilt(response_sky_spectrum_,kernel_size=kernel_sky_spectrum)
# rms=np.nansum(np.abs(response_sky_spectrum_ - filter_response_sky_spectrum))/np.nansum(response_sky_spectrum_)
# for i in range(5):
# filter_response_sky_spectrum_ = (self.intensity[500+i,] / self.relative_throughput[500+i] ) / median_sky_spectrum
# filter_response_sky_spectrum = sig.medfilt(filter_response_sky_spectrum_,kernel_size=kernel_sky_spectrum)
#
# plt.plot(self.wavelength, filter_response_sky_spectrum,alpha=0.7)
# plt.ylim(0.95,1.05)
# plt.show()
# plt.close()
print("\n> Relative throughput using skyflat scaled stored in self.relative_throughput !!")
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
def get_telluric_correction(
self,
n_fibres=10,
correct_from=6850.0,
correct_to=10000.0,
apply_tc=False,
step=10,
combined_cube=False,
weight_fit_median=0.5,
exclude_wlm=[
[6450, 6700],
[6850, 7050],
[7130, 7380],
], # This is range for 1000R
wave_min=0,
wave_max=0,
plot=True,
fig_size=12,
verbose=False,
):
""" # TODO BLAKE: always use false, use plots to make sure it's good. prob just save as a different file.
Get telluric correction using a spectrophotometric star
Parameters
----------
n_fibres: integer
        number of fibres to add for obtaining spectrum
        correct_from : float
            wavelength from which telluric correction is applied (default = 6850)
        correct_to : float
            wavelength up to which telluric correction is applied (default = 10000)
        apply_tc : boolean (default = False)
            apply telluric correction to data
        exclude_wlm=[[6450,6700],[6850,7050], [7130,7380]]:
            Wavelength ranges not considered when normalising the stellar continuum
Example
----------
telluric_correction_star1 = star1r.get_telluric_correction(n_fibres=15)
"""
print("\n> Obtaining telluric correction using spectrophotometric star...")
if combined_cube:
wlm = self.combined_cube.wavelength
else:
wlm = self.wavelength
if wave_min == 0:
wave_min = wlm[0]
if wave_max == 0:
wave_max = wlm[-1]
if combined_cube:
if self.combined_cube.seeing == 0:
self.combined_cube.half_light_spectrum(
5, plot=plot, min_wave=wave_min, max_wave=wave_max
)
estrella = self.combined_cube.integrated_star_flux
else:
integrated_intensity_sorted = np.argsort(self.integrated_fibre)
intensidad = self.intensity_corrected
region = []
for fibre in range(n_fibres):
region.append(integrated_intensity_sorted[-1 - fibre])
estrella = np.nansum(intensidad[region], axis=0)
smooth_med_star = smooth_spectrum(
wlm,
estrella,
wave_min=wave_min,
wave_max=wave_max,
step=step,
weight_fit_median=weight_fit_median,
exclude_wlm=exclude_wlm,
plot=plot,
verbose=verbose,
)
telluric_correction = np.ones(len(wlm))
for l in range(len(wlm)):
if wlm[l] > correct_from and wlm[l] < correct_to:
telluric_correction[l] = smooth_med_star[l]/estrella[l] # TODO: should be float, check when have star data
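        # Numeric sketch (assumed values): inside a telluric band the observed stellar
        # flux dips below the smooth continuum fit, e.g.
        #   smooth_med_star[l] = 100.0 and estrella[l] = 80.0  ->  telluric_correction[l] = 1.25
        # and multiplying the data by this factor restores the absorbed flux.
        # Outside [correct_from, correct_to] the correction stays at 1.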
if plot:
plt.figure(figsize=(fig_size, fig_size / 2.5))
if combined_cube:
print(" Telluric correction for this star ({}) :".format(self.combined_cube.object))
plt.plot(wlm, estrella, color="b", alpha=0.3)
plt.plot(wlm, estrella * telluric_correction, color="g", alpha=0.5)
plt.ylim(np.nanmin(estrella), np.nanmax(estrella))
else:
print(" Example of telluric correction using fibres {} and {} :".format(region[0], region[1]))
plt.plot(wlm, intensidad[region[0]], color="b", alpha=0.3)
plt.plot(
wlm,
intensidad[region[0]] * telluric_correction,
color="g",
alpha=0.5,
)
plt.plot(wlm, intensidad[region[1]], color="b", alpha=0.3)
plt.plot(
wlm,
intensidad[region[1]] * telluric_correction,
color="g",
alpha=0.5,
)
plt.ylim(
np.nanmin(intensidad[region[1]]), np.nanmax(intensidad[region[0]])
) # CHECK THIS AUTOMATICALLY
plt.axvline(x=wave_min, color="k", linestyle="--")
plt.axvline(x=wave_max, color="k", linestyle="--")
plt.xlim(wlm[0] - 10, wlm[-1] + 10)
plt.xlabel("Wavelength [$\AA$]")
if exclude_wlm[0][0] != 0:
for i in range(len(exclude_wlm)):
plt.axvspan(
exclude_wlm[i][0], exclude_wlm[i][1], color="r", alpha=0.1
)
plt.minorticks_on()
# plt.show()
# plt.close()
if apply_tc: # Check this
print(" Applying telluric correction to this star...")
if combined_cube:
self.combined_cube.integrated_star_flux = (
self.combined_cube.integrated_star_flux * telluric_correction
)
for i in range(self.combined_cube.n_rows):
for j in range(self.combined_cube.n_cols):
self.combined_cube.data[:, i, j] = (
self.combined_cube.data[:, i, j] * telluric_correction
)
else:
for i in range(self.n_spectra):
self.intensity_corrected[i, :] = (
self.intensity_corrected[i, :] * telluric_correction
)
return telluric_correction
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
def plot_spectrum(self, spectrum_number, sky=True, xmin=0, xmax=0, ymax=0, ymin=0):
"""
Plot spectrum of a particular spaxel.
Parameters
----------
spectrum_number:
spaxel to show spectrum.
sky:
            if True subtracts the sky
Example
-------
>>> rss1.plot_spectrum(550, sky=True)
"""
if sky:
spectrum = self.intensity_corrected[spectrum_number]
else:
spectrum = self.intensity_corrected[spectrum_number] + self.sky_emission
plt.plot(self.wavelength, spectrum)
# error = 3*np.sqrt(self.variance[spectrum_number])
# plt.fill_between(self.wavelength, spectrum-error, spectrum+error, alpha=.1)
if xmin != 0 or xmax != 0 or ymax != 0 or ymin != 0:
if xmin == 0:
xmin = self.wavelength[0]
if xmax == 0:
xmax = self.wavelength[-1]
if ymin == 0:
ymin = np.nanmin(spectrum)
if ymax == 0:
ymax = np.nanmax(spectrum)
plt.minorticks_on()
plt.xlabel("Wavelength [$\AA$]")
plt.ylabel("Relative Flux")
plt.xlim(xmin, xmax)
plt.ylim(ymin, ymax)
# plt.show()
# plt.close()
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
def plot_spectra(
self,
list_spectra="all",
wavelength_range=[0],
xmin="",
xmax="",
ymax=1000,
ymin=-100,
fig_size=10,
save_file="",
sky=True,
):
"""
        Plot spectra of a list of spaxels.
Parameters
----------
list_spectra:
spaxels to show spectrum. Default is all.
save_file:
(Optional) Save plot in file "file.extension"
fig_size:
Size of the figure (in x-axis), default: fig_size=10
Example
-------
>>> rss1.plot_spectra([1200,1300])
"""
plt.figure(figsize=(fig_size, fig_size / 2.5))
if list_spectra == "all":
list_spectra = list(range(self.n_spectra))
if len(wavelength_range) == 2:
plt.xlim(wavelength_range[0], wavelength_range[1])
if xmin == "":
xmin = np.nanmin(self.wavelength)
if xmax == "":
xmax = np.nanmax(self.wavelength)
# title = "Spectrum of spaxel {} in {}".format(spectrum_number, self.description)
# plt.title(title)
plt.minorticks_on()
plt.xlabel("Wavelength [$\AA$]")
plt.ylabel("Relative Flux")
plt.xlim(xmin, xmax)
plt.ylim(ymin, ymax)
for i in list_spectra:
self.plot_spectrum(i, sky)
if save_file == "":
#plt.show()
pass
else:
plt.savefig(save_file)
plt.close()
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
def plot_combined_spectrum(
self,
list_spectra="all",
sky=True,
median=False,
xmin="",
xmax="",
ymax="",
ymin="",
fig_size=10,
save_file="",
plot=True,
):
"""
Plot combined spectrum of a list and return the combined spectrum.
Parameters
----------
list_spectra:
spaxels to show combined spectrum. Default is all.
sky:
            if True subtracts the sky
Example
-------
        >>> star_spectrum = rss1.plot_combined_spectrum(list_spectra=[550, 551, 552], sky=True)
"""
if list_spectra == "all":
list_spectra = list(range(self.n_spectra))
spectrum = np.zeros_like(self.intensity_corrected[list_spectra[0]])
value_list = []
# Note: spectrum of fibre is located in position fibre-1, e.g., spectrum of fibre 1 -> intensity_corrected[0]
if sky:
for fibre in list_spectra:
value_list.append(self.intensity_corrected[fibre - 1])
else:
for fibre in list_spectra:
value_list.append(
self.intensity_corrected[fibre - 1] + self.sky_emission
)
if median:
spectrum = np.nanmedian(value_list, axis=0)
else:
spectrum = np.nansum(value_list, axis=0)
if plot:
plt.figure(figsize=(fig_size, fig_size / 2.5))
if xmin == "":
xmin = np.nanmin(self.wavelength)
if xmax == "":
xmax = np.nanmax(self.wavelength)
if ymin == "":
ymin = np.nanmin(spectrum)
if ymax == "":
ymax = np.nanmax(spectrum)
plt.plot(self.wavelength, spectrum)
if len(list_spectra) == list_spectra[-1] - list_spectra[0] + 1:
title = "{} - Combined spectrum in range [{},{}]".format(
self.description, list_spectra[0], list_spectra[-1]
)
else:
title = "Combined spectrum using requested fibres"
plt.title(title)
plt.minorticks_on()
plt.xlabel("Wavelength [$\AA$]")
plt.ylabel("Relative Flux")
plt.xlim(xmin, xmax)
plt.ylim(ymin, ymax)
if save_file == "":
#plt.show()
pass
else:
plt.savefig(save_file)
plt.close()
return spectrum
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
def flux_between(self, lambda_min, lambda_max, list_spectra=[]):
"""
Parameters
----------
lambda_min
lambda_max
list_spectra
Returns
-------
"""
index_min = np.searchsorted(self.wavelength, lambda_min)
index_max = np.searchsorted(self.wavelength, lambda_max) + 1
if len(list_spectra) == 0:
list_spectra = list(range(self.n_spectra))
n_spectra = len(list_spectra)
fluxes = np.empty(n_spectra)
variance = np.empty(n_spectra)
for i in range(n_spectra):
fluxes[i] = np.nanmean(self.intensity[list_spectra[i], index_min:index_max])
variance[i] = np.nanmean(
self.variance[list_spectra[i], index_min:index_max]
)
return fluxes * (lambda_max - lambda_min), variance * (lambda_max - lambda_min)
# WARNING: Are we overestimating errors?
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
def median_between(self, lambda_min, lambda_max, list_spectra=[]):
"""
Parameters
----------
lambda_min
lambda_max
list_spectra
Returns
-------
"""
index_min = np.searchsorted(self.wavelength, lambda_min)
index_max = np.searchsorted(self.wavelength, lambda_max) + 1
if len(list_spectra) == 0:
list_spectra = list(range(self.n_spectra))
n_spectra = len(list_spectra)
medians = np.empty(n_spectra)
for i in range(n_spectra):
medians[i] = np.nanmedian(
self.intensity[list_spectra[i], index_min:index_max]
)
return medians
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
def line_flux(
self,
left_min,
left_max,
line_min,
line_max,
right_min,
right_max,
list_spectra=[],
):
"""
Parameters
----------
left_min
left_max
line_min
line_max
right_min
right_max
list_spectra
Returns
-------
"""
        # Note: this function does not appear to be called anywhere in this module.
        if len(list_spectra) == 0:
            list_spectra = list(range(self.n_spectra))
        line, var_line = self.flux_between(line_min, line_max, list_spectra)
        # Normalise each continuum band by its own width to get mean flux per Angstrom
        left, var_left = self.flux_between(left_min, left_max, list_spectra)
        left, var_left = left/(left_max - left_min), var_left/(left_max - left_min)
        right, var_right = self.flux_between(right_min, right_max, list_spectra)
        right, var_right = right/(right_max - right_min), var_right/(right_max - right_min)
        # Linear interpolation of the continuum at the line centre
        wavelength_left = (left_min + left_max)/2
        wavelength_line = (line_min + line_max)/2
        wavelength_right = (right_min + right_max)/2
        continuum = left + (right - left) * (wavelength_line - wavelength_left)/(
            wavelength_right - wavelength_left
        )
        var_continuum = (var_left + var_right)/2
return (
line - continuum * (line_max - line_min),
var_line + var_continuum * (line_max - line_min),
)
# WARNING: Are we overestimating errors?
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
def RSS_map(
self,
variable,
norm=colors.LogNorm(),
list_spectra=[],
title=" - RSS map",
color_bar_text="Integrated Flux [Arbitrary units]",
):
"""
Plot map showing the offsets, coloured by variable.
Parameters
----------
variable
norm
list_spectra
title
color_bar_text
Returns
-------
"""
if len(list_spectra) == 0:
list_spectra = list(range(self.n_spectra))
plt.figure(figsize=(10, 10))
plt.scatter(
self.offset_RA_arcsec[list_spectra],
self.offset_DEC_arcsec[list_spectra],
c=variable[list_spectra],
cmap=fuego_color_map,
norm=norm,
s=260,
marker="h",
)
plt.title(self.description + title)
plt.xlim(
np.nanmin(self.offset_RA_arcsec) - 0.7,
np.nanmax(self.offset_RA_arcsec) + 0.7,
)
plt.ylim(
np.nanmin(self.offset_DEC_arcsec) - 0.7,
np.nanmax(self.offset_DEC_arcsec) + 0.7,
)
plt.xlabel("$\Delta$ RA [arcsec]")
plt.ylabel("$\Delta$ DEC [arcsec]")
plt.minorticks_on()
plt.grid(which="both")
plt.gca().invert_xaxis()
cbar = plt.colorbar()
plt.clim(np.nanmin(variable[list_spectra]), np.nanmax(variable[list_spectra]))
cbar.set_label(color_bar_text, rotation=90, labelpad=40)
cbar.ax.tick_params()
# plt.show()
# plt.close()
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
def RSS_image(
self,
image=[0],
norm=colors.Normalize(),
cmap="seismic_r",
clow=0,
chigh=0,
labelpad=10,
title=" - RSS image",
color_bar_text="Integrated Flux [Arbitrary units]",
fig_size=13.5,
):
"""
Plot RSS image coloured by variable.
cmap = "binary_r" nice greyscale
Parameters
----------
image
norm
cmap
clow
chigh
labelpad
title
color_bar_text
fig_size
Returns
-------
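        Example
        -------
        >>> # Assuming rss1 is an already-read RSS object:
        >>> rss1.RSS_image()  # plot self.intensity_corrected with default colour limits
        >>> rss1.RSS_image(image=rss1.intensity, cmap="binary_r")  # greyscale of raw data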
"""
if np.nanmedian(image) == 0:
image = self.intensity_corrected
if clow == 0:
clow = np.nanpercentile(image, 5)
if chigh == 0:
chigh = np.nanpercentile(image, 95)
if cmap == "seismic_r":
max_abs = np.nanmax([np.abs(clow), np.abs(chigh)])
clow = -max_abs
chigh = max_abs
plt.figure(figsize=(fig_size, fig_size / 2.5))
plt.imshow(image, norm=norm, cmap=cmap, clim=(clow, chigh))
plt.title(self.description + title)
plt.minorticks_on()
plt.gca().invert_yaxis()
# plt.colorbar()
cbar = plt.colorbar()
cbar.set_label(color_bar_text, rotation=90, labelpad=labelpad)
cbar.ax.tick_params()
# plt.show()
# plt.close()
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
def plot_corrected_vs_uncorrected_spectrum(self, high_fibres=10, fig_size=12):
"""
Parameters
----------
high_fibres
fig_size
Returns
-------
"""
integrated_intensity_sorted = np.argsort(self.integrated_fibre)
region = []
for fibre_ in range(high_fibres):
region.append(integrated_intensity_sorted[-1 - fibre_])
plt.figure(figsize=(fig_size, fig_size / 2.5))
I = np.nansum(self.intensity[region], axis=0)
plt.plot(self.wavelength, I, "r-", label="Uncorrected", alpha=0.3)
Ic = np.nansum(self.intensity_corrected[region], axis=0)
I_ymin = np.nanmin([np.nanmin(I), np.nanmin(Ic)])
I_ymax = np.nanmax([np.nanmax(I), np.nanmax(Ic)])
I_rango = I_ymax - I_ymin
plt.plot(self.wavelength, Ic, "g-", label="Corrected", alpha=0.4)
plt.ylabel("Flux")
plt.xlabel("Wavelength [$\AA$]")
plt.minorticks_on()
plt.xlim(self.wavelength[0] - 10, self.wavelength[-1] + 10)
plt.axvline(x=self.valid_wave_min, color="k", linestyle="--")
plt.axvline(x=self.valid_wave_max, color="k", linestyle="--")
plt.ylim([I_ymin - (I_rango/10), I_ymax + (I_rango/10)])
plt.title(
self.object
+ " - Combined spectrum - "
+ "{}".format(high_fibres)
+ " fibres with highest intensity"
)
plt.legend(frameon=False, loc=4, ncol=2)
# plt.show()
# plt.close()
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# Idea: take a RSS dominated by skylines. Read it (only throughput correction). For each fibre, fit Gaussians to ~10 skylines.
# Compare with REST wavelengths. Get a median value per fibre. Perform a second-order fit to all median values.
    # Correct for that using a reference fibre (1). Save the results to be applied to the rest of the files of the night (assuming the same configuration).
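    # A minimal usage sketch (hypothetical file names), assuming the fibre
    # configuration is unchanged during the night:
    #   sky_rss = KOALA_RSS("16jan20050red.fits", sky_method="none", do_extinction=False)
    #   sky_rss.fix_2dfdr_wavelengths()        # fit skylines, derive [a0x, a1x, a2x]
    #   sol = sky_rss.wavelength_parameters    # stored second-order solution
    #   obj_rss = KOALA_RSS("16jan20058red.fits", fix_wavelengths=True, sol=sol)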
def fix_2dfdr_wavelengths(
self,
sol=[0, 0, 0],
fibre=0,
maxima_sigma=2.5,
maxima_offset=1.5,
xmin=7740,
xmax=7770,
ymin=0,
ymax=1000,
plot=True,
verbose=True,
warnings=True,
):
"""
Parameters
----------
sol
fibre
maxima_sigma
maxima_offset
xmin
xmax
ymin
ymax
plot
verbose
warnings
Returns
-------
"""
print("\n> Fixing 2dfdr wavelengths using skylines.")
w = self.wavelength
if sol[0] == 0: # Solutions are not given
# Read file with sky emission line
sky_lines_file = "sky_lines_rest.dat"
(
sl_center,
sl_name,
sl_fnl,
sl_lowlow,
sl_lowhigh,
sl_highlow,
sl_highhigh,
sl_lmin,
sl_lmax,
) = read_table(
sky_lines_file, ["f", "s", "f", "f", "f", "f", "f", "f", "f"]
)
number_sl = len(sl_center)
# Fitting Gaussians to skylines...
say_status = 0
self.wavelength_offset_per_fibre = []
wave_median_offset = []
print("\n> Performing a Gaussian fit to selected, bright skylines... (this will FAIL if RSS is not corrected for CCD defects...)")
if fibre != 0:
f_i = fibre
f_f = fibre + 1
print(" Checking fibre {} (only this fibre is corrected, use fibre = 0 for all)...".format(fibre))
verbose = True
warnings = True
else:
f_i = 0
f_f = self.n_spectra
verbose = False
for fibre in range(f_i, f_f): # (self.n_spectra):
spectrum = self.intensity_corrected[fibre]
if fibre == say_status:
print(" Checking fibre {:4} ... ({:6.2f} % completed) ...".format(
fibre, fibre * 100.0 / self.n_spectra
))
say_status = say_status + 20
# Gaussian fits to the sky spectrum
sl_gaussian_flux = []
sl_gaussian_sigma = []
sl_gauss_center = []
sl_offset = []
sl_offset_good = []
if verbose:
print("\n> Performing Gaussian fitting to bright sky lines in all fibres of rss file...")
for i in range(number_sl):
if sl_fnl[i] == 0:
plot_fit = False
else:
plot_fit = True
resultado = fluxes(
w,
spectrum,
sl_center[i],
lowlow=sl_lowlow[i],
lowhigh=sl_lowhigh[i],
highlow=sl_highlow[i],
highhigh=sl_highhigh[i],
lmin=sl_lmin[i],
lmax=sl_lmax[i],
fmin=0,
fmax=0,
broad=2.1 * 2.355,
plot=plot_fit,
verbose=False,
plot_sus=False,
fcal=False,
warnings=warnings,
                    )  # broad is the FWHM; for a Gaussian, FWHM = 2.355 * sigma
sl_gaussian_flux.append(resultado[3])
sl_gauss_center.append(resultado[1])
sl_gaussian_sigma.append(resultado[5] / 2.355)
sl_offset.append(sl_gauss_center[i] - sl_center[i])
if (
sl_gaussian_flux[i] < 0
or np.abs(sl_center[i] - sl_gauss_center[i]) > maxima_offset
or sl_gaussian_sigma[i] > maxima_sigma
):
if verbose:
print(" Bad fitting for {} ... ignoring this fit...".format(sl_center[i]))
else:
sl_offset_good.append(sl_offset[i])
if verbose:
print(" Fitted wavelength for sky line {:8.3f}: center = {:8.3f} sigma = {:6.3f} offset = {:7.3f} ".format(
sl_center[i],
sl_gauss_center[i],
sl_gaussian_sigma[i],
sl_offset[i],
))
median_offset_fibre = np.nanmedian(sl_offset_good)
wave_median_offset.append(median_offset_fibre)
if verbose:
print("\n> Median offset for fibre {:3} = {:7.3f}".format(
fibre, median_offset_fibre
))
# Second-order fit ...
xfibre = list(range(0, self.n_spectra))
a2x, a1x, a0x = np.polyfit(xfibre, wave_median_offset, 2)
print("\n> Fitting a second-order polynomy a0x + a1x * fibre + a2x * fibre**2:")
else:
print("\n> Solution to the second-order polynomy a0x + a1x * fibre + a2x * fibre**2 have been provided:")
a0x = sol[0]
a1x = sol[1]
a2x = sol[2]
xfibre = list(range(0, self.n_spectra))
print(" a0x = {} a1x = {} a2x = {}".format(a0x, a1x, a2x))
self.wavelength_parameters = [a0x, a1x, a2x] # Save solutions
fx = a0x + a1x * np.array(xfibre) + a2x * np.array(xfibre) ** 2
if plot:
plt.figure(figsize=(10, 4))
if sol[0] == 0:
plt.plot(xfibre, wave_median_offset)
pf = wave_median_offset
else:
pf = fx
plt.plot(xfibre, fx, "r")
plot_plot(
xfibre,
pf,
ptitle="Second-order fit to individual offsets",
xmin=-20,
xmax=1000,
xlabel="Fibre",
ylabel="offset",
)
# Applying results
print("\n> Applying results to all fibres...")
for fibre in xfibre:
f = self.intensity_corrected[fibre]
w_shift = fx[fibre]
self.intensity_corrected[fibre] = rebin_spec_shift(w, f, w_shift)
# Check results
if plot:
plt.figure(figsize=(10, 4))
for i in [0, 300, 600, 950]:
plt.plot(w, self.intensity[i])
plot_plot(
w,
self.intensity[0],
ptitle="Before corrections, fibres 0, 300, 600, 950",
xmin=xmin,
xmax=xmax,
ymin=ymin,
ymax=ymax,
)
plt.figure(figsize=(10, 4))
for i in [0, 300, 600, 950]:
plt.plot(w, self.intensity_corrected[i])
plot_plot(
w,
self.intensity_corrected[0],
ptitle="Checking wavelength corrections in fibres 0, 300, 600, 950",
xmin=xmin,
xmax=xmax,
ymin=ymin,
ymax=ymax,
)
print("\n> Small fixing of the 2dFdr wavelengths done!")
return
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# KOALA_RSS CLASS
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
class KOALA_RSS(RSS):
"""
This class reads the FITS files returned by
`2dfdr
<https://aat.anu.edu.au/science/instruments/current/AAOmega/reduction>`_
and performs basic analysis tasks (see description under each method).
Parameters
----------
filename : string
FITS file returned by 2dfdr, containing the Raw Stacked Spectra.
The code makes sure that it contains 1000 spectra
with 2048 wavelengths each.
Example
-------
>>> pointing1 = KOALA_RSS('data/16jan20058red.fits')
> Reading file "data/16jan20058red.fits" ...
2048 wavelength points between 6271.33984375 and 7435.43408203
1000 spaxels
These numbers are the right ones for KOALA!
DONE!
"""
# -----------------------------------------------------------------------------
def __init__(
self,
filename,
save_rss_to_fits_file="",
rss_clean=False, # TASK_KOALA_RSS
apply_throughput=True,
skyflat="",
plot_skyflat=False,
flat="",
nskyflat=True,
correct_ccd_defects=False,
correct_high_cosmics=False,
clip_high=100,
step_ccd=50,
remove_5578=False,
plot_suspicious_fibres=False,
fix_wavelengths=False,
sol=[0, 0, 0],
sky_method="self",
n_sky=50,
sky_fibres=[1000], # do_sky=True
sky_spectrum=[0],
sky_rss=[0],
scale_sky_rss=0,
scale_sky_1D=1.0,
is_sky=False,
win_sky=151,
auto_scale_sky=False,
correct_negative_sky=False,
sky_wave_min=0,
sky_wave_max=0,
cut_sky=5.0,
fmin=1,
fmax=10,
individual_sky_substraction=False,
fibre_list=[100, 200, 300, 400, 500, 600, 700, 800, 900],
do_extinction=True,
telluric_correction=[0],
id_el=False,
high_fibres=10,
brightest_line="Ha",
cut=1.5,
broad=1.0,
plot_id_el=False,
id_list=[0],
brightest_line_wavelength=0,
clean_sky_residuals=False,
dclip=3.0,
extra_w=1.3,
step_csr=25,
fibre=0,
valid_wave_min=0,
valid_wave_max=0,
warnings=True,
verbose=False,
plot=True,
norm=colors.LogNorm(),
fig_size=12,
):
"""
Parameters
----------
filename
save_rss_to_fits_file
rss_clean
apply_throughput
skyflat
plot_skyflat
flat
nskyflat
correct_ccd_defects
correct_high_cosmics
clip_high
step_ccd
remove_5578
plot_suspicious_fibres
fix_wavelengths
sol
sky_method
n_sky
sky_fibres
sky_spectrum
sky_rss
scale_sky_rss
scale_sky_1D
is_sky
win_sky
auto_scale_sky
correct_negative_sky
sky_wave_min
sky_wave_max
cut_sky
fmin
fmax
individual_sky_substraction
fibre_list
do_extinction
telluric_correction
id_el
high_fibres
brightest_line
cut
broad
plot_id_el
id_list
brightest_line_wavelength
clean_sky_residuals
dclip
extra_w
step_csr
fibre
valid_wave_min
valid_wave_max
warnings
verbose
plot
norm
fig_size
"""
# Just read file if rss_clean = True
if rss_clean:
apply_throughput = False
correct_ccd_defects = False
fix_wavelengths = False
sol = [0, 0, 0]
sky_method = "none"
do_extinction = False
telluric_correction = [0]
id_el = False
clean_sky_residuals = False
plot = False
correct_negative_sky = False
# Create RSS object
super(KOALA_RSS, self).__init__()
print("\n> Reading file", '"' + filename + '"', "...")
RSS_fits_file = fits.open(filename) # Open file
self.rss_list = []
# General info:
self.object = RSS_fits_file[FitsExt.main].header["OBJECT"]
self.description = self.object + " - " + filename
self.RA_centre_deg = RSS_fits_file[FitsExt.fibres_ifu].header["CENRA"] * 180/np.pi
self.DEC_centre_deg = RSS_fits_file[FitsExt.fibres_ifu].header["CENDEC"] * 180/np.pi
self.exptime = RSS_fits_file[FitsExt.main].header["EXPOSED"]
# WARNING: Something is probably wrong/inaccurate here!
# Nominal offsets between pointings are totally wrong!
# Read good/bad spaxels
all_spaxels = list(range(len(RSS_fits_file[FitsExt.fibres_ifu].data)))
quality_flag = [RSS_fits_file[FitsExt.fibres_ifu].data[i][FitsFibresIFUIndex.quality_flag] for i in all_spaxels]
good_spaxels = [i for i in all_spaxels if quality_flag[i] == 1]
bad_spaxels = [i for i in all_spaxels if quality_flag[i] == 0]
# for i in range(1):
# print i, RSS_fits_file[2]
#
# Create wavelength, intensity, and variance arrays only for good spaxels
wcsKOALA = WCS(RSS_fits_file[FitsExt.main].header)
# variance = RSS_fits_file[1].data[good_spaxels]
index_wave = np.arange(RSS_fits_file[FitsExt.main].header["NAXIS1"])
wavelength = wcsKOALA.dropaxis(1).wcs_pix2world(index_wave, 0)[0]
intensity = RSS_fits_file[FitsExt.main].data[good_spaxels]
print("\n Number of spectra in this RSS = {}, number of good spectra = {} , number of bad spectra ={}".format(
len(RSS_fits_file[FitsExt.main].data), len(good_spaxels), len(bad_spaxels)))
print(" Bad fibres = {}".format(bad_spaxels))
# Read errors using RSS_fits_file[1]
# self.header1 = RSS_fits_file[1].data # CHECK WHEN DOING ERRORS !!!
# Read spaxel positions on sky using RSS_fits_file[2]
self.header2_data = RSS_fits_file[FitsExt.fibres_ifu].data
# CAREFUL !! header 2 has the info of BAD fibres, if we are reading from our created RSS files we have to do it in a different way...
# print RSS_fits_file[2].data
if len(bad_spaxels) == 0:
offset_RA_arcsec_ = []
offset_DEC_arcsec_ = []
for i in range(len(good_spaxels)):
offset_RA_arcsec_.append(self.header2_data[i][FitsFibresIFUIndex.ra_offset])
offset_DEC_arcsec_.append(self.header2_data[i][FitsFibresIFUIndex.dec_offset])
offset_RA_arcsec = np.array(offset_RA_arcsec_)
offset_DEC_arcsec = np.array(offset_DEC_arcsec_)
variance = np.zeros_like(intensity) # CHECK FOR ERRORS
else:
offset_RA_arcsec = np.array(
[RSS_fits_file[FitsExt.fibres_ifu].data[i][FitsFibresIFUIndex.ra_offset] for i in good_spaxels]
)
offset_DEC_arcsec = np.array(
[RSS_fits_file[FitsExt.fibres_ifu].data[i][FitsFibresIFUIndex.dec_offset] for i in good_spaxels]
)
self.ID = np.array(
[RSS_fits_file[FitsExt.fibres_ifu].data[i][FitsFibresIFUIndex.spec_id] for i in good_spaxels]
) # These are the good fibres
variance = RSS_fits_file[FitsExt.var].data[good_spaxels] # CHECK FOR ERRORS
self.ZDSTART = RSS_fits_file[FitsExt.main].header["ZDSTART"] # Zenith distance (degrees?)
self.ZDEND = RSS_fits_file[FitsExt.main].header["ZDEND"]
# KOALA-specific stuff
self.PA = RSS_fits_file[FitsExt.main].header["TEL_PA"] # Position angle?
self.grating = RSS_fits_file[FitsExt.main].header["GRATID"]
# Check RED / BLUE arm for AAOmega
        if RSS_fits_file[FitsExt.main].header["SPECTID"] == "RD":
            AAOmega_Arm = "RED"
        elif RSS_fits_file[FitsExt.main].header["SPECTID"] == "BL":
            AAOmega_Arm = "BLUE"
        else:
            AAOmega_Arm = "UNKNOWN"  # fallback so the summary print below cannot fail
# For WCS
self.CRVAL1_CDELT1_CRPIX1 = []
self.CRVAL1_CDELT1_CRPIX1.append(RSS_fits_file[FitsExt.main].header["CRVAL1"]) # see https://idlastro.gsfc.nasa.gov/ftp/pro/astrom/aaareadme.txt maybe?
self.CRVAL1_CDELT1_CRPIX1.append(RSS_fits_file[FitsExt.main].header["CDELT1"])
self.CRVAL1_CDELT1_CRPIX1.append(RSS_fits_file[FitsExt.main].header["CRPIX1"])
# SET RSS
# FROM HERE IT WAS self.set_data before ------------------------------------------
self.wavelength = wavelength
self.n_wave = len(wavelength)
# Check that dimensions match KOALA numbers
        if self.n_wave != 2048 or len(all_spaxels) != 1000:
print("\n *** WARNING *** : These numbers are NOT the standard ones for KOALA")
print("\n> Setting the data for this file:")
if variance.shape != intensity.shape:
print("\n* ERROR: * the intensity and variance matrices are {} and {} respectively\n".format(intensity.shape, variance.shape))
raise ValueError
n_dim = len(intensity.shape)
if n_dim == 2:
self.intensity = intensity
self.variance = variance
elif n_dim == 1:
self.intensity = intensity.reshape((1, self.n_wave))
self.variance = variance.reshape((1, self.n_wave))
else:
print("\n* ERROR: * the intensity matrix supplied has {} dimensions\n".format(n_dim))
raise ValueError
self.n_spectra = self.intensity.shape[0]
self.n_wave = len(self.wavelength)
print(" Found {} spectra with {} wavelengths".format(
self.n_spectra, self.n_wave
), "between {:.2f} and {:.2f} Angstrom".format(
self.wavelength[0], self.wavelength[-1]
))
if self.intensity.shape[1] != self.n_wave:
print("\n* ERROR: * spectra have {} wavelengths rather than {}".format(self.intensity.shape[1], self.n_wave))
raise ValueError
if (
len(offset_RA_arcsec) != self.n_spectra
or len(offset_DEC_arcsec) != self.n_spectra
):
print("\n* ERROR: * offsets (RA, DEC) = ({},{}) rather than {}".format(
len(self.offset_RA_arcsec), len(self.offset_DEC_arcsec), self.n_spectra
)
)
raise ValueError
else:
self.offset_RA_arcsec = offset_RA_arcsec
self.offset_DEC_arcsec = offset_DEC_arcsec
# Check if NARROW (spaxel_size = 0.7 arcsec)
# or WIDE (spaxel_size=1.25) field of view
# (if offset_max - offset_min > 31 arcsec in both directions)
if (
np.max(offset_RA_arcsec) - np.min(offset_RA_arcsec) > 31
or np.max(offset_DEC_arcsec) - np.min(offset_DEC_arcsec) > 31
):
self.spaxel_size = 1.25
field = "WIDE"
else:
self.spaxel_size = 0.7
field = "NARROW"
# Get min and max for rss
self.RA_min, self.RA_max, self.DEC_min, self.DEC_max = coord_range([self])
self.DEC_segment = (
self.DEC_max - self.DEC_min
) * 3600.0 # +1.25 for converting to total field of view
self.RA_segment = (self.RA_max - self.RA_min) * 3600.0 # +1.25
# UPDATE THIS TO BE VALID TO ALL GRATINGS!
# ALSO CONSIDER WAVELENGTH RANGE FOR SKYFLATS AND OBJECTS
if valid_wave_min == 0 and valid_wave_max == 0:
self.valid_wave_min = np.min(self.wavelength)
self.valid_wave_max = np.max(self.wavelength)
# if self.grating == "1000R":
# self.valid_wave_min = 6600. # CHECK ALL OF THIS...
# self.valid_wave_max = 6800.
# print " For 1000R, we use the [6200, 7400] range."
# if self.grating == "1500V":
# self.valid_wave_min = np.min(self.wavelength)
# self.valid_wave_max = np.max(self.wavelength)
# print " For 1500V, we use all the range."
# if self.grating == "580V":
# self.valid_wave_min = 3650.
# self.valid_wave_max = 5700.
# print " For 580V, we use the [3650, 5700] range."
# if self.grating == "1500V":
# self.valid_wave_min = 4620. #4550
# self.valid_wave_max = 5350. #5350
# print " For 1500V, we use the [4550, 5350] range."
else:
self.valid_wave_min = valid_wave_min
self.valid_wave_max = valid_wave_max
print(" As specified, we use the [ {} , {} ] range.".format(self.valid_wave_min, self.valid_wave_max))
# Plot RSS_image
if plot:
self.RSS_image(image=self.intensity, cmap="binary_r")
# Deep copy of intensity into intensity_corrected
self.intensity_corrected = copy.deepcopy(self.intensity)
# Divide by flatfield if needed
if flat != "":
print("\n> Dividing the data by the flatfield provided...")
self.intensity_corrected = (self.intensity_corrected/flat.intensity_corrected) # todo: check division per pixel works.
# Check if apply relative throughput & apply it if requested
if apply_throughput:
if plot_skyflat:
plt.figure(figsize=(10, 4))
for i in range(self.n_spectra):
plt.plot(self.wavelength, self.intensity[i, ])
plt.ylim(0, 200 * np.nanmedian(self.intensity))
plt.minorticks_on()
plt.xlabel("Wavelength [$\AA$]")
plt.title("Spectra WITHOUT CONSIDERING throughput correction")
# plt.show()
# plt.close()
print("\n> Applying relative throughput correction using median skyflat values per fibre...")
self.relative_throughput = skyflat.relative_throughput
self.response_sky_spectrum = skyflat.response_sky_spectrum
for i in range(self.n_spectra):
self.intensity_corrected[i, :] = (
self.intensity_corrected[i, :]/self.relative_throughput[i]
)
if nskyflat:
print("\n IMPORTANT: We are dividing intensity data by the sky.response_sky_spectrum !!! ")
print(" This is kind of a flat, the changes are between {} and {}".format(
np.nanmin(skyflat.response_sky_spectrum), np.nanmax(skyflat.response_sky_spectrum)))
print(" ")
self.intensity_corrected = (
self.intensity_corrected/self.response_sky_spectrum
)
if plot_skyflat:
plt.figure(figsize=(10, 4))
for i in range(self.n_spectra):
plt.plot(self.wavelength, self.intensity_corrected[i, ])
plt.ylim(0, 200 * np.nanmedian(self.intensity_corrected))
plt.minorticks_on()
plt.xlabel("Wavelength [$\AA$]")
plt.title("Spectra CONSIDERING throughput correction (median value per fibre)")
# plt.show()
# plt.close()
print(" Intensities corrected for relative throughput stored in self.intensity_corrected !")
text_for_integrated_fibre = "after throughput correction..."
title_for_integrated_fibre = " - Throughput corrected"
else:
if rss_clean == False:
print("\n> Intensities NOT corrected for relative throughput")
self.relative_throughput = np.ones(self.n_spectra)
text_for_integrated_fibre = "..."
title_for_integrated_fibre = ""
# Compute integrated map after throughput correction & plot if requested
self.compute_integrated_fibre(
plot=plot,
title=title_for_integrated_fibre,
text=text_for_integrated_fibre,
warnings=warnings,
correct_negative_sky=correct_negative_sky,
valid_wave_min=valid_wave_min,
valid_wave_max=valid_wave_max,
)
plot_integrated_fibre_again = 0 # Check if we need to plot it again
# Compare corrected vs uncorrected spectrum
# self.plot_corrected_vs_uncorrected_spectrum(high_fibres=high_fibres, fig_size=fig_size)
# Cleaning high cosmics and defects
if sky_method == "1D" or sky_method == "2D":
            # If not, it will not work when applying the scale for sky subtraction...
remove_5578 = False
if correct_ccd_defects:
if plot:
plot_integrated_fibre_again = plot_integrated_fibre_again + 1
self.correct_high_cosmics_and_defects(
correct_high_cosmics=correct_high_cosmics,
step=step_ccd,
remove_5578=remove_5578,
clip_high=clip_high,
plot_suspicious_fibres=plot_suspicious_fibres,
warnings=warnings,
verbose=verbose,
plot=plot,
)
# Compare corrected vs uncorrected spectrum
if plot:
self.plot_corrected_vs_uncorrected_spectrum(
high_fibres=high_fibres, fig_size=fig_size
)
# Fixing small wavelengths
if sol[0] != 0:
self.fix_2dfdr_wavelengths(sol=sol)
else:
if fix_wavelengths:
self.fix_2dfdr_wavelengths()
# else:
# print "\n> We don't fix 2dfdr wavelengths on this rss."
        # SKY SUBTRACTION      sky_method
        #
        # Several options here: (1) "1D"   : Consider a single sky spectrum, scale it and subtract it
        #                       (2) "2D"   : Consider a 2D sky, i.e., a sky image, scale it and subtract it fibre by fibre
        #                       (3) "self" : Obtain the sky spectrum using the n_sky lowest fibres in the RSS file (DEFAULT)
        #                       (4) "none" : No sky subtraction is performed
        #                       (5) "1Dfit": Using an external 1D sky spectrum, fits sky lines in both the sky spectrum AND all the fibres
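        # Usage sketch for the options above (hypothetical file names and variables):
        #   KOALA_RSS("obj.fits", sky_method="self", n_sky=50)                   # option (3)
        #   KOALA_RSS("obj.fits", sky_method="1D", sky_spectrum=sky1d,
        #             scale_sky_1D=1.0)                                          # option (1)
        #   KOALA_RSS("obj.fits", sky_method="2D", sky_rss=sky_frame)            # option (2)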
if sky_method != "none" and is_sky == False:
plot_integrated_fibre_again = plot_integrated_fibre_again + 1
# (5)
if sky_method == "1Dfit":
print("\n> Fitting sky lines in both a provided sky spectrum AND all the fibres")
print(" This process takes ~20 minutes for 385R!\n")
if scale_sky_1D != 0:
print(" Sky spectrum scaled by {}".format(scale_sky_1D))
sky = np.array(sky_spectrum) * scale_sky_1D
print(" Sky spectrum provided = {}".format(sky))
self.sky_emission = sky
self.fit_and_substract_sky_spectrum(
sky,
brightest_line_wavelength=brightest_line_wavelength,
brightest_line=brightest_line,
maxima_sigma=3.0,
ymin=-50,
ymax=1000,
wmin=0,
wmax=0,
auto_scale_sky=auto_scale_sky,
warnings=False,
verbose=False,
plot=False,
fig_size=12,
fibre=fibre,
)
# (1) If a single sky_spectrum is provided:
if sky_method == "1D":
if sky_spectrum[0] != 0:
print("\n> Sustracting the sky using the sky spectrum provided, checking the scale OBJ/SKY...")
if scale_sky_1D == 0:
self.sky_emission = scale_sky_spectrum(
self.wavelength,
sky_spectrum,
self.intensity_corrected,
cut_sky=cut_sky,
fmax=fmax,
fmin=fmin,
fibre_list=fibre_list,
)
else:
self.sky_emission = sky_spectrum * scale_sky_1D
print(" As requested, we scale the given 1D spectrum by {}".format(scale_sky_1D))
                    if individual_sky_substraction:
                        print("\n  As requested, performing individual sky subtraction in each fibre...")
                    else:
                        print("\n  Subtracting sky from all fibres using the scaled sky spectrum provided...")
# For blue spectra, remove 5578 in the sky spectrum...
if self.valid_wave_min < 5578:
resultado = fluxes(
self.wavelength,
self.sky_emission,
5578,
plot=False,
verbose=False,
) # fmin=-5.0E-17, fmax=2.0E-16,
# resultado = [rms_cont, fit[0], fit_error[0], gaussian_flux, gaussian_flux_error, fwhm, fwhm_error, flux, flux_error, ew, ew_error, spectrum ]
self.sky_emission = resultado[11]
for i in range(self.n_spectra):
# Clean 5578 if needed in RSS data
if self.valid_wave_min < 5578:
resultado = fluxes(
self.wavelength,
self.intensity_corrected[i],
5578,
plot=False,
verbose=False,
) # fmin=-5.0E-17, fmax=2.0E-16,
# resultado = [rms_cont, fit[0], fit_error[0], gaussian_flux, gaussian_flux_error, fwhm, fwhm_error, flux, flux_error, ew, ew_error, spectrum ]
self.intensity_corrected[i] = resultado[11]
if individual_sky_substraction:
# Do this INDIVIDUALLY for each fibre
                            # Progress report every 100 fibres
                            if i % 100 == 0 and i != 0:
                                print("  Subtracting sky in fibre {}...".format(i))
sky_emission = scale_sky_spectrum(
self.wavelength,
sky_spectrum,
self.intensity_corrected,
cut_sky=cut_sky,
fmax=fmax,
fmin=fmin,
fibre_list=[i],
verbose=False,
plot=False,
warnings=False,
)
self.intensity_corrected[i, :] = (
self.intensity_corrected[i, :] - sky_emission
) # sky_spectrum * self.exptime/sky_exptime
else:
self.intensity_corrected[i, :] = (
self.intensity_corrected[i, :] - self.sky_emission
) # sky_spectrum * self.exptime/sky_exptime
if plot:
plt.figure(figsize=(fig_size, fig_size / 2.5))
plt.plot(self.wavelength, sky_spectrum)
plt.minorticks_on()
plt.axvline(x=self.valid_wave_min, color="k", linestyle="--")
plt.axvline(x=self.valid_wave_max, color="k", linestyle="--")
plt.xlim(self.wavelength[0] - 10, self.wavelength[-1] + 10)
plt.title("Sky spectrum provided (Scaled)")
plt.xlabel("Wavelength [$\AA$]")
# plt.show()
# plt.close()
print(" Intensities corrected for sky emission and stored in self.intensity_corrected !")
self.sky_emission = sky_spectrum
else:
print("\n> Sustracting the sky using the sky spectrum requested but any sky spectrum provided !")
sky_method = "self"
n_sky = 50
# (2) If a 2D sky, sky_rss, is provided
if sky_method == "2D": # if np.nanmedian(sky_rss.intensity_corrected) != 0:
if scale_sky_rss != 0:
print("\n> Using sky image provided to substract sky, considering a scale of", scale_sky_rss, "...")
self.sky_emission = scale_sky_rss * sky_rss.intensity_corrected
self.intensity_corrected = (
self.intensity_corrected - self.sky_emission
)
else:
print("\n> Using sky image provided to substract sky, computing the scale using sky lines")
# check scale fibre by fibre
self.sky_emission = copy.deepcopy(sky_rss.intensity_corrected)
scale_per_fibre = np.ones((self.n_spectra))
scale_per_fibre_2 = np.ones((self.n_spectra))
lowlow = 15
lowhigh = 5
highlow = 5
highhigh = 15
if self.grating == "580V":
print(" For 580V we use bright skyline at 5578 AA ...")
sky_line = 5578
sky_line_2 = 0
if self.grating == "1000R":
# print " For 1000R we use skylines at 6300.5 and 6949.0 AA ..." ### TWO LINES GIVE WORSE RESULTS THAN USING ONLY 1...
print(" For 1000R we use skyline at 6949.0 AA ...")
sky_line = 6949.0 # 6300.5
                        lowlow = 22  # for getting a good continuum in 6949.0
lowhigh = 12
highlow = 36
highhigh = 52
sky_line_2 = 0 # 6949.0 #7276.5 fails
                        lowlow_2 = 22  # for getting a good continuum in 6949.0
lowhigh_2 = 12
highlow_2 = 36
highhigh_2 = 52
if sky_line_2 != 0:
print(" ... first checking {} ...".format(sky_line))
for fibre_sky in range(self.n_spectra):
skyline_spec = fluxes(
self.wavelength,
self.intensity_corrected[fibre_sky],
sky_line,
plot=False,
verbose=False,
lowlow=lowlow,
lowhigh=lowhigh,
highlow=highlow,
highhigh=highhigh,
) # fmin=-5.0E-17, fmax=2.0E-16,
# resultado = [rms_cont, fit[0], fit_error[0], gaussian_flux, gaussian_flux_error, fwhm, fwhm_error, flux, flux_error, ew, ew_error, spectrum ]
self.intensity_corrected[fibre_sky] = skyline_spec[11]
skyline_sky = fluxes(
self.wavelength,
self.sky_emission[fibre_sky],
sky_line,
plot=False,
verbose=False,
lowlow=lowlow,
lowhigh=lowhigh,
highlow=highlow,
highhigh=highhigh,
) # fmin=-5.0E-17, fmax=2.0E-16,
scale_per_fibre[fibre_sky] = old_div(skyline_spec[3], skyline_sky[3]) # TODO: get data for 2D and test if can remove
self.sky_emission[fibre_sky] = skyline_sky[11]
if sky_line_2 != 0:
print(" ... now checking {} ...".format(sky_line_2))
for fibre_sky in range(self.n_spectra):
skyline_spec = fluxes(
self.wavelength,
self.intensity_corrected[fibre_sky],
sky_line_2,
plot=False,
verbose=False,
lowlow=lowlow_2,
lowhigh=lowhigh_2,
highlow=highlow_2,
highhigh=highhigh_2,
) # fmin=-5.0E-17, fmax=2.0E-16,
# resultado = [rms_cont, fit[0], fit_error[0], gaussian_flux, gaussian_flux_error, fwhm, fwhm_error, flux, flux_error, ew, ew_error, spectrum ]
self.intensity_corrected[fibre_sky] = skyline_spec[11]
skyline_sky = fluxes(
self.wavelength,
self.sky_emission[fibre_sky],
sky_line_2,
plot=False,
verbose=False,
lowlow=lowlow_2,
lowhigh=lowhigh_2,
highlow=highlow_2,
highhigh=highhigh_2,
) # fmin=-5.0E-17, fmax=2.0E-16,
scale_per_fibre_2[fibre_sky] = (
old_div(skyline_spec[3], skyline_sky[3]) # TODO: get data for 2D and test if can remove
)
self.sky_emission[fibre_sky] = skyline_sky[11]
# Median value of scale_per_fibre, and apply that value to all fibres
if sky_line_2 == 0:
scale_sky_rss = np.nanmedian(scale_per_fibre)
self.sky_emission = self.sky_emission * scale_sky_rss
else:
scale_sky_rss = np.nanmedian(
old_div((scale_per_fibre + scale_per_fibre_2), 2) # TODO: get data for 2D and test if can remove
)
# Make linear fit
scale_sky_rss_1 = np.nanmedian(scale_per_fibre)
scale_sky_rss_2 = np.nanmedian(scale_per_fibre_2)
                        print(
                            "  Median scale for line 1 : {} range [ {} , {} ]".format(
                                scale_sky_rss_1, np.nanmin(scale_per_fibre), np.nanmax(scale_per_fibre)
                            )
                        )
                        print(
                            "  Median scale for line 2 : {} range [ {} , {} ]".format(
                                scale_sky_rss_2, np.nanmin(scale_per_fibre_2), np.nanmax(scale_per_fibre_2)
                            )
                        )
b = old_div((scale_sky_rss_1 - scale_sky_rss_2), (
sky_line - sky_line_2 # TODO: get data for 2D and test if can remove
))
a = scale_sky_rss_1 - b * sky_line
# ,a+b*sky_line,a+b*sky_line_2
print(" Appling linear fit with a = {} b = {} to all fibres in sky image...".format(a, b))
for i in range(self.n_wave):
self.sky_emission[:, i] = self.sky_emission[:, i] * (
a + b * self.wavelength[i]
)
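                        # Numeric sketch with assumed medians: if line 1 at 6300.5 AA gives a
                        # median scale of 1.02 and line 2 at 6949.0 AA gives 0.98, then
                        #   b = (1.02 - 0.98)/(6300.5 - 6949.0) and a = 1.02 - b * 6300.5,
                        # so every wavelength of the sky image gets its own interpolated scale.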
if plot:
plt.figure(figsize=(fig_size, fig_size / 2.5))
label1 = "$\lambda$" + np.str(sky_line)
plt.plot(scale_per_fibre, alpha=0.5, label=label1)
plt.minorticks_on()
plt.ylim(np.nanmin(scale_per_fibre), np.nanmax(scale_per_fibre))
plt.axhline(y=scale_sky_rss, color="k", linestyle="--")
if sky_line_2 == 0:
text = (
"Scale OBJECT / SKY using sky line $\lambda$ {}".format(sky_line))
print(" Scale per fibre in the range [{} , {} ], median value is {}".format(np.nanmin(scale_per_fibre), np.nanmax(scale_per_fibre), scale_sky_rss))
print(" Using median value to scale sky emission provided...")
if sky_line_2 != 0:
                            text = (
                                "Scale OBJECT / SKY using sky lines $\lambda$ {} and $\lambda$ {}".format(sky_line, sky_line_2))
label2 = "$\lambda$ {}".format(sky_line_2)
plt.plot(scale_per_fibre_2, alpha=0.5, label=label2)
plt.axhline(y=scale_sky_rss_1, color="k", linestyle=":")
plt.axhline(y=scale_sky_rss_2, color="k", linestyle=":")
plt.legend(frameon=False, loc=1, ncol=2)
plt.title(text)
plt.xlabel("Fibre")
# plt.show()
# plt.close()
self.intensity_corrected = (
self.intensity_corrected - self.sky_emission
)
# (3) No sky spectrum or image is provided, obtain the sky using the n_sky lowest fibres
if sky_method == "self":
print("\n Using {} lowest intensity fibres to create a sky...".format(n_sky))
self.find_sky_emission(
n_sky=n_sky,
plot=plot,
sky_fibres=sky_fibres,
sky_wave_min=sky_wave_min,
sky_wave_max=sky_wave_max,
)
# print "\n AFTER SKY SUBSTRACTION:"
# self.compute_integrated_fibre(plot=False, warnings=warnings) #title =" - Throughput corrected", text="after throughput correction..."
# count_negative = 0
# for i in range(self.n_spectra):
# if self.integrated_fibre[i] < 0.11 :
# #print " Fibre ",i," has an integrated flux of ", self.integrated_fibre[i]
# count_negative=count_negative+1
# print self.integrated_fibre
# print " Number of fibres with NEGATIVE integrated value AFTER SKY SUBSTRACTION = ", count_negative
# If this RSS is an offset sky, perform a median filter to increase S/N
if is_sky:
print("\n> This RSS file is defined as SKY... applying median filter with window {} ...".format(win_sky))
medfilt_sky = median_filter(
self.intensity_corrected, self.n_spectra, self.n_wave, win_sky=win_sky
)
self.intensity_corrected = copy.deepcopy(medfilt_sky)
print(" Median filter applied, results stored in self.intensity_corrected !")
# Get airmass and correct for extinction AFTER SKY SUBTRACTION
ZD = (self.ZDSTART + self.ZDEND)/2
self.airmass = 1/np.cos(np.radians(ZD))
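        # The airmass above uses the plane-parallel (sec z) approximation with the mean
        # zenith distance of the exposure; e.g. (assumed) ZD = 30 deg gives
        # airmass = 1/cos(30 deg) ~ 1.155.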
self.extinction_correction = np.ones(self.n_wave)
if do_extinction:
self.do_extinction_curve(pth.join(DATA_PATH, "ssoextinct.dat"), plot=plot)
# Check if telluric correction is needed & apply
if telluric_correction[0] != 0:
plot_integrated_fibre_again = plot_integrated_fibre_again + 1
print("\n> Applying telluric correction...")
if plot:
plt.figure(figsize=(fig_size, fig_size / 2.5))
plt.plot(self.wavelength, telluric_correction)
plt.minorticks_on()
plt.axvline(x=self.valid_wave_min, color="k", linestyle="--")
plt.axvline(x=self.valid_wave_max, color="k", linestyle="--")
plt.xlim(self.wavelength[0] - 10, self.wavelength[-1] + 10)
plt.ylim(0.9, 2)
plt.title("Telluric correction")
plt.xlabel("Wavelength [$\AA$]")
# plt.show()
# plt.close()
if plot:
integrated_intensity_sorted = np.argsort(self.integrated_fibre)
region = [
integrated_intensity_sorted[-1],
integrated_intensity_sorted[0],
]
print(" Example of telluric correction using fibres {} and {} :".format(region[0], region[1]))
plt.figure(figsize=(fig_size, fig_size / 2.5))
plt.plot(
self.wavelength,
self.intensity_corrected[region[0]],
color="r",
alpha=0.3,
)
plt.plot(
self.wavelength,
self.intensity_corrected[region[1]],
color="r",
alpha=0.3,
)
for i in range(self.n_spectra):
self.intensity_corrected[i, :] = (
self.intensity_corrected[i, :] * telluric_correction
)
if plot:
plt.plot(
self.wavelength,
self.intensity_corrected[region[0]],
color="b",
alpha=0.5,
)
plt.plot(
self.wavelength,
self.intensity_corrected[region[1]],
color="g",
alpha=0.5,
)
plt.minorticks_on()
plt.axvline(x=self.valid_wave_min, color="k", linestyle="--")
plt.axvline(x=self.valid_wave_max, color="k", linestyle="--")
plt.xlim(self.wavelength[0] - 10, self.wavelength[-1] + 10)
plt.ylim(
np.nanmin(self.intensity_corrected[region[1]]),
np.nanmax(self.intensity_corrected[region[0]]),
) # CHECK THIS AUTOMATICALLY
plt.xlabel("Wavelength [$\AA$]")
# plt.show()
# plt.close()
# Check if identify emission lines is requested & do
if id_el:
if brightest_line_wavelength == 0:
self.el = self.identify_el(
high_fibres=high_fibres,
brightest_line=brightest_line,
cut=cut,
verbose=True,
plot=plot_id_el,
fibre=0,
broad=broad,
)
print("\n Emission lines identified saved in self.el !!")
else:
                brightest_line_rest_wave = 6562.82  # NOTE: rest wavelength hard-coded for H-alpha
                print("\n  As given, line {} at rest wavelength = {} is observed at {}".format(brightest_line, brightest_line_rest_wave, brightest_line_wavelength))
self.el = [
[brightest_line],
[brightest_line_rest_wave],
[brightest_line_wavelength],
[7.2],
]
                # self.el = [peaks_name, peaks_rest, p_peaks_l, p_peaks_fwhm]
else:
self.el = [[0], [0], [0], [0]]
# Check if id_list provided
if id_list[0] != 0:
if id_el:
print("\n> Checking if identified emission lines agree with list provided")
# Read list with all emission lines to get the name of emission lines
emission_line_file = "data/lineas_c89_python.dat"
el_center, el_name = read_table(emission_line_file, ["f", "s"])
# Find brightest line to get redshift
for i in range(len(self.el[0])):
if self.el[0][i] == brightest_line:
obs_wave = self.el[2][i]
redshift = (self.el[2][i] - self.el[1][i])/self.el[1][i]
print(" Brightest emission line {} foud at {} , redshift = {}".format(brightest_line, obs_wave, redshift))
el_identified = [[], [], [], []]
n_identified = 0
for line in id_list:
id_check = 0
for i in range(len(self.el[1])):
if line == self.el[1][i]:
if verbose:
print(" Emission line {} {} has been identified".format(self.el[0][i], self.el[1][i]))
n_identified = n_identified + 1
id_check = 1
el_identified[0].append(self.el[0][i]) # Name
el_identified[1].append(self.el[1][i]) # Central wavelength
el_identified[2].append(
self.el[2][i]
) # Observed wavelength
el_identified[3].append(self.el[3][i]) # "FWHM"
if id_check == 0:
for i in range(len(el_center)):
if line == el_center[i]:
el_identified[0].append(el_name[i])
print(" Emission line {} {} has NOT been identified, adding...".format(el_name[i], line))
el_identified[1].append(line)
el_identified[2].append(line * (redshift + 1))
el_identified[3].append(4 * broad)
self.el = el_identified
print(" Number of emission lines identified = {} of a total of {} provided. self.el updated accordingly".format(n_identified, len(id_list)))
else:
print("\n> List of emission lines provided but no identification was requested")
# Clean sky residuals if requested
if clean_sky_residuals:
plot_integrated_fibre_again = plot_integrated_fibre_again + 1
self.clean_sky_residuals(
extra_w=extra_w,
step=step_csr,
dclip=dclip,
verbose=verbose,
fibre=fibre,
wave_min=valid_wave_min,
wave_max=valid_wave_max,
)
# set_data was till here... -------------------------------------------------------------------
if fibre != 0:
plot_integrated_fibre_again = 0
# Plot corrected values
if plot == True and rss_clean == False: # plot_integrated_fibre_again > 0 :
self.compute_integrated_fibre(
plot=plot,
title=" - Intensities Corrected",
warnings=warnings,
text="after all corrections have been applied...",
valid_wave_min=valid_wave_min,
valid_wave_max=valid_wave_max,
correct_negative_sky=correct_negative_sky,
)
integrated_intensity_sorted = np.argsort(self.integrated_fibre)
region = []
for fibre_ in range(high_fibres):
region.append(integrated_intensity_sorted[-1 - fibre_])
print("\n> Checking results using {} fibres with the highest integrated intensity".format(high_fibres))
print(" which are : {}".format(region))
plt.figure(figsize=(fig_size, fig_size / 2.5))
I = np.nansum(self.intensity[region], axis=0)
plt.plot(self.wavelength, I, "r-", label="Uncorrected", alpha=0.3)
Ic = np.nansum(self.intensity_corrected[region], axis=0)
plt.axhline(y=0, color="k", linestyle=":")
plt.plot(self.wavelength, Ic, "g-", label="Corrected", alpha=0.4)
plt.ylabel("Flux")
plt.xlabel("Wavelength [$\AA$]")
plt.minorticks_on()
plt.xlim(self.wavelength[0] - 10, self.wavelength[-1] + 10)
plt.axvline(x=self.valid_wave_min, color="k", linestyle="--")
plt.axvline(x=self.valid_wave_max, color="k", linestyle="--")
yy1 = np.nanpercentile(Ic, 0)
yy2 = np.nanpercentile(Ic, 99)
rango = yy2 - yy1
plt.ylim(yy1 - rango * 0.05, yy2)
plt.title("{} - Combined spectrum - {} fibres with highest intensity".format(self.object, high_fibres))
plt.legend(frameon=False, loc=4, ncol=2)
# plt.show()
# plt.close()
region = []
for fibre_ in range(high_fibres):
region.append(integrated_intensity_sorted[fibre_])
print("\n> Checking results using {} fibres with the lowest integrated intensity".format(high_fibres))
print(" which are : {}".format(region))
plt.figure(figsize=(fig_size, fig_size / 2.5))
I = np.nansum(self.intensity[region], axis=0)
plt.plot(self.wavelength, I, "r-", label="Uncorrected", alpha=0.3)
Ic = np.nansum(self.intensity_corrected[region], axis=0)
I_ymin = np.nanmin([np.nanmin(I), np.nanmin(Ic)])
I_ymax = np.nanmax([np.nanmax(I), np.nanmax(Ic)])
I_med = np.nanmedian(Ic)
I_rango = I_ymax - I_ymin
plt.axhline(y=0, color="k", linestyle=":")
plt.plot(self.wavelength, Ic, "g-", label="Corrected", alpha=0.4)
plt.ylabel("Flux")
plt.xlabel("Wavelength [$\AA$]")
plt.minorticks_on()
plt.xlim(self.wavelength[0] - 10, self.wavelength[-1] + 10)
plt.axvline(x=self.valid_wave_min, color="k", linestyle="--")
plt.axvline(x=self.valid_wave_max, color="k", linestyle="--")
# plt.ylim([I_ymin-I_rango/18,I_ymax-I_rango*0.65])
plt.ylim([I_med - I_rango * 0.65, I_med + I_rango * 0.65])
plt.title("{} - Combined spectrum - {} fibres with lowest intensity".format(self.object, high_fibres))
plt.legend(frameon=False, loc=4, ncol=2)
# plt.show()
# plt.close()
# Plot RSS_image
if plot:
self.RSS_image()
if rss_clean:
self.RSS_image()
# Print summary and information from header
print("\n> Summary of reading rss file ''{}'' :".format(filename))
print("\n This is a KOALA '{}' file, using grating '{}' in AAOmega".format(AAOmega_Arm, self.grating))
print(" Object: {}".format(self.object))
print(" Field of view: {} (spaxel size = {} arcsec)".format(field, self.spaxel_size))
print(" Center position: (RA, DEC) = ({:.3f}, {:.3f}) degrees".format(
self.RA_centre_deg, self.DEC_centre_deg
))
print(" Field covered [arcsec] = {:.1f} x {:.1f}".format(
self.RA_segment + self.spaxel_size, self.DEC_segment + self.spaxel_size
))
print(" Position angle (PA) = {:.1f} degrees".format(self.PA))
print(" ")
if rss_clean:
print(" This was a CLEAN RSS file, no correction was applied!")
print(" Values stored in self.intensity_corrected are the same that those in self.intensity")
else:
if flat != "":
print(" Intensities divided by the given flatfield")
if apply_throughput:
print(" Intensities corrected for throughput !")
else:
print(" Intensities NOT corrected for throughput")
if correct_ccd_defects == True and correct_high_cosmics == True:
print(" Intensities corrected for high cosmics and CCD defects !")
if correct_ccd_defects == True and correct_high_cosmics == False:
print(" Intensities corrected for CCD defects (but NOT for high cosmics) !")
if correct_ccd_defects == False and correct_high_cosmics == False:
print(" Intensities NOT corrected for high cosmics and CCD defects")
if sol[0] != 0:
print(" All fibres corrected for small wavelength shifts using wavelength solution provided!")
else:
if fix_wavelengths:
print(" Wavelengths corrected for small shifts using Gaussian fit to selected bright skylines in all fibres!")
else:
print(" Wavelengths NOT corrected for small shifts")
if is_sky:
print(" This is a SKY IMAGE, median filter with window {} applied !".format(win_sky))
else:
if sky_method == "none":
print(" Intensities NOT corrected for sky emission")
if sky_method == "self":
print(" Intensities corrected for sky emission using {} spaxels with lowest values !".format(n_sky))
if sky_method == "1D":
print(" Intensities corrected for sky emission using (scaled) spectrum provided ! ")
if sky_method == "1Dfit":
print(" Intensities corrected for sky emission fitting Gaussians to both 1D sky spectrum and each fibre ! ")
if sky_method == "2D":
print(" Intensities corrected for sky emission using sky image provided scaled by {} !".format(scale_sky_rss))
if telluric_correction[0] != 0:
print(" Intensities corrected for telluric absorptions !")
else:
print(" Intensities NOT corrected for telluric absorptions")
if do_extinction:
print(" Intensities corrected for extinction !")
else:
print(" Intensities NOT corrected for extinction")
if correct_negative_sky:
print(" Intensities CORRECTED (if needed) for negative integrate flux values!")
if id_el:
print(" ", len(
self.el[0]
), "emission lines identified and stored in self.el !")
print(" ", self.el[0])
if clean_sky_residuals == True and fibre == 0:
print(" Intensities cleaned for sky residuals !")
if clean_sky_residuals == True and fibre != 0:
print(" Only fibre {} has been corrected for sky residuals".format(fibre))
if clean_sky_residuals == False:
print(" Intensities NOT corrected for sky residuals")
print(" All applied corrections are stored in self.intensity_corrected !")
if save_rss_to_fits_file != "":
save_rss_fits(self, fits_file=save_rss_to_fits_file)
print("\n> KOALA RSS file read !")
# -----------------------------------------------------------------------------
# INTERPOLATED CUBE CLASS
# -----------------------------------------------------------------------------
class Interpolated_cube(object): # TASK_Interpolated_cube
"""
Constructs a cube by accumulating RSS with given offsets.
"""
# -----------------------------------------------------------------------------
def __init__(
self,
RSS,
pixel_size_arcsec,
kernel_size_arcsec,
centre_deg=[],
size_arcsec=[],
aligned_coor=False,
plot=False,
flux_calibration=[0],
zeros=False,
ADR=False,
force_ADR=False,
offsets_files="",
offsets_files_position="",
shape=[],
rss_file="",
warnings=False,
): # Angel added aligned_coor 6 Sep, flux_calibration, zeros 27 Oct;
# added ADR 28 Feb offsets_files, shape for defining shape of cube
# warnings (when cubing) added 13 Jan 2019
"""
Parameters
----------
RSS
pixel_size_arcsec
kernel_size_arcsec
centre_deg
size_arcsec
aligned_coor
plot
flux_calibration
zeros
ADR
force_ADR
offsets_files
offsets_files_position
shape
rss_file
warnings
"""
self.RSS = RSS
self.n_wave = RSS.n_wave
self.pixel_size_arcsec = pixel_size_arcsec
self.kernel_size_arcsec = kernel_size_arcsec
self.kernel_size_pixels = (
float(kernel_size_arcsec/pixel_size_arcsec)
) # must be a float number!
self.wavelength = RSS.wavelength
self.description = RSS.description + " - CUBE"
self.object = RSS.object
self.PA = RSS.PA
self.grating = RSS.grating
self.CRVAL1_CDELT1_CRPIX1 = RSS.CRVAL1_CDELT1_CRPIX1
self.total_exptime = RSS.exptime
self.rss_list = RSS.rss_list
self.RA_segment = RSS.RA_segment
self.offsets_files = offsets_files # Offsets between files when align cubes
self.offsets_files_position = (
offsets_files_position # Position of this cube when aligning
)
self.valid_wave_min = RSS.valid_wave_min
self.valid_wave_max = RSS.valid_wave_max
self.seeing = 0.0
self.flux_cal_step = 0.0
self.flux_cal_min_wave = 0.0
self.flux_cal_max_wave = 0.0
if zeros:
print("\n> Creating empty cube using information provided in rss file: ")
print(" {}".format(self.description))
else:
print("\n> Creating cube from file rss file: {}".format(self.description))
print(" Pixel size = {} arcsec".format(self.pixel_size_arcsec))
print(" kernel size = {} arcsec".format(self.kernel_size_arcsec))
# centre_deg = [RA,DEC] if we need to give new RA, DEC centre
if len(centre_deg) == 2:
self.RA_centre_deg = centre_deg[0]
self.DEC_centre_deg = centre_deg[1]
else:
self.RA_centre_deg = RSS.RA_centre_deg
self.DEC_centre_deg = RSS.DEC_centre_deg
if aligned_coor == True:
self.xoffset_centre_arcsec = (
self.RA_centre_deg - RSS.ALIGNED_RA_centre_deg
) * 3600.0
self.yoffset_centre_arcsec = (
self.DEC_centre_deg - RSS.ALIGNED_DEC_centre_deg
) * 3600.0
if zeros == False:
print(" Using ALIGNED coordenates for centering cube...")
else:
self.xoffset_centre_arcsec = (
self.RA_centre_deg - RSS.RA_centre_deg
) * 3600.0
self.yoffset_centre_arcsec = (
self.DEC_centre_deg - RSS.DEC_centre_deg
) * 3600.0
if len(size_arcsec) == 2:
self.n_cols = np.int((size_arcsec[0]/self.pixel_size_arcsec)) + 2 * np.int(
(self.kernel_size_arcsec/self.pixel_size_arcsec)
)
self.n_rows = np.int((size_arcsec[1]/self.pixel_size_arcsec)) + 2 * np.int(
(self.kernel_size_arcsec/self.pixel_size_arcsec)
)
else:
self.n_cols = (
2
* (
np.int(
(np.nanmax(
np.abs(RSS.offset_RA_arcsec - self.xoffset_centre_arcsec)
)/self.pixel_size_arcsec)
)
+ np.int(self.kernel_size_pixels)
)
+ 3
) # -3 ### +1 added by Angel 25 Feb 2018 to put center in center
self.n_rows = (
2
* (
np.int(
(np.nanmax(
np.abs(RSS.offset_DEC_arcsec - self.yoffset_centre_arcsec)
)/self.pixel_size_arcsec)
)
+ np.int(self.kernel_size_pixels)
)
+ 3
) # -3 ### +1 added by Angel 25 Feb 2018 to put center in center
if self.n_cols % 2 != 0:
self.n_cols += 1 # Even numbers to have [0,0] in the centre
if self.n_rows % 2 != 0:
self.n_rows += 1
# If we define a specific shape
if len(shape) == 2:
self.n_rows = shape[0]
self.n_cols = shape[1]
# Define zeros
self._weighted_I = np.zeros((self.n_wave, self.n_rows, self.n_cols))
self._weight = np.zeros_like(self._weighted_I)
self.flux_calibration = np.zeros(self.n_wave)
# self.offset_from_center_x_arcsec = 0.
# self.offset_from_center_y_arcsec = 0.
if zeros:
self.data = np.zeros_like(self._weighted_I)
else:
print("\n Smooth cube, (RA, DEC)_centre = ({}, {}) degree".format(
self.RA_centre_deg, self.DEC_centre_deg
))
print(" Size = {} columns (RA) x {} rows (DEC); {:.2f} x {:.2f} arcsec".format(
self.n_cols,
self.n_rows,
(self.n_cols + 1) * pixel_size_arcsec,
(self.n_rows + 1) * pixel_size_arcsec,
))
sys.stdout.write(" Adding {} spectra... ".format(RSS.n_spectra))
sys.stdout.flush()
output_every_few = np.sqrt(RSS.n_spectra) + 1
next_output = -1
for i in range(RSS.n_spectra):
if i > next_output:
sys.stdout.write("\b" * 6)
sys.stdout.write("{:5.2f}%".format(i * 100.0 / RSS.n_spectra))
sys.stdout.flush()
next_output = i + output_every_few
offset_rows = ((
RSS.offset_DEC_arcsec[i] - self.yoffset_centre_arcsec
)/pixel_size_arcsec)
offset_cols = ((
-RSS.offset_RA_arcsec[i] + self.xoffset_centre_arcsec
)/pixel_size_arcsec)
corrected_intensity = RSS.intensity_corrected[i]
self.add_spectrum(
corrected_intensity, offset_rows, offset_cols, warnings=warnings
)
self.data = self._weighted_I/self._weight
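            # The division above turns the accumulated kernel-weighted sums into
            # a weighted mean per voxel: each (wave, row, col) value is the mean
            # of all overlapping fibre spectra, weighted by kernel overlap.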
self.trace_peak(plot=plot)
# Check flux calibration
if np.nanmedian(flux_calibration) == 0:
fcal = False
else:
self.flux_calibration = flux_calibration
fcal = True
            # Apply the flux calibration per wavelength to every spaxel at once,
            # using broadcasting instead of a double loop over (x, y). Only done
            # when a calibration was provided, to avoid dividing by the zeros array.
            if fcal:
                self.data = (
                    self.data
                    / self.flux_calibration[:, np.newaxis, np.newaxis]
                    / 1e16
                    / self.RSS.exptime
                )
# plt.plot(self.wavelength,self.data[:,x,y]) #
# ylabel="Flux [ erg cm$^{-2}$ s$^{-1}$ A$^{-1}$]"
# Correct for Atmospheric Differential Refraction (ADR) if requested
if ADR:
self.ADR_correction(plot=plot, force_ADR=force_ADR)
else:
print("\n> Data NO corrected for Atmospheric Differential Refraction (ADR).")
# Get integrated maps (all waves and valid range), locate peaks, plots
self.get_integrated_map_and_plot(plot=plot, fcal=fcal)
# For calibration stars, we get an integrated star flux and a seeing
self.integrated_star_flux = np.zeros_like(self.wavelength)
if fcal:
print("\n> Absolute flux calibration included in this interpolated cube.")
else:
print("\n> This interpolated cube does not include an absolute flux calibration.")
print("> Interpolated cube done!\n")
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
def ADR_correction(self, plot=True, force_ADR=False):
"""
        Correct for Atmospheric Differential Refraction (ADR)
Parameters
----------
plot
force_ADR
Returns
-------
"""
self.data_ADR = copy.deepcopy(self.data)
do_ADR = True
# First we check if it is needed (unless forced)...
if (
self.ADR_x_max < self.pixel_size_arcsec/2
and self.ADR_y_max < self.pixel_size_arcsec/2
):
print("\n> Atmospheric Differential Refraction (ADR) correction is NOT needed.")
print(" The computed max ADR values ({:.2f},{:.2f}) are smaller than half the pixel size of {:.2f} arcsec".format(
self.ADR_x_max, self.ADR_y_max, self.pixel_size_arcsec
))
do_ADR = False
if force_ADR:
print(' However we proceed to do the ADR correction as indicated: "force_ADR = True" ...')
do_ADR = True
if do_ADR:
print("\n> Correcting for Atmospheric Differential Refraction (ADR)...")
sys.stdout.flush()
output_every_few = np.sqrt(self.n_wave) + 1
next_output = -1
for l in range(self.n_wave):
if l > next_output:
sys.stdout.write("\b" * 36)
sys.stdout.write(
" Moving plane {:5}/{:5}... {:5.2f}%".format(
l, self.n_wave, l * 100.0 / self.n_wave
)
)
sys.stdout.flush()
next_output = l + output_every_few
tmp = copy.deepcopy(self.data_ADR[l, :, :])
mask = copy.deepcopy(tmp) * 0.0
mask[np.where(np.isnan(tmp))] = 1 # make mask where Nans are
kernel = Gaussian2DKernel(5)
tmp_nonan = interpolate_replace_nans(tmp, kernel)
# need to see if there are still nans. This can happen in the padded parts of the grid
# where the kernel is not large enough to cover the regions with NaNs.
if np.isnan(np.sum(tmp_nonan)):
tmp_nonan = np.nan_to_num(tmp_nonan)
tmp_shift = shift(
tmp_nonan,
[
(-2 * self.ADR_y[l]/self.pixel_size_arcsec),
(-2 * self.ADR_x[l]/self.pixel_size_arcsec),
],
cval=np.nan,
)
mask_shift = shift(
mask,
[
(-2 * self.ADR_y[l]/self.pixel_size_arcsec),
(-2 * self.ADR_x[l]/self.pixel_size_arcsec),
],
cval=np.nan,
)
tmp_shift[mask_shift > 0.5] = np.nan
self.data_ADR[l, :, :] = copy.deepcopy(tmp_shift)
# print(l,tmp.shape,2*self.ADR_y[l],2*self.ADR_x[l],np.sum(tmp_nonan),np.sum(tmp),np.sum(tmp_shift))
# for y in range(self.n_rows):
# for x in range(self.n_cols):
# # mal = 0
# if np.int(np.round(x+2*self.ADR_x[l]/self.pixel_size_arcsec)) < self.n_cols :
# if np.int(np.round(y+2*self.ADR_y[l]/self.pixel_size_arcsec)) < self.n_rows :
# # print self.data.shape,x,"->",np.int(np.round(x+self.ADR_x[i]/self.pixel_size_arcsec))," ",y,"->",np.int(np.round(y+self.ADR_y[i]/self.pixel_size_arcsec))
# self.data_ADR[l,y,x]=self.data[l, np.int(np.round(y+2*self.ADR_y[l]/self.pixel_size_arcsec )), np.int(np.round(x+2*self.ADR_x[l]/self.pixel_size_arcsec)) ]
# else: mal = 1
# else: mal = 1
# if mal == 1:
# if l == 0 : print self.data.shape,x,"->",np.int(np.round(x+self.ADR_x[i]/self.pixel_size_arcsec))," ",y,"->",np.int(np.round(y+self.ADR_y[i]/self.pixel_size_arcsec))," bad data !"
# Check values tracing ADR data ...
self.trace_peak(ADR=True, plot=plot)
# SAVE DATA !!!!
# In prep...
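        # Minimal sketch of the per-plane operation above (for reference,
        # using scipy.ndimage's shift and the astropy.convolution helpers
        # already called in this method):
        #
        #   kernel = Gaussian2DKernel(5)
        #   plane = interpolate_replace_nans(plane, kernel)    # patch NaNs
        #   dy = -2 * ADR_y[l] / pixel_size_arcsec             # offset [pix]
        #   dx = -2 * ADR_x[l] / pixel_size_arcsec
        #   plane = shift(plane, [dy, dx], cval=np.nan)        # move the plane
        #
        # The NaN mask is shifted alongside the data so that interpolated
        # pixels can be re-masked after the shift.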
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
def get_integrated_map_and_plot(
self, min_wave=[0], max_wave=[0], plot=True, fcal=False
): # CHECK
"""
Integrated map and plot
Parameters
----------
min_wave
max_wave
plot
fcal
Returns
-------
"""
# Integrated map between good wavelengths
if min_wave == [0]:
min_wave = self.valid_wave_min
if max_wave == [0]:
max_wave = self.valid_wave_max
self.integrated_map_all = np.nansum(self.data, axis=0)
self.integrated_map = np.nansum(
self.data[
np.searchsorted(self.wavelength, min_wave): np.searchsorted(
self.wavelength, max_wave
)
],
axis=0,
)
# Search for peak of emission in integrated map and compute offsets from centre
self.max_y, self.max_x = np.unravel_index(
self.integrated_map.argmax(), self.integrated_map.shape
)
self.spaxel_RA0 = np.int(self.n_cols/2) + 1 # Using np.int for readability
self.spaxel_DEC0 = np.int(self.n_rows/2) + 1 # Using np.int for readability
self.offset_from_center_x_arcsec_integrated = (
self.max_x - self.spaxel_RA0 + 1
) * self.pixel_size_arcsec # Offset from center using INTEGRATED map
self.offset_from_center_y_arcsec_integrated = (
self.max_y - self.spaxel_DEC0 + 1
) * self.pixel_size_arcsec # Offset from center using INTEGRATED map
if plot:
self.plot_spectrum_integrated_cube(fcal=fcal)
self.plot_spectrum_cube(self.max_y, self.max_x, fcal=fcal)
print("\n> Created integrated map between {:5.2f} and {:5.2f}.".format(
min_wave, max_wave
))
print(" The peak of the emission in integrated image is in spaxel [ {} , {} ]".format(self.max_x, self.max_y))
print(" The peak of the emission tracing all wavelengths is in spaxel [ {} , {} ]".format(
np.round(self.x_peak_median, 2), np.round(self.y_peak_median, 2)))
self.offset_from_center_x_arcsec_tracing = (
self.x_peak_median - self.spaxel_RA0 + 1
) * self.pixel_size_arcsec # Offset from center using INTEGRATED map
self.offset_from_center_y_arcsec_tracing = (
self.y_peak_median - self.spaxel_DEC0 + 1
) * self.pixel_size_arcsec # Offset from center using INTEGRATED map
if plot:
self.plot_map(
norm=colors.Normalize(),
spaxel=[self.max_x, self.max_y],
spaxel2=[self.x_peak_median, self.y_peak_median],
fcal=fcal,
)
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
def add_spectrum(self, intensity, offset_rows, offset_cols, warnings=False):
"""
Add one single spectrum to the datacube
Parameters
----------
intensity: np.array(float)
Spectrum.
offset_rows, offset_cols: float
Offset with respect to the image centre, in pixels.
        warnings: bool
            If True, print a warning when the spectrum falls outside the field of view.
"""
kernel_centre_x = 0.5 * self.n_cols + offset_cols
x_min = int(kernel_centre_x - self.kernel_size_pixels)
x_max = int(kernel_centre_x + self.kernel_size_pixels) + 1
n_points_x = x_max - x_min
x = (
(np.linspace(x_min - kernel_centre_x, x_max - kernel_centre_x, n_points_x)/self.kernel_size_pixels)
)
x[0] = -1.0
x[-1] = 1.0
weight_x = np.diff(((3.0 * x - x ** 3 + 2.0)/4))
kernel_centre_y = 0.5 * self.n_rows + offset_rows
y_min = int(kernel_centre_y - self.kernel_size_pixels)
y_max = int(kernel_centre_y + self.kernel_size_pixels) + 1
n_points_y = y_max - y_min
y = (
(np.linspace(y_min - kernel_centre_y, y_max - kernel_centre_y, n_points_y)/self.kernel_size_pixels)
)
y[0] = -1.0
y[-1] = 1.0
weight_y = np.diff(((3.0 * y - y ** 3 + 2.0)/4))
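        # The weights above are exact per-pixel integrals of the quadratic
        # (Epanechnikov-like) kernel K(u) = 3(1 - u^2)/4 on [-1, 1]: its
        # primitive is (3u - u^3 + 2)/4, so np.diff of that primitive taken at
        # the pixel edges gives the kernel mass falling in each column (x) and
        # each row (y).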
if x_min < 0 or x_max >= self.n_cols or y_min < 0 or y_max >= self.n_rows:
if warnings:
print("**** WARNING **** : Spectra outside field of view: {} {} {}".format(x_min, kernel_centre_x, x_max))
print(" : {} {} {}".format(y_min, kernel_centre_y, y_max))
else:
bad_wavelengths = np.argwhere(np.isnan(intensity))
intensity[bad_wavelengths] = 0.0
ones = np.ones_like(intensity)
ones[bad_wavelengths] = 0.0
self._weighted_I[:, y_min: y_max - 1, x_min: x_max - 1] += (
intensity[:, np.newaxis, np.newaxis]
* weight_y[np.newaxis, :, np.newaxis]
* weight_x[np.newaxis, np.newaxis, :]
)
self._weight[:, y_min: y_max - 1, x_min: x_max - 1] += (
ones[:, np.newaxis, np.newaxis]
* weight_y[np.newaxis, :, np.newaxis]
* weight_x[np.newaxis, np.newaxis, :]
)
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
def plot_spectrum_cube(
self,
x,
y,
lmin=0,
lmax=0,
fmin=1e-30,
fmax=1e30,
fcal=False,
fig_size=10.0,
fig_size_y=0.0,
save_file="",
title="",
z=0.0,
): # Angel added 8 Sep
"""
Plot spectrum of a particular spaxel.
Parameters
----------
x, y:
            coordinates of the spaxel whose spectrum is shown.
fcal:
Use flux calibration, default fcal=False.\n
If fcal=True, cube.flux_calibration is used.
save_file:
(Optional) Save plot in file "file.extension"
fig_size:
Size of the figure (in x-axis), default: fig_size=10
Example
-------
>>> cube.plot_spectrum_cube(20, 20, fcal=True)
"""
if np.isscalar(x):
if fcal == False:
spectrum = self.data[:, x, y]
ylabel = "Flux [relative units]"
else:
spectrum = self.data[:, x, y] * 1e16 # /self.flux_calibration / 1E16
# ylabel="Flux [ 10$^{-16}$ * erg cm$^{-2}$ s$^{-1}$ A$^{-1}$]"
ylabel = "Flux [ 10$^{-16}$ erg cm$^{-2}$ s$^{-1}$ A$^{-1}$]"
else:
print(" Adding spaxel 1 = [ {} , {} ]".format(x[0], y[0]))
spectrum = self.data[:, x[0], y[0]]
for i in range(len(x) - 1):
spectrum = spectrum + self.data[:, x[i + 1], y[i + 1]]
print(" Adding spaxel {} = [ {} , {}]".format(i + 2, x[i + 1],[i + 1]))
ylabel = "Flux [relative units]"
if fcal:
spectrum = (spectrum/self.flux_calibration)/1e16
ylabel = "Flux [ erg cm$^{-2}$ s$^{-1}$ A$^{-1}$]"
# Set limits
if fmin == 1e-30:
fmin = np.nanmin(spectrum)
if fmax == 1e30:
fmax = np.nanmax(spectrum)
if lmin == 0:
lmin = self.wavelength[0]
if lmax == 0:
lmax = self.wavelength[-1]
if fig_size_y == 0.0:
fig_size_y = fig_size / 3.0
plt.figure(figsize=(fig_size, fig_size_y))
plt.plot(self.wavelength, spectrum)
plt.minorticks_on()
plt.ylim(fmin, fmax)
plt.xlim(lmin, lmax)
if title == "":
title = "Spaxel ({} , {}) in {}".format(x, y, self.description)
plt.title(title)
plt.xlabel("Wavelength [$\AA$]")
plt.ylabel(ylabel)
# Identify lines
if z != 0:
elines = [
3727.00,
3868.75,
3967.46,
3889.05,
4026.0,
4068.10,
4101.2,
4340.47,
4363.21,
4471.48,
4658.10,
4686.0,
4711.37,
4740.16,
4861.33,
4958.91,
5006.84,
5197.82,
6300.30,
6312.10,
6363.78,
6548.03,
6562.82,
6583.41,
6678.15,
6716.47,
6730.85,
7065.28,
7135.78,
7281.35,
7320,
7330,
]
# elines=[3727.00, 3868.75, 3967.46, 3889.05, 4026., 4068.10, 4101.2, 4340.47, 4363.21, 4471.48, 4658.10, 4861.33, 4958.91, 5006.84, 5197.82, 6300.30, 6312.10, 6363.78, 6548.03, 6562.82, 6583.41, 6678.15, 6716.47, 6730.85, 7065.28, 7135.78, 7320, 7330 ]
for i in elines:
plt.plot([i * (1 + z), i * (1 + z)], [fmin, fmax], "g:", alpha=0.95)
alines = [3934.777, 3969.588, 4308, 5175] # ,4305.61, 5176.7] # POX 4
# alines=[3934.777,3969.588,4308,5170] #,4305.61, 5176.7]
for i in alines:
plt.plot([i * (1 + z), i * (1 + z)], [fmin, fmax], "r:", alpha=0.95)
if save_file == "":
#plt.show()
pass
else:
plt.savefig(save_file)
#plt.close()
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
def plot_spectrum_integrated_cube(
self,
lmin=0,
lmax=0,
fmin=1e-30,
fmax=1e30,
fcal=False,
fig_size=10,
save_file="",
): # Angel added 8 Sep
"""
Plot integrated spectrum
Parameters
----------
fcal:
Use flux calibration, default fcal=False.\n
If fcal=True, cube.flux_calibration is used.
save_file:
(Optional) Save plot in file "file.extension"
fig_size:
Size of the figure (in x-axis), default: fig_size=10
Example
-------
        >>> cube.plot_spectrum_integrated_cube(fcal=True)
"""
spectrum = np.nansum(np.nansum(self.data, axis=1), axis=1)
if fcal == False:
ylabel = "Flux [relative units]"
else:
spectrum = spectrum * 1e16
# ylabel="Flux [ 10$^{-16}$ * erg cm$^{-2}$ s$^{-1}$ A$^{-1}$]"
ylabel = "Flux [ 10$^{-16}$ erg cm$^{-2}$ s$^{-1}$ A$^{-1}$]"
# Set limits
if fmin == 1e-30:
fmin = np.nanmin(spectrum)
if fmax == 1e30:
fmax = np.nanmax(spectrum)
if lmin == 0:
lmin = self.wavelength[0]
if lmax == 0:
lmax = self.wavelength[-1]
plt.figure(figsize=(fig_size, fig_size / 2.5))
plt.plot(self.wavelength, spectrum)
plt.minorticks_on()
plt.ylim(fmin, fmax)
plt.xlim(lmin, lmax)
title = "Integrated spectrum in {}".format(self.description)
plt.title(title)
plt.xlabel("Wavelength [$\AA$]")
plt.ylabel(ylabel)
if save_file == "":
#plt.show()
pass
else:
plt.savefig(save_file)
#plt.close()
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
def plot_weight(
self, norm=colors.Normalize(), cmap="gist_gray", fig_size=10, save_file=""
):
"""
Plot weitgh map."
Example
----------
>>> cube1s.plot_weight()
"""
interpolated_map = np.mean(self._weight, axis=0)
self.plot_map(
interpolated_map,
norm=norm,
fig_size=fig_size,
cmap=cmap,
save_file=save_file,
description=self.description + " - Weight map",
)
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
def plot_wavelength(
self,
wavelength,
w2=0.0,
cmap=fuego_color_map,
fig_size=10,
norm=colors.PowerNorm(gamma=1.0 / 4.0),
save_file="",
fcal=False,
):
"""
Plot map at a particular wavelength or in a wavelength range
Parameters
----------
wavelength: float
wavelength to be mapped.
norm:
Colour scale, default = colors.PowerNorm(gamma=1./4.)
Normalization scale
            Linear scale: norm=colors.Normalize().
            Log scale: norm=colors.LogNorm()
cmap:
Color map used, default cmap=fuego_color_map
Velocities: cmap="seismic"
save_file:
(Optional) Save plot in file "file.extension"
"""
if w2 == 0.0:
interpolated_map = self.data[np.searchsorted(self.wavelength, wavelength)]
description = "{} - {} $\AA$".format(self.description, wavelength)
else:
interpolated_map = np.nansum(
self.data[
np.searchsorted(self.wavelength, wavelength): np.searchsorted(
self.wavelength, w2
)
],
axis=0,
)
description = "{} - Integrating [{}-{}] $\AA$".format(
self.description, wavelength, w2
)
self.plot_map(
mapa=interpolated_map,
norm=norm,
fig_size=fig_size,
cmap=cmap,
save_file=save_file,
description=description,
fcal=fcal,
)
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
def plot_map(
self,
mapa="",
norm=colors.Normalize(),
cmap="fuego",
fig_size=10,
fcal=False,
save_file="",
description="",
contours=True,
clabel=False,
spaxel=0,
spaxel2=0,
spaxel3=0,
):
"""
Show a given map.
Parameters
----------
        mapa: np.array(float)
Map to be plotted. If not given, it plots the integrated map.
norm:
            Normalization scale, default is linear scale.
            Linear scale: norm=colors.Normalize().
Log scale: norm=colors.LogNorm()
Power law: norm=colors.PowerNorm(gamma=1./4.)
cmap: (default cmap="fuego").
Color map used.
Weight: cmap = "gist_gray"
Velocities: cmap="seismic".
Try also "inferno",
spaxel,spaxel2,spaxel3:
[x,y] positions of spaxels to show with a green circle, blue square and red triangle
"""
if description == "":
description = self.description
if mapa == "":
mapa = self.integrated_map
description = description + " - Integrated Map"
fig, ax = plt.subplots(figsize=(fig_size, fig_size))
cax = ax.imshow(
mapa,
origin="lower",
interpolation="none",
norm=norm,
cmap=cmap,
extent=(
-0.5 * self.n_cols * self.pixel_size_arcsec,
0.5 * self.n_cols * self.pixel_size_arcsec,
-0.5 * self.n_rows * self.pixel_size_arcsec,
+0.5 * self.n_rows * self.pixel_size_arcsec,
),
)
if contours:
CS = plt.contour(
mapa,
extent=(
-0.5 * self.n_cols * self.pixel_size_arcsec,
0.5 * self.n_cols * self.pixel_size_arcsec,
-0.5 * self.n_rows * self.pixel_size_arcsec,
+0.5 * self.n_rows * self.pixel_size_arcsec,
),
)
if clabel:
plt.clabel(CS, inline=1, fontsize=10)
ax.set_title(description, fontsize=14)
plt.tick_params(labelsize=12)
plt.xlabel("$\Delta$ RA [arcsec]", fontsize=12)
plt.ylabel("$\Delta$ DEC [arcsec]", fontsize=12)
plt.legend(loc="upper right", frameon=False)
plt.minorticks_on()
plt.grid(which="both", color="white")
# plt.gca().invert_xaxis() #MAMA
if spaxel != 0:
print(" The center of the cube is in spaxel [ {} , {} ]".format(self.spaxel_RA0, self.spaxel_DEC0))
plt.plot([0], [0], "+", ms=13, color="black", mew=4)
plt.plot([0], [0], "+", ms=10, color="white", mew=2)
offset_from_center_x_arcsec = (
spaxel[0] - self.spaxel_RA0 + 1.5
) * self.pixel_size_arcsec
offset_from_center_y_arcsec = (
spaxel[1] - self.spaxel_DEC0 + 1.5
) * self.pixel_size_arcsec
print(" - Green circle: {}, Offset from center [arcsec] : {} {}".format(spaxel, offset_from_center_x_arcsec, offset_from_center_y_arcsec))
plt.plot(
[offset_from_center_x_arcsec],
[offset_from_center_y_arcsec],
"o",
color="green",
ms=7,
)
if spaxel2 != 0:
offset_from_center_x_arcsec = (
spaxel2[0] - self.spaxel_RA0 + 1.5
) * self.pixel_size_arcsec
offset_from_center_y_arcsec = (
spaxel2[1] - self.spaxel_DEC0 + 1.5
) * self.pixel_size_arcsec
print(" - Blue square: {} , Offset from center [arcsec] : {} , {}".format(np.round(spaxel2, 2), np.round(offset_from_center_x_arcsec, 3), np.round(offset_from_center_y_arcsec, 3)))
plt.plot(
[offset_from_center_x_arcsec],
[offset_from_center_y_arcsec],
"s",
color="blue",
ms=7,
)
if spaxel3 != 0:
offset_from_center_x_arcsec = (
spaxel3[0] - self.spaxel_RA0 + 1.5
) * self.pixel_size_arcsec
offset_from_center_y_arcsec = (
spaxel3[1] - self.spaxel_DEC0 + 1.5
) * self.pixel_size_arcsec
print(" - Red triangle: {} , Offset from center [arcsec] : {} , {}".format(np.round(spaxel3, 2), np.round(offset_from_center_x_arcsec, 3), np.round(offset_from_center_y_arcsec, 3)))
plt.plot(
[offset_from_center_x_arcsec],
[offset_from_center_y_arcsec],
"v",
color="red",
ms=7,
)
cbar = fig.colorbar(cax, fraction=0.0457, pad=0.04)
if fcal:
barlabel = "{}".format("Integrated Flux [erg s$^{-1}$ cm$^{-2}$]")
else:
barlabel = "{}".format("Integrated Flux [Arbitrary units]")
cbar.set_label(barlabel, rotation=270, labelpad=20, fontsize=14)
# cbar.ax.set_yticklabels(['< -1', '0', '> 1'])# vertically oriented colorbar
if save_file == "":
#plt.show()
pass
else:
plt.savefig(save_file)
plt.close()
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
def create_map(self, wavelength1, wavelength2, name="NEW_MAP"):
"""
        Create a map by integrating the data cube in a wavelength range.
Parameters
----------
wavelength1, wavelength2: floats
The map will integrate all flux in the range [wavelength1, wavelength2].
        name: string
            String with the name of the map; it should match the variable created here.
Example
-------
>>> a = cube.create_map(6810,6830, "a")
> Created map with name a integrating range [ 6810 , 6830 ]
"""
mapa = np.nansum(
self.data[
np.searchsorted(self.wavelength, wavelength1): np.searchsorted(
self.wavelength, wavelength2
)
],
axis=0,
)
print("\n> Created map with name {} integrating range [ {} , {} ]".format(name, wavelength1, wavelength2))
print(" Data shape {}".format(np.shape(self.data)))
print(" Int map shape {}".format(np.shape(mapa)))
return mapa
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
def trace_peak(
self, edgelow=10, edgehigh=10, plot=False, ADR=False, smoothfactor=2
): # TASK_trace_peak
"""
Parameters
----------
edgelow
edgehigh
plot
ADR
smoothfactor
Returns
-------
"""
print("\n\n> Tracing intensity peak over all wavelengths...")
x = np.arange(self.n_cols)
y = np.arange(self.n_rows)
if ADR:
print(" Checking ADR correction (small jumps are due to pixel size) ...")
tmp = copy.deepcopy(self.data_ADR)
tmp_img = np.nanmedian(tmp, axis=0)
sort = np.sort(tmp_img.ravel())
low_ind = np.where(tmp_img < sort[int(0.8 * len(sort))])
for i in np.arange(len(low_ind[0])):
tmp[:, low_ind[0][i], low_ind[1][i]] = np.nan
weight = np.nan_to_num(tmp) # self.data_ADR)
smoothfactor = 10
else:
tmp = copy.deepcopy(self.data)
tmp_img = np.nanmedian(tmp, axis=0)
sort = np.sort(tmp_img.ravel())
low_ind = np.where(tmp_img < sort[int(0.9 * len(sort))])
# print(low_ind.shape)
for i in np.arange(len(low_ind[0])):
tmp[:, low_ind[0][i], low_ind[1][i]] = np.nan
weight = np.nan_to_num(tmp) # self.data)
# try to median smooth image for better results?
# weight=sig.medfilt(weight,kernel_size=[51,1,1])
# also threshold the image so only the top 80% are used
mean_image = np.nanmean(weight, axis=0)
mean_image /= np.nanmean(mean_image)
weight *= mean_image[np.newaxis, :, :]
xw = x[np.newaxis, np.newaxis, :] * weight
yw = y[np.newaxis, :, np.newaxis] * weight
w = np.nansum(weight, axis=(1, 2))
self.x_peak = np.nansum(xw, axis=(1, 2))/w
self.y_peak = np.nansum(yw, axis=(1, 2))/w
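        # x_peak / y_peak are intensity-weighted centroids per wavelength plane:
        #   x_peak(l) = sum_{x,y} x * w(l, y, x) / sum_{x,y} w(l, y, x)
        # obtained by broadcasting the coordinate vectors against the
        # thresholded weight cube built above.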
self.x_peak_median = np.nanmedian(self.x_peak)
self.y_peak_median = np.nanmedian(self.y_peak)
self.x_peak_median_index = np.nanargmin(
np.abs(self.x_peak - self.x_peak_median)
)
self.y_peak_median_index = np.nanargmin(
np.abs(self.y_peak - self.y_peak_median)
)
wl = self.wavelength
x = (
self.x_peak - self.x_peak[self.x_peak_median_index]
) * self.pixel_size_arcsec
y = (
self.y_peak - self.y_peak[self.y_peak_median_index]
) * self.pixel_size_arcsec
        odd_number = (
            smoothfactor * int((np.sqrt(self.n_wave)/2)) + 1
        )  # Originally, smoothfactor = 2
print(" Using medfilt window = {}".format(odd_number))
# fit, trimming edges
index = np.arange(len(x))
valid_ind = np.where(
(index >= edgelow)
& (index <= len(wl) - edgehigh)
& (~np.isnan(x))
& (~np.isnan(y))
)[0]
valid_wl = wl[valid_ind]
valid_x = x[valid_ind]
wlm = sig.medfilt(valid_wl, odd_number)
wx = sig.medfilt(valid_x, odd_number)
# iteratively clip and refit for WX
maxit = 10
niter = 0
stop = 0
fit_len = 100 # -100
while stop < 1:
# print ' Trying iteration ', niter,"..."
# a2x,a1x,a0x = np.polyfit(wlm, wx, 2)
fit_len_init = copy.deepcopy(fit_len)
if niter == 0:
                fit_index = np.where(wx == wx)[0]
                fit_len = len(fit_index)
sigma_resid = 0.0
if niter > 0:
sigma_resid = median_absolute_deviation(resid)
fit_index = np.where(np.abs(resid) < 4 * sigma_resid)[0]
fit_len = len(fit_index)
try:
p = np.polyfit(wlm[fit_index], wx[fit_index], 2)
pp = np.poly1d(p)
fx = pp(wl)
fxm = pp(wlm)
resid = wx - fxm
# print " Iteration {:2} results in RA: sigma_residual = {:.6f}, fit_len = {:5} fit_len ={:5}".format(niter,sigma_resid,fit_len_init,fit_len)
except Exception:
print(" Skipping iteration {}".format(niter))
if (niter >= maxit) or (fit_len_init == fit_len):
if niter >= maxit:
print(" x: Max iterations, {:2}, reached!")
if fit_len_init == fit_len:
print(" x: All interval fitted in iteration {:2} ! ".format(niter))
stop = 2
niter = niter + 1
# valid_y = y[edgelow:len(wl)-edgehigh]
valid_ind = np.where(
(index >= edgelow)
& (index <= len(wl) - edgehigh)
& (~np.isnan(x))
& (~np.isnan(y))
)[0]
valid_y = y[valid_ind]
wy = sig.medfilt(valid_y, odd_number)
# iteratively clip and refit for WY
maxit = 10
niter = 0
stop = 0
fit_len = -100
while stop < 1:
fit_len_init = copy.deepcopy(fit_len)
if niter == 0:
                fit_index = np.where(wy == wy)[0]
                fit_len = len(fit_index)
sigma_resid = 0.0
if niter > 0:
sigma_resid = median_absolute_deviation(resid)
fit_index = np.where(np.abs(resid) < 4 * sigma_resid)[0]
fit_len = len(fit_index)
try:
p = np.polyfit(wlm[fit_index], wy[fit_index], 2)
pp = np.poly1d(p)
fy = pp(wl)
fym = pp(wlm)
resid = wy - fym
# print " Iteration {:2} results in DEC: sigma_residual = {:.6f}, fit_len = {:5} fit_len ={:5}".format(niter,sigma_resid,fit_len_init,fit_len)
except Exception:
print(" Skipping iteration {}".format(niter))
if (niter >= maxit) or (fit_len_init == fit_len):
if niter >= maxit:
print(" y: Max iterations, {:2}, reached!")
if fit_len_init == fit_len:
print(" y: All interval fitted in iteration {:2} ! ".format(niter))
stop = 2
niter = niter + 1
self.ADR_x = fx
self.ADR_y = fy
self.ADR_x_max = np.nanmax(self.ADR_x) - np.nanmin(self.ADR_x)
self.ADR_y_max = np.nanmax(self.ADR_y) - np.nanmin(self.ADR_y)
ADR_xy = np.sqrt(self.ADR_x ** 2 + self.ADR_y ** 2)
self.ADR_total = np.nanmax(ADR_xy) - np.nanmin(ADR_xy)
if plot:
plt.figure(figsize=(10, 5))
plt.plot(wl, fx, "-g", linewidth=3.5)
plt.plot(wl, fy, "-g", linewidth=3.5)
plt.plot(wl, x, "k.", alpha=0.2)
plt.plot(wl, y, "r.", alpha=0.2)
plt.plot(wl, sig.medfilt(x, odd_number), "k-")
plt.plot(wl, sig.medfilt(y, odd_number), "r-")
hi = np.max([np.nanpercentile(x, 95), np.nanpercentile(y, 95)])
lo = np.min([np.nanpercentile(x, 5), np.nanpercentile(y, 5)])
plt.ylim(lo, hi)
plt.ylabel("$\Delta$ offset [arcsec]")
plt.xlabel("Wavelength [$\AA$]")
plt.title(self.description)
# plt.show()
# plt.close()
print("> Peak coordinates tracing all wavelengths found in spaxel: ({:.2f}, {:.2f})".format(
self.x_peak_median, self.y_peak_median
))
print(" Effect of the ADR : {:.2f} in RA (black), {:.2f} in DEC (red), TOTAL = +/- {:.2f} arcsec".format(
self.ADR_x_max, self.ADR_y_max, self.ADR_total
))
# Check numbers using SMOOTH data
ADR_x_max = np.nanmax(fxm) - np.nanmin(fxm)
ADR_y_max = np.nanmax(fym) - np.nanmin(fym)
ADR_xy = np.sqrt(fxm ** 2 + fym ** 2)
ADR_total = np.nanmax(ADR_xy) - np.nanmin(ADR_xy)
print(" Using SMOOTH values: ")
print(" Effect of the ADR : {:.2f} in RA (black), {:.2f} in DEC (red), TOTAL = +/- {:.2f} arcsec".format(
ADR_x_max, ADR_y_max, ADR_total
))
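    # The clip-and-refit scheme used twice above, written once as a
    # stand-alone sketch (a hypothetical helper, not part of this class;
    # it assumes median_absolute_deviation as imported in this module):
    #
    #   def _clipped_polyfit(w, f, degree, maxit=10, nsigma=4):
    #       idx = np.arange(len(w))
    #       for _ in range(maxit):
    #           p = np.poly1d(np.polyfit(w[idx], f[idx], degree))
    #           resid = f - p(w)
    #           sigma = median_absolute_deviation(resid)
    #           keep = np.where(np.abs(resid) < nsigma * sigma)[0]
    #           if len(keep) == len(idx):
    #               break                    # converged: nothing more clipped
    #           idx = keep
    #       return p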
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
def growth_curve_between(self, min_wave=0, max_wave=0, plot=False, verbose=False):
"""
Compute growth curve in a wavelength range.
Returns r2_growth_curve, F_growth_curve, flux, r2_half_light
Parameters
----------
min_wave, max_wave: floats
wavelength range = [min_wave, max_wave].
plot: boolean
Plot yes/no
Example
-------
        >>> r2_growth_curve, F_growth_curve, flux, r2_half_light = self.growth_curve_between(min_wave, max_wave, plot=True)  # 0,1E30 ??
"""
if min_wave == 0:
min_wave = self.valid_wave_min
if max_wave == 0:
max_wave = self.valid_wave_max
if verbose:
print(" - Calculating growth curve between {} {} :".format(min_wave, max_wave))
index_min = np.searchsorted(self.wavelength, min_wave)
index_max = np.searchsorted(self.wavelength, max_wave)
intensity = np.nanmean(self.data[index_min:index_max, :, :], axis=0)
x_peak = np.median(self.x_peak[index_min:index_max])
y_peak = np.median(self.y_peak[index_min:index_max])
x = np.arange(self.n_cols) - x_peak
y = np.arange(self.n_rows) - y_peak
r2 = np.sum(np.meshgrid(x ** 2, y ** 2), axis=0)
sorted_by_distance = np.argsort(r2, axis=None)
F_growth_curve = []
r2_growth_curve = []
total_flux = 0.0
for spaxel in sorted_by_distance:
index = np.unravel_index(spaxel, (self.n_rows, self.n_cols))
I = intensity[index]
# print spaxel, r2[index], L, total_flux, np.isnan(L)
# if np.isnan(L) == False and L > 0:
if np.isnan(I) == False:
total_flux += I # TODO: Properly account for solid angle...
F_growth_curve.append(total_flux)
r2_growth_curve.append(r2[index])
F_guess = np.max(F_growth_curve)
r2_half_light = np.interp(0.5 * F_guess, F_growth_curve, r2_growth_curve)
self.seeing = np.sqrt(r2_half_light) * self.pixel_size_arcsec
if plot:
r_norm = np.sqrt(np.array(r2_growth_curve)/r2_half_light)
F_norm = np.array(F_growth_curve)/F_guess
print(" Flux guess = {} {} ratio = {}".format(F_guess, np.nansum(intensity), np.nansum(intensity)/F_guess))
print(" Half-light radius: {} arcsec = seeing if object is a star ".format(self.seeing))
print(" Light within 2, 3, 4, 5 half-light radii: {}".format(np.interp([2, 3, 4, 5], r_norm, F_norm)))
plt.figure(figsize=(10, 8))
plt.plot(r_norm, F_norm, "-")
plt.title(
"Growth curve between {} and {} in {}".format(min_wave, max_wave, self.object))
plt.xlabel("Radius [arcsec]")
plt.ylabel("Flux")
plt.axvline(x=self.seeing, color="g", alpha=0.7)
plt.axhline(y=0.5, color="k", linestyle=":", alpha=0.5)
plt.axvline(x=2 * self.seeing, color="k", linestyle=":", alpha=0.2)
plt.axvline(x=3 * self.seeing, color="k", linestyle=":", alpha=0.2)
plt.axvline(x=4 * self.seeing, color="k", linestyle=":", alpha=0.2)
plt.axvline(x=5 * self.seeing, color="r", linestyle="--", alpha=0.2)
# plt.axhline(y=np.interp([2, 3, 4], r_norm, F_norm), color='k', linestyle=':', alpha=0.2)
plt.axhline(
y=np.interp([6], r_norm, F_norm), color="r", linestyle="--", alpha=0.2
)
plt.minorticks_on()
# plt.show()
# plt.close()
return r2_growth_curve, F_growth_curve, F_guess, r2_half_light
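    # Note (sketch): the half-light radius above comes from inverting the
    # monotonic growth curve at half of the total flux,
    #   r2_half_light = np.interp(0.5 * F_guess, F_growth_curve, r2_growth_curve),
    # and self.seeing converts it to arcsec through the pixel size.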
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
def half_light_spectrum(
self, r_max=1, plot=False, smooth=21, min_wave=0, max_wave=0
):
"""
Compute half light spectrum (for r_max=1) or integrated star spectrum (for r_max=5) in a wavelength range.
Parameters
----------
r_max = 1: float
            r_max to integrate, in units of the half-light radius (= seeing if the object is a star; for flux calibration use r_max=5)
min_wave, max_wave: floats
wavelength range = [min_wave, max_wave]
smooth = 21: float
smooth the data
plot: boolean
Plot yes/no
Example
-------
>>> self.half_light_spectrum(5, plot=plot, min_wave=min_wave, max_wave=max_wave)
"""
if min_wave == 0:
min_wave = self.valid_wave_min
if max_wave == 0:
max_wave = self.valid_wave_max
(
r2_growth_curve,
F_growth_curve,
flux,
r2_half_light,
) = self.growth_curve_between(
min_wave, max_wave, plot=True, verbose=True
) # 0,1E30 ??
# print "\n> Computing growth-curve spectrum..."
intensity = []
smooth_x = sig.medfilt(self.x_peak, smooth) # originally, smooth = 11
smooth_y = sig.medfilt(self.y_peak, smooth)
edgelow = (np.abs(self.wavelength - min_wave)).argmin()
edgehigh = (np.abs(self.wavelength - max_wave)).argmin()
valid_wl = self.wavelength[edgelow:edgehigh]
for l in range(self.n_wave): # self.n_wave
# wavelength = self.wavelength[l]
# if l % (self.n_wave/10+1) == 0:
# print " {:.2f} Angstroms (wavelength {}/{})..." \
# .format(wavelength, l+1, self.n_wave)
x = np.arange(self.n_cols) - smooth_x[l]
y = np.arange(self.n_rows) - smooth_y[l]
r2 = np.sum(np.meshgrid(x ** 2, y ** 2), axis=0)
spaxels = np.where(r2 < r2_half_light * r_max ** 2)
intensity.append(np.nansum(self.data[l][spaxels]))
valid_intensity = intensity[edgelow:edgehigh]
valid_wl_smooth = sig.medfilt(valid_wl, smooth)
valid_intensity_smooth = sig.medfilt(valid_intensity, smooth)
if plot:
fig_size = 12
plt.figure(figsize=(fig_size, fig_size / 2.5))
plt.plot(self.wavelength, intensity, "b", alpha=1, label="Intensity")
plt.plot(
valid_wl_smooth,
valid_intensity_smooth,
"r-",
alpha=0.5,
label="Smooth = " + "{}".format(smooth),
)
margen = 0.1 * (np.nanmax(intensity) - np.nanmin(intensity))
plt.ylim(np.nanmin(intensity) - margen, np.nanmax(intensity) + margen)
plt.xlim(np.min(self.wavelength), np.max(self.wavelength))
plt.ylabel("Flux")
plt.xlabel("Wavelength [$\AA$]")
plt.title("Integrated spectrum of {} for r_half_light = {}".format(self.object, r_max))
plt.axvline(x=min_wave, color="k", linestyle="--", alpha=0.5)
plt.axvline(x=max_wave, color="k", linestyle="--", alpha=0.5)
plt.minorticks_on()
plt.legend(frameon=False, loc=1)
# plt.show()
# plt.close()
if r_max == 5:
print(" Saving this integrated star flux in self.integrated_star_flux")
self.integrated_star_flux = np.array(intensity)
return np.array(intensity)
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
def do_response_curve(
self,
filename,
min_wave=0,
max_wave=0,
step=25.0,
fit_degree=3,
exp_time=60,
smooth=0.03,
ha_width=0,
plot=True,
verbose=False,
): # smooth new 5 Mar, smooth=21, now we don't use it
"""
Compute the response curve of a spectrophotometric star.
Parameters
----------
filename: string
filename where the spectrophotometric data are included (e.g. ffeige56.dat)
min_wave, max_wave: floats
wavelength range = [min_wave, max_wave] where the fit is performed
step = 25: float
Step (in A) for smoothing the data
fit_degree = 3: integer
            degree of the polynomial used for the fit (3, 5, 7 or 9).
If fit_degree = 0 it interpolates the data
exp_time = 60: float
Exposition time of the calibration star
smooth = 0.03: float
Smooth value for interpolating the data for fit_degree = 0.
plot: boolean
Plot yes/no
        Example
        -------
        >>> cube.do_response_curve("ffeige56.dat", fit_degree=3)
"""
if min_wave == 0:
min_wave = self.valid_wave_min
if max_wave == 0:
max_wave = self.valid_wave_max
print("\n> Computing response curve for {} using step= {}, in range [ {} , {} ]".format(self.object, step, min_wave, max_wave))
# flux_cal_read in units of ergs/cm/cm/s/A * 10**16
# lambda_cal_read, flux_cal_read, delta_lambda_read = np.loadtxt(filename, usecols=(0,1,3), unpack=True)
lambda_cal_read, flux_cal_read = np.loadtxt(
filename, usecols=(0, 1), unpack=True
)
valid_wl_smooth = np.arange(lambda_cal_read[0], lambda_cal_read[-1], step)
tck_star = interpolate.splrep(lambda_cal_read, flux_cal_read, s=0)
valid_flux_smooth = interpolate.splev(valid_wl_smooth, tck_star, der=0)
valid_wave_min = min_wave
valid_wave_max = max_wave
edgelow = (np.abs(valid_wl_smooth - valid_wave_min)).argmin()
edgehigh = (np.abs(valid_wl_smooth - valid_wave_max)).argmin()
lambda_cal = valid_wl_smooth[edgelow:edgehigh]
flux_cal = valid_flux_smooth[edgelow:edgehigh]
lambda_min = lambda_cal - step
lambda_max = lambda_cal + step
if (
self.flux_cal_step == step
and self.flux_cal_min_wave == min_wave
and self.flux_cal_max_wave == max_wave
):
print(" This has been computed before for step= {} in range [ {} , {} ], using values computed before...".format(step, min_wave, max_wave))
measured_counts = self.flux_cal_measured_counts
else:
measured_counts = np.array(
[
self.fit_Moffat_between(lambda_min[i], lambda_max[i])[0]
if lambda_cal[i] > min_wave
and lambda_cal[i] < max_wave # 6200 #3650 # 7400 #5700
else np.NaN
for i in range(len(lambda_cal))
]
)
self.flux_cal_step = step
self.flux_cal_min_wave = min_wave
self.flux_cal_max_wave = max_wave
self.flux_cal_measured_counts = measured_counts
        _response_curve_ = (
            (measured_counts/flux_cal)/exp_time
        )  # Added exp_time Jan 2019: counts / (ergs/cm/cm/s/A * 10**16) / s
if np.isnan(_response_curve_[0]) == True:
_response_curve_[0] = _response_curve_[
1
] # - (response_curve[2] - response_curve[1])
scale = np.nanmedian(_response_curve_)
# self.integrated_star_flux = self.half_light_spectrum(5, plot=plot, min_wave=min_wave, max_wave=max_wave)
edgelow_ = (np.abs(self.wavelength - lambda_cal[0])).argmin()
edgehigh_ = (np.abs(self.wavelength - lambda_cal[-1])).argmin()
self.response_wavelength = self.wavelength[edgelow_:edgehigh_]
response_wavelength = []
response_curve = []
if ha_width > 0:
skipping = 0
print(" Skipping H-alpha absorption with width ={} A ...".format(ha_width))
for i in range(len(lambda_cal)):
if (
lambda_cal[i] > 6563 - ha_width / 2.0
and lambda_cal[i] < 6563 + ha_width / 2.0
):
# print " Skipping ",lambda_cal[i]
skipping = skipping + 1
else:
response_wavelength.append(lambda_cal[i])
response_curve.append(_response_curve_[i])
print(" ... Skipping a total of {} wavelength points".format(skipping))
else:
response_wavelength = lambda_cal
response_curve = _response_curve_
if fit_degree == 0:
print(" Using interpolated data with smooth = {} for computing the response curve... ".format(smooth))
median_kernel = 151
response_curve_medfilt = sig.medfilt(response_curve, np.int(median_kernel))
interpolated_flat = interpolate.splrep(
response_wavelength, response_curve_medfilt, s=smooth
)
self.response_curve = interpolate.splev(
self.response_wavelength, interpolated_flat, der=0
)
else:
            if fit_degree not in (3, 5, 7, 9):
                print(" We can't use a polynomial of degree {} here, using fit_degree = 3 instead".format(fit_degree))
                fit_degree = 3
if fit_degree == 3:
a3x, a2x, a1x, a0x = np.polyfit(response_wavelength, response_curve, 3)
a4x = 0
a5x = 0
a6x = 0
a7x = 0
a8x = 0
a9x = 0
if fit_degree == 5:
a5x, a4x, a3x, a2x, a1x, a0x = np.polyfit(
response_wavelength, response_curve, 5
)
a6x = 0
a7x = 0
a8x = 0
a9x = 0
if fit_degree == 7:
a7x, a6x, a5x, a4x, a3x, a2x, a1x, a0x = np.polyfit(
response_wavelength, response_curve, 7
)
a8x = 0
a9x = 0
if fit_degree == 9:
a9x, a8x, a7x, a6x, a5x, a4x, a3x, a2x, a1x, a0x = np.polyfit(
response_wavelength, response_curve, 9
)
wlm = self.response_wavelength
self.response_curve = (
a0x
+ a1x * wlm
+ a2x * wlm ** 2
+ a3x * wlm ** 3
+ a4x * wlm ** 4
+ a5x * wlm ** 5
+ a6x * wlm ** 6
+ a7x * wlm ** 7
+ a8x * wlm ** 8
+ a9x * wlm ** 9
        )  # Better: superseded by the iterative filtered fit computed below
# Adapting Matt code for trace peak ----------------------------------
smoothfactor = 2
wl = response_wavelength # response_wavelength
x = response_curve
        odd_number = (
            smoothfactor * int((np.sqrt(len(wl))/2)) - 1
        )  # Originally, smoothfactor = 2
print(" Using medfilt window = {} for fitting...".format(odd_number))
# fit, trimming edges
# index=np.arange(len(x))
# edgelow=0
# edgehigh=1
# valid_ind=np.where((index >= edgelow) & (index <= len(wl)-edgehigh) & (~np.isnan(x)) )[0]
# print valid_ind
# valid_wl = wl[edgelow:-edgehigh] # wl[valid_ind]
# valid_x = x[edgelow:-edgehigh] #x[valid_ind]
# wlm = sig.medfilt(valid_wl, odd_number)
# wx = sig.medfilt(valid_x, odd_number)
wlm = sig.medfilt(wl, odd_number)
wx = sig.medfilt(x, odd_number)
# iteratively clip and refit for WX
maxit = 10
niter = 0
stop = 0
fit_len = 100 # -100
while stop < 1:
# print ' Trying iteration ', niter,"..."
# a2x,a1x,a0x = np.polyfit(wlm, wx, 2)
fit_len_init = copy.deepcopy(fit_len)
if niter == 0:
                fit_index = np.where(wx == wx)[0]
                fit_len = len(fit_index)
sigma_resid = 0.0
if niter > 0:
sigma_resid = median_absolute_deviation(resid)
fit_index = np.where(np.abs(resid) < 4 * sigma_resid)[0]
fit_len = len(fit_index)
try:
p = np.polyfit(wlm[fit_index], wx[fit_index], fit_degree)
pp = np.poly1d(p)
fx = pp(wl)
fxm = pp(wlm)
resid = wx - fxm
# print " Iteration {:2} results in RA: sigma_residual = {:.6f}, fit_len = {:5} fit_len ={:5}".format(niter,sigma_resid,fit_len_init,fit_len)
except Exception:
print(" Skipping iteration {}".format(niter))
if (niter >= maxit) or (fit_len_init == fit_len):
if niter >= maxit:
print(" Max iterations, {:2}, reached!")
if fit_len_init == fit_len:
print(" All interval fitted in iteration {:2} ! ".format(niter))
stop = 2
niter = niter + 1
# --------------------------------------------------------------------
if plot:
plt.figure(figsize=(10, 8))
plt.plot(
lambda_cal,
                (measured_counts/exp_time),
"g+",
ms=10,
mew=3,
label="measured counts",
)
plt.plot(lambda_cal, flux_cal * scale, "k*-", label="flux_cal * scale")
plt.plot(
lambda_cal,
flux_cal * _response_curve_,
"c:",
label="flux_cal * response",
)
plt.xlim(np.min(self.wavelength), np.max(self.wavelength))
plt.ylabel("Flux")
plt.xlabel("Wavelength [$\AA$]")
plt.title("Response curve for absolute flux calibration using {}".format(self.object))
plt.legend(frameon=False, loc=1)
plt.grid(which="both")
plt.axvline(x=min_wave, color="k", linestyle="--", alpha=0.5)
plt.axvline(x=max_wave, color="k", linestyle="--", alpha=0.5)
plt.minorticks_on()
# plt.show()
# plt.close()
plt.figure(figsize=(10, 8))
if fit_degree > 0:
text = "Fit using polynomium of degree {}".format(fit_degree)
else:
text = "Using interpolated data with smooth = {}".format(smooth)
plt.plot(
self.response_wavelength,
self.response_curve,
"r-",
alpha=0.4,
linewidth=4,
label=text,
)
plt.plot(lambda_cal, _response_curve_, "k--", alpha=0.8)
plt.plot(
response_wavelength,
response_curve,
"g-",
alpha=0.8,
label="Response curve",
)
plt.plot(
wl, fx, "b-", linewidth=6, alpha=0.5, label="Response curve (filtered)"
)
plt.xlim(np.min(self.wavelength), np.max(self.wavelength))
plt.ylabel("Flux")
plt.xlabel("Wavelength [$\AA$]")
plt.title("Response curve for absolute flux calibration using {}".format(self.object))
plt.minorticks_on()
plt.grid(which="both")
plt.axvline(x=min_wave, color="k", linestyle="--", alpha=0.5)
plt.axvline(x=max_wave, color="k", linestyle="--", alpha=0.5)
plt.legend(frameon=True, loc=4, ncol=4)
# plt.show()
# plt.close()
interpolated_flat = interpolate.splrep(response_wavelength, fx) # , s=smooth)
self.response_curve = interpolate.splev(
self.response_wavelength, interpolated_flat, der=0
)
# plt.plot(self.response_wavelength, self.response_curve, "b-", alpha=0.5, linewidth=6, label = "Response curve (filtered)")
print(" Min wavelength at {:.2f} with value = {:.3f} /s".format(
self.response_wavelength[0], self.response_curve[0]
))
print(" Max wavelength at {:.2f} with value = {:.3f} /s".format(
self.response_wavelength[-1], self.response_curve[-1]
))
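    # Applying the response curve (a sketch, not a method of this class):
    # self.response_curve stores counts / [1e-16 erg cm^-2 s^-1 A^-1] / s, so an
    # observed spectrum is calibrated, in the same 1e-16-scaled flux units, as
    #   flux = (observed_counts / exptime) / response_curve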
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
def fit_Moffat_between(self, min_wave=0, max_wave=0, r_max=5, plot=False):
"""
Parameters
----------
min_wave
max_wave
r_max
plot
Returns
-------
"""
if min_wave == 0:
min_wave = self.valid_wave_min
if max_wave == 0:
max_wave = self.valid_wave_max
(
r2_growth_curve,
F_growth_curve,
flux,
r2_half_light,
) = self.growth_curve_between(min_wave, max_wave, plot)
flux, alpha, beta = fit_Moffat(
r2_growth_curve, F_growth_curve, flux, r2_half_light, r_max, plot
)
r2_half_light = alpha * (np.power(2.0, 1.0 / beta) - 1)
if plot:
print("Moffat fit: Flux = {:.3e},".format(flux), "HWHM = {:.3f},".format(
np.sqrt(r2_half_light) * self.pixel_size_arcsec
), "beta = {:.3f}".format(beta))
return flux, np.sqrt(r2_half_light) * self.pixel_size_arcsec, beta
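    # For a Moffat profile I(r) = I0 * (1 + r^2/alpha^2)^(-beta), solving
    # I(r) = I0/2 gives the half-width at half maximum used above:
    #   HWHM^2 = alpha^2 * (2^(1/beta) - 1),
    # which converts the fitted (alpha, beta) into the radius returned in arcsec.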
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# CUBE CLASS (ANGEL + BEN) ALL OF THIS NEEDS TO BE CAREFULLY TESTED & UPDATED!
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
class CUBE(RSS, Interpolated_cube):
"""
This class reads the FITS files with COMBINED datacubes.
Routines included:
- cube.map_wavelength(wavelength, contours=True)\n
- cube.plot_spectrum_cube(x,y, fcal=True)
"""
# -----------------------------------------------------------------------------
def __init__(self, filename):
# Create RSS object
super(CUBE, self).__init__()
print("\n> Reading combined datacube ''{}''".format(filename))
RSS_fits_file = fits.open(filename) # Open file
# General info:
self.object = RSS_fits_file[0].header["OBJECT"]
# self.description = self.object + ' - ' + filename
self.description = RSS_fits_file[0].header[
"DESCRIP"
] # NOTE: it was originally "DEF"
self.RA_centre_deg = RSS_fits_file[0].header["RAcen"]
self.DEC_centre_deg = RSS_fits_file[0].header["DECcen"]
self.PA = RSS_fits_file[0].header["PA"]
self.wavelength = RSS_fits_file[1].data # TODO: why is this 1? shouldn't it be [0], maybe cause we are doing the biggest variance?
self.flux_calibration = RSS_fits_file[2].data
self.n_wave = len(self.wavelength)
self.data = RSS_fits_file[0].data
self.wave_resolution = (self.wavelength[-1] - self.wavelength[0])/self.n_wave
self.n_cols = RSS_fits_file[0].header["Ncols"]
self.n_rows = RSS_fits_file[0].header["Nrows"]
self.pixel_size_arcsec = RSS_fits_file[0].header["PIXsize"]
self.flux_calibrated = RSS_fits_file[0].header["FCAL"]
self.number_of_combined_files = RSS_fits_file[0].header["COFILES"]
self.offsets_files = RSS_fits_file[0].header["OFFSETS"]
print("\n Object = {}".format(self.object))
print(" Description = {}".format(self.description))
print(" Centre: RA = {} Deg".format(self.RA_centre_deg))
print(" DEC = {} Deg".format(self.DEC_centre_deg))
print(" PA = {} Deg".format(self.PA))
print(" Size [pix] = {} x {}".format(self.n_rows, self.n_cols))
print(" Size [arcsec] = {} x {}".format(self.n_rows * self.pixel_size_arcsec, self.n_cols * self.pixel_size_arcsec))
print(" Pix size = {} arcsec".format(self.pixel_size_arcsec))
print(" Files combined = {}".format(self.number_of_combined_files))
print(" Offsets used = {}".format(self.offsets_files))
print(" Wave Range = [ {} , {} ]".format(self.wavelength[0], self.wavelength[-1]))
print(" Wave Resol. = {} A/pix".format(self.wave_resolution))
print(" Flux Cal. = {}".format(self.flux_calibrated))
print("\n> Use these parameters for acceding the data :\n")
print(" cube.wavelength : Array with wavelengths")
print(" cube.data[w,x,y] : Flux of the w wavelength in spaxel (x,y)")
if self.flux_calibrated:
print(" cube.flux_calibration : Flux calibration per wavelength [ 1 / (1E-16 * erg/cm**2/s/A) ] ")
print("\n> Cube readed! ")
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
def map_wavelength(
self,
wavelength,
cmap="fuego",
fig_size=10,
norm=colors.PowerNorm(gamma=1.0 / 4.0),
save_file="",
contours=True,
fcal=False,
):
"""
Plot map at a particular wavelength.
Parameters
----------
wavelength: float
wavelength to be mapped.
norm:
Colour scale, default = colors.PowerNorm(gamma=1./4.)\n
Log scale: norm=colors.LogNorm() \n
            Linear scale: norm=colors.Normalize().
cmap:
Color map used, default cmap="fuego"\n
Weight: cmap = "gist_gray" \n
Velocities: cmap="seismic".\n
Try also "inferno",
save_file:
(Optional) Save plot in file "file.extension"
Example
-------
>>> cube.map_wavelength(6820, contours=True, cmap="seismic")
"""
        # Note: fcal only affects the colour-bar label downstream; the map itself
        # is the plane at the requested wavelength in either case.
        interpolated_map = self.data[np.searchsorted(self.wavelength, wavelength)]
title = "{} - {} $\AA$".format(self.description, wavelength)
self.plot_map(
interpolated_map,
cmap=cmap,
fig_size=fig_size,
norm=norm,
contours=contours,
save_file=save_file,
title=title,
fcal=fcal,
) # CHECK
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
def plot_map(
self,
mapa,
cmap="fuego",
fig_size=10,
norm=colors.PowerNorm(gamma=1.0 / 4.0),
save_file="",
contours=True,
title="",
vmin=0,
vmax=1000,
fcal=False,
log=False,
clabel=False,
barlabel="",
):
"""
Plot a given map.
Parameters
----------
wavelength: float
wavelength to be mapped.
norm:
Colour scale, default = colors.PowerNorm(gamma=1./4.)\n
Log scale: norm=colors.LogNorm() \n
            Linear scale: norm=colors.Normalize().
cmap:
Color map used, default cmap="fuego"\n
Weight: cmap = "gist_gray" \n
Velocities: cmap="seismic".\n
Try also "inferno",
save_file:
(Optional) Save plot in file "file.extension"
Example
-------
>>> cube.plot_map(mapa, contours=True, cmap="seismic")
"""
fig, ax = plt.subplots(figsize=(fig_size, fig_size))
if log:
cax = ax.imshow(
mapa,
origin="lower",
interpolation="none",
norm=colors.LogNorm(vmin=vmin, vmax=vmax),
cmap=cmap,
extent=(
-0.5 * self.n_cols * self.pixel_size_arcsec,
0.5 * self.n_cols * self.pixel_size_arcsec,
-0.5 * self.n_rows * self.pixel_size_arcsec,
0.5 * self.n_rows * self.pixel_size_arcsec,
),
)
print("Map in log scale")
else:
cax = ax.imshow(
mapa,
origin="lower",
interpolation="none",
norm=norm,
cmap=cmap,
extent=(
-0.5 * self.n_cols * self.pixel_size_arcsec,
0.5 * self.n_cols * self.pixel_size_arcsec,
-0.5 * self.n_rows * self.pixel_size_arcsec,
0.5 * self.n_rows * self.pixel_size_arcsec,
),
vmin=vmin,
vmax=vmax,
)
if contours:
CS = plt.contour(
mapa,
extent=(
-0.5 * self.n_cols * self.pixel_size_arcsec,
0.5 * self.n_cols * self.pixel_size_arcsec,
-0.5 * self.n_rows * self.pixel_size_arcsec,
0.5 * self.n_rows * self.pixel_size_arcsec,
),
)
if clabel:
plt.clabel(CS, inline=1, fontsize=10)
ax.set_title(title, fontsize=fig_size * 1.3)
plt.tick_params(labelsize=fig_size)
plt.xlabel("$\Delta$ RA [arcsec]", fontsize=fig_size * 1.2)
plt.ylabel("$\Delta$ DEC [arcsec]", fontsize=fig_size * 1.2)
# plt.legend(loc='upper right', frameon=False)
plt.minorticks_on()
plt.grid(which="both", color="green")
plt.gca().invert_xaxis()
sm = plt.cm.ScalarMappable(cmap=cmap, norm=plt.Normalize(vmin=vmin, vmax=vmax))
sm._A = []
cbar = fig.colorbar(sm, fraction=0.0499, pad=0.02)
# cbar = fig.colorbar(cax, fraction=0.0490, pad=0.04, norm=colors.Normalize(clip=False))
if barlabel == "":
if fcal:
barlabel = "{}".format("Integrated Flux [10$^{-16}$ erg s$^{-1}$ cm$^{-2}$]")
else:
barlabel = "{}".format("Integrated Flux [Arbitrary units]")
# if fcal:
# cbar.set_label("{}".format("Integrated Flux [10$^{-16}$ erg s$^{-1}$ cm$^{-2}$]"), rotation=270, labelpad=40, fontsize=fig_size*1.2)
# else:
# cbar.set_label("{}".format("Integrated Flux [Arbitrary units]"), rotation=270, labelpad=40, fontsize=fig_size*1.2)
cbar.set_label(barlabel, rotation=270, labelpad=20, fontsize=fig_size * 1.2)
# cbar.ax.set_yticklabels(['< -1', '0', '> 1'])# vertically oriented colorbar
# cbar.set_ticks([1.5,2,3,4,5,6], update_ticks=True)
# cbar.set_ticklabels([1.5,2,3,4,5,6])
if save_file == "":
#plt.show()
pass
else:
plt.savefig(save_file)
plt.close()
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
#
# BEN ROUTINES
#
#
def subtractContinuum(self, spectrum):
"""
Subtract the median value from each intensity in a provided spectrum.
Parameters
----------
spectrum:
The list of intensities.
"""
med = np.nanmedian(spectrum)
for i in range(len(spectrum)):
spectrum[i] = spectrum[i] - med
if spectrum[i] < 0:
spectrum[i] = 0
return spectrum
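    # Worked example (values assumed for illustration): for a spectrum of
    # [2.0, 2.1, 7.0, 1.9, 2.0] the median is 2.0, so the result is
    # [0.0, 0.1, 5.0, 0.0, 0.0]: the flat continuum is removed, the emission
    # line at index 2 is preserved, and noise below the median is clipped.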
def plot_spectrum_cube_ben(
self,
x,
y,
lmin=0,
lmax=0,
fmin=1e-30,
fmax=1e30,
fig_size=10,
save_file="",
fcal=False,
):
"""
Plot spectrum of a particular spaxel.
Parameters
----------
x, y:
coordenates of spaxel to show spectrum.
lmin, lmax:
The range of wavelengths to plot. Default is whole spectrum.
fmin, fmax:
Plot spectrum in flux range [fmin, fmax]
fcal:
Use flux calibration, default fcal=False.\n
If fcal=True, cube.flux_calibration is used.
save_file:
(Optional) Save plot in file "file.extension"
fig_size:
Size of the figure (in x-axis), default: fig_size=10
Example
-------
>>> cube.plot_spectrum_cube_ben(20, 20, fcal=True)
"""
# Define x and y axis to plot
newWave = []
newSpectrum = []
        if not fcal:
spectrum = self.data[:, x, y]
ylabel = "Flux [relative units]"
else:
spectrum = (self.data[:, x, y]/self.flux_calibration)/1e16
# ylabel="Flux [ 10$^{-16}$ * erg cm$^{-2}$ s$^{-1}$ A$^{-1}$]"
ylabel = "Flux [ erg cm$^{-2}$ s$^{-1}$ A$^{-1}$]"
# Remove NaN values from spectrum and replace them with zero.
spectrum = np.nan_to_num(spectrum)
# Subtract continuum from spectrum
subSpectrum = self.subtractContinuum(spectrum)
if fmin == 1e-30:
fmin = np.nanmin(spectrum)
if fmax == 1e30:
fmax = np.nanmax(spectrum)
        # The true default start/end wavelengths depend on self.wavelength, so the
        # sentinel defaults from the signature are resolved here.
if lmin == 0:
lmin = self.wavelength[0]
if lmax == 0:
lmax = self.wavelength[-1]
# Create a new list of wavelengths to plot based on the provided
# wavelength startpoint and endpoint.
for i in range(len(self.wavelength)):
if self.wavelength[i] >= lmin and self.wavelength[i] <= lmax:
newWave.append(self.wavelength[i])
newSpectrum.append(subSpectrum[i])
plt.figure(figsize=(fig_size, fig_size / 2.5))
plt.plot(newWave, newSpectrum)
plt.ylim([fmin, fmax])
plt.xlim([lmin, lmax])
plt.minorticks_on()
title = "Spectrum of spaxel ({} , {}) in {}".format(x, y, self.description)
plt.title(title, fontsize=fig_size * 1.2)
plt.tick_params(labelsize=fig_size * 0.8)
plt.xlabel("Wavelength [$\AA$]", fontsize=fig_size * 1)
plt.ylabel(ylabel, fontsize=fig_size * 1)
if save_file == "":
#plt.show()
pass
else:
plt.savefig(save_file)
plt.close()
def calculateRatio(self, x, y, aStart, aEnd, bStart, bEnd, fcal=False):
"""
Given two wavelengths ranges, find the peak intensities and calculate the ratio
between them.
Parameters
----------
x, y:
The spaxel we are interested in.
aStart, aEnd:
The startpoint and endpoint of the range that the first emission line
will fall in.
bStart, bEnd:
The startpoint and endpoint of the range that the second emission line
will fall in.
"""
aFirstIndex = np.searchsorted(self.wavelength, aStart)
aLastIndex = np.searchsorted(self.wavelength, aEnd)
bFirstIndex = np.searchsorted(self.wavelength, bStart)
bLastIndex = np.searchsorted(self.wavelength, bEnd)
        if not fcal:
spectrum = self.data[:, x, y]
else:
spectrum = (self.data[:, x, y]/self.flux_calibration)/1e16
spectrum = np.nan_to_num(spectrum)
subSpectrum = self.subtractContinuum(spectrum)
aValues = []
tempIndex = aFirstIndex
while tempIndex <= aLastIndex:
aValues.append(subSpectrum[tempIndex])
tempIndex = tempIndex + 1
        aMax = np.nanmax(aValues)
''' Data loading functions during training
    (modified from https://github.com/yukimasano/self-label) '''
import torchvision
import torch
import torchvision.transforms as tfs
import models
import os, glob, natsort, pdb
import numpy as np
import util
import xarray as xr
from scipy.stats import mode
import pickle
from sklearn.model_selection import StratifiedKFold, StratifiedShuffleSplit
from datetime import datetime as dt
import h5py
import mne
class DataSet(torch.utils.data.Dataset):
""" pytorch Dataset that return image index too"""
def __init__(self, dt):
self.dt = dt
def __getitem__(self, index):
data, target = self.dt[index]
return data, target, index
def __len__(self):
return len(self.dt)
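# Hedged usage sketch (illustrative, not part of the original pipeline): FakeData
# is a stand-in for the real image-folder dataset. Wrapping any torch dataset in
# DataSet makes each sample come back as (data, target, index), which is what lets
# per-sample pseudo-label assignments be tracked during self-labelling.
def _dataset_index_demo():
    inner = torchvision.datasets.FakeData(size=4, transform=tfs.ToTensor())
    data, target, index = DataSet(inner)[0]
    assert index == 0
    return data.shape, target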
def get_aug_dataloader(image_dir, is_validation=False,
batch_size=256, image_size=256, crop_size=224,
mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225],
num_workers=8,
augs=1, shuffle=True):
print(image_dir)
if image_dir is None:
return None
print("imagesize: ", image_size, "cropsize: ", crop_size)
normalize = tfs.Normalize(mean=mean, std=std)
if augs == 0:
_transforms = tfs.Compose([
tfs.Resize(image_size),
tfs.CenterCrop(crop_size),
tfs.ToTensor(),
normalize
])
elif augs == 1:
_transforms = tfs.Compose([
tfs.Resize(image_size),
tfs.CenterCrop(crop_size),
tfs.RandomHorizontalFlip(),
tfs.ToTensor(),
normalize
])
elif augs == 2:
_transforms = tfs.Compose([
tfs.Resize(image_size),
tfs.RandomResizedCrop(crop_size),
tfs.RandomHorizontalFlip(),
tfs.ToTensor(),
normalize
])
elif augs == 3:
_transforms = tfs.Compose([
tfs.RandomResizedCrop(crop_size),
tfs.RandomGrayscale(p=0.2),
tfs.ColorJitter(0.4, 0.4, 0.4, 0.4),
tfs.RandomHorizontalFlip(),
tfs.ToTensor(),
normalize
])
if is_validation:
dataset = DataSet(torchvision.datasets.ImageFolder(image_dir + '/val', _transforms))
else:
dataset = DataSet(torchvision.datasets.ImageFolder(image_dir + '/train', _transforms))
loader = torch.utils.data.DataLoader(
dataset,
batch_size=batch_size,
shuffle=shuffle,
num_workers=num_workers,
pin_memory=True,
drop_last=False
)
return loader
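# For reference, the augs levels above are: 0 = resize + center crop only,
# 1 = adds horizontal flips, 2 = random resized crops instead of center crops,
# 3 = additionally random grayscale and colour jitter. Example call (path
# assumed for illustration):
#   loader = get_aug_dataloader('/data/imagenet', augs=3, batch_size=64)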
def return_model_loader(args, return_loader=True):
outs = [args.ncl]*args.hc
assert args.arch in ['alexnet','resnetv2','resnetv1','htnet','htnet_pose','htnet_rnn']
if not hasattr(args, 't_max'):
args.t_max = 1
if not hasattr(args, 't_min'):
args.t_min = -1
if not hasattr(args, 'rescale_rate'):
args.rescale_rate = 0
if not hasattr(args, 'cont_data'):
args.cont_data = False
train_loader = get_htnet_data_loader(data_dir=args.imagenet_path,
batch_size=args.batch_size,
num_workers=args.workers,
pat_id = args.pat_id,
n_states = args.n_states,
data_srate = args.data_srate,
len_eps = args.len_eps,
curr_fold = args.curr_fold,
tlim = [args.t_min, args.t_max],
rescale_rate = args.rescale_rate,
cont_data = args.cont_data,
use_ecog = args.use_ecog) # SP 12/22/2020: switched to HTNet subfunc and removed n_augs argument
if (args.arch == 'alexnet'):
model = models.__dict__[args.arch](num_classes=outs)
elif args.arch in ['htnet','htnet_pose','htnet_rnn']:
Chans,Samples = train_loader.dataset.__getitem__(0)[0].shape
model, params = models.__dict__[args.arch](num_classes=outs, Chans=Chans, Samples=Samples,
use_ecog = args.use_ecog, is_supervised = args.is_supervised,
cont_data = args.cont_data, param_lp=args.param_lp)
elif args.arch == 'resnetv2': # resnet
model = models.__dict__[args.arch](num_classes=outs, nlayers=50, expansion=1)
else:
model = models.__dict__[args.arch](num_classes=outs)
    if not return_loader:
        return model
    # Note: Chans, Samples and params are only defined in the htnet* branches above.
    return model, train_loader, Chans, Samples, params
def get_standard_data_loader(image_dir, is_validation=False,
batch_size=192, image_size=256, crop_size=224,
mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225],
num_workers=8,no_random_crops=False, tencrops=True):
"""Get a standard data loader for evaluating AlexNet representations in a standard way.
"""
if image_dir is None:
return None
normalize = tfs.Normalize(mean=mean, std=std)
if is_validation:
if tencrops:
transforms = tfs.Compose([
tfs.Resize(image_size),
tfs.TenCrop(crop_size),
tfs.Lambda(lambda crops: torch.stack([normalize(tfs.ToTensor()(crop)) for crop in crops]))
])
batch_size = int(batch_size/10)
else:
transforms = tfs.Compose([
tfs.Resize(image_size),
tfs.CenterCrop(crop_size),
tfs.ToTensor(),
normalize
])
else:
if not no_random_crops:
transforms = tfs.Compose([
tfs.RandomResizedCrop(crop_size),
tfs.RandomHorizontalFlip(),
tfs.ToTensor(),
normalize
])
else:
transforms = tfs.Compose([
tfs.Resize(image_size),
tfs.CenterCrop(crop_size),
tfs.RandomHorizontalFlip(),
tfs.ToTensor(),
normalize
])
dataset = torchvision.datasets.ImageFolder(image_dir, transforms)
loader = torch.utils.data.DataLoader(
dataset,
batch_size=batch_size,
shuffle=not is_validation,
num_workers=num_workers,
pin_memory=True,
sampler=None
)
return loader
def get_standard_data_loader_pairs(dir_path, **kargs):
"""Get a pair of data loaders for training and validation.
This is only used for the representation EVALUATION part.
"""
train = get_standard_data_loader(os.path.join(dir_path, "train"), is_validation=False, **kargs)
val = get_standard_data_loader(os.path.join(dir_path, "val"), is_validation=True, **kargs)
return train, val
### HTNet-specific code ###
def get_htnet_data_loader(data_dir, dat_type='train',
batch_size=192,
num_workers=1, shuffle=False,
pat_id = 'EC01', n_states = 2,
data_srate = 250, len_eps = 3, curr_fold = 0,
tlim = [-1,1], rescale_rate = 0, cont_data = False,
use_ecog=True):
"""Get a data loader for evaluating HTNet representations in a standard way.
"""
    # Note: the shuffle argument above is overridden here; only training data is shuffled.
    shuffle = dat_type == 'train'
if data_dir is None:
return None
if cont_data:
is_h5 = True if data_dir[-2:] == 'h5' else False
if is_h5:
dataset = ContNeuralDataset(data_dir)
else:
dataset = NeuralDataset(data_dir, dat_type=dat_type,
pat_id=pat_id, n_states=n_states,
curr_fold=curr_fold, tlim = tlim,
rescale_rate = rescale_rate)
# win_len = 2
# win_spacing = 0
# t_ind_lims = [12,16] #[8,16]
# dataset = ContNeuralDataset(data_dir, win_len = win_len,
# win_spacing = win_spacing, t_ind_lims = t_ind_lims)
else:
dataset = NeuralDataset(data_dir, dat_type=dat_type,
pat_id=pat_id, n_states=n_states,
curr_fold=curr_fold, tlim = tlim,
rescale_rate = rescale_rate,
use_ecog=use_ecog)
# if is_validation:
# dataset = NeuralDataset(data_dir + '/val')
# else:
# dataset = NeuralDataset(data_dir + '/train')
loader = torch.utils.data.DataLoader(
dataset,
batch_size=batch_size,
shuffle=shuffle,
num_workers=num_workers,
pin_memory=True,
sampler=None
)
return loader
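# Typical call (argument values illustrative): get_htnet_data_loader('/data/ecog',
# dat_type='val', batch_size=64, pat_id='EC01', curr_fold=0) returns a DataLoader
# over the standardized validation fold; shuffling is enabled only for 'train'.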
class NeuralDataset(torch.utils.data.Dataset):
'''Data loader class (currently loads in all data at once; may want to change this for larger datasets)'''
def __init__(self, lp, dat_type='train', rand_seed = 1337, pat_id = 'EC01',
tlim = [-1,1], n_chans_all = 140, n_folds = 3,
curr_fold = 0, n_states = 2, data_srate = 250, len_eps = 3,
rescale_rate = 0, use_ecog=True):
self.dat_type = dat_type
self.rescale_rate = rescale_rate
np.random.seed(rand_seed)
# Load ECoG data
X,y,X_test,y_test,sbj_order,sbj_order_test = load_data(pat_id, lp+'/',
n_chans_all=n_chans_all,
test_day=None, tlim=tlim,
n_states=n_states,
data_srate = data_srate, len_eps = len_eps) # test_day='last'
X[np.isnan(X)] = 0 # set all NaN's to 0
# Make sure labels start at 0 and are consecutive
miny = y.min()
y -= miny
y = y.astype('int')
        # Create splits for train/val (note: this overrides the n_folds constructor argument)
        n_folds = 5
sss = StratifiedKFold(n_splits=n_folds, shuffle=True, random_state=0)
for i, inds in enumerate(sss.split(X, y)):
if i == curr_fold:
train_inds, test_inds = inds
# Split data and labels into train/val sets
scalings = 'median' # 'mean' if use_ecog else 'median'
ss_dat = mne.decoding.Scaler(scalings=scalings)
ss_dat.fit(X[train_inds,...])
if self.dat_type=='train':
# Standardize data
x_train = ss_dat.transform(X[train_inds,...]) # X[train_inds,...]
self.x_data = torch.tensor(x_train, dtype=torch.float32)
self.y_data = torch.tensor(y[train_inds], dtype=torch.long)
elif self.dat_type=='test':
# Standardize data
X_test = ss_dat.transform(X[test_inds,...])
self.x_data = torch.tensor(X_test, dtype=torch.float32)
self.y_data = torch.tensor(y[test_inds], dtype=torch.long)
        else:
            # Standardize data (note: with StratifiedKFold the 'val' split currently
            # uses the same held-out indices as 'test')
            x_val = ss_dat.transform(X[test_inds,...])
            self.x_data = torch.tensor(x_val, dtype=torch.float32)
            self.y_data = torch.tensor(y[test_inds], dtype=torch.long)
def __len__(self):
return len(self.x_data) # required
def __getitem__(self, idx):
if torch.is_tensor(idx):
idx = idx.tolist()
X = self.x_data[idx, ...]
y = self.y_data[idx]
return X, y, idx
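# Usage sketch (path, patient ID and fold are assumed values): train and val
# datasets built with the same curr_fold share the Scaler fitted on the training
# split, e.g.
#   train_ds = NeuralDataset('/data/ecog', dat_type='train', pat_id='EC01', curr_fold=0)
#   val_ds   = NeuralDataset('/data/ecog', dat_type='val',   pat_id='EC01', curr_fold=0)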
def load_data(pats_ids_in, lp, n_chans_all=64, test_day=None, tlim=[-1,1], event_types=['rest','move'],
n_states = 2, data_srate = 250, len_eps = 3):
'''
Load ECoG data from all subjects and combine (uses xarray variables)
If len(pats_ids_in)>1, the number of electrodes will be padded or cut to match n_chans_all
If test_day is not None, a variable with test data will be generated for the day specified
If test_day = 'last', the last day will be set as the test day.
'''
if not isinstance(pats_ids_in, list):
pats_ids_in = [pats_ids_in]
sbj_order,sbj_order_test = [],[]
X_test_subj,y_test_subj = [],[] #placeholder vals
    #Gather each subject's data and concatenate all days
fID = natsort.natsorted(glob.glob(lp+pats_ids_in[0]+'*_data.nc'))[0]
ep_data_in = xr.open_dataset(fID)
if 'events' not in ep_data_in['__xarray_dataarray_variable__'].dims:
# Case for loading Kai's data
for j in range(len(pats_ids_in)):
pat_curr = pats_ids_in[j]
fID = natsort.natsorted(glob.glob(lp+pat_curr+'*_data.nc'))[0]
ep_data_in = xr.open_dataset(fID).to_array().values
ep_data_in = ep_data_in.reshape(ep_data_in.shape[0],ep_data_in.shape[1],-1,int(len_eps*data_srate))
ep_data_in = np.moveaxis(ep_data_in,2,0).squeeze() # events, 1, channels, time
labels = ep_data_in[...,-1,:].squeeze()
labels = mode(labels,axis=1)[0].squeeze() # 1 state per event
ep_data_in = ep_data_in[...,:-1,:]
n_ecog_chans = ep_data_in.shape[-2]
if n_chans_all < n_ecog_chans:
n_chans_curr = n_chans_all
else:
n_chans_curr = n_ecog_chans
            #Remove events with labels beyond the requested number of states
            if n_states==3:
                labels_cp = labels.copy()
                labels = np.delete(labels, np.nonzero(labels_cp>2))
# Copyright 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance
# with the License. A copy of the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
# OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
# and limitations under the License.
import pytest # noqa: F401; pylint: disable=unused-variable
from pytest import approx
def test_importable():
import pecos.xmc # noqa: F401
from pecos import xmc # noqa: F401
from pecos.xmc import xlinear # noqa: F401
from pecos.xmc import xtransformer # noqa: F401
from pecos.xmc import Indexer # noqa: F401
def test_hierarchicalkmeans():
import numpy as np
import scipy.sparse as smat
from sklearn.preprocessing import normalize
from pecos.xmc import Indexer
feat_mat = normalize(
smat.csr_matrix([[1, 0], [0.95, 0.05], [0.9, 0.1], [0, 1]], dtype=np.float32)
)
target_balanced = [0, 0, 1, 1]
target_imbalanced = [0, 0, 0, 1]
balanced_chain = Indexer.gen(feat_mat, max_leaf_size=3)
balanced_assignments = (balanced_chain[-1].todense() == [0, 1]).all(axis=1).A1
assert np.array_equal(balanced_assignments, target_balanced) or np.array_equal(
~balanced_assignments, target_balanced
)
imbalanced_chain = Indexer.gen(feat_mat, imbalanced_ratio=0.4, max_leaf_size=3)
imbalanced_assignments = (imbalanced_chain[-1].todense() == [0, 1]).all(axis=1).A1
assert np.array_equal(imbalanced_assignments, target_imbalanced) or np.array_equal(
~imbalanced_assignments, target_imbalanced
)
chain2 = Indexer.gen(feat_mat, max_leaf_size=1, nr_splits=2)
chain4 = Indexer.gen(feat_mat, max_leaf_size=1, nr_splits=4)
assert (chain2.chain[-1] - chain4.chain[-1]).nnz == 0
assert (chain2.chain[1].dot(chain2.chain[0]) - chain4.chain[0]).nnz == 0
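def test_hierarchicalkmeans_chain_shape():
    # Hedged sanity sketch, not from the original suite: the last matrix in the
    # cluster chain assigns every label to a leaf cluster, so its row count
    # should equal the number of labels.
    import numpy as np
    import scipy.sparse as smat
    from sklearn.preprocessing import normalize
    from pecos.xmc import Indexer

    feat_mat = normalize(smat.csr_matrix(np.eye(8), dtype=np.float32))
    chain = Indexer.gen(feat_mat, max_leaf_size=2)
    assert chain[-1].shape[0] == 8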
def test_label_embedding():
import random
import numpy as np
import scipy.sparse as smat
from sklearn.preprocessing import normalize
from pecos.xmc import LabelEmbeddingFactory
X = smat.csr_matrix(smat.eye(3)).astype(np.float32)
X_dense = X.toarray()
Y = np.array([[1, 1, 1, 1, 0], [1, 1, 0, 1, 1], [0, 1, 1, 1, 1]])
Y = smat.csr_matrix(Y).astype(np.float32)
Lt_dense = np.array(
[
[0.70710678, 0.70710678, 0.0],
[0.57735027, 0.57735027, 0.57735027],
[0.70710678, 0.0, 0.70710678],
[0.57735027, 0.57735027, 0.57735027],
[0.0, 0.70710678, 0.70710678],
]
)
Lt = smat.csr_matrix(Lt_dense)
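    # For orientation: "pifa" aggregates the features of each label's positive
    # instances and row-normalizes, i.e. row l of Lt is normalize(sum of X rows
    # where Y[:, l] == 1). Label 0 appears in instances 0 and 1 above, giving
    # normalize(e_0 + e_1) = [0.70710678, 0.70710678, 0].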
# pifa, X.dtype = csr_matrix, and simple X/Y with closed-form Lt_dense
Lp = LabelEmbeddingFactory.create(Y, X, method="pifa").toarray()
assert Lt_dense == approx(
Lp, abs=1e-6
), f"Lt_dense (true label embedding) != Lp (pifa label embedding), where closed-form X is sparse"
# pifa, X.dtype = np.array, and the same X/Y with previous closed-form Lt_dense
Lp = LabelEmbeddingFactory.create(Y, X_dense, method="pifa")
assert Lt_dense == approx(
Lp, abs=1e-6
), f"Lt_dense (true label embedding) != Lp (pifa label embedding), where closed-form X is dense"
# test data for pifa_lf_concat and pifa_lf_convex_combine
Lp = LabelEmbeddingFactory.create(Y, X_dense, method="pifa")
Lt_half_dense = Lt_dense * 0.5
Lt_half = smat.csr_matrix(Lt_half_dense)
# test data for pifa_lf_concat
Lplc_true = np.hstack([Lp, Lt_half_dense])
# pifa_lf_concat, X.dtype = ndarray, Z.dtype = ndarray
Lplc = LabelEmbeddingFactory.create(Y, X_dense, Z=Lt_half_dense, method="pifa_lf_concat")
assert isinstance(
Lplc, np.ndarray
), f"Return matrix should be np.ndarray when X.dtype = ndarray, Z.dtype = ndarray"
assert Lplc == approx(
Lplc_true
), f"Lplc_true (true label embedding) != Lplc (pifa_lf_concat label embedding), where X.dtype = ndarray, Z.dtype = ndarray"
# pifa_lf_concat, X.dtype = ndarray, Z.dtype = csr_matrix
Lplc = LabelEmbeddingFactory.create(Y, X_dense, Z=Lt_half, method="pifa_lf_concat")
assert isinstance(
Lplc, smat.csr_matrix
), f"Return matrix should be csr_matrix when X.dtype = ndarray, Z.dtype = csr_matrix"
assert Lplc.toarray() == approx(
Lplc_true
), f"Lplc_true (true label embedding) != Lplc (pifa_lf_concat label embedding), where X.dtype = ndarray, Z.dtype = csr_matrix"
# pifa_lf_concat, X.dtype = csr_matrix, Z.dtype = ndarray
Lplc = LabelEmbeddingFactory.create(Y, X, Z=Lt_half_dense, method="pifa_lf_concat")
assert isinstance(
Lplc, smat.csr_matrix
), f"Return matrix should be csr_matrix when X.dtype = csr_matrix, Z.dtype = ndarray"
assert Lplc.toarray() == approx(
Lplc_true
), f"Lplc_true (true label embedding) != Lplc (pifa_lf_concat label embedding), where X.dtype = csr_matrix, Z.dtype = ndarray"
# pifa_lf_concat, X.dtype = csr_matrix, Z.dtype = csr_matrix
Lplc = LabelEmbeddingFactory.create(Y, X, Z=Lt_half, method="pifa_lf_concat")
assert isinstance(
Lplc, smat.csr_matrix
), f"Return matrix should be csr_matrix when X.dtype = csr_matrix, Z.dtype = csr_matrix"
assert Lplc.toarray() == approx(
Lplc_true
), f"Lplc_true (true label embedding) != Lplc (pifa_lf_concat label embedding), where X.dtype = csr_matrix, Z.dtype = csr_matrix"
# pifa_lf_convex_combine, alpha is a number
alpha = 0.3
Lplcvx_true = alpha * Lp + (1.0 - alpha) * Lt_half_dense
# pifa_lf_convex_combine, X.dtype = ndarray, Z.dtype = ndarray
Lplcvx = LabelEmbeddingFactory.create(
Y, X_dense, Z=Lt_half_dense, alpha=alpha, method="pifa_lf_convex_combine"
)
assert isinstance(
Lplcvx, np.ndarray
), f"Return matrix should be ndarray when X.dtype = ndarray, Z.dtype = ndarray"
assert Lplcvx == approx(
Lplcvx_true
), f"Lplcvx_true (true label embedding) != Lplcvx (pifa_lf_convex_combine label embedding), where X.dtype = ndarray, Z.dtype = ndarray"
# pifa_lf_convex_combine, X.dtype = ndarray, Z.dtype = csr_matrix
Lplcvx = LabelEmbeddingFactory.create(
Y, X_dense, Z=Lt_half, alpha=alpha, method="pifa_lf_convex_combine"
)
assert isinstance(
Lplcvx, np.ndarray
), f"Return matrix should be ndarray when X.dtype = ndarray, Z.dtype = csr_matrix"
assert Lplcvx == approx(
Lplcvx_true
), f"Lplcvx_true (true label embedding) != Lplcvx (pifa_lf_convex_combine label embedding), where X.dtype = ndarray, Z.dtype = csr_matrix"
# pifa_lf_convex_combine, X.dtype = csr_matrix, Z.dtype = ndarray
Lplcvx = LabelEmbeddingFactory.create(
Y, X, Z=Lt_half_dense, alpha=alpha, method="pifa_lf_convex_combine"
)
assert isinstance(
Lplcvx, np.ndarray
), f"Return matrix should be ndarray when X.dtype = csr_matrix, Z.dtype = ndarray"
assert Lplcvx == approx(
Lplcvx_true
), f"Lplcvx_true (true label embedding) != Lplcvx (pifa_lf_convex_combine label embedding), where X.dtype = csr_matrix, Z.dtype = ndarray"
# pifa_lf_convex_combine, X.dtype = csr_matrix, Z.dtype = csr_matrix
Lplcvx = LabelEmbeddingFactory.create(
Y, X, Z=Lt_half, alpha=alpha, method="pifa_lf_convex_combine"
)
assert isinstance(
Lplcvx, smat.csr_matrix
), f"Return matrix should be csr_matrix when X.dtype = csr_matrix, Z.dtype = csr_matrix"
assert Lplcvx.toarray() == approx(
Lplcvx_true
), f"Lplcvx_true (true label embedding) != Lplcvx (pifa_lf_convex_combine label embedding), where X.dtype = csr_matrix, Z.dtype = csr_matrix"
# pifa_lf_convex_combine, alpha is an 1-D array
alpha = np.array([0.1, 0.2, 0.3, 0.4, 0.5])
    Lplcvx_true = np.zeros_like(Lp)
import argparse
import time
import os
import glob
import sys
import json
import shutil
import itertools
import numpy as np
import pandas as pd
import csv
import torch
from torch import nn
from torch.autograd import Variable
from sklearn.metrics import confusion_matrix
from torch.nn import functional as F
from opts import parse_opts_online
from model import generate_model, _modify_first_conv_layer, _construct_depth_model
from mean import get_mean, get_std
from spatial_transforms import *
from temporal_transforms import *
from target_transforms import ClassLabel
from dataset import get_online_data
from utils import Logger, AverageMeter, LevenshteinDistance, Queue
import pdb
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.pyplot import figure
import scipy.io as sio
import pickle
def weighting_func(x):
return (1 / (1 + np.exp(-0.2*(x-9))))
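# Shape check of the logistic weighting (illustrative values): weighting_func(9)
# is exactly 0.5 at the curve's midpoint, and weighting_func(20) is about 0.90,
# so early clips are down-weighted while later clips approach full weight.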
opt = parse_opts_online()
def load_models(opt):
opt.resume_path = opt.resume_path_clf
opt.pretrain_path = opt.pretrain_path_clf
opt.sample_duration = opt.sample_duration_clf
opt.model = opt.model_clf
opt.model_depth = opt.model_depth_clf
opt.modality = opt.modality_clf
opt.resnet_shortcut = opt.resnet_shortcut_clf
opt.n_classes = opt.n_classes_clf
opt.n_finetune_classes = opt.n_finetune_classes_clf
if opt.root_path != '':
opt.video_path = os.path.join(opt.root_path, opt.video_path)
opt.annotation_path = os.path.join(opt.root_path, opt.annotation_path)
opt.result_path = os.path.join(opt.root_path, opt.result_path)
if opt.resume_path:
opt.resume_path = os.path.join(opt.root_path, opt.resume_path)
if opt.pretrain_path:
opt.pretrain_path = os.path.join(opt.root_path, opt.pretrain_path)
opt.scales = [opt.initial_scale]
for i in range(1, opt.n_scales):
opt.scales.append(opt.scales[-1] * opt.scale_step)
opt.arch = '{}-{}'.format(opt.model, opt.model_depth)
opt.mean = get_mean(opt.norm_value)
opt.std = get_std(opt.norm_value)
print(opt)
with open(os.path.join(opt.result_path, 'opts_clf_{}.json'.format(opt.store_name)), 'w') as opt_file:
json.dump(vars(opt), opt_file)
torch.manual_seed(opt.manual_seed)
if opt.modality == 'Depth':
opt.modality = 'RGB'
classifier, parameters = generate_model(opt)
if opt.resume_path:
print('loading checkpoint {}'.format(opt.resume_path))
checkpoint = torch.load(opt.resume_path)
assert opt.arch == checkpoint['arch']
classifier.load_state_dict(checkpoint['state_dict'])
if opt.sample_duration_clf < 32 and opt.model_clf != 'c3d':
classifier = _modify_first_conv_layer(classifier,3,3)
classifier = _construct_depth_model(classifier)
classifier = classifier.cuda()
    if opt.modality != opt.modality_clf:
opt.modality = opt.modality_clf
print('Model \n', classifier)
pytorch_total_params = sum(p.numel() for p in classifier.parameters() if
p.requires_grad)
print("Total number of trainable parameters: ", pytorch_total_params)
return classifier
classifier = load_models(opt)
if opt.no_mean_norm and not opt.std_norm:
norm_method = Normalize([0, 0, 0], [1, 1, 1])
elif not opt.std_norm:
norm_method = Normalize(opt.mean, [1, 1, 1])
else:
norm_method = Normalize(opt.mean, opt.std)
spatial_transform = Compose([
Scale(112),
CenterCrop(112),
ToTensor(opt.norm_value), norm_method
])
target_transform = ClassLabel()
## Get list of videos to test
if opt.dataset == 'egogesture':
subject_list = ['Subject{:02d}'.format(i) for i in [2, 9, 11, 14, 18, 19, 28, 31, 41, 47]]
test_paths = []
buf = 4
for subject in subject_list:
for x in glob.glob(os.path.join(opt.video_path,subject,'*/*/rgb*')):
test_paths.append(x)
elif opt.dataset == 'nv':
df = pd.read_csv(os.path.join(opt.video_path,'nvgesture_test_correct_cvpr2016_v2.lst'), delimiter = ' ', header = None)
test_paths = []
buf = 4
for x in df[0].values:
if opt.modality_det == 'RGB':
test_paths.append(os.path.join(opt.video_path, x.replace('path:', ''), 'sk_color_all'))
elif opt.modality_det == 'Depth':
test_paths.append(os.path.join(opt.video_path, x.replace('path:', ''), 'sk_depth_all'))
elif opt.dataset == 'AHG':
data = sio.loadmat(os.path.join(opt.root_path,'bega/datasets/AHG/splitfiles/testlist01.mat'))['raw_list'][0]
test_paths = []
true_classes_all = []
true_frames_all = []
buf = 0
for i in range(data.shape[0]):
test_paths.append(str(data[i][0][0]))
true_classes_all.append(np.array(data[i][1][0]))
true_frames_all.append(np.array(data[i][-2][0]))
elif opt.dataset == 'denso':
if opt.test_subset == 'val':
print('Feature extraction of Validation set with {}ns'.format(opt.sample_duration))
data = sio.loadmat(os.path.join(opt.root_path,'bega/datasets/Pointing/train_sets/valid_list3.mat'))['raw_list'][0]
elif opt.test_subset == 'test':
print('Feature extraction of Testing set with {}ns'.format(opt.sample_duration))
data = sio.loadmat(os.path.join(opt.root_path,'bega/datasets/Pointing/train_sets/test_list3.mat'))['raw_list'][0]
elif opt.test_subset == 'train':
print('Feature extraction of Training set with {}ns'.format(opt.sample_duration))
data = sio.loadmat(os.path.join(opt.root_path,'bega/datasets/Pointing/train_sets/train_list3.mat'))['raw_list'][0]
    else:
        print('ERROR: choose train, val or test set for online evaluation')
        assert(opt.test_subset == 1)  # always false, so this aborts on an invalid subset
test_paths = []
true_classes_all = []
true_frames_all = []
buf = 0
for i in range(data.shape[0]): #All videos
test_paths.append(str(data[i][0][0])) #path
        true_classes_all.append(np.array(data[i][1][0]))
import functools
import operator
import pickle
import numpy as np
import pandas as pd
import pytest
from numpy.testing import assert_allclose, assert_array_equal
from packaging.version import Version
import xarray as xr
from xarray.core.alignment import broadcast
from xarray.core.computation import (
_UFuncSignature,
apply_ufunc,
broadcast_compat_data,
collect_dict_values,
join_dict_keys,
ordered_set_intersection,
ordered_set_union,
result_name,
unified_dim_sizes,
)
from xarray.core.pycompat import dask_version
from . import has_dask, raise_if_dask_computes, requires_dask
def assert_identical(a, b):
"""A version of this function which accepts numpy arrays"""
__tracebackhide__ = True
from xarray.testing import assert_identical as assert_identical_
if hasattr(a, "identical"):
assert_identical_(a, b)
else:
assert_array_equal(a, b)
def test_signature_properties() -> None:
sig = _UFuncSignature([["x"], ["x", "y"]], [["z"]])
assert sig.input_core_dims == (("x",), ("x", "y"))
assert sig.output_core_dims == (("z",),)
assert sig.all_input_core_dims == frozenset(["x", "y"])
assert sig.all_output_core_dims == frozenset(["z"])
assert sig.num_inputs == 2
assert sig.num_outputs == 1
assert str(sig) == "(x),(x,y)->(z)"
assert sig.to_gufunc_string() == "(dim0),(dim0,dim1)->(dim2)"
assert (
sig.to_gufunc_string(exclude_dims=set("x")) == "(dim0_0),(dim0_1,dim1)->(dim2)"
)
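    # Excluded dims get per-argument suffixes (dim0_0, dim0_1) because excluded
    # dimensions are allowed to differ in size between inputs.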
# dimension names matter
assert _UFuncSignature([["x"]]) != _UFuncSignature([["y"]])
def test_result_name() -> None:
class Named:
def __init__(self, name=None):
self.name = name
assert result_name([1, 2]) is None
assert result_name([Named()]) is None
assert result_name([Named("foo"), 2]) == "foo"
assert result_name([Named("foo"), Named("bar")]) is None
assert result_name([Named("foo"), Named()]) is None
def test_ordered_set_union() -> None:
assert list(ordered_set_union([[1, 2]])) == [1, 2]
assert list(ordered_set_union([[1, 2], [2, 1]])) == [1, 2]
assert list(ordered_set_union([[0], [1, 2], [1, 3]])) == [0, 1, 2, 3]
def test_ordered_set_intersection() -> None:
assert list(ordered_set_intersection([[1, 2]])) == [1, 2]
assert list(ordered_set_intersection([[1, 2], [2, 1]])) == [1, 2]
assert list(ordered_set_intersection([[1, 2], [1, 3]])) == [1]
assert list(ordered_set_intersection([[1, 2], [2]])) == [2]
def test_join_dict_keys() -> None:
dicts = [dict.fromkeys(keys) for keys in [["x", "y"], ["y", "z"]]]
assert list(join_dict_keys(dicts, "left")) == ["x", "y"]
assert list(join_dict_keys(dicts, "right")) == ["y", "z"]
assert list(join_dict_keys(dicts, "inner")) == ["y"]
assert list(join_dict_keys(dicts, "outer")) == ["x", "y", "z"]
with pytest.raises(ValueError):
join_dict_keys(dicts, "exact")
with pytest.raises(KeyError):
join_dict_keys(dicts, "foobar")
def test_collect_dict_values() -> None:
dicts = [{"x": 1, "y": 2, "z": 3}, {"z": 4}, 5]
expected = [[1, 0, 5], [2, 0, 5], [3, 4, 5]]
collected = collect_dict_values(dicts, ["x", "y", "z"], fill_value=0)
assert collected == expected
def identity(x):
return x
def test_apply_identity() -> None:
array = np.arange(10)
variable = xr.Variable("x", array)
data_array = xr.DataArray(variable, [("x", -array)])
dataset = xr.Dataset({"y": variable}, {"x": -array})
apply_identity = functools.partial(apply_ufunc, identity)
assert_identical(array, apply_identity(array))
assert_identical(variable, apply_identity(variable))
assert_identical(data_array, apply_identity(data_array))
assert_identical(data_array, apply_identity(data_array.groupby("x")))
assert_identical(dataset, apply_identity(dataset))
assert_identical(dataset, apply_identity(dataset.groupby("x")))
def add(a, b):
return apply_ufunc(operator.add, a, b)
def test_apply_two_inputs() -> None:
array = np.array([1, 2, 3])
variable = xr.Variable("x", array)
data_array = xr.DataArray(variable, [("x", -array)])
dataset = xr.Dataset({"y": variable}, {"x": -array})
zero_array = np.zeros_like(array)
zero_variable = xr.Variable("x", zero_array)
zero_data_array = xr.DataArray(zero_variable, [("x", -array)])
zero_dataset = xr.Dataset({"y": zero_variable}, {"x": -array})
assert_identical(array, add(array, zero_array))
assert_identical(array, add(zero_array, array))
assert_identical(variable, add(variable, zero_array))
assert_identical(variable, add(variable, zero_variable))
assert_identical(variable, add(zero_array, variable))
assert_identical(variable, add(zero_variable, variable))
assert_identical(data_array, add(data_array, zero_array))
assert_identical(data_array, add(data_array, zero_variable))
assert_identical(data_array, add(data_array, zero_data_array))
assert_identical(data_array, add(zero_array, data_array))
assert_identical(data_array, add(zero_variable, data_array))
assert_identical(data_array, add(zero_data_array, data_array))
assert_identical(dataset, add(dataset, zero_array))
assert_identical(dataset, add(dataset, zero_variable))
assert_identical(dataset, add(dataset, zero_data_array))
assert_identical(dataset, add(dataset, zero_dataset))
assert_identical(dataset, add(zero_array, dataset))
assert_identical(dataset, add(zero_variable, dataset))
assert_identical(dataset, add(zero_data_array, dataset))
assert_identical(dataset, add(zero_dataset, dataset))
assert_identical(data_array, add(data_array.groupby("x"), zero_data_array))
assert_identical(data_array, add(zero_data_array, data_array.groupby("x")))
assert_identical(dataset, add(data_array.groupby("x"), zero_dataset))
assert_identical(dataset, add(zero_dataset, data_array.groupby("x")))
assert_identical(dataset, add(dataset.groupby("x"), zero_data_array))
assert_identical(dataset, add(dataset.groupby("x"), zero_dataset))
assert_identical(dataset, add(zero_data_array, dataset.groupby("x")))
assert_identical(dataset, add(zero_dataset, dataset.groupby("x")))
def test_apply_1d_and_0d() -> None:
array = np.array([1, 2, 3])
variable = xr.Variable("x", array)
data_array = xr.DataArray(variable, [("x", -array)])
dataset = xr.Dataset({"y": variable}, {"x": -array})
zero_array = 0
zero_variable = xr.Variable((), zero_array)
zero_data_array = xr.DataArray(zero_variable)
zero_dataset = xr.Dataset({"y": zero_variable})
assert_identical(array, add(array, zero_array))
assert_identical(array, add(zero_array, array))
assert_identical(variable, add(variable, zero_array))
assert_identical(variable, add(variable, zero_variable))
assert_identical(variable, add(zero_array, variable))
assert_identical(variable, add(zero_variable, variable))
assert_identical(data_array, add(data_array, zero_array))
assert_identical(data_array, add(data_array, zero_variable))
assert_identical(data_array, add(data_array, zero_data_array))
assert_identical(data_array, add(zero_array, data_array))
assert_identical(data_array, add(zero_variable, data_array))
assert_identical(data_array, add(zero_data_array, data_array))
assert_identical(dataset, add(dataset, zero_array))
assert_identical(dataset, add(dataset, zero_variable))
assert_identical(dataset, add(dataset, zero_data_array))
assert_identical(dataset, add(dataset, zero_dataset))
assert_identical(dataset, add(zero_array, dataset))
assert_identical(dataset, add(zero_variable, dataset))
assert_identical(dataset, add(zero_data_array, dataset))
assert_identical(dataset, add(zero_dataset, dataset))
assert_identical(data_array, add(data_array.groupby("x"), zero_data_array))
assert_identical(data_array, add(zero_data_array, data_array.groupby("x")))
assert_identical(dataset, add(data_array.groupby("x"), zero_dataset))
assert_identical(dataset, add(zero_dataset, data_array.groupby("x")))
assert_identical(dataset, add(dataset.groupby("x"), zero_data_array))
assert_identical(dataset, add(dataset.groupby("x"), zero_dataset))
assert_identical(dataset, add(zero_data_array, dataset.groupby("x")))
assert_identical(dataset, add(zero_dataset, dataset.groupby("x")))
def test_apply_two_outputs() -> None:
array = np.arange(5)
variable = xr.Variable("x", array)
data_array = xr.DataArray(variable, [("x", -array)])
dataset = xr.Dataset({"y": variable}, {"x": -array})
def twice(obj):
def func(x):
return (x, x)
return apply_ufunc(func, obj, output_core_dims=[[], []])
out0, out1 = twice(array)
assert_identical(out0, array)
assert_identical(out1, array)
out0, out1 = twice(variable)
assert_identical(out0, variable)
assert_identical(out1, variable)
out0, out1 = twice(data_array)
assert_identical(out0, data_array)
assert_identical(out1, data_array)
out0, out1 = twice(dataset)
assert_identical(out0, dataset)
assert_identical(out1, dataset)
out0, out1 = twice(data_array.groupby("x"))
assert_identical(out0, data_array)
assert_identical(out1, data_array)
out0, out1 = twice(dataset.groupby("x"))
assert_identical(out0, dataset)
assert_identical(out1, dataset)
@requires_dask
def test_apply_dask_parallelized_two_outputs() -> None:
data_array = xr.DataArray([[0, 1, 2], [1, 2, 3]], dims=("x", "y"))
def twice(obj):
def func(x):
return (x, x)
return apply_ufunc(func, obj, output_core_dims=[[], []], dask="parallelized")
out0, out1 = twice(data_array.chunk({"x": 1}))
assert_identical(data_array, out0)
assert_identical(data_array, out1)
def test_apply_input_core_dimension() -> None:
def first_element(obj, dim):
def func(x):
return x[..., 0]
return apply_ufunc(func, obj, input_core_dims=[[dim]])
array = np.array([[1, 2], [3, 4]])
variable = xr.Variable(["x", "y"], array)
data_array = xr.DataArray(variable, {"x": ["a", "b"], "y": [-1, -2]})
dataset = xr.Dataset({"data": data_array})
expected_variable_x = xr.Variable(["y"], [1, 2])
expected_data_array_x = xr.DataArray(expected_variable_x, {"y": [-1, -2]})
expected_dataset_x = xr.Dataset({"data": expected_data_array_x})
expected_variable_y = xr.Variable(["x"], [1, 3])
expected_data_array_y = xr.DataArray(expected_variable_y, {"x": ["a", "b"]})
expected_dataset_y = xr.Dataset({"data": expected_data_array_y})
assert_identical(expected_variable_x, first_element(variable, "x"))
assert_identical(expected_variable_y, first_element(variable, "y"))
assert_identical(expected_data_array_x, first_element(data_array, "x"))
assert_identical(expected_data_array_y, first_element(data_array, "y"))
assert_identical(expected_dataset_x, first_element(dataset, "x"))
assert_identical(expected_dataset_y, first_element(dataset, "y"))
assert_identical(expected_data_array_x, first_element(data_array.groupby("y"), "x"))
assert_identical(expected_dataset_x, first_element(dataset.groupby("y"), "x"))
def multiply(*args):
val = args[0]
for arg in args[1:]:
val = val * arg
return val
# regression test for GH:2341
with pytest.raises(ValueError):
apply_ufunc(
multiply,
data_array,
data_array["y"].values,
input_core_dims=[["y"]],
output_core_dims=[["y"]],
)
expected = xr.DataArray(
multiply(data_array, data_array["y"]), dims=["x", "y"], coords=data_array.coords
)
actual = apply_ufunc(
multiply,
data_array,
data_array["y"].values,
input_core_dims=[["y"], []],
output_core_dims=[["y"]],
)
assert_identical(expected, actual)
def test_apply_output_core_dimension() -> None:
def stack_negative(obj):
def func(x):
return np.stack([x, -x], axis=-1)
result = apply_ufunc(func, obj, output_core_dims=[["sign"]])
if isinstance(result, (xr.Dataset, xr.DataArray)):
result.coords["sign"] = [1, -1]
return result
array = np.array([[1, 2], [3, 4]])
variable = xr.Variable(["x", "y"], array)
data_array = xr.DataArray(variable, {"x": ["a", "b"], "y": [-1, -2]})
dataset = xr.Dataset({"data": data_array})
stacked_array = np.array([[[1, -1], [2, -2]], [[3, -3], [4, -4]]])
stacked_variable = xr.Variable(["x", "y", "sign"], stacked_array)
stacked_coords = {"x": ["a", "b"], "y": [-1, -2], "sign": [1, -1]}
stacked_data_array = xr.DataArray(stacked_variable, stacked_coords)
stacked_dataset = xr.Dataset({"data": stacked_data_array})
assert_identical(stacked_array, stack_negative(array))
assert_identical(stacked_variable, stack_negative(variable))
assert_identical(stacked_data_array, stack_negative(data_array))
assert_identical(stacked_dataset, stack_negative(dataset))
assert_identical(stacked_data_array, stack_negative(data_array.groupby("x")))
assert_identical(stacked_dataset, stack_negative(dataset.groupby("x")))
def original_and_stack_negative(obj):
def func(x):
return (x, np.stack([x, -x], axis=-1))
result = apply_ufunc(func, obj, output_core_dims=[[], ["sign"]])
if isinstance(result[1], (xr.Dataset, xr.DataArray)):
result[1].coords["sign"] = [1, -1]
return result
out0, out1 = original_and_stack_negative(array)
assert_identical(array, out0)
assert_identical(stacked_array, out1)
out0, out1 = original_and_stack_negative(variable)
assert_identical(variable, out0)
assert_identical(stacked_variable, out1)
out0, out1 = original_and_stack_negative(data_array)
assert_identical(data_array, out0)
assert_identical(stacked_data_array, out1)
out0, out1 = original_and_stack_negative(dataset)
assert_identical(dataset, out0)
assert_identical(stacked_dataset, out1)
out0, out1 = original_and_stack_negative(data_array.groupby("x"))
assert_identical(data_array, out0)
assert_identical(stacked_data_array, out1)
out0, out1 = original_and_stack_negative(dataset.groupby("x"))
assert_identical(dataset, out0)
assert_identical(stacked_dataset, out1)
def test_apply_exclude() -> None:
def concatenate(objects, dim="x"):
def func(*x):
return np.concatenate(x, axis=-1)
result = apply_ufunc(
func,
*objects,
input_core_dims=[[dim]] * len(objects),
output_core_dims=[[dim]],
exclude_dims={dim},
)
if isinstance(result, (xr.Dataset, xr.DataArray)):
# note: this will fail if dim is not a coordinate on any input
new_coord = np.concatenate([obj.coords[dim] for obj in objects])
result.coords[dim] = new_coord
return result
arrays = [np.array([1]), np.array([2, 3])]
variables = [xr.Variable("x", a) for a in arrays]
data_arrays = [
xr.DataArray(v, {"x": c, "y": ("x", range(len(c)))})
for v, c in zip(variables, [["a"], ["b", "c"]])
]
datasets = [xr.Dataset({"data": data_array}) for data_array in data_arrays]
expected_array = np.array([1, 2, 3])
expected_variable = xr.Variable("x", expected_array)
expected_data_array = xr.DataArray(expected_variable, [("x", list("abc"))])
expected_dataset = xr.Dataset({"data": expected_data_array})
assert_identical(expected_array, concatenate(arrays))
assert_identical(expected_variable, concatenate(variables))
assert_identical(expected_data_array, concatenate(data_arrays))
assert_identical(expected_dataset, concatenate(datasets))
# must also be a core dimension
with pytest.raises(ValueError):
apply_ufunc(identity, variables[0], exclude_dims={"x"})
def test_apply_groupby_add() -> None:
array = np.arange(5)
variable = xr.Variable("x", array)
coords = {"x": -array, "y": ("x", [0, 0, 1, 1, 2])}
data_array = xr.DataArray(variable, coords, dims="x")
dataset = xr.Dataset({"z": variable}, coords)
other_variable = xr.Variable("y", [0, 10])
other_data_array = xr.DataArray(other_variable, dims="y")
other_dataset = xr.Dataset({"z": other_variable})
expected_variable = xr.Variable("x", [0, 1, 12, 13, np.nan])
expected_data_array = xr.DataArray(expected_variable, coords, dims="x")
expected_dataset = xr.Dataset({"z": expected_variable}, coords)
assert_identical(
expected_data_array, add(data_array.groupby("y"), other_data_array)
)
assert_identical(expected_dataset, add(data_array.groupby("y"), other_dataset))
assert_identical(expected_dataset, add(dataset.groupby("y"), other_data_array))
assert_identical(expected_dataset, add(dataset.groupby("y"), other_dataset))
# cannot be performed with xarray.Variable objects that share a dimension
with pytest.raises(ValueError):
add(data_array.groupby("y"), other_variable)
# if they are all grouped the same way
with pytest.raises(ValueError):
add(data_array.groupby("y"), data_array[:4].groupby("y"))
with pytest.raises(ValueError):
add(data_array.groupby("y"), data_array[1:].groupby("y"))
with pytest.raises(ValueError):
add(data_array.groupby("y"), other_data_array.groupby("y"))
with pytest.raises(ValueError):
add(data_array.groupby("y"), data_array.groupby("x"))
def test_unified_dim_sizes() -> None:
assert unified_dim_sizes([xr.Variable((), 0)]) == {}
assert unified_dim_sizes([xr.Variable("x", [1]), xr.Variable("x", [1])]) == {"x": 1}
assert unified_dim_sizes([xr.Variable("x", [1]), xr.Variable("y", [1, 2])]) == {
"x": 1,
"y": 2,
}
assert unified_dim_sizes(
[xr.Variable(("x", "z"), [[1]]), xr.Variable(("y", "z"), [[1, 2], [3, 4]])],
exclude_dims={"z"},
) == {"x": 1, "y": 2}
# duplicate dimensions
with pytest.raises(ValueError):
unified_dim_sizes([xr.Variable(("x", "x"), [[1]])])
# mismatched lengths
with pytest.raises(ValueError):
unified_dim_sizes([xr.Variable("x", [1]), xr.Variable("x", [1, 2])])
def test_broadcast_compat_data_1d() -> None:
data = np.arange(5)
var = xr.Variable("x", data)
assert_identical(data, broadcast_compat_data(var, ("x",), ()))
assert_identical(data, broadcast_compat_data(var, (), ("x",)))
assert_identical(data[:], broadcast_compat_data(var, ("w",), ("x",)))
assert_identical(data[:, None], broadcast_compat_data(var, ("w", "x", "y"), ()))
with pytest.raises(ValueError):
broadcast_compat_data(var, ("x",), ("w",))
with pytest.raises(ValueError):
broadcast_compat_data(var, (), ())
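# As the assertions above show, broadcast_compat_data lays the data out as
# broadcast dims followed by core dims: asking for (("w", "x", "y"), ()) with
# data on "x" yields data[:, None], i.e. a trailing axis is added for the
# missing "y" while the absent "w" is left to implicit NumPy broadcasting.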
def test_broadcast_compat_data_2d() -> None:
data = np.arange(12).reshape(3, 4)
var = xr.Variable(["x", "y"], data)
assert_identical(data, broadcast_compat_data(var, ("x", "y"), ()))
assert_identical(data, broadcast_compat_data(var, ("x",), ("y",)))
assert_identical(data, broadcast_compat_data(var, (), ("x", "y")))
assert_identical(data.T, broadcast_compat_data(var, ("y", "x"), ()))
assert_identical(data.T, broadcast_compat_data(var, ("y",), ("x",)))
assert_identical(data, broadcast_compat_data(var, ("w", "x"), ("y",)))
assert_identical(data, broadcast_compat_data(var, ("w",), ("x", "y")))
assert_identical(data.T, broadcast_compat_data(var, ("w",), ("y", "x")))
assert_identical(
data[:, :, None], broadcast_compat_data(var, ("w", "x", "y", "z"), ())
)
assert_identical(
data[None, :, :].T, broadcast_compat_data(var, ("w", "y", "x", "z"), ())
)
def test_keep_attrs() -> None:
def add(a, b, keep_attrs):
if keep_attrs:
return apply_ufunc(operator.add, a, b, keep_attrs=keep_attrs)
else:
return apply_ufunc(operator.add, a, b)
a = xr.DataArray([0, 1], [("x", [0, 1])])
a.attrs["attr"] = "da"
a["x"].attrs["attr"] = "da_coord"
b = xr.DataArray([1, 2], [("x", [0, 1])])
actual = add(a, b, keep_attrs=False)
assert not actual.attrs
actual = add(a, b, keep_attrs=True)
assert_identical(actual.attrs, a.attrs)
assert_identical(actual["x"].attrs, a["x"].attrs)
actual = add(a.variable, b.variable, keep_attrs=False)
assert not actual.attrs
actual = add(a.variable, b.variable, keep_attrs=True)
assert_identical(actual.attrs, a.attrs)
ds_a = xr.Dataset({"x": [0, 1]})
ds_a.attrs["attr"] = "ds"
ds_a.x.attrs["attr"] = "da"
ds_b = xr.Dataset({"x": [0, 1]})
actual = add(ds_a, ds_b, keep_attrs=False)
assert not actual.attrs
actual = add(ds_a, ds_b, keep_attrs=True)
assert_identical(actual.attrs, ds_a.attrs)
assert_identical(actual.x.attrs, ds_a.x.attrs)
@pytest.mark.parametrize(
["strategy", "attrs", "expected", "error"],
(
pytest.param(
None,
[{"a": 1}, {"a": 2}, {"a": 3}],
{},
False,
id="default",
),
pytest.param(
False,
[{"a": 1}, {"a": 2}, {"a": 3}],
{},
False,
id="False",
),
pytest.param(
True,
[{"a": 1}, {"a": 2}, {"a": 3}],
{"a": 1},
False,
id="True",
),
pytest.param(
"override",
[{"a": 1}, {"a": 2}, {"a": 3}],
{"a": 1},
False,
id="override",
),
pytest.param(
"drop",
[{"a": 1}, {"a": 2}, {"a": 3}],
{},
False,
id="drop",
),
pytest.param(
"drop_conflicts",
[{"a": 1, "b": 2}, {"b": 1, "c": 3}, {"c": 3, "d": 4}],
{"a": 1, "c": 3, "d": 4},
False,
id="drop_conflicts",
),
pytest.param(
"no_conflicts",
[{"a": 1}, {"b": 2}, {"b": 3}],
None,
True,
id="no_conflicts",
),
),
)
def test_keep_attrs_strategies_variable(strategy, attrs, expected, error) -> None:
a = xr.Variable("x", [0, 1], attrs=attrs[0])
b = xr.Variable("x", [0, 1], attrs=attrs[1])
c = xr.Variable("x", [0, 1], attrs=attrs[2])
if error:
with pytest.raises(xr.MergeError):
apply_ufunc(lambda *args: sum(args), a, b, c, keep_attrs=strategy)
else:
expected = xr.Variable("x", [0, 3], attrs=expected)
actual = apply_ufunc(lambda *args: sum(args), a, b, c, keep_attrs=strategy)
assert_identical(actual, expected)
@pytest.mark.parametrize(
["strategy", "attrs", "expected", "error"],
(
pytest.param(
None,
[{"a": 1}, {"a": 2}, {"a": 3}],
{},
False,
id="default",
),
pytest.param(
False,
[{"a": 1}, {"a": 2}, {"a": 3}],
{},
False,
id="False",
),
pytest.param(
True,
[{"a": 1}, {"a": 2}, {"a": 3}],
{"a": 1},
False,
id="True",
),
pytest.param(
"override",
[{"a": 1}, {"a": 2}, {"a": 3}],
{"a": 1},
False,
id="override",
),
pytest.param(
"drop",
[{"a": 1}, {"a": 2}, {"a": 3}],
{},
False,
id="drop",
),
pytest.param(
"drop_conflicts",
[{"a": 1, "b": 2}, {"b": 1, "c": 3}, {"c": 3, "d": 4}],
{"a": 1, "c": 3, "d": 4},
False,
id="drop_conflicts",
),
pytest.param(
"no_conflicts",
[{"a": 1}, {"b": 2}, {"b": 3}],
None,
True,
id="no_conflicts",
),
),
)
def test_keep_attrs_strategies_dataarray(strategy, attrs, expected, error) -> None:
a = xr.DataArray(dims="x", data=[0, 1], attrs=attrs[0])
b = xr.DataArray(dims="x", data=[0, 1], attrs=attrs[1])
c = xr.DataArray(dims="x", data=[0, 1], attrs=attrs[2])
if error:
with pytest.raises(xr.MergeError):
apply_ufunc(lambda *args: sum(args), a, b, c, keep_attrs=strategy)
else:
expected = xr.DataArray(dims="x", data=[0, 3], attrs=expected)
actual = apply_ufunc(lambda *args: sum(args), a, b, c, keep_attrs=strategy)
assert_identical(actual, expected)
@pytest.mark.parametrize("variant", ("dim", "coord"))
@pytest.mark.parametrize(
["strategy", "attrs", "expected", "error"],
(
pytest.param(
None,
[{"a": 1}, {"a": 2}, {"a": 3}],
{},
False,
id="default",
),
pytest.param(
False,
[{"a": 1}, {"a": 2}, {"a": 3}],
{},
False,
id="False",
),
pytest.param(
True,
[{"a": 1}, {"a": 2}, {"a": 3}],
{"a": 1},
False,
id="True",
),
pytest.param(
"override",
[{"a": 1}, {"a": 2}, {"a": 3}],
{"a": 1},
False,
id="override",
),
pytest.param(
"drop",
[{"a": 1}, {"a": 2}, {"a": 3}],
{},
False,
id="drop",
),
pytest.param(
"drop_conflicts",
[{"a": 1, "b": 2}, {"b": 1, "c": 3}, {"c": 3, "d": 4}],
{"a": 1, "c": 3, "d": 4},
False,
id="drop_conflicts",
),
pytest.param(
"no_conflicts",
[{"a": 1}, {"b": 2}, {"b": 3}],
None,
True,
id="no_conflicts",
),
),
)
def test_keep_attrs_strategies_dataarray_variables(
variant, strategy, attrs, expected, error
):
compute_attrs = {
"dim": lambda attrs, default: (attrs, default),
"coord": lambda attrs, default: (default, attrs),
}.get(variant)
dim_attrs, coord_attrs = compute_attrs(attrs, [{}, {}, {}])
a = xr.DataArray(
dims="x",
data=[0, 1],
coords={"x": ("x", [0, 1], dim_attrs[0]), "u": ("x", [0, 1], coord_attrs[0])},
)
b = xr.DataArray(
dims="x",
data=[0, 1],
coords={"x": ("x", [0, 1], dim_attrs[1]), "u": ("x", [0, 1], coord_attrs[1])},
)
c = xr.DataArray(
dims="x",
data=[0, 1],
coords={"x": ("x", [0, 1], dim_attrs[2]), "u": ("x", [0, 1], coord_attrs[2])},
)
if error:
with pytest.raises(xr.MergeError):
apply_ufunc(lambda *args: sum(args), a, b, c, keep_attrs=strategy)
else:
dim_attrs, coord_attrs = compute_attrs(expected, {})
expected = xr.DataArray(
dims="x",
data=[0, 3],
coords={"x": ("x", [0, 1], dim_attrs), "u": ("x", [0, 1], coord_attrs)},
)
actual = apply_ufunc(lambda *args: sum(args), a, b, c, keep_attrs=strategy)
assert_identical(actual, expected)
@pytest.mark.parametrize(
["strategy", "attrs", "expected", "error"],
(
pytest.param(
None,
[{"a": 1}, {"a": 2}, {"a": 3}],
{},
False,
id="default",
),
pytest.param(
False,
[{"a": 1}, {"a": 2}, {"a": 3}],
{},
False,
id="False",
),
pytest.param(
True,
[{"a": 1}, {"a": 2}, {"a": 3}],
{"a": 1},
False,
id="True",
),
pytest.param(
"override",
[{"a": 1}, {"a": 2}, {"a": 3}],
{"a": 1},
False,
id="override",
),
pytest.param(
"drop",
[{"a": 1}, {"a": 2}, {"a": 3}],
{},
False,
id="drop",
),
pytest.param(
"drop_conflicts",
[{"a": 1, "b": 2}, {"b": 1, "c": 3}, {"c": 3, "d": 4}],
{"a": 1, "c": 3, "d": 4},
False,
id="drop_conflicts",
),
pytest.param(
"no_conflicts",
[{"a": 1}, {"b": 2}, {"b": 3}],
None,
True,
id="no_conflicts",
),
),
)
def test_keep_attrs_strategies_dataset(strategy, attrs, expected, error) -> None:
a = xr.Dataset({"a": ("x", [0, 1])}, attrs=attrs[0])
b = xr.Dataset({"a": ("x", [0, 1])}, attrs=attrs[1])
c = xr.Dataset({"a": ("x", [0, 1])}, attrs=attrs[2])
if error:
with pytest.raises(xr.MergeError):
apply_ufunc(lambda *args: sum(args), a, b, c, keep_attrs=strategy)
else:
expected = xr.Dataset({"a": ("x", [0, 3])}, attrs=expected)
actual = apply_ufunc(lambda *args: sum(args), a, b, c, keep_attrs=strategy)
assert_identical(actual, expected)
@pytest.mark.parametrize("variant", ("data", "dim", "coord"))
@pytest.mark.parametrize(
["strategy", "attrs", "expected", "error"],
(
pytest.param(
None,
[{"a": 1}, {"a": 2}, {"a": 3}],
{},
False,
id="default",
),
pytest.param(
False,
[{"a": 1}, {"a": 2}, {"a": 3}],
{},
False,
id="False",
),
pytest.param(
True,
[{"a": 1}, {"a": 2}, {"a": 3}],
{"a": 1},
False,
id="True",
),
pytest.param(
"override",
[{"a": 1}, {"a": 2}, {"a": 3}],
{"a": 1},
False,
id="override",
),
pytest.param(
"drop",
[{"a": 1}, {"a": 2}, {"a": 3}],
{},
False,
id="drop",
),
pytest.param(
"drop_conflicts",
[{"a": 1, "b": 2}, {"b": 1, "c": 3}, {"c": 3, "d": 4}],
{"a": 1, "c": 3, "d": 4},
False,
id="drop_conflicts",
),
pytest.param(
"no_conflicts",
[{"a": 1}, {"b": 2}, {"b": 3}],
None,
True,
id="no_conflicts",
),
),
)
def test_keep_attrs_strategies_dataset_variables(
variant, strategy, attrs, expected, error
):
compute_attrs = {
"data": lambda attrs, default: (attrs, default, default),
"dim": lambda attrs, default: (default, attrs, default),
"coord": lambda attrs, default: (default, default, attrs),
}.get(variant)
data_attrs, dim_attrs, coord_attrs = compute_attrs(attrs, [{}, {}, {}])
a = xr.Dataset(
{"a": ("x", [], data_attrs[0])},
coords={"x": ("x", [], dim_attrs[0]), "u": ("x", [], coord_attrs[0])},
)
b = xr.Dataset(
{"a": ("x", [], data_attrs[1])},
coords={"x": ("x", [], dim_attrs[1]), "u": ("x", [], coord_attrs[1])},
)
c = xr.Dataset(
{"a": ("x", [], data_attrs[2])},
coords={"x": ("x", [], dim_attrs[2]), "u": ("x", [], coord_attrs[2])},
)
if error:
with pytest.raises(xr.MergeError):
apply_ufunc(lambda *args: sum(args), a, b, c, keep_attrs=strategy)
else:
data_attrs, dim_attrs, coord_attrs = compute_attrs(expected, {})
expected = xr.Dataset(
{"a": ("x", [], data_attrs)},
coords={"x": ("x", [], dim_attrs), "u": ("x", [], coord_attrs)},
)
actual = apply_ufunc(lambda *args: sum(args), a, b, c, keep_attrs=strategy)
assert_identical(actual, expected)
def test_dataset_join() -> None:
ds0 = xr.Dataset({"a": ("x", [1, 2]), "x": [0, 1]})
ds1 = xr.Dataset({"a": ("x", [99, 3]), "x": [1, 2]})
# by default, cannot have different labels
with pytest.raises(ValueError, match=r"cannot align.*join.*exact.*"):
apply_ufunc(operator.add, ds0, ds1)
with pytest.raises(TypeError, match=r"must supply"):
apply_ufunc(operator.add, ds0, ds1, dataset_join="outer")
def add(a, b, join, dataset_join):
return apply_ufunc(
operator.add,
a,
b,
join=join,
dataset_join=dataset_join,
dataset_fill_value=np.nan,
)
actual = add(ds0, ds1, "outer", "inner")
expected = xr.Dataset({"a": ("x", [np.nan, 101, np.nan]), "x": [0, 1, 2]})
assert_identical(actual, expected)
actual = add(ds0, ds1, "outer", "outer")
assert_identical(actual, expected)
with pytest.raises(ValueError, match=r"data variable names"):
apply_ufunc(operator.add, ds0, xr.Dataset({"b": 1}))
ds2 = xr.Dataset({"b": ("x", [99, 3]), "x": [1, 2]})
actual = add(ds0, ds2, "outer", "inner")
expected = xr.Dataset({"x": [0, 1, 2]})
assert_identical(actual, expected)
# we used np.nan as the fill_value in add() above
actual = add(ds0, ds2, "outer", "outer")
expected = xr.Dataset(
{
"a": ("x", [np.nan, np.nan, np.nan]),
"b": ("x", [np.nan, np.nan, np.nan]),
"x": [0, 1, 2],
}
)
assert_identical(actual, expected)
@requires_dask
def test_apply_dask() -> None:
import dask.array as da
array = da.ones((2,), chunks=2)
variable = xr.Variable("x", array)
coords = xr.DataArray(variable).coords.variables
data_array = xr.DataArray(variable, dims=["x"], coords=coords)
dataset = xr.Dataset({"y": variable})
# encountered dask array, but did not set dask='allowed'
with pytest.raises(ValueError):
apply_ufunc(identity, array)
with pytest.raises(ValueError):
apply_ufunc(identity, variable)
with pytest.raises(ValueError):
apply_ufunc(identity, data_array)
with pytest.raises(ValueError):
apply_ufunc(identity, dataset)
# unknown setting for dask array handling
with pytest.raises(ValueError):
apply_ufunc(identity, array, dask="unknown")
def dask_safe_identity(x):
return apply_ufunc(identity, x, dask="allowed")
assert array is dask_safe_identity(array)
actual = dask_safe_identity(variable)
assert isinstance(actual.data, da.Array)
assert_identical(variable, actual)
actual = dask_safe_identity(data_array)
assert isinstance(actual.data, da.Array)
assert_identical(data_array, actual)
actual = dask_safe_identity(dataset)
assert isinstance(actual["y"].data, da.Array)
assert_identical(dataset, actual)
@requires_dask
def test_apply_dask_parallelized_one_arg() -> None:
import dask.array as da
array = da.ones((2, 2), chunks=(1, 1))
data_array = xr.DataArray(array, dims=("x", "y"))
def parallel_identity(x):
return apply_ufunc(identity, x, dask="parallelized", output_dtypes=[x.dtype])
actual = parallel_identity(data_array)
assert isinstance(actual.data, da.Array)
assert actual.data.chunks == array.chunks
assert_identical(data_array, actual)
computed = data_array.compute()
actual = parallel_identity(computed)
assert_identical(computed, actual)
@requires_dask
def test_apply_dask_parallelized_two_args() -> None:
import dask.array as da
array = da.ones((2, 2), chunks=(1, 1), dtype=np.int64)
data_array = xr.DataArray(array, dims=("x", "y"))
data_array.name = None
def parallel_add(x, y):
return apply_ufunc(
operator.add, x, y, dask="parallelized", output_dtypes=[np.int64]
)
def check(x, y):
actual = parallel_add(x, y)
assert isinstance(actual.data, da.Array)
assert actual.data.chunks == array.chunks
assert_identical(data_array, actual)
check(data_array, 0)
check(0, data_array)
check(data_array, xr.DataArray(0))
check(data_array, 0 * data_array)
check(data_array, 0 * data_array[0])
check(data_array[:, 0], 0 * data_array[0])
check(data_array, 0 * data_array.compute())
@requires_dask
def test_apply_dask_parallelized_errors() -> None:
import dask.array as da
array = da.ones((2, 2), chunks=(1, 1))
data_array = xr.DataArray(array, dims=("x", "y"))
# from apply_array_ufunc
with pytest.raises(ValueError, match=r"at least one input is an xarray object"):
apply_ufunc(identity, array, dask="parallelized")
# formerly from _apply_blockwise, now from apply_variable_ufunc
with pytest.raises(ValueError, match=r"consists of multiple chunks"):
apply_ufunc(
identity,
data_array,
dask="parallelized",
output_dtypes=[float],
input_core_dims=[("y",)],
output_core_dims=[("y",)],
)
# it's currently impossible to silence these warnings from inside dask.array:
# https://github.com/dask/dask/issues/3245
@requires_dask
@pytest.mark.filterwarnings("ignore:Mean of empty slice")
def test_apply_dask_multiple_inputs() -> None:
import dask.array as da
def covariance(x, y):
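# Sample covariance along the last axis; keepdims preserves the trailing
# dimension so the means broadcast against the original arrays.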
return (
(x - x.mean(axis=-1, keepdims=True)) * (y - y.mean(axis=-1, keepdims=True))
).mean(axis=-1)
rs = np.random.RandomState(42)
array1 = da.from_array(rs.randn(4, 4), chunks=(2, 4))
array2 = da.from_array(rs.randn(4, 4), chunks=(2, 4))
data_array_1 = xr.DataArray(array1, dims=("x", "z"))
data_array_2 = xr.DataArray(array2, dims=("y", "z"))
expected = apply_ufunc(
covariance,
data_array_1.compute(),
data_array_2.compute(),
input_core_dims=[["z"], ["z"]],
)
allowed = apply_ufunc(
covariance,
data_array_1,
data_array_2,
input_core_dims=[["z"], ["z"]],
dask="allowed",
)
assert isinstance(allowed.data, da.Array)
xr.testing.assert_allclose(expected, allowed.compute())
parallelized = apply_ufunc(
covariance,
data_array_1,
data_array_2,
input_core_dims=[["z"], ["z"]],
dask="parallelized",
output_dtypes=[float],
)
assert isinstance(parallelized.data, da.Array)
xr.testing.assert_allclose(expected, parallelized.compute())
@requires_dask
def test_apply_dask_new_output_dimension() -> None:
import dask.array as da
array = da.ones((2, 2), chunks=(1, 1))
data_array = xr.DataArray(array, dims=("x", "y"))
def stack_negative(obj):
def func(x):
return np.stack([x, -x], axis=-1)
return apply_ufunc(
func,
obj,
output_core_dims=[["sign"]],
dask="parallelized",
output_dtypes=[obj.dtype],
dask_gufunc_kwargs=dict(output_sizes={"sign": 2}),
)
expected = stack_negative(data_array.compute())
actual = stack_negative(data_array)
assert actual.dims == ("x", "y", "sign")
assert actual.shape == (2, 2, 2)
assert isinstance(actual.data, da.Array)
assert_identical(expected, actual)
@requires_dask
def test_apply_dask_new_output_sizes() -> None:
ds = xr.Dataset({"foo": (["lon", "lat"], np.arange(10 * 10).reshape((10, 10)))})
ds["bar"] = ds["foo"]
newdims = {"lon_new": 3, "lat_new": 6}
def extract(obj):
def func(da):
return da[1:4, 1:7]
return apply_ufunc(
func,
obj,
dask="parallelized",
input_core_dims=[["lon", "lat"]],
output_core_dims=[["lon_new", "lat_new"]],
dask_gufunc_kwargs=dict(output_sizes=newdims),
)
expected = extract(ds)
actual = extract(ds.chunk())
assert actual.dims == {"lon_new": 3, "lat_new": 6}
assert_identical(expected.chunk(), actual)
def pandas_median(x):
return pd.Series(x).median()
def test_vectorize() -> None:
data_array = xr.DataArray([[0, 1, 2], [1, 2, 3]], dims=("x", "y"))
expected = xr.DataArray([1, 2], dims=["x"])
actual = apply_ufunc(
pandas_median, data_array, input_core_dims=[["y"]], vectorize=True
)
assert_identical(expected, actual)
@requires_dask
def test_vectorize_dask() -> None:
# run vectorization in dask.array.gufunc by using `dask='parallelized'`
data_array = xr.DataArray([[0, 1, 2], [1, 2, 3]], dims=("x", "y"))
expected = xr.DataArray([1, 2], dims=["x"])
actual = apply_ufunc(
pandas_median,
data_array.chunk({"x": 1}),
input_core_dims=[["y"]],
vectorize=True,
dask="parallelized",
output_dtypes=[float],
)
assert_identical(expected, actual)
@requires_dask
def test_vectorize_dask_dtype() -> None:
# ensure output_dtypes is preserved with vectorize=True
# GH4015
# integer
data_array = xr.DataArray([[0, 1, 2], [1, 2, 3]], dims=("x", "y"))
expected = xr.DataArray([1, 2], dims=["x"])
actual = apply_ufunc(
pandas_median,
data_array.chunk({"x": 1}),
input_core_dims=[["y"]],
vectorize=True,
dask="parallelized",
output_dtypes=[int],
)
assert_identical(expected, actual)
assert expected.dtype == actual.dtype
# complex
data_array = xr.DataArray([[0 + 0j, 1 + 2j, 2 + 1j]], dims=("x", "y"))
expected = data_array.copy()
actual = apply_ufunc(
identity,
data_array.chunk({"x": 1}),
vectorize=True,
dask="parallelized",
output_dtypes=[complex],
)
assert_identical(expected, actual)
assert expected.dtype == actual.dtype
@requires_dask
@pytest.mark.parametrize(
"data_array",
[
xr.DataArray([[0, 1, 2], [1, 2, 3]], dims=("x", "y")),
xr.DataArray([[0 + 0j, 1 + 2j, 2 + 1j]], dims=("x", "y")),
],
)
def test_vectorize_dask_dtype_without_output_dtypes(data_array) -> None:
# ensure output_dtypes is preserved with vectorize=True
# GH4015
expected = data_array.copy()
actual = apply_ufunc(
identity,
data_array.chunk({"x": 1}),
vectorize=True,
dask="parallelized",
)
assert_identical(expected, actual)
assert expected.dtype == actual.dtype
@pytest.mark.skipif(
dask_version > Version("2021.06"),
reason="dask/dask#7669: can no longer pass output_dtypes and meta",
)
@requires_dask
def test_vectorize_dask_dtype_meta() -> None:
# meta dtype takes precedence
data_array = xr.DataArray([[0, 1, 2], [1, 2, 3]], dims=("x", "y"))
expected = xr.DataArray([1, 2], dims=["x"])
actual = apply_ufunc(
pandas_median,
data_array.chunk({"x": 1}),
input_core_dims=[["y"]],
vectorize=True,
dask="parallelized",
output_dtypes=[int],
dask_gufunc_kwargs=dict(meta=np.ndarray((0, 0), dtype=float)),
)
assert_identical(expected, actual)
assert float == actual.dtype
def pandas_median_add(x, y):
# function which can consume input of unequal length
return pd.Series(x).median() + pd.Series(y).median()
def test_vectorize_exclude_dims() -> None:
# GH 3890
data_array_a = xr.DataArray([[0, 1, 2], [1, 2, 3]], dims=("x", "y"))
data_array_b = xr.DataArray([[0, 1, 2, 3, 4], [1, 2, 3, 4, 5]], dims=("x", "y"))
expected = xr.DataArray([3, 5], dims=["x"])
actual = apply_ufunc(
pandas_median_add,
data_array_a,
data_array_b,
input_core_dims=[["y"], ["y"]],
vectorize=True,
exclude_dims=set("y"),
)
assert_identical(expected, actual)
@requires_dask
def test_vectorize_exclude_dims_dask() -> None:
# GH 3890
data_array_a = xr.DataArray([[0, 1, 2], [1, 2, 3]], dims=("x", "y"))
data_array_b = xr.DataArray([[0, 1, 2, 3, 4], [1, 2, 3, 4, 5]], dims=("x", "y"))
expected = xr.DataArray([3, 5], dims=["x"])
actual = apply_ufunc(
pandas_median_add,
data_array_a.chunk({"x": 1}),
data_array_b.chunk({"x": 1}),
input_core_dims=[["y"], ["y"]],
exclude_dims=set("y"),
vectorize=True,
dask="parallelized",
output_dtypes=[float],
)
assert_identical(expected, actual)
def test_corr_only_dataarray() -> None:
with pytest.raises(TypeError, match="Only xr.DataArray is supported"):
xr.corr(xr.Dataset(), xr.Dataset())
def arrays_w_tuples():
da = xr.DataArray(
np.random.random((3, 21, 4)),
coords={"time": pd.date_range("2000-01-01", freq="1D", periods=21)},
dims=("a", "time", "x"),
)
arrays = [
da.isel(time=range(0, 18)),
da.isel(time=range(2, 20)).rolling(time=3, center=True).mean(),
xr.DataArray([[1, 2], [1, np.nan]], dims=["x", "time"]),
xr.DataArray([[1, 2], [np.nan, np.nan]], dims=["x", "time"]),
xr.DataArray([[1, 2], [2, 1]], dims=["x", "time"]),
]
array_tuples = [
(arrays[0], arrays[0]),
(arrays[0], arrays[1]),
(arrays[1], arrays[1]),
(arrays[2], arrays[2]),
(arrays[2], arrays[3]),
(arrays[2], arrays[4]),
(arrays[4], arrays[2]),
(arrays[3], arrays[3]),
(arrays[4], arrays[4]),
]
return arrays, array_tuples
@pytest.mark.parametrize("ddof", [0, 1])
@pytest.mark.parametrize(
"da_a, da_b",
[
arrays_w_tuples()[1][3],
arrays_w_tuples()[1][4],
arrays_w_tuples()[1][5],
arrays_w_tuples()[1][6],
arrays_w_tuples()[1][7],
arrays_w_tuples()[1][8],
],
)
@pytest.mark.parametrize("dim", [None, "x", "time"])
@requires_dask
def test_lazy_corrcov(da_a, da_b, dim, ddof) -> None:
# GH 5284
from dask import is_dask_collection
with raise_if_dask_computes():
cov = xr.cov(da_a.chunk(), da_b.chunk(), dim=dim, ddof=ddof)
assert is_dask_collection(cov)
corr = xr.corr(da_a.chunk(), da_b.chunk(), dim=dim)
assert is_dask_collection(corr)
@pytest.mark.parametrize("ddof", [0, 1])
@pytest.mark.parametrize(
"da_a, da_b",
[arrays_w_tuples()[1][0], arrays_w_tuples()[1][1], arrays_w_tuples()[1][2]],
)
@pytest.mark.parametrize("dim", [None, "time"])
def test_cov(da_a, da_b, dim, ddof) -> None:
if dim is not None:
def np_cov_ind(ts1, ts2, a, x):
# Ensure the ts are aligned and missing values ignored
ts1, ts2 = broadcast(ts1, ts2)
valid_values = ts1.notnull() & ts2.notnull()
# While dropping isn't ideal here, numpy will return nan
# if any segment contains a NaN.
ts1 = ts1.where(valid_values)
ts2 = ts2.where(valid_values)
return np.ma.cov(
np.ma.masked_invalid(ts1.sel(a=a, x=x).data.flatten()),
np.ma.masked_invalid(ts2.sel(a=a, x=x).data.flatten()),
ddof=ddof,
)[0, 1]
expected = np.zeros((3, 4))
for a in [0, 1, 2]:
for x in [0, 1, 2, 3]:
expected[a, x] = np_cov_ind(da_a, da_b, a=a, x=x)
actual = xr.cov(da_a, da_b, dim=dim, ddof=ddof)
assert_allclose(actual, expected)
else:
def np_cov(ts1, ts2):
# Ensure the ts are aligned and missing values ignored
ts1, ts2 = broadcast(ts1, ts2)
valid_values = ts1.notnull() & ts2.notnull()
ts1 = ts1.where(valid_values)
ts2 = ts2.where(valid_values)
return np.ma.cov(
np.ma.masked_invalid(ts1.data.flatten()),
np.ma.masked_invalid(ts2.data.flatten()),
ddof=ddof,
)[0, 1]
expected = np_cov(da_a, da_b)
actual = xr.cov(da_a, da_b, dim=dim, ddof=ddof)
assert_allclose(actual, expected)
@pytest.mark.parametrize(
"da_a, da_b",
[arrays_w_tuples()[1][0], arrays_w_tuples()[1][1], arrays_w_tuples()[1][2]],
)
@pytest.mark.parametrize("dim", [None, "time"])
def test_corr(da_a, da_b, dim) -> None:
if dim is not None:
def np_corr_ind(ts1, ts2, a, x):
# Ensure the ts are aligned and missing values ignored
ts1, ts2 = broadcast(ts1, ts2)
valid_values = ts1.notnull() & ts2.notnull()
ts1 = ts1.where(valid_values)
ts2 = ts2.where(valid_values)
return np.ma.corrcoef(
np.ma.masked_invalid(ts1.sel(a=a, x=x).data.flatten()),
np.ma.masked_invalid(ts2.sel(a=a, x=x).data.flatten()),
)[0, 1]
expected = np.zeros((3, 4))
for a in [0, 1, 2]:
for x in [0, 1, 2, 3]:
expected[a, x] = np_corr_ind(da_a, da_b, a=a, x=x)
actual = xr.corr(da_a, da_b, dim)
assert_allclose(actual, expected)
else:
def np_corr(ts1, ts2):
# Ensure the ts are aligned and missing values ignored
ts1, ts2 = broadcast(ts1, ts2)
valid_values = ts1.notnull() & ts2.notnull()
ts1 = ts1.where(valid_values)
ts2 = ts2.where(valid_values)
return np.ma.corrcoef(
np.ma.masked_invalid(ts1.data.flatten()),
np.ma.masked_invalid(ts2.data.flatten()),
)[0, 1]
expected = np_corr(da_a, da_b)
actual = xr.corr(da_a, da_b, dim)
assert_allclose(actual, expected)
@pytest.mark.parametrize(
"da_a, da_b",
arrays_w_tuples()[1],
)
@pytest.mark.parametrize("dim", [None, "time", "x"])
def test_covcorr_consistency(da_a, da_b, dim) -> None:
# Testing that xr.corr and xr.cov are consistent with each other
# 1. Broadcast the two arrays
da_a, da_b = broadcast(da_a, da_b)
# 2. Ignore the nans
valid_values = da_a.notnull() & da_b.notnull()
da_a = da_a.where(valid_values)
da_b = da_b.where(valid_values)
expected = xr.cov(da_a, da_b, dim=dim, ddof=0) / (
da_a.std(dim=dim) * da_b.std(dim=dim)
)
actual = xr.corr(da_a, da_b, dim=dim)
assert_allclose(actual, expected)
@requires_dask
@pytest.mark.parametrize("da_a, da_b", arrays_w_tuples()[1])
@pytest.mark.parametrize("dim", [None, "time", "x"])
@pytest.mark.filterwarnings("ignore:invalid value encountered in .*divide")
def test_corr_lazycorr_consistency(da_a, da_b, dim) -> None:
da_al = da_a.chunk()
da_bl = da_b.chunk()
c_abl = xr.corr(da_al, da_bl, dim=dim)
c_ab = xr.corr(da_a, da_b, dim=dim)
c_ab_mixed = xr.corr(da_a, da_bl, dim=dim)
assert_allclose(c_ab, c_abl)
assert_allclose(c_ab, c_ab_mixed)
@requires_dask
def test_corr_dtype_error():
da_a = xr.DataArray([[1, 2], [2, 1]], dims=["x", "time"])
da_b = xr.DataArray([[1, 2], [1, np.nan]], dims=["x", "time"])
xr.testing.assert_equal(xr.corr(da_a, da_b), xr.corr(da_a.chunk(), da_b.chunk()))
xr.testing.assert_equal(xr.corr(da_a, da_b), xr.corr(da_a, da_b.chunk()))
@pytest.mark.parametrize(
"da_a",
arrays_w_tuples()[0],
)
@pytest.mark.parametrize("dim", [None, "time", "x", ["time", "x"]])
def test_autocov(da_a, dim) -> None:
# Testing that the autocovariance*(N-1) is ~=~ to the variance matrix
# 1. Ignore the nans
valid_values = da_a.notnull()
# Because we're using ddof=1, this requires > 1 value in each sample
da_a = da_a.where(valid_values.sum(dim=dim) > 1)
expected = ((da_a - da_a.mean(dim=dim)) ** 2).sum(dim=dim, skipna=True, min_count=1)
actual = xr.cov(da_a, da_a, dim=dim) * (valid_values.sum(dim) - 1)
assert_allclose(actual, expected)
@requires_dask
def test_vectorize_dask_new_output_dims() -> None:
# regression test for GH3574
# run vectorization in dask.array.gufunc by using `dask='parallelized'`
data_array = xr.DataArray([[0, 1, 2], [1, 2, 3]], dims=("x", "y"))
func = lambda x: x[np.newaxis, ...]
expected = data_array.expand_dims("z")
actual = apply_ufunc(
func,
data_array.chunk({"x": 1}),
output_core_dims=[["z"]],
vectorize=True,
dask="parallelized",
output_dtypes=[float],
dask_gufunc_kwargs=dict(output_sizes={"z": 1}),
).transpose(*expected.dims)
assert_identical(expected, actual)
with pytest.raises(
ValueError, match=r"dimension 'z1' in 'output_sizes' must correspond"
):
apply_ufunc(
func,
data_array.chunk({"x": 1}),
output_core_dims=[["z"]],
vectorize=True,
dask="parallelized",
output_dtypes=[float],
dask_gufunc_kwargs=dict(output_sizes={"z1": 1}),
)
with pytest.raises(
ValueError, match=r"dimension 'z' in 'output_core_dims' needs corresponding"
):
apply_ufunc(
func,
data_array.chunk({"x": 1}),
output_core_dims=[["z"]],
vectorize=True,
dask="parallelized",
output_dtypes=[float],
)
def test_output_wrong_number() -> None:
variable = xr.Variable("x", np.arange(10))
def identity(x):
return x
def tuple3x(x):
return (x, x, x)
with pytest.raises(ValueError, match=r"number of outputs"):
apply_ufunc(identity, variable, output_core_dims=[(), ()])
with pytest.raises(ValueError, match=r"number of outputs"):
apply_ufunc(tuple3x, variable, output_core_dims=[(), ()])
def test_output_wrong_dims() -> None:
variable = xr.Variable("x", np.arange(10))
def add_dim(x):
return x[..., np.newaxis]
def remove_dim(x):
return x[..., 0]
with pytest.raises(ValueError, match=r"unexpected number of dimensions"):
apply_ufunc(add_dim, variable, output_core_dims=[("y", "z")])
with pytest.raises(ValueError, match=r"unexpected number of dimensions"):
apply_ufunc(add_dim, variable)
with pytest.raises(ValueError, match=r"unexpected number of dimensions"):
apply_ufunc(remove_dim, variable)
def test_output_wrong_dim_size() -> None:
array = np.arange(10)
variable = xr.Variable("x", array)
data_array = xr.DataArray(variable, [("x", -array)])
dataset = xr.Dataset({"y": variable}, {"x": -array})
def truncate(array):
return array[:5]
def apply_truncate_broadcast_invalid(obj):
return apply_ufunc(truncate, obj)
with pytest.raises(ValueError, match=r"size of dimension"):
apply_truncate_broadcast_invalid(variable)
with pytest.raises(ValueError, match=r"size of dimension"):
apply_truncate_broadcast_invalid(data_array)
with pytest.raises(ValueError, match=r"size of dimension"):
apply_truncate_broadcast_invalid(dataset)
def apply_truncate_x_x_invalid(obj):
return apply_ufunc(
truncate, obj, input_core_dims=[["x"]], output_core_dims=[["x"]]
)
with pytest.raises(ValueError, match=r"size of dimension"):
apply_truncate_x_x_invalid(variable)
with pytest.raises(ValueError, match=r"size of dimension"):
apply_truncate_x_x_invalid(data_array)
with pytest.raises(ValueError, match=r"size of dimension"):
apply_truncate_x_x_invalid(dataset)
def apply_truncate_x_z(obj):
return apply_ufunc(
truncate, obj, input_core_dims=[["x"]], output_core_dims=[["z"]]
)
assert_identical(xr.Variable("z", array[:5]), apply_truncate_x_z(variable))
assert_identical(
xr.DataArray(array[:5], dims=["z"]), apply_truncate_x_z(data_array)
)
assert_identical(xr.Dataset({"y": ("z", array[:5])}), apply_truncate_x_z(dataset))
def apply_truncate_x_x_valid(obj):
return apply_ufunc(
truncate,
obj,
input_core_dims=[["x"]],
output_core_dims=[["x"]],
exclude_dims={"x"},
)
assert_identical(xr.Variable("x", array[:5]), apply_truncate_x_x_valid(variable))
assert_identical(
xr.DataArray(array[:5], dims=["x"]), apply_truncate_x_x_valid(data_array)
)
assert_identical(
xr.Dataset({"y": ("x", array[:5])}), apply_truncate_x_x_valid(dataset)
)
@pytest.mark.parametrize("use_dask", [True, False])
def test_dot(use_dask) -> None:
if use_dask:
if not has_dask:
pytest.skip("test for dask.")
a = np.arange(30 * 4).reshape(30, 4)
b = np.arange(30 * 4 * 5).reshape(30, 4, 5)
c = np.arange(5 * 60).reshape(5, 60)
da_a = xr.DataArray(a, dims=["a", "b"], coords={"a": np.linspace(0, 1, 30)})
da_b = xr.DataArray(b, dims=["a", "b", "c"], coords={"a": np.linspace(0, 1, 30)})
da_c = xr.DataArray(c, dims=["c", "e"])
if use_dask:
da_a = da_a.chunk({"a": 3})
da_b = da_b.chunk({"a": 3})
da_c = da_c.chunk({"c": 3})
actual = xr.dot(da_a, da_b, dims=["a", "b"])
assert actual.dims == ("c",)
assert (actual.data == np.einsum("ij,ijk->k", a, b)).all()
assert isinstance(actual.variable.data, type(da_a.variable.data))
actual = xr.dot(da_a, da_b)
assert actual.dims == ("c",)
assert (actual.data == np.einsum("ij,ijk->k", a, b)).all()
assert isinstance(actual.variable.data, type(da_a.variable.data))
# if only a single array is passed without a dims argument, just return it
# as is
actual = xr.dot(da_a)
assert_identical(da_a, actual)
# test for variable
actual = xr.dot(da_a.variable, da_b.variable)
assert actual.dims == ("c",)
assert (actual.data == np.einsum("ij,ijk->k", a, b)).all()
assert isinstance(actual.data, type(da_a.variable.data))
if use_dask:
da_a = da_a.chunk({"a": 3})
da_b = da_b.chunk({"a": 3})
actual = xr.dot(da_a, da_b, dims=["b"])
assert actual.dims == ("a", "c")
assert (actual.data == np.einsum("ij,ijk->ik", a, b)).all()
assert isinstance(actual.variable.data, type(da_a.variable.data))
actual = xr.dot(da_a, da_b, dims=["b"])
assert actual.dims == ("a", "c")
assert (actual.data == np.einsum("ij,ijk->ik", a, b)).all()
actual = xr.dot(da_a, da_b, dims="b")
assert actual.dims == ("a", "c")
assert (actual.data == np.einsum("ij,ijk->ik", a, b)).all()
actual = xr.dot(da_a, da_b, dims="a")
assert actual.dims == ("b", "c")
assert (actual.data == np.einsum("ij,ijk->jk", a, b)).all()
actual = xr.dot(da_a, da_b, dims="c")
assert actual.dims == ("a", "b")
assert (actual.data == np.einsum("ij,ijk->ij", a, b)).all()
actual = xr.dot(da_a, da_b, da_c, dims=["a", "b"])
assert actual.dims == ("c", "e")
assert (actual.data == np.einsum("ij,ijk,kl->kl ", a, b, c)).all()
# should work with tuple
actual = xr.dot(da_a, da_b, dims=("c",))
assert actual.dims == ("a", "b")
assert (actual.data == np.einsum("ij,ijk->ij", a, b)).all()
# default dims
actual = xr.dot(da_a, da_b, da_c)
assert actual.dims == ("e",)
assert (actual.data == np.einsum("ij,ijk,kl->l ", a, b, c)).all()
# 1 array summation
actual = xr.dot(da_a, dims="a")
assert actual.dims == ("b",)
assert (actual.data == np.einsum("ij->j ", a)).all()
# empty dim
actual = xr.dot(da_a.sel(a=[]), da_a.sel(a=[]), dims="a")
assert actual.dims == ("b",)
assert (actual.data == np.zeros(actual.shape)).all()
# Ellipsis (...) sums over all dimensions
actual = xr.dot(da_a, da_b, dims=...)
assert actual.dims == ()
assert (actual.data == np.einsum("ij,ijk->", a, b)).all()
actual = xr.dot(da_a, da_b, da_c, dims=...)
assert actual.dims == ()
assert (actual.data == np.einsum("ij,ijk,kl-> ", a, b, c)).all()
actual = xr.dot(da_a, dims=...)
assert actual.dims == ()
assert (actual.data == np.einsum("ij-> ", a)).all()
actual = xr.dot(da_a.sel(a=[]), da_a.sel(a=[]), dims=...)
assert actual.dims == ()
assert (actual.data == np.zeros(actual.shape)).all()
# Invalid cases
if not use_dask:
with pytest.raises(TypeError):
xr.dot(da_a, dims="a", invalid=None)
with pytest.raises(TypeError):
xr.dot(da_a.to_dataset(name="da"), dims="a")
with pytest.raises(TypeError):
xr.dot(dims="a")
# einsum parameters
actual = xr.dot(da_a, da_b, dims=["b"], order="C")
assert (actual.data == np.einsum("ij,ijk->ik", a, b)).all()
assert actual.values.flags["C_CONTIGUOUS"]
assert not actual.values.flags["F_CONTIGUOUS"]
actual = xr.dot(da_a, da_b, dims=["b"], order="F")
assert (actual.data == np.einsum("ij,ijk->ik", a, b)).all()
# dask converts Fortran arrays to C order when merging the final array
if not use_dask:
assert not actual.values.flags["C_CONTIGUOUS"]
assert actual.values.flags["F_CONTIGUOUS"]
# einsum takes a constant string as its first parameter, which makes it
# hard to pass to xarray.apply_ufunc.
# make sure dot() uses functools.partial(einsum, subscripts), which
# can be pickled, and not a lambda, which can't.
pickle.loads(pickle.dumps(xr.dot(da_a)))
@pytest.mark.parametrize("use_dask", [True, False])
def test_dot_align_coords(use_dask) -> None:
# GH 3694
if use_dask:
if not has_dask:
pytest.skip("test for dask.")
a = np.arange(30 * 4).reshape(30, 4)
b = np.arange(30 * 4 * 5).reshape(30, 4, 5)
# use partially overlapping coords
coords_a = {"a": np.arange(30), "b": np.arange(4)}
coords_b = {"a": np.arange(5, 35), "b": np.arange(1, 5)}
da_a = xr.DataArray(a, dims=["a", "b"], coords=coords_a)
da_b = xr.DataArray(b, dims=["a", "b", "c"], coords=coords_b)
if use_dask:
da_a = da_a.chunk({"a": 3})
da_b = da_b.chunk({"a": 3})
# join="inner" is the default
actual = xr.dot(da_a, da_b)
# `dot` sums over the common dimensions of the arguments
expected = (da_a * da_b).sum(["a", "b"])
xr.testing.assert_allclose(expected, actual)
actual = xr.dot(da_a, da_b, dims=...)
expected = (da_a * da_b).sum()
xr.testing.assert_allclose(expected, actual)
with xr.set_options(arithmetic_join="exact"):
with pytest.raises(ValueError, match=r"cannot align.*join.*exact.*not equal.*"):
xr.dot(da_a, da_b)
# NOTE: dot always uses `join="inner"` because `(a * b).sum()` yields the same for all
# join methods (except "exact")
with xr.set_options(arithmetic_join="left"):
actual = xr.dot(da_a, da_b)
expected = (da_a * da_b).sum(["a", "b"])
xr.testing.assert_allclose(expected, actual)
with xr.set_options(arithmetic_join="right"):
actual = xr.dot(da_a, da_b)
expected = (da_a * da_b).sum(["a", "b"])
xr.testing.assert_allclose(expected, actual)
with xr.set_options(arithmetic_join="outer"):
actual = xr.dot(da_a, da_b)
expected = (da_a * da_b).sum(["a", "b"])
xr.testing.assert_allclose(expected, actual)
def test_where() -> None:
cond = xr.DataArray([True, False], dims="x")
actual = xr.where(cond, 1, 0)
expected = xr.DataArray([1, 0], dims="x")
assert_identical(expected, actual)
def test_where_attrs() -> None:
cond = xr.DataArray([True, False], dims="x", attrs={"attr": "cond"})
x = xr.DataArray([1, 1], dims="x", attrs={"attr": "x"})
y = xr.DataArray([0, 0], dims="x", attrs={"attr": "y"})
actual = xr.where(cond, x, y, keep_attrs=True)
expected = xr.DataArray([1, 0], dims="x", attrs={"attr": "x"})
assert_identical(expected, actual)
@pytest.mark.parametrize("use_dask", [True, False])
@pytest.mark.parametrize("use_datetime", [True, False])
def test_polyval(use_dask, use_datetime) -> None:
if use_dask and not has_dask:
pytest.skip("requires dask")
if use_datetime:
xcoord = xr.DataArray(
pd.date_range("2000-01-01", freq="D", periods=10), dims=("x",), name="x"
)
x = xr.core.missing.get_clean_interp_index(xcoord, "x")
else:
x = np.arange(10)
xcoord = xr.DataArray(x, dims=("x",), name="x")
da = xr.DataArray(
np.stack((1.0 + x + 2.0 * x**2, 1.0 + 2.0 * x + 3.0 * x**2)),
dims=("d", "x"),
coords={"x": xcoord, "d": [0, 1]},
)
coeffs = xr.DataArray(
[[2, 1, 1], [3, 2, 1]],
dims=("d", "degree"),
coords={"d": [0, 1], "degree": [2, 1, 0]},
)
if use_dask:
coeffs = coeffs.chunk({"d": 2})
da_pv = xr.polyval(da.x, coeffs)
xr.testing.assert_allclose(da, da_pv.T)
@pytest.mark.parametrize("use_dask", [False, True])
@pytest.mark.parametrize(
"a, b, ae, be, dim, axis",
[
[
xr.DataArray([1, 2, 3]),
xr.DataArray([4, 5, 6]),
[1, 2, 3],
[4, 5, 6],
"dim_0",
-1,
],
[
xr.DataArray([1, 2]),
xr.DataArray([4, 5, 6]),
[1, 2],
[4, 5, 6],
"dim_0",
-1,
],
[
xr.Variable(dims=["dim_0"], data=[1, 2, 3]),
xr.Variable(dims=["dim_0"], data=[4, 5, 6]),
[1, 2, 3],
[4, 5, 6],
"dim_0",
-1,
],
[
xr.Variable(dims=["dim_0"], data=[1, 2]),
xr.Variable(dims=["dim_0"], data=[4, 5, 6]),
[1, 2],
[4, 5, 6],
"dim_0",
-1,
],
[ # Test dim in the middle:
xr.DataArray(
np.arange(0, 5 * 3 * 4).reshape((5, 3, 4)),
dims=["time", "cartesian", "var"],
coords=dict(
time=(["time"], np.arange(0, 5)),
cartesian=(["cartesian"], ["x", "y", "z"]),
var=(["var"], [1, 1.5, 2, 2.5]),
),
),
xr.DataArray(
np.arange(0, 5 * 3 * 4).reshape((5, 3, 4)) + 1,
dims=["time", "cartesian", "var"],
coords=dict(
time=(["time"], np.arange(0, 5)),
cartesian=(["cartesian"], ["x", "y", "z"]),
var=(["var"], [1, 1.5, 2, 2.5]),
),
),
np.arange(0, 5 * 3 * 4).reshape((5, 3, 4)),
np.arange(0, 5 * 3 * 4).reshape((5, 3, 4)) + 1,
"cartesian",
1,
],
[ # Test 1 sized arrays with coords:
xr.DataArray(
np.array([1]),
dims=["cartesian"],
coords=dict(cartesian=(["cartesian"], ["z"])),
),
xr.DataArray(
np.array([4, 5, 6]),
dims=["cartesian"],
coords=dict(cartesian=(["cartesian"], ["x", "y", "z"])),
),
[0, 0, 1],
[4, 5, 6],
"cartesian",
-1,
],
[ # Test filling in between with coords:
xr.DataArray(
[1, 2],
dims=["cartesian"],
coords=dict(cartesian=(["cartesian"], ["x", "z"])),
),
xr.DataArray(
[4, 5, 6],
dims=["cartesian"],
coords=dict(cartesian=(["cartesian"], ["x", "y", "z"])),
),
[1, 0, 2],
[4, 5, 6],
"cartesian",
-1,
],
],
)
def test_cross(a, b, ae, be, dim: str, axis: int, use_dask: bool) -> None:
expected = | np.cross(ae, be, axis=axis) | numpy.cross |
import time
import os
import csv
import copy
import math
import pickle
import cv2
import pyro
import pyro.distributions as dist
import torch
import numpy as np
from tqdm import tqdm
import transforms3d as tf3d
import pybullet as pb
import random
from PIL import Image
from generation.mujocoCabinetParts import build_cabinet, sample_cabinet
from generation.mujocoDrawerParts import build_drawer, sample_drawers
from generation.mujocoMicrowaveParts import build_microwave, sample_microwave
from generation.mujocoToasterOvenParts import build_toaster, sample_toaster
from generation.mujocoDoubleCabinetParts import build_cabinet2, sample_cabinet2, set_two_door_control
from generation.mujocoRefrigeratorParts import build_refrigerator, sample_refrigerator
from generation.utils import *
import generation.calibrations as calibrations
# pb_client = pb.connect(pb.GUI)
# pb.setGravity(0,0,-100)
def white_bg(img):
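# Pixels that are zero in every channel are treated as background and set
# to white (with zero alpha).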
mask = 1 - (img > 0)
img_cp = copy.deepcopy(img)
img_cp[mask.all(axis=2)] = [255,255,255, 0]
return img_cp
def buffer_to_real(z, zfar, znear):
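# Invert the nonlinear OpenGL depth-buffer encoding: map a normalized buffer
# value z in [0, 1] back to metric depth between znear and zfar.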
return 2*zfar*znear / (zfar + znear - (zfar - znear)*(2*z -1))
def vertical_flip(img):
return np.flip(img, axis=0)
class SceneGenerator():
def __init__(self, pb_client, root_dir='bull/test_cabinets/solo', masked=False, debug_flag=False):
'''
Class for generating a simulated articulated-object dataset.
params:
- pb_client: pybullet client used for rendering
- root_dir: save generated scenes, images, and labels in this directory
- masked: should the background of depth images be 0s or 1s?
- debug_flag: enable extra debugging behavior
'''
self.scenes=[]
self.savedir=root_dir
self.masked = masked
self.img_idx = 0
self.depth_data=[]
self.debugging=debug_flag
# pb_client = pb.connect(pb.GUI)
self.pb_client = pb_client
# self.pb_client.setGravity(0,0,-9.8)
# Camera external settings
self.viewMatrix = pb.computeViewMatrix(
cameraEyePosition=[4,0,1],
cameraTargetPosition=[0,0,1],
cameraUpVector=[0,0,1]
)
# Camera internal settings
self.projectionMatrix = pb.computeProjectionMatrixFOV(
fov=45.,
aspect=1.0,
nearVal=0.1,
farVal=8.1
)
print(root_dir)
def write_urdf(self, filename, xml):
with open(filename, "w") as text_file:
text_file.write(xml)
def sample_obj(self, obj_type, mean_flag, left_only, cute_flag=False):
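# Sample object dimensions (l, w, h, thickness), handedness, and mass for the
# requested class, build the corresponding MJCF model, and return it together
# with a camera distance/height heuristic that scales with object height.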
if obj_type == 'microwave':
l, w, h, t, left, mass = sample_microwave(mean_flag)
if mean_flag:
obj = build_microwave(l, w, h, t, left,
set_pose = [1.0, 0.0, -0.15],
set_rot = [0.0, 0.0, 0.0, 1.0] )
elif cute_flag:
base_xyz, base_angle = sample_pose()
base_quat = angle_to_quat(base_angle)
obj = build_microwave(l, w, h, t, left,
set_pose = [1.0, 0.0, -0.15],
set_rot = base_quat)
else:
obj = build_microwave(l, w, h, t, left)
camera_dist = max(1.25, 2*math.log(10*h))
camera_height = h/2.
elif obj_type == 'drawer':
l, w, h, t, left, mass = sample_drawers(mean_flag)
if mean_flag:
obj = build_drawer(l, w, h, t, left,
set_pose = [1.5, 0.0, -0.4],
set_rot = [0.0, 0.0, 0.0, 1.0] )
elif cute_flag:
base_xyz, base_angle = sample_pose()
base_quat = angle_to_quat(base_angle)
obj = build_drawer(l, w, h, t, left,
set_pose = [1.2, 0.0, -0.15],
set_rot = base_quat)
else:
obj = build_drawer(l, w, h, t, left)
camera_dist = max(2, 2*math.log(10*h))
camera_height = h/2.
elif obj_type == 'toaster':
l, w, h, t, left, mass = sample_toaster(mean_flag)
if mean_flag:
obj = build_toaster(l, w, h, t, left,
set_pose = [1.5, 0.0, -0.3],
set_rot = [0.0, 0.0, 0.0, 1.0] )
elif cute_flag:
base_xyz, base_angle = sample_pose()
base_quat = angle_to_quat(base_angle)
obj = build_toaster(l, w, h, t, left,
set_pose = [1.0, 0.0, -0.15],
set_rot = base_quat)
else:
obj = build_toaster(l, w, h, t, left)
camera_dist = max(1, 2*math.log(10*h))
camera_height = h/2.
elif obj_type == 'cabinet':
l, w, h, t, left, mass = sample_cabinet(mean_flag)
if mean_flag:
if left_only:
left=True
else:
left=False
obj = build_cabinet(l, w, h, t, left,
set_pose = [1.5, 0.0, -0.3],
set_rot = [0.0, 0.0, 0.0, 1.0] )
elif cute_flag:
base_xyz, base_angle = sample_pose()
base_quat = angle_to_quat(base_angle)
obj = build_cabinet(l, w, h, t, left,
set_pose = [1.5, 0.0, -0.15],
set_rot = base_quat)
else:
left = np.random.choice([True,False])
obj = build_cabinet(l, w, h, t, left)
camera_dist = 2*math.log(10*h)
camera_height = h/2.
elif obj_type == 'cabinet2':
l, w, h, t, left, mass = sample_cabinet2(mean_flag)
if mean_flag:
obj = build_cabinet2(l, w, h, t, left,
set_pose = [1.5, 0.0, -0.3],
set_rot = [0.0, 0.0, 0.0, 1.0] )
elif cute_flag:
base_xyz, base_angle = sample_pose()
base_quat = angle_to_quat(base_angle)
obj = build_cabinet2(l, w, h, t, left,
set_pose = [1.5, 0.0, -0.15],
set_rot = base_quat)
else:
obj = build_cabinet2(l, w, h, t, left)
camera_dist = 2*math.log(10*h)
camera_height = h/2.
elif obj_type == 'refrigerator':
l, w, h, t, left, mass = sample_refrigerator(mean_flag)
if mean_flag:
obj = build_refrigerator(l, w, h, t, left,
set_pose = [1.5, 0.0, -0.3],
set_rot = [0.0, 0.0, 0.0, 1.0])
elif cute_flag:
base_xyz, base_angle = sample_pose()
base_quat = angle_to_quat(base_angle)
obj = build_refrigerator(l, w, h, t, left,
set_pose = [2.5, 0.0, -0.75],
set_rot = base_quat)
else:
obj = build_refrigerator(l, w, h, t, left)
camera_dist = 2*math.log(10*h)
camera_height = h/2.
else:
raise NotImplementedError('uh oh, object type "%s" not implemented!' % obj_type)
return obj, camera_dist, camera_height
def generate_scenes(self, N, objtype, write_csv=True, save_imgs=True, mean_flag=False, left_only=False, cute_flag=False, test=False, video=False):
fname=os.path.join(self.savedir, 'labels.csv')
self.img_idx = 0
with open(fname, 'a') as csvfile:
writ = csv.writer(csvfile, delimiter=',')
writ.writerow(['Object Name', 'Joint Type', 'Image Index', 'l_1', 'l_2', 'l_3', 'm_1', 'm_2', 'm_3',])
for i in tqdm(range(N)):
obj, camera_dist, camera_height = self.sample_obj(objtype, mean_flag, left_only, cute_flag=cute_flag)
xml=obj.xml
fname=os.path.join(self.savedir, 'scene'+str(i).zfill(6)+'.xml')
self.write_urdf(fname, xml)
self.scenes.append(fname)
self.take_images(fname, obj, camera_dist, camera_height, obj.joint_index, writ, test=test, video=video)
return
def take_images(self, filename, obj, camera_dist, camera_height, joint_index, writer, img_idx=0, debug=False, test=False, video=False):
objId, _ = pb.loadMJCF(filename)
# create normal texture image
x, y = np.meshgrid(np.linspace(-1,1, 128), np.linspace(-1,1, 128))
texture_img = (72*(np.stack([np.cos(16*x), | np.cos(16*y) | numpy.cos |
import NetworkLayer
import Utils
from NetworkConfig import NetworkConfig
from NetworkPerformanceTuner import NetworkPerformanceTuner
from NeuralNetworkMLP import NeuralNetwork
import numpy as np
nodes_per_layer = [3,2]
weights = 0
def weight_provider(num):
return np.arange(num)
config = NetworkConfig(nodes_per_layer, weight_provider, Utils.node_bias_provider,
Utils.sigmoid_function, Utils.sigmoid_derivative_function)
neural_network = NeuralNetwork.build(config, NetworkLayer.NetworkLayer.build)
learning_rate = 1
network_tuner = NetworkPerformanceTuner(neural_network, Utils.regular_layer_error_calculator,
Utils.cross_entropy_output_layer_calculator,
Utils.batch_cross_entropy_cost_function, learning_rate, config)
inputs = | np.arange(6.0) | numpy.arange |
"""
This file is part of the repo: https://github.com/tencent-ailab/hifi3dface
If you find the code useful, please cite our paper:
"High-Fidelity 3D Digital Human Creation from RGB-D Selfies."
<NAME>*, <NAME>*, <NAME>*, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, and <NAME>.
arXiv: https://arxiv.org/abs/2010.05562
Copyright (c) [2020] [Tencent AI Lab]
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
# from __future__ import absolute_import
# from __future__ import division
# from __future__ import print_function
import cv2, os, importlib, math
import os.path as osp
import numpy as np
import scipy.io as scio
import tensorflow as tf
def create_mtcnn_pb(sess):
pnet_fun = lambda img: sess.run(
("pnet/conv4-2/BiasAdd:0", "pnet/prob1:0"), feed_dict={"pnet/input:0": img}
)
rnet_fun = lambda img: sess.run(
("rnet/conv5-2/conv5-2:0", "rnet/prob1:0"), feed_dict={"rnet/input:0": img}
)
onet_fun = lambda img: sess.run(
("onet/conv6-2/conv6-2:0", "onet/conv6-3/conv6-3:0", "onet/prob1:0"),
feed_dict={"onet/input:0": img},
)
return pnet_fun, rnet_fun, onet_fun
def detect_face(img, minsize, pnet, rnet, onet, threshold, factor):
"""Detects faces in an image, and returns bounding boxes and points for them.
img: input image
minsize: minimum faces' size
pnet, rnet, onet: caffemodel
threshold: threshold=[th1, th2, th3], th1-3 are three steps's threshold
factor: the factor used to create a scaling pyramid of face sizes to detect in the image.
"""
factor_count = 0
total_boxes = np.empty((0, 9))
points = np.empty(0)
h = img.shape[0]
w = img.shape[1]
minl = np.amin([h, w])
m = 12.0 / minsize
minl = minl * m
# create scale pyramid
scales = []
while minl >= 12:
scales += [m * np.power(factor, factor_count)]
minl = minl * factor
factor_count += 1
# first stage
for scale in scales:
hs = int(np.ceil(h * scale))
ws = int(np.ceil(w * scale))
im_data = imresample(img, (hs, ws))
im_data = (im_data - 127.5) * 0.0078125
img_x = np.expand_dims(im_data, 0)
img_y = np.transpose(img_x, (0, 2, 1, 3))
out = pnet(img_y)
out0 = np.transpose(out[0], (0, 2, 1, 3))
out1 = np.transpose(out[1], (0, 2, 1, 3))
boxes, _ = generateBoundingBox(
out1[0, :, :, 1].copy(), out0[0, :, :, :].copy(), scale, threshold[0]
)
# inter-scale nms
pick = nms(boxes.copy(), 0.5, "Union")
if boxes.size > 0 and pick.size > 0:
boxes = boxes[pick, :]
total_boxes = np.append(total_boxes, boxes, axis=0)
numbox = total_boxes.shape[0]
if numbox > 0:
pick = nms(total_boxes.copy(), 0.7, "Union")
total_boxes = total_boxes[pick, :]
regw = total_boxes[:, 2] - total_boxes[:, 0]
regh = total_boxes[:, 3] - total_boxes[:, 1]
qq1 = total_boxes[:, 0] + total_boxes[:, 5] * regw
qq2 = total_boxes[:, 1] + total_boxes[:, 6] * regh
qq3 = total_boxes[:, 2] + total_boxes[:, 7] * regw
qq4 = total_boxes[:, 3] + total_boxes[:, 8] * regh
total_boxes = np.transpose(np.vstack([qq1, qq2, qq3, qq4, total_boxes[:, 4]]))
total_boxes = rerec(total_boxes.copy())
total_boxes[:, 0:4] = np.fix(total_boxes[:, 0:4]).astype(np.int32)
dy, edy, dx, edx, y, ey, x, ex, tmpw, tmph = pad(total_boxes.copy(), w, h)
numbox = total_boxes.shape[0]
if numbox > 0:
# second stage
tempimg = np.zeros((24, 24, 3, numbox))
for k in range(0, numbox):
tmp = np.zeros((int(tmph[k]), int(tmpw[k]), 3))
tmp[dy[k] - 1 : edy[k], dx[k] - 1 : edx[k], :] = img[
y[k] - 1 : ey[k], x[k] - 1 : ex[k], :
]
if (tmp.shape[0] > 0 and tmp.shape[1] > 0) or (
tmp.shape[0] == 0 and tmp.shape[1] == 0
):
tempimg[:, :, :, k] = imresample(tmp, (24, 24))
else:
return np.empty(0)
tempimg = (tempimg - 127.5) * 0.0078125
tempimg1 = np.transpose(tempimg, (3, 1, 0, 2))
out = rnet(tempimg1)
out0 = np.transpose(out[0])
out1 = np.transpose(out[1])
score = out1[1, :]
ipass = np.where(score > threshold[1])
total_boxes = np.hstack(
[total_boxes[ipass[0], 0:4].copy(), np.expand_dims(score[ipass].copy(), 1)]
)
mv = out0[:, ipass[0]]
if total_boxes.shape[0] > 0:
pick = nms(total_boxes, 0.7, "Union")
total_boxes = total_boxes[pick, :]
total_boxes = bbreg(total_boxes.copy(), np.transpose(mv[:, pick]))
total_boxes = rerec(total_boxes.copy())
numbox = total_boxes.shape[0]
if numbox > 0:
# third stage
total_boxes = np.fix(total_boxes).astype(np.int32)
dy, edy, dx, edx, y, ey, x, ex, tmpw, tmph = pad(total_boxes.copy(), w, h)
tempimg = np.zeros((48, 48, 3, numbox))
for k in range(0, numbox):
tmp = np.zeros((int(tmph[k]), int(tmpw[k]), 3))
tmp[dy[k] - 1 : edy[k], dx[k] - 1 : edx[k], :] = img[
y[k] - 1 : ey[k], x[k] - 1 : ex[k], :
]
if (tmp.shape[0] > 0 and tmp.shape[1] > 0) or (
tmp.shape[0] == 0 and tmp.shape[1] == 0
):
tempimg[:, :, :, k] = imresample(tmp, (48, 48))
else:
return np.empty(0)
tempimg = (tempimg - 127.5) * 0.0078125
tempimg1 = np.transpose(tempimg, (3, 1, 0, 2))
out = onet(tempimg1)
out0 = np.transpose(out[0])
out1 = np.transpose(out[1])
out2 = np.transpose(out[2])
score = out2[1, :]
points = out1
ipass = np.where(score > threshold[2])
points = points[:, ipass[0]]
total_boxes = np.hstack(
[total_boxes[ipass[0], 0:4].copy(), np.expand_dims(score[ipass].copy(), 1)]
)
mv = out0[:, ipass[0]]
w = total_boxes[:, 2] - total_boxes[:, 0] + 1
h = total_boxes[:, 3] - total_boxes[:, 1] + 1
points[0:5, :] = (
np.tile(w, (5, 1)) * points[0:5, :] + np.tile(total_boxes[:, 0], (5, 1)) - 1
)
points[5:10, :] = (
np.tile(h, (5, 1)) * points[5:10, :]
+ np.tile(total_boxes[:, 1], (5, 1))
- 1
)
if total_boxes.shape[0] > 0:
total_boxes = bbreg(total_boxes.copy(), np.transpose(mv))
pick = nms(total_boxes.copy(), 0.7, "Min")
total_boxes = total_boxes[pick, :]
points = points[:, pick]
return total_boxes, points
def generateBoundingBox(imap, reg, scale, t):
"""Use heatmap to generate bounding boxes"""
stride = 2
cellsize = 12
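# P-Net is fully convolutional with an effective stride of 2 and a 12x12
# receptive field, so each heatmap cell is mapped back to original-image
# coordinates by undoing the stride and the pyramid scale (q1/q2 below).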
imap = np.transpose(imap)
dx1 = np.transpose(reg[:, :, 0])
dy1 = np.transpose(reg[:, :, 1])
dx2 = np.transpose(reg[:, :, 2])
dy2 = np.transpose(reg[:, :, 3])
y, x = np.where(imap >= t)
if y.shape[0] == 1:
dx1 = np.flipud(dx1)
dy1 = np.flipud(dy1)
dx2 = np.flipud(dx2)
dy2 = np.flipud(dy2)
score = imap[(y, x)]
reg = np.transpose(np.vstack([dx1[(y, x)], dy1[(y, x)], dx2[(y, x)], dy2[(y, x)]]))
if reg.size == 0:
reg = np.empty((0, 3))
bb = np.transpose(np.vstack([y, x]))
q1 = np.fix((stride * bb + 1) / scale)
q2 = np.fix((stride * bb + cellsize - 1 + 1) / scale)
boundingbox = np.hstack([q1, q2, np.expand_dims(score, 1), reg])
return boundingbox, reg
def imresample(img, sz):
im_data = cv2.resize(
img, (sz[1], sz[0]), interpolation=cv2.INTER_AREA
) # @UndefinedVariable
return im_data
def nms(boxes, threshold, method):
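# Greedy non-maximum suppression: repeatedly keep the highest-scoring box and
# suppress boxes whose overlap with it exceeds the threshold, where "Min"
# normalizes the intersection by the smaller box area and anything else uses
# standard IoU.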
if boxes.size == 0:
return np.empty((0, 3))
x1 = boxes[:, 0]
y1 = boxes[:, 1]
x2 = boxes[:, 2]
y2 = boxes[:, 3]
s = boxes[:, 4]
area = (x2 - x1 + 1) * (y2 - y1 + 1)
I = np.argsort(s)
pick = np.zeros_like(s, dtype=np.int16)
counter = 0
while I.size > 0:
i = I[-1]
pick[counter] = i
counter += 1
idx = I[0:-1]
xx1 = np.maximum(x1[i], x1[idx])
yy1 = np.maximum(y1[i], y1[idx])
xx2 = np.minimum(x2[i], x2[idx])
yy2 = np.minimum(y2[i], y2[idx])
w = np.maximum(0.0, xx2 - xx1 + 1)
h = np.maximum(0.0, yy2 - yy1 + 1)
inter = w * h
if method is "Min":
o = inter / np.minimum(area[i], area[idx])
else:
o = inter / (area[i] + area[idx] - inter)
I = I[np.where(o <= threshold)]
pick = pick[0:counter]
return pick
# function [bboxA] = rerec(bboxA)
def rerec(bboxA):
"""Convert bboxA to square."""
h = bboxA[:, 3] - bboxA[:, 1]
w = bboxA[:, 2] - bboxA[:, 0]
l = np.maximum(w, h)
bboxA[:, 0] = bboxA[:, 0] + w * 0.5 - l * 0.5
bboxA[:, 1] = bboxA[:, 1] + h * 0.5 - l * 0.5
bboxA[:, 2:4] = bboxA[:, 0:2] + np.transpose(np.tile(l, (2, 1)))
return bboxA
# function [boundingbox] = bbreg(boundingbox,reg)
def bbreg(boundingbox, reg):
"""Calibrate bounding boxes"""
if reg.shape[1] == 1:
reg = np.reshape(reg, (reg.shape[2], reg.shape[3]))
w = boundingbox[:, 2] - boundingbox[:, 0] + 1
h = boundingbox[:, 3] - boundingbox[:, 1] + 1
b1 = boundingbox[:, 0] + reg[:, 0] * w
b2 = boundingbox[:, 1] + reg[:, 1] * h
b3 = boundingbox[:, 2] + reg[:, 2] * w
b4 = boundingbox[:, 3] + reg[:, 3] * h
boundingbox[:, 0:4] = np.transpose(np.vstack([b1, b2, b3, b4]))
return boundingbox
# function [dy edy dx edx y ey x ex tmpw tmph] = pad(total_boxes,w,h)
def pad(total_boxes, w, h):
"""Compute the padding coordinates (pad the bounding boxes to square)"""
tmpw = (total_boxes[:, 2] - total_boxes[:, 0] + 1).astype(np.int32)
tmph = (total_boxes[:, 3] - total_boxes[:, 1] + 1).astype(np.int32)
numbox = total_boxes.shape[0]
dx = np.ones((numbox), dtype=np.int32)
dy = np.ones((numbox), dtype=np.int32)
edx = tmpw.copy().astype(np.int32)
edy = tmph.copy().astype(np.int32)
x = total_boxes[:, 0].copy().astype(np.int32)
y = total_boxes[:, 1].copy().astype(np.int32)
ex = total_boxes[:, 2].copy().astype(np.int32)
ey = total_boxes[:, 3].copy().astype(np.int32)
tmp = np.where(ex > w)
edx.flat[tmp] = np.expand_dims(-ex[tmp] + w + tmpw[tmp], 1)
ex[tmp] = w
tmp = np.where(ey > h)
edy.flat[tmp] = np.expand_dims(-ey[tmp] + h + tmph[tmp], 1)
ey[tmp] = h
tmp = np.where(x < 1)
dx.flat[tmp] = np.expand_dims(2 - x[tmp], 1)
x[tmp] = 1
tmp = np.where(y < 1)
dy.flat[tmp] = np.expand_dims(2 - y[tmp], 1)
y[tmp] = 1
return dy, edy, dx, edx, y, ey, x, ex, tmpw, tmph
def detect_with_MTCNN(origin_images_dir, out_dir, pb_path, mode="no_depth"):
print("MTCNN detect")
if os.path.exists(out_dir) is False:
os.makedirs(out_dir)
minsize = 20 # minimum size of face
threshold = [0.5, 0.6, 0.6] # three steps's threshold
factor = 0.709 # scale factor
with tf.Graph().as_default():
graph_def = tf.GraphDef()
graph_file = pb_path
with open(graph_file, "rb") as f:
print("hello")
graph_def.ParseFromString(f.read())
tf.import_graph_def(graph_def, name="")
sess = tf.Session()
with sess.as_default():
tf.global_variables_initializer().run()
pnet, rnet, onet = create_mtcnn_pb(sess)
# find files
import glob
if mode == "depth":
files = glob.glob(osp.join(origin_images_dir, "*.jpg"))
files.extend(glob.glob(osp.join(origin_images_dir, "*.JPG")))
dep_files = glob.glob(osp.join(origin_images_dir, "*.png"))
dep_files.extend(glob.glob(osp.join(origin_images_dir, "*.PNG")))
files.sort()
dep_files.sort()
else:
files = glob.glob(osp.join(origin_images_dir, "*.jpg"))
files.extend(glob.glob(osp.join(origin_images_dir, "*.png")))
files.extend(glob.glob(osp.join(origin_images_dir, "*.JPG")))
files.extend(glob.glob(osp.join(origin_images_dir, "*.PNG")))
files.sort()
print("=========================")
# print("img:", files)
# print("Dep:", dep_files)
# detect face bbox
count = 0
names_list = []
dep_name_list = []
for index in range(0, len(files)):
img = cv2.imread(files[index])
bounding_boxes, points = detect_face(
img, minsize, pnet, rnet, onet, threshold, factor
) # bounding_boxes.shape: (n, 5) points.shape: (10, n)
if len(bounding_boxes) == 1:
points = np.transpose(points)
batch_imgs = img
batch_bboxes = bounding_boxes
batch_points = points
batch_names = files[index].split("/")[-1]
names_list.append(batch_names)
scio.savemat(
os.path.join(out_dir, batch_names[:-4] + ".mat"),
{
"batch_bboxes": batch_bboxes.astype(np.float64),
"batch_points": batch_points.astype(np.float64),
},
)
if mode == "depth":
dep_name_list.append(dep_files[index].split("/")[-1])
elif len(bounding_boxes) > 1:
print("too much face to detect by MTCNN, only select first person")
points = | np.transpose(points[:, 0:1]) | numpy.transpose |
'''
DESCRIPTION
----------
An assortment of code written for sanity checks on our 2017 TESS GI proposal
about difference imaging of clusters.
Most of this involving parsing Kharchenko et al (2013)'s table, hence the name
`parse_MWSC.py`.
The tools here do things like:
* Find how many open clusters we could observe
* Find how many member stars within those we could observe
* Compute TESS mags for everything (mostly via `ticgen`)
* Estimate blending effects, mainly through the dilution (computed just by
summing magnitudes appropriately)
* Using K+13's King profile fits, estimate the surface density of member stars.
It turns out that this radically underestimates the actual surface density
of stars (because of all the background blends). Moreover, for purposes of
motivating our difference imaging, "the number of stars in your aperture"
is more relevant than "a surface density", and even more relevant than both
of those is dilution.
So I settled on the dilution calculation.
The plotting scripts here also make the skymap figure of the proposal. (Where
are the clusters on the sky?)
USAGE
----------
From /src/, select desired functions from __main__ below. Then:
>>> python parse_MWSC.py > output.log
'''
import matplotlib.pyplot as plt, seaborn as sns
import pandas as pd, numpy as np
from astropy.table import Table
from astropy.io import ascii
from astropy.coordinates import SkyCoord
import astropy.units as u
from math import pi
import pickle, os
from scipy.interpolate import interp1d
global COLORS
COLORS = ['#1f77b4', '#ff7f0e', '#2ca02c', '#d62728', '#9467bd', '#8c564b',
'#e377c2', '#7f7f7f', '#bcbd22', '#17becf']
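# The module docstring above mentions estimating dilution "by summing
# magnitudes appropriately". A minimal sketch of that calculation
# (hypothetical helper, not part of the original analysis), using the
# standard flux-magnitude relation flux ~ 10**(-0.4*mag):
def dilution_from_mags(m_target, m_blends):
    # flux of the target star, in arbitrary units
    f_target = 10 ** (-0.4 * float(m_target))
    # summed flux of all blended neighbours falling in the same aperture
    f_blends = np.sum(10 ** (-0.4 * np.asarray(m_blends, dtype=float)))
    # fraction of the aperture flux contributed by the target
    return f_target / (f_target + f_blends)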
# cite:
#
# <NAME>. & <NAME>. 2017, ticgen: A tool for calculating a TESS
# magnitude, and an expected noise level for stars to be observed by TESS.,
# v1.0.0, Zenodo, doi:10.5281/zenodo.888217
#
# and Stassun & friends (2017).
#import ticgen as ticgen
# # These two, from the website
# # http://dc.zah.uni-heidelberg.de/mwsc/q/clu/form
# # are actually outdated or something. They provided too few results.
# close_certain = pd.read_csv('../data/MWSC_search_lt_2000_pc_type_certain.csv')
# close_junk = pd.read_csv('../data/MWSC_search_lt_2000_pc_type_certain.csv')
def get_cluster_data():
# Downloaded the MWSC from
# http://cdsarc.u-strasbg.fr/viz-bin/Cat?cat=J%2FA%2BA%2F558%2FA53&target=http&
tab = Table.read('../data/Kharchenko_2013_MWSC.vot', format='votable')
df = tab.to_pandas()
for colname in ['Type', 'Name', 'n_Type', 'SType']:
df[colname] = [e.decode('utf-8') for e in list(df[colname])]
# From erratum:
# For the Sun-like star, a 4 Re planet produces a transit depth of 0.13%. The
# limiting magnitude for transits to be detectable is about I_C = 11.4 . This
# also corresponds to K_s ~= 10.6 and a maximum distance of 290 pc, assuming no
# extinction.
cinds = np.array(df['d']<500)
close = df[cinds]
finds = np.array(df['d']<1000)
far = df[finds]
N_c_r0 = int(np.sum(close['N1sr0']))
N_c_r1 = int(np.sum(close['N1sr1']))
N_c_r2 = int(np.sum(close['N1sr2']))
N_f_r0 = int(np.sum(far['N1sr0']))
N_f_r1 = int(np.sum(far['N1sr1']))
N_f_r2 = int(np.sum(far['N1sr2']))
type_d = {'a':'association', 'g':'globular cluster', 'm':'moving group',
'n':'nebulosity/presence of nebulosity', 'r':'remnant cluster',
's':'asterism', '': 'no label'}
ntype_d = {'o':'object','c':'candidate','':'no label'}
print('*'*50)
print('\nMilky Way Star Clusters (close := <500pc)'
'\nN_clusters: {:d}'.format(len(close))+\
'\nN_stars (in core): {:d}'.format(N_c_r0)+\
'\nN_stars (in central part): {:d}'.format(N_c_r1)+\
'\nN_stars (in cluster): {:d}'.format(N_c_r2))
print('\n'+'*'*50)
print('\nMilky Way Star Clusters (far := <1000pc)'
'\nN_clusters: {:d}'.format(len(far))+\
'\nN_stars (in core): {:d}'.format(N_f_r0)+\
'\nN_stars (in central part): {:d}'.format(N_f_r1)+\
'\nN_stars (in cluster): {:d}'.format(N_f_r2))
print('\n'+'*'*50)
####################
# Post-processing. #
####################
# Compute mean density
mean_N_star_per_sqdeg = df['N1sr2'] / (pi * df['r2']**2)
df['mean_N_star_per_sqdeg'] = mean_N_star_per_sqdeg
# Compute King profiles
king_profiles, theta_profiles = [], []
for rt, rc, k, d in zip(np.array(df['rt']),
np.array(df['rc']),
np.array(df['k']),
np.array(df['d'])):
sigma, theta = get_king_proj_density_profile(rt, rc, k, d)
king_profiles.append(sigma)
theta_profiles.append(theta)
df['king_profile'] = king_profiles
df['theta'] = theta_profiles
ra = np.array(df['RAJ2000'])
dec = np.array(df['DEJ2000'])
c = SkyCoord(ra=ra*u.degree, dec=dec*u.degree, frame='icrs')
galactic_long = np.array(c.galactic.l)
galactic_lat = np.array(c.galactic.b)
ecliptic_long = np.array(c.barycentrictrueecliptic.lon)
ecliptic_lat = np.array(c.barycentrictrueecliptic.lat)
df['galactic_long'] = galactic_long
df['galactic_lat'] = galactic_lat
df['ecliptic_long'] = ecliptic_long
df['ecliptic_lat'] = ecliptic_lat
cinds = np.array(df['d']<500)
close = df[cinds]
finds = np.array(df['d']<1000)
far = df[finds]
return close, far, df
def distance_histogram(df):
plt.close('all')
f,ax = plt.subplots(figsize=(4,4))
hist, bin_edges = np.histogram(
df['d'],
bins=np.append(np.logspace(1, 6, int(1e3)), 1e7),
density=False)
ax.step(bin_edges[:-1], np.cumsum(hist), 'k-', where='post')
ax.set_xlabel('distance [pc]')
ax.set_ylabel('cumulative N clusters in MWSC')
ax.set_xlim([5e1,1e4])
ax.set_xscale('log')
ax.set_yscale('log')
f.tight_layout()
f.savefig('d_cumdistribn_MWSC.pdf', dpi=300, bbox_inches='tight')
def angular_scale_cumdist(close, far):
colors = ['#1f77b4', '#ff7f0e', '#2ca02c', '#d62728', '#9467bd', '#8c564b',
'#e377c2', '#7f7f7f', '#bcbd22', '#17becf']
plt.close('all')
f,ax = plt.subplots(figsize=(4,4))
axt = ax.twiny()
scale_d = {'r0': 'angular radius of the core (0 if no core)',
'r1': '"central" radius',
'r2': 'cluster radius'}
ix = 0
for t, dat in [('$d<0.5$ kpc',close), ('$d<1$ kpc',far)]:
for k in ['r2']:
hist, bin_edges = np.histogram(
dat[k],
bins=np.append(np.logspace(-2, 1, int(1e3)), 1e7),
density=False)
ax.step(bin_edges[:-1], np.cumsum(hist),
where='post', label=t+' '+scale_d[k])
ix += 1
def tick_function(angle_deg):
tess_px = 21*u.arcsec
vals = angle_deg/tess_px.to(u.deg).value
return ['%.1f' % z for z in vals]
ax.legend(loc='upper left', fontsize='xx-small')
ax.set_xlabel('ang scale [deg]')
ax.set_ylabel('cumulative N clusters in MWSC')
ax.set_xscale('log')
#ax.set_yscale('log')
axt.set_xscale('log')
axt.set_xlim(ax.get_xlim())
new_tick_locations = np.array([1e-2, 1e-1, 1e0, 1e1])
axt.set_xticks(new_tick_locations)
axt.set_xticklabels(tick_function(new_tick_locations))
axt.set_xlabel('angular scale [TESS pixels]')
f.tight_layout()
f.savefig('angscale_cumdistribn_MWSC.pdf', dpi=300, bbox_inches='tight')
def angular_scale_hist(close, far):
colors = ['#1f77b4', '#ff7f0e', '#2ca02c', '#d62728', '#9467bd', '#8c564b',
'#e377c2', '#7f7f7f', '#bcbd22', '#17becf']
plt.close('all')
f,ax = plt.subplots(figsize=(4,4))
axt = ax.twiny()
scale_d = {'r0': 'angular radius of the core (0 if no core)',
'r1': '"central" radius',
'r2': 'cluster radius'}
ix = 0
for t, dat in [('$d<0.5$ kpc',close), ('$d<1$ kpc',far)]:
for k in ['r2']:
hist, bin_edges = np.histogram(
dat[k],
bins=np.append(np.logspace(-2,1,7), 1e7),
density=False)
ax.step(bin_edges[:-1], hist, where='post', label=t+' '+scale_d[k],
alpha=0.7)
ix += 1
def tick_function(angle_deg):
tess_px = 21*u.arcsec
vals = angle_deg/tess_px.to(u.deg).value
return ['%.1f' % z for z in vals]
ax.legend(loc='best', fontsize='xx-small')
ax.set_xlabel('ang scale [deg]')
ax.set_ylabel('N clusters in MWSC')
ax.set_xscale('log')
#ax.set_yscale('log')
axt.set_xscale('log')
axt.set_xlim(ax.get_xlim())
new_tick_locations = np.array([1e-2, 1e-1, 1e0, 1e1])
axt.set_xticks(new_tick_locations)
axt.set_xticklabels(tick_function(new_tick_locations))
axt.set_xlabel('angular scale [TESS pixels]')
f.tight_layout()
f.savefig('angscale_distribn_MWSC.pdf', dpi=300, bbox_inches='tight')
def mean_density_hist(close, far):
colors = ['#1f77b4', '#ff7f0e', '#2ca02c', '#d62728', '#9467bd', '#8c564b',
'#e377c2', '#7f7f7f', '#bcbd22', '#17becf']
plt.close('all')
f,ax = plt.subplots(figsize=(4,4))
axt = ax.twiny()
ix = 0
for t, dat in [('$d<0.5$ kpc',close), ('$d<1$ kpc',far)]:
hist, bin_edges = np.histogram(
dat['mean_N_star_per_sqdeg'],
bins=np.append(np.logspace(0,4,9), 1e7),
density=False)
ax.step(bin_edges[:-1], hist, where='post', label=t,
alpha=0.7)
ix += 1
def tick_function(N_star_per_sqdeg):
tess_px = 21*u.arcsec
tess_px_area = tess_px**2
deg_per_tess_px = tess_px_area.to(u.deg**2).value
vals = N_star_per_sqdeg * deg_per_tess_px
outstrs = ['%.1E'%z for z in vals]
outstrs = ['$'+o[0] + r'\! \cdot \! 10^{\mathrm{-}' + o[-1] + r'}$' \
for o in outstrs]
return outstrs
ax.legend(loc='best', fontsize='xx-small')
ax.set_xlabel('mean areal density [stars/$\mathrm{deg}^{2}$]')
ax.set_ylabel('N clusters in MWSC')
ax.set_xscale('log')
#ax.set_yscale('log')
axt.set_xscale('log')
axt.set_xlim(ax.get_xlim())
new_tick_locations = np.logspace(0,4,5)
axt.set_xticks(new_tick_locations)
axt.set_xticklabels(tick_function(new_tick_locations))
axt.set_xlabel('mean areal density [stars/$\mathrm{(TESS\ px)}^{2}$]')
f.tight_layout()
f.savefig('mean_density_distribn_MWSC.pdf', dpi=300, bbox_inches='tight')
def plot_king_profiles(close, far):
colors = ['#1f77b4', '#ff7f0e', '#2ca02c', '#d62728', '#9467bd', '#8c564b',
'#e377c2', '#7f7f7f', '#bcbd22', '#17becf']
plt.close('all')
f, axs = plt.subplots(figsize=(4,7), nrows=2, ncols=1, sharex=True)
for theta, profile in zip(close['theta'], close['king_profile']):
axs[0].plot(theta, profile, alpha=0.2, c=colors[0])
for theta, profile in zip(far['theta'], far['king_profile']):
axs[1].plot(theta, profile, alpha=0.1, c=colors[1])
# Add text in top right.
axs[0].text(0.95, 0.95, '$d < 500\ \mathrm{pc}$', verticalalignment='top',
horizontalalignment='right', transform=axs[0].transAxes,
fontsize='large')
axs[1].text(0.95, 0.95, '$d < 1\ \mathrm{kpc}$', verticalalignment='top',
horizontalalignment='right', transform=axs[1].transAxes,
fontsize='large')
xmin, xmax = 1, 1e3
for ax in axs:
ax.set_xscale('log')
ax.set_xlim([xmin, xmax])
if ax == axs[1]:
ax.xaxis.set_ticks_position('both')
ax.set_xlabel('angular distance [TESS px]')
ax.tick_params(which='both', direction='in', zorder=0)
ax.set_ylabel(r'$\Sigma(r)$ [stars/$\mathrm{(TESS\ px)}^{2}$]')
f.tight_layout(h_pad=0)
f.savefig('king_density_profiles_close_MWSC.pdf', dpi=300,
bbox_inches='tight')
def get_king_proj_density_profile(r_t, r_c, k, d):
'''
r_t: King's tidal radius [pc]
r_c: King's core radius [pc]
k: normalization [pc^{-2}]
d: distance [pc]

returns: surface density profile in stars per square TESS pixel, and the
corresponding angular distances in TESS pixels
'''
# Eq 4 of Ernst et al, 2010 https://arxiv.org/pdf/1009.0710.pdf
# citing King (1962).
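# Projected King profile implemented below:
#   Sigma(r) = k * [ (1 + (r/r_c)^2)^(-1/2) - (1 + (r_t/r_c)^2)^(-1/2) ]^2
# for r < r_t, and Sigma(r) = 0 beyond the tidal radius r_t.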
r = np.logspace(-2, 2.4, num=int(2e4))
X = 1 + (r/r_c)**2
C = 1 + (r_t/r_c)**2
vals = k * (X**(-1/2) - C**(-1/2))**2
#NOTE: this fails when r_t does not exist. This might be important...
vals[r>r_t] = 0
# vals currently in number per square parsec. want in number per TESS px.
# first convert to number per square arcsec
# N per sq arcsec. First term converts to 1/AU^2. Then the angular surface
# density scales as the square of the distance (same number of things,
# smaller angle)
sigma = vals * 206265**(-2) * d**2
tess_px = 21*u.arcsec
arcsec_per_px = 21
sigma_per_sq_px = sigma * arcsec_per_px**2 # N per px^2
# r is in pc. we want the profile vs angular distance.
AU_per_pc = 206265
r *= AU_per_pc # r now in AU
theta = r / d # angular distance in arcsec
tess_px = 21 # arcsec per px
theta *= (1/tess_px) # angular distance in px
return sigma_per_sq_px, theta
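# Minimal usage sketch (added for illustration); the King parameters below are
# hypothetical, chosen only to show the call signature and output units.
def _demo_king_profile():
    sigma, theta = get_king_proj_density_profile(
        r_t=10., r_c=1.5,   # tidal and core radii [pc] (hypothetical)
        k=50., d=500.)      # normalization [pc^-2] and distance [pc] (hypothetical)
    # sigma: stars/(TESS px)^2, theta: angular distance in TESS px
    return sigma, theta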
def make_wget_script(df):
'''
to download stellar data for each cluster, need to run a script of wgets.
this function makes the script.
'''
# get MWSC ids in "0012", "0007" format
mwsc = np.array(df['MWSC'])
mwsc_ids = np.array([str(int(f)).zfill(4) for f in mwsc])
    names = np.array(df['Name'])
import argparse
import cv2
import datetime
import math
import numpy as np
import os
import PIL
import random
from scipy.spatial.distance import cdist
import shutil
import sys
import torch
import torch.nn.init
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torchvision.datasets as dset
import torchvision.transforms as transforms
from torch.autograd import Variable
import torch.backends.cudnn as cudnn
from demo_superpoint import *
# Training settings
parser = argparse.ArgumentParser(description='PyTorch SEKD')
# Model options
parser.add_argument('--dataroot', type=str,
default='/home/songyoff/projects/data/hpatches/hpatches_benchmark_homo/hpatches_seq_resize/',
help='Path of the dataset.')
parser.add_argument('--model_path', type=str,
default='superpoint_v1.pth',
help='Path of the model.')
# Device options
parser.add_argument('--use_cuda', action='store_true', default=True,
help='Enable using CUDA for acceleration.')
parser.add_argument('--gpu_ids', default='0', type=str,
help='GPU id(s) used by the cuda.')
parser.add_argument('--seed', type=int, default=0, metavar='S',
help='random seed (default: 0)')
args = parser.parse_args()
args.confidence_threshold = 0.001
args.maxinum_points = 500
args.nms_radius = 4
args.refine_radius = 4
args.detector_cell = 8
args.use_cuda = args.use_cuda and torch.cuda.is_available()
if args.use_cuda:
gpu_ids = [int(index) for index in args.gpu_ids.replace(',', ' ').split()]
args.gpu_ids = gpu_ids
print (("NOT " if not args.use_cuda else "") + "Using cuda")
if args.use_cuda:
cudnn.benchmark = True
torch.cuda.manual_seed_all(args.seed)
torch.backends.cudnn.deterministic = True
# set random seeds
random.seed(args.seed)
torch.manual_seed(args.seed)
np.random.seed(args.seed)
def calculate_displacement(response_map, xs, ys, radius = 4):
assert(xs.shape == ys.shape)
num_points = xs.shape[0]
xs_offset = np.zeros(xs.shape)
ys_offset = np.zeros(ys.shape)
#return xs_offset, ys_offset
local_index_mask = | np.zeros([2*radius + 1, 2*radius+1, 2]) | numpy.zeros |
import pandas as pd
import numpy as np
import glob
from neuropixels import generalephys_mua as ephys_mua
# note: `ephys.load_phy_template` is used in df_from_phy_multimouse below; the
# plain generalephys module is assumed to provide it
from neuropixels import generalephys as ephys
from neuropixels.generalephys import get_waveform_duration,get_waveform_PTratio,get_waveform_repolarizationslope,option234_positions
from scipy.cluster.vq import kmeans2
import seaborn as sns;sns.set_style("ticks")
import matplotlib.pyplot as plt
import h5py
import matplotlib.path as mpath
import matplotlib.lines as mlines
import matplotlib.patches as mpatches
from matplotlib.collections import PatchCollection
import os
def get_peak_waveform_from_template(template):
    # return the waveform on the channel with the largest absolute deflection
    max_amp = 0
    ind = 0
    peak = np.zeros(np.shape(template.T)[1])
    for i, wv in enumerate(template.T):
        if np.max(np.abs(wv)) > max_amp:
            max_amp = np.max(np.abs(wv))
            ind = i
            peak = wv
    return peak
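# Quick self-check sketch for the helper above (added for illustration). It
# assumes a Kilosort-style template of shape (n_samples, n_channels), which is
# what iterating over template.T implies.
def _demo_peak_waveform():
    rng = np.random.RandomState(0)
    template = rng.randn(82, 384) * 0.1   # hypothetical (samples, channels)
    template[:, 100] *= 20.               # make one channel clearly dominant
    peak = get_peak_waveform_from_template(template)
    assert peak.shape == (82,)            # waveform from the dominant channel
    return peak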
def df_from_phy_multimouse(folder,expnum='1',recnum='1',site_positions = option234_positions,**kwargs):
if 'est' not in folder:
base_folder = os.path.basename(folder)
cohort_ = os.path.basename(base_folder).split('_')[-2]
mouse_ = os.path.basename(base_folder).split('_')[-1]
#traverse down tree to data
if 'open-ephys-neuropix' in base_folder:
try:
rec_folder = glob.glob(folder+'/*')[0]
print(rec_folder, 'hey')
except:
print(base_folder)
return None
else:
rec_folder = folder
print(rec_folder)
raw_path = os.path.join(rec_folder,'recording'+str(recnum),'continuous')
if len(glob.glob(raw_path+'/*100.0*'))>0:
raw_path = glob.glob(raw_path+'/*100.0*')[0]
print('loading from '+raw_path)
else:
print('could not find data folder for '+raw_path)
if os.path.isfile(os.path.join(raw_path,'spike_clusters.npy')) :
# df = df_from_phy(raw_path,site_positions = ephys.option234_positions,cluster_file='KS2',cohort=cohort,mouse=mouse)
path = raw_path
units = ephys.load_phy_template(path,cluster_file='cluster_group',site_positions=site_positions)
#structures is a dictionary that defines the bounds of the structure e.g.:{'v1':(0,850), 'hpc':(850,2000)}
mouse = [];experiment=[];cell = [];ypos = [];xpos = [];waveform=[];template=[];structure=[];times=[]
index = []; count = 1; cohort = []
probe_id=[]
depth=[];#print(list(nwb_data.keys()));print(list(nwb_data['processing'].keys()));
for unit in list(units.keys()):
if 'probe' in kwargs.keys():
probe_id.extend([kwargs['probe']])
else:
probe_id.extend(['A'])
if 'mouse' in kwargs.keys():
mouse.extend([kwargs['mouse']])
else:
mouse.extend([mouse_])
if 'experiment' in kwargs.keys():
experiment.extend([kwargs['experiment']])
else:
experiment.extend(['placeholder'])
if 'cohort' in kwargs.keys():
cohort.extend([kwargs['cohort']])
else:
cohort.extend([cohort_])
xpos.extend([units[unit]['xpos']])
ypos.extend([units[unit]['ypos']])
template.extend([units[unit]['template']])
times.append(units[unit]['times'])
waveform.append(units[unit]['waveform_weights'])
        df = pd.DataFrame()  # note: `index` is never populated in this function
        df = df.fillna(np.nan)
# df['nwb_id'] = nwb_id
df['mouse'] = mouse
df['experiment'] = experiment
df['probe'] = probe_id
# df['structure'] = structure
        df['cell'] = list(units.keys())  # dict_keys is not directly assignable as a column
df['cohort'] = cohort
df['times'] = times
df['ypos'] = ypos
df['xpos'] = xpos
# df['depth'] = depth
df['waveform'] = waveform
df['template'] = template
return df
def df_from_phy(folder,expnum='1',recnum='1',site_positions = option234_positions,**kwargs):
# if 'est' not in folder:
# base_folder = os.path.basename(folder)
# cohort_ = os.path.basename(base_folder).split('_')[-2]
# mouse_ = os.path.basename(base_folder).split('_')[-1]
#traverse down tree to data
# if 'open-ephys-neuropix' in base_folder:
# try:
# rec_folder = glob.glob(folder+'/*')[0]
# except:
# print(base_folder)
# return None
# else:
# rec_folder = folder
# raw_path = os.path.join(rec_folder,'experiment'+str(expnum),'recording'+str(recnum),'continuous')
# if len(glob.glob(raw_path+'/*100.0*'))>0:
# raw_path = glob.glob(raw_path+'/*100.0*')[0]
# print('loading from '+raw_path)
# else:
# print('could not find data folder for '+raw_path)
raw_path=folder
if 'cohort' in kwargs.keys():
cohort = kwargs['cohort']
else:
cohort = None
if 'mouse' in kwargs.keys():
mouse = kwargs['mouse']
else:
mouse = None
# df = df_from_phy(raw_path,site_positions = ephys.option234_positions,cluster_file='KS2',cohort=cohort,mouse=mouse)
path = raw_path
#units = ephys.load_phy_template(path,cluster_file='KS2',site_positions=site_positions)
units = ephys_mua.load_phy_template_mua(path,site_positions=site_positions)
#structures is a dictionary that defines the bounds of the structure e.g.:{'v1':(0,850), 'hpc':(850,2000)}
mouse = [];experiment=[];cell = [];ypos = [];xpos = [];waveform=[];template=[];structure=[];times=[]
index = []; count = 1; cohort = []
probe_id=[]
depth=[];#print(list(nwb_data.keys()));print(list(nwb_data['processing'].keys()));
for unit in list(units.index):
if 'probe' in kwargs.keys():
probe_id.extend([kwargs['probe']])
else:
probe_id.extend(['A'])
        if 'mouse' in kwargs.keys():
            mouse.extend([kwargs['mouse']])
        else:
            mouse.extend([None])  # `mouse_` is undefined in this function
if 'experiment' in kwargs.keys():
experiment.extend([kwargs['experiment']])
else:
experiment.extend(['placeholder'])
        if 'cohort' in kwargs.keys():
            cohort.extend([kwargs['cohort']])
        else:
            cohort.extend([None])  # `cohort_` is undefined in this function
df = units
df['mouse'] = mouse
df['experiment'] = experiment
df['probe'] = probe_id
# df['structure'] = structure
df['cell'] = units.index
df['cohort'] = cohort
df['times'] = units['times']
df['ypos'] = units['ypos']
df['xpos'] = units['xpos']
# df['depth'] = xpos
df['waveform'] = units['waveform_weights']
df['template'] = units['template']
return df
def df_from_nwb(nwb_data,structures=None,insertion_angle=55,nwbid=0):
if type(nwb_data)==str:
#print(nwb_data)
nwbid = nwb_data
nwb_data = h5py.File(nwb_data)
else:
nwb_data = nwb_data
#structures is a dictionary that defines the bounds of the structure e.g.:{'v1':(0,850), 'hpc':(850,2000)}
mouse = [];experiment=[];cell = [];ypos = [];xpos = [];waveform=[];template=[];structure=[];times=[]
index = []; count = 1
nwb_id = [];probe_id=[]
depth=[];#print(list(nwb_data.keys()));print(list(nwb_data['processing'].keys()));
if 'processing' in nwb_data.keys():
for probe in list(nwb_data['processing'].keys()):
if 'UnitTimes' in list(nwb_data['processing'][probe].keys()):
for i,u in enumerate(list(nwb_data['processing'][probe]['UnitTimes'].keys())):
if u != 'unit_list':
nwb_id.append(nwbid)
probe_id.append(probe)
index.append(count);count+=1
mouse.append(str(np.array(nwb_data.get('identifier'))))
experiment.append(1)
cell.append(u)
times.append(np.array(nwb_data['processing'][probe]['UnitTimes'][u]['times']));# print(list(nwb_data['processing'][probe]['UnitTimes'][u].keys()))
if 'ypos' in list(nwb_data['processing'][probe]['UnitTimes'][u].keys()):
ypos.append(np.array(nwb_data['processing'][probe]['UnitTimes'][u]['ypos']))
has_ypos = True
else:
ypos.append(None)
has_ypos = False
if 'depth' in list(nwb_data['processing'][probe]['UnitTimes'][u].keys()):
depth.append(np.array(nwb_data['processing'][probe]['UnitTimes'][u]['depth']))
else:
if has_ypos:
depth.append(np.array(nwb_data['processing'][probe]['UnitTimes'][u]['ypos']))
else:
depth.append(None)
if 'xpos' in list(nwb_data['processing'][probe]['UnitTimes'][u].keys()):
xpos.append(np.array(nwb_data['processing'][probe]['UnitTimes'][u]['xpos']))
has_xpos = True
else:
xpos.append(None)
has_xpos = False
template.append(np.array(nwb_data['processing'][probe]['UnitTimes'][u]['template']))
waveform.append(get_peak_waveform_from_template(template[-1]))
                        if structures is not None:
                            structur = None
                            for struct, bounds in structures.items():
if ypos[-1] > bounds[0] and ypos[-1]< bounds[1] :
structur=struct
else:
structur = None
structure.append(structur)
df = pd.DataFrame(index=index)
df = df.fillna(np.nan)
df['nwb_id'] = nwb_id
df['mouse'] = mouse
df['experiment'] = experiment
df['probe'] = probe_id
df['structure'] = structure
df['cell'] = cell
df['times'] = times
df['ypos'] = ypos
df['xpos'] = xpos
df['depth'] = depth
df['waveform'] = waveform
df['template'] = template
return df
def classify_waveform_shape(df,plots=False,save_plots=False,basepath='',kmeans=0):
durations = np.zeros(np.shape(df)[0])
PTratio = np.zeros(np.shape(df)[0])
repolarizationslope = np.zeros(np.shape(df)[0])
for i,waveform in enumerate(df.waveform):
# try:
durations[i]=get_waveform_duration(waveform)
PTratio[i]=get_waveform_PTratio(waveform)
repolarizationslope[i]=get_waveform_repolarizationslope(waveform,window=18)
# except:
# durations[i]=np.nan
# PTratio[i]=np.nan
# repolarizationslope[i]=np.nan
df['waveform_duration'] = durations
df['waveform_PTratio'] = PTratio
df['waveform_repolarizationslope'] = repolarizationslope
waveform_k = kmeans2(np.vstack(((durations-np.min(durations))/np.max((durations-np.min(durations))),
(PTratio-np.min(PTratio))/np.max((PTratio-np.min(PTratio))),
(repolarizationslope-np.min(repolarizationslope))/np.max((repolarizationslope-np.min(repolarizationslope))))).T,
2, iter=300, thresh=5e-6,minit='points')
# waveform_k = kmeans2(np.vstack((durations/np.max(durations),PTratio/np.max(PTratio))).T, 2, iter=300, thresh=5e-6,minit='points')
# waveform_k = kmeans2(np.vstack((durations/np.max(durations),(repolarizationslope-np.min(repolarizationslope))/np.max(repolarizationslope))).T, 2, iter=900, thresh=5e-7,minit='points')
#assign fs and rs to the kmeans results
if np.mean(durations[np.where(waveform_k[1]==0)[0]]) < np.mean(durations[np.where(waveform_k[1]==1)[0]]):
fs_k = 0;rs_k = 1
waveform_class_ids = ['fs','rs']
else:
rs_k = 0;fs_k = 1
waveform_class_ids = ['rs','fs']
waveform_class = [waveform_class_ids[k] for k in waveform_k[1]]
#uncomment this to ignore the preceding kmeans and just split on the marginal distribution of durations
if kmeans==0:
waveform_class = ['fs' if duration < 0.0004 else 'rs' for i,duration in enumerate(durations) ]
else:
waveform_k = kmeans2(np.vstack(((durations-np.min(durations))/np.max((durations-np.min(durations))),
(PTratio-np.min(PTratio))/np.max((PTratio-np.min(PTratio))),
(repolarizationslope-np.min(repolarizationslope))/np.max((repolarizationslope-np.min(repolarizationslope))))).T,
kmeans, iter=300, thresh=5e-6,minit='points')
# waveform_k = kmeans2(np.vstack((durations/np.max(durations),PTratio/np.max(PTratio))).T, 2, iter=300, thresh=5e-6,minit='points')
# waveform_k = kmeans2(np.vstack((durations/np.max(durations),(repolarizationslope-np.min(repolarizationslope))/np.max(repolarizationslope))).T, 2, iter=900, thresh=5e-7,minit='points')
#assign fs and rs to the kmeans results
if np.mean(durations[np.where(waveform_k[1]==0)[0]]) < np.mean(durations[np.where(waveform_k[1]==1)[0]]):
fs_k = 0;rs_k = 1
waveform_class_ids = ['fs','rs']
else:
rs_k = 0;fs_k = 1
waveform_class_ids = ['rs','fs']
waveform_class = [waveform_class_ids[k] for k in waveform_k[1]]
    #force upwards spikes to have their own class, because we're not sure how they fit in this framework
waveform_class = [waveform_class[i] if ratio < 1.0 else 'up' for i,ratio in enumerate(PTratio) ]
df['waveform_class']=waveform_class
#mark narrow upwards spikes as axons
waveform_class = ['axon' if all([duration < 0.0004,waveform_class[i]=='up']) else waveform_class[i] for i,duration in enumerate(durations) ]
df['waveform_class']=waveform_class
# #mark narrow downward spike at the very bottom of cortex as axons
#waveform_class = ['axon' if all([duration < 0.0004,waveform_class[i]=='fs',df['depth'][i+1] > 750, df['depth'][i+1]<1050]) else waveform_class[i] for i,duration in enumerate(durations) ]
df['waveform_class']=waveform_class
if plots:
plot_waveform_classification(durations, PTratio, repolarizationslope,df,save_plots=save_plots,basepath=basepath)
return df
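# Hedged end-to-end sketch (added for illustration): build the unit table from
# a Kilosort/phy output directory, then split units into waveform classes. The
# path and metadata values are placeholders.
def _demo_classify(path='/path/to/kilosort_output'):
    df = df_from_phy(path, probe='A', mouse='m0', cohort='c0', experiment='e0')
    df = classify_waveform_shape(df, plots=False)
    return df['waveform_class'].value_counts()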
def plot_waveform_classification(durations, PTratio, repolarizationslope, df,save_plots=False, basepath=''):
f,ax = plt.subplots(1,3,figsize=(8,3))
ax[0].plot(durations[np.where(df.waveform_class=='rs')[0]],PTratio[np.where(df.waveform_class=='rs')[0]],'o',ms=3.2)
ax[0].plot(durations[np.where(df.waveform_class=='fs')[0]],PTratio[np.where(df.waveform_class=='fs')[0]],'o',ms=3.2)
#ax[0].plot(durations[np.where(df.waveform_class=='up')[0]],PTratio[np.where(df.waveform_class=='up')[0]],'o',ms=3.2)
ax[0].plot(durations[np.where(df.waveform_class=='axon')[0]],PTratio[np.where(df.waveform_class=='axon')[0]],'o',ms=3.2)
ax[0].set_xlabel('width (sec)')
ax[0].set_ylabel('peak/trough ratio')
ax[1].plot(durations[np.where(df.waveform_class=='rs')[0]],repolarizationslope[np.where(df.waveform_class=='rs')[0]],'o',ms=3.2)
ax[1].plot(durations[np.where(df.waveform_class=='fs')[0]],repolarizationslope[np.where(df.waveform_class=='fs')[0]],'o',ms=3.2)
#ax[1].plot(durations[np.where(df.waveform_class=='up')[0]],repolarizationslope[np.where(df.waveform_class=='up')[0]],'o',ms=3.2)
ax[1].plot(durations[np.where(df.waveform_class=='axon')[0]],repolarizationslope[np.where(df.waveform_class=='axon')[0]],'o',ms=3.2)
ax[1].set_xlabel('width (sec)')
ax[1].set_ylabel('repolarization slope')
ax[2].plot(PTratio[np.where(df.waveform_class=='rs')[0]],repolarizationslope[np.where(df.waveform_class=='rs')[0]],'o',ms=3.2)
ax[2].plot(PTratio[np.where(df.waveform_class=='fs')[0]],repolarizationslope[np.where(df.waveform_class=='fs')[0]],'o',ms=3.2)
#ax[2].plot(PTratio[np.where(df.waveform_class=='up')[0]],repolarizationslope[np.where(df.waveform_class=='up')[0]],'o',ms=3.2)
ax[2].plot(PTratio[np.where(df.waveform_class=='axon')[0]],repolarizationslope[np.where(df.waveform_class=='axon')[0]],'o',ms=3.2)
ax[2].set_ylabel('repolarization slope')
ax[2].set_xlabel('peak/trough ratio')
ax[0].set_xlim(0.0,0.0015);ax[1].set_xlim(0.0,0.0015)
ax[0].set_ylim(0,1.1);ax[2].set_xlim(0,1.1)
plt.tight_layout()
for axis in ax:
# ephys.cleanAxes(axis,bottomLabels=True,leftLabels=True)
axis.locator_params(axis='x',nbins=4)
ax[2].legend(loc='upper right')
panelname = 'waveforms_clusters'
plt.tight_layout()
if save_plots:
plt.gcf().savefig(os.path.join(basepath,'figures','panels',panelname+'.png'),fmt='png',dpi=300)
plt.gcf().savefig(os.path.join(basepath,'figures','panels',panelname+'.eps'),fmt='eps')
nbins = 36
plt.hist(durations[np.where(df.waveform_class=='rs')[0]],range=(0,0.0015),bins=nbins)
plt.hist(durations[np.where(df.waveform_class=='fs')[0]],range=(0,0.0015),bins=nbins)
plt.hist(durations[np.where(df.waveform_class=='axon')[0]],range=(0,0.0015),bins=nbins)
plt.figure()
    plt.hist((durations[np.where(df.waveform_class=='rs')[0]],durations[np.where(df.waveform_class=='fs')[0]]),range=(0,0.0015),bins=nbins)
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
import numpy as np
import math
import matplotlib.pyplot as plt
plt.switch_backend('agg')
plt.style.use('ggplot')
class Prior(object):
def __init__(self, type):
self.type = type
def sample(self, shape):
if self.type == "uniform":
return np.random.uniform(-1.0, 1.0, shape)
else:
            return np.random.normal(0, 1, shape)
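# Minimal usage sketch of the Prior helper above (shapes are illustrative).
# Note that sample() falls back to N(0, 1) for any type other than "uniform".
def _demo_prior():
    z_uniform = Prior("uniform").sample((64, 100))    # U(-1, 1)
    z_gaussian = Prior("gaussian").sample((64, 100))  # N(0, 1)
    return z_uniform.shape, z_gaussian.shape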
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
import random
import logging
import numpy as np
from data import FaceIter
import mxnet as mx
from mxnet import ndarray as nd
import argparse
import mxnet.optimizer as optimizer
import mxcommon.resnet_dcn as resnet_dcn
import lfw
import sklearn
from sklearn.decomposition import PCA
from center_loss import *
logger = logging.getLogger()
logger.setLevel(logging.INFO)
class AccMetric(mx.metric.EvalMetric):
def __init__(self):
self.axis = 1
super(AccMetric, self).__init__(
'acc', axis=self.axis,
output_names=None, label_names=None)
self.losses = []
def update(self, labels, preds):
#print(len(labels), len(preds))
#print(preds[1].asnumpy())
loss = preds[2].asnumpy()[0]
if len(self.losses)==100:
print('triplet loss', sum(self.losses)/len(self.losses))
self.losses = []
self.losses.append(loss)
preds = [preds[1]] #use softmax output
for label, pred_label in zip(labels, preds):
#print(label.shape, pred_label.shape)
if pred_label.shape != label.shape:
pred_label = mx.ndarray.argmax(pred_label, axis=self.axis)
pred_label = pred_label.asnumpy().astype('int32').flatten()
label = label.asnumpy().astype('int32').flatten()
#print(label)
#print(label, pred_label)
assert label.shape==pred_label.shape
self.sum_metric += (pred_label.flat == label.flat).sum()
self.num_inst += len(pred_label.flat)
def parse_args():
parser = argparse.ArgumentParser(description='Train face network')
# general
parser.add_argument('--prefix', default='../model/face',
help='directory to save model.')
parser.add_argument('--load-epoch', type=int, default=0,
help='load epoch.')
parser.add_argument('--end-epoch', type=int, default=20,
help='training epoch size.')
parser.add_argument('--retrain', action='store_true', default=False,
help='true means continue training.')
args = parser.parse_args()
return args
def get_symbol(args, arg_params, aux_params):
new_args = dict({k:arg_params[k] for k in arg_params if 'fc' not in k})
data_shape = (3,args.image_size,args.image_size)
image_shape = ",".join([str(x) for x in data_shape])
layers = 152
_,_,embeddings,_ = resnet_dcn.get_cls_symbol(128, layers, image_shape, use_deformable=False)
all_layers = embeddings.get_internals()
#print(all_layers)
layer_names = ['_plus10','_plus46', '_plus49']
#layer_names = ['plus2', '_plus10','_plus46', '_plus49']
layers = []
for name in layer_names:
layers.append( all_layers[name+"_output"] )
out_sym = mx.symbol.Group(layers)
out_name = out_sym.list_outputs()
#arg_name = embeddings.list_arguments()
#aux_name = embeddings.list_auxiliary_states()
data_shape_dict = {'data' : (args.batch_size,)+data_shape}
arg_shape, out_shape, aux_shape = out_sym.infer_shape(**data_shape_dict)
#print(out_shape)
out_shape_dict = dict(zip(out_name, out_shape))
  for k,v in out_shape_dict.items():
print(k,v)
layers = []
  for i in range(len(layer_names)):
name = layer_names[i]+"_output"
_layer = all_layers[name]
_kernel = out_shape_dict[name][2]//5
if _kernel>1:
layer = mx.sym.Pooling(data=_layer, kernel=(_kernel, _kernel), stride=(_kernel,_kernel), pad=(0,0), pool_type='max')
else:
layer = _layer
layer = mx.symbol.Convolution(data=layer, kernel=(3, 3), pad=(1,1), num_filter=128)
layers.append(layer)
body = mx.symbol.concat(*layers, dim=1)
body = mx.symbol.Convolution(data=body, kernel=(1, 1), pad=(0,0), num_filter=128)
body = mx.sym.Pooling(data=body, global_pool=True, kernel=(5, 5), pool_type='avg', name='last_pool')
embeddings = mx.sym.Flatten(data=body)
_, out_shape, _= embeddings.infer_shape(**data_shape_dict)
print(out_shape)
#print(arg_shape)
#sys.exit(0)
l2_embeddings = mx.symbol.L2Normalization(embeddings)
batch_size = args.batch_size//args.ctx_num
anchor = mx.symbol.slice_axis(l2_embeddings, axis=0, begin=0, end=batch_size//3)
positive = mx.symbol.slice_axis(l2_embeddings, axis=0, begin=batch_size//3, end=2*batch_size//3)
negative = mx.symbol.slice_axis(l2_embeddings, axis=0, begin=2*batch_size//3, end=batch_size)
ap = anchor - positive
an = anchor - negative
ap = ap*ap
an = an*an
ap = mx.symbol.sum(ap, axis=1, keepdims=1) #(T,1)
an = mx.symbol.sum(an, axis=1, keepdims=1) #(T,1)
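  # Triplet hinge objective implemented below:
  #   L = mean(max(0, ||a - p||^2 - ||a - n||^2 + margin))
  # i.e. the anchor-negative distance must exceed the anchor-positive distance
  # by at least `margin`; with L2-normalized embeddings both squared distances
  # lie in [0, 4].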
loss_scale = [1.0, 0.0, 0.0]
#triplet_loss = mx.symbol.broadcast_maximum(0.0, ap-an+args.margin) #(T,1)
triplet_loss = mx.symbol.Activation(data = (ap-an+args.margin), act_type='relu')
triplet_loss = mx.symbol.sum(triplet_loss)/(batch_size//3)
triplet_loss = mx.symbol.MakeLoss(data = triplet_loss, grad_scale = loss_scale[0])
data = mx.symbol.Variable('data')
gt_label = mx.symbol.Variable('softmax_label')
fc = mx.symbol.FullyConnected(data = embeddings, num_hidden = args.num_classes, name="fc2")
softmax = mx.symbol.SoftmaxOutput(data=fc, label = gt_label, name='softmax', grad_scale = loss_scale[1])
if loss_scale[2]>0.0:
_center_loss = mx.symbol.Custom(data = l2_embeddings, label = gt_label, name='center_loss', op_type='centerloss'
, num_class= args.num_classes, alpha = 0.5, scale=loss_scale[2], batchsize=batch_size)
out = mx.symbol.Group([mx.symbol.BlockGrad(l2_embeddings), softmax, triplet_loss, _center_loss])
else:
out = mx.symbol.Group([mx.symbol.BlockGrad(l2_embeddings), softmax, triplet_loss])
#out = triplet_loss
#out = softmax
return (out, new_args, aux_params)
def train_net(args):
ctx = []
cvd = os.environ['CUDA_VISIBLE_DEVICES'].strip()
if len(cvd)>0:
    for i in range(len(cvd.split(','))):
ctx.append(mx.gpu(i))
if len(ctx)==0:
ctx = [mx.cpu()]
print('use cpu')
else:
print('gpu num:', len(ctx))
prefix = args.prefix
end_epoch = args.end_epoch
pretrained = '../model/resnet-152'
load_epoch = args.load_epoch
args.image_size = 160
per_batch_size = 60
args.ctx_num = len(ctx)
args.batch_size = per_batch_size*args.ctx_num
#args.all_batch_size = args.batch_size*args.ctx_num
args.bag_size = 3600
args.margin = 0.2
args.num_classes = 10575 #webface
data_shape = (3,args.image_size,args.image_size)
begin_epoch = 0
base_lr = 0.05
base_wd = 0.0002
base_mom = 0.0
lr_decay = 0.98
if not args.retrain:
#load and initialize params
print(pretrained)
_, arg_params, aux_params = mx.model.load_checkpoint(pretrained, load_epoch)
sym, arg_params, aux_params = get_symbol(args, arg_params, aux_params)
#arg_params, aux_params = load_param(pretrained, epoch, convert=True)
data_shape_dict = {'data': (args.batch_size, 3, args.image_size, args.image_size), 'softmax_label': (args.batch_size,)}
resnet_dcn.init_weights(sym, data_shape_dict, arg_params, aux_params)
else:
pretrained = args.prefix
sym, arg_params, aux_params = mx.model.load_checkpoint(pretrained, load_epoch)
begin_epoch = load_epoch
end_epoch = begin_epoch+10
base_wd = 0.00005
lr_decay = 0.5
base_lr = 0.015
# infer max shape
model = mx.mod.Module(
context = ctx,
symbol = sym,
#label_names = [],
#fixed_param_prefix = fixed_param_prefix,
)
train_dataiter = FaceIter(
path_imglist = "/raid5data/dplearn/faceinsight_align_webface.lst",
data_shape = data_shape,
mod = model,
ctx_num = args.ctx_num,
batch_size = args.batch_size,
bag_size = args.bag_size,
images_per_person = 5,
)
#_dice = DiceMetric()
_acc = AccMetric()
eval_metrics = [mx.metric.create(_acc)]
# rpn_eval_metric, rpn_cls_metric, rpn_bbox_metric, eval_metric, cls_metric, bbox_metric
#for child_metric in [fcn_loss_metric]:
# eval_metrics.add(child_metric)
# callback
#batch_end_callback = callback.Speedometer(input_batch_size, frequent=args.frequent)
#epoch_end_callback = mx.callback.module_checkpoint(mod, prefix, period=1, save_optimizer_states=True)
# decide learning rate
#lr_step = '10,20,30'
#train_size = 4848
#nrof_batch_in_epoch = int(train_size/input_batch_size)
#print('nrof_batch_in_epoch:', nrof_batch_in_epoch)
#lr_factor = 0.1
#lr_epoch = [float(epoch) for epoch in lr_step.split(',')]
#lr_epoch_diff = [epoch - begin_epoch for epoch in lr_epoch if epoch > begin_epoch]
#lr = base_lr * (lr_factor ** (len(lr_epoch) - len(lr_epoch_diff)))
#lr_iters = [int(epoch * train_size / batch_size) for epoch in lr_epoch_diff]
#print 'lr', lr, 'lr_epoch_diff', lr_epoch_diff, 'lr_iters', lr_iters
#lr_scheduler = MultiFactorScheduler(lr_iters, lr_factor)
# optimizer
#optimizer_params = {'momentum': 0.9,
# 'wd': 0.0005,
# 'learning_rate': base_lr,
# 'rescale_grad': 1.0,
# 'clip_gradient': None}
initializer = mx.init.Xavier(rnd_type='gaussian', factor_type="in", magnitude=2)
#opt = optimizer.SGD(learning_rate=base_lr, momentum=0.9, wd=base_wd, rescale_grad=(1.0/args.batch_size))
opt = optimizer.SGD(learning_rate=base_lr, momentum=base_mom, wd=base_wd, rescale_grad=1.0)
#opt = optimizer.AdaGrad(learning_rate=base_lr, wd=base_wd, rescale_grad=1.0)
_cb = mx.callback.Speedometer(args.batch_size, 10)
lfw_dir = '/raid5data/dplearn/lfw_mtcnn'
lfw_pairs = lfw.read_pairs(os.path.join(lfw_dir, 'pairs.txt'))
lfw_paths, issame_list = lfw.get_paths(lfw_dir, lfw_pairs, 'png')
imgs = []
lfw_data_list = []
for flip in [0,1]:
lfw_data = nd.empty((len(lfw_paths), 3, args.image_size, args.image_size))
i = 0
for path in lfw_paths:
with open(path, 'rb') as fin:
_bin = fin.read()
img = mx.image.imdecode(_bin)
img = nd.transpose(img, axes=(2, 0, 1))
if flip==1:
img = img.asnumpy()
        for c in range(img.shape[0]):
img[c,:,:] = np.fliplr(img[c,:,:])
img = nd.array( img )
#print(img.shape)
lfw_data[i][:] = img
i+=1
if i%1000==0:
print('loading lfw', i)
print(lfw_data.shape)
lfw_data_list.append(lfw_data)
def lfw_test(nbatch):
print('testing lfw..')
embeddings_list = []
    for i in range( len(lfw_data_list) ):
lfw_data = lfw_data_list[i]
embeddings = None
ba = 0
while ba<lfw_data.shape[0]:
bb = min(ba+args.batch_size, lfw_data.shape[0])
_data = nd.slice_axis(lfw_data, axis=0, begin=ba, end=bb)
_label = nd.ones( (bb-ba,) )
db = mx.io.DataBatch(data=(_data,), label=(_label,))
model.forward(db, is_train=False)
net_out = model.get_outputs()
_embeddings = net_out[0].asnumpy()
if embeddings is None:
embeddings = np.zeros( (lfw_data.shape[0], _embeddings.shape[1]) )
embeddings[ba:bb,:] = _embeddings
ba = bb
embeddings_list.append(embeddings)
acc_list = []
embeddings = embeddings_list[0]
_, _, accuracy, val, val_std, far = lfw.evaluate(embeddings, issame_list, nrof_folds=10)
acc_list.append(np.mean(accuracy))
print('[%d]Accuracy: %1.3f+-%1.3f' % (nbatch, np.mean(accuracy), np.std(accuracy)))
print('Validation rate: %2.5f+-%2.5f @ FAR=%2.5f' % (val, val_std, far))
embeddings = np.concatenate(embeddings_list, axis=1)
embeddings = sklearn.preprocessing.normalize(embeddings)
print(embeddings.shape)
_, _, accuracy, val, val_std, far = lfw.evaluate(embeddings, issame_list, nrof_folds=10)
acc_list.append(np.mean(accuracy))
    print('[%d]Accuracy-Flip: %1.3f+-%1.3f' % (nbatch, np.mean(accuracy), np.std(accuracy)))
#!/usr/bin/env python
# coding: utf-8
# In[1]:
#<NAME> 140401097
def git_oraya_kadar(nerden,nereye,degerler):
yeni_dizi=[]
for i in range(nerden,nereye):
yeni_dizi.append(degerler[i])
return yeni_dizi
# In[2]:
def yazdir2(buldugumSonuclar,Denklemin_katSayilari,derece,KORELASYON,baslangic,son):
    dosya = open('sonuc.txt','a+')
    dosya.write('\n--------- equation of degree ('+str(derece)+') ---------\n')
    dosya.write('CORRELATION VALUE: '+str(KORELASYON)+'\n')
    dosya.write('equation:\n'+ Denklemin_katSayilari)
    dosya.write('\n\n')
    dosya.write('data in the range ['+str(baslangic)+'-'+str(son)+']\n')
    dosya.write('\nactual values , fitted values , (E=y-a0-a1x-a2x^2..am x^m)\n')
    for i in range(len(degerler)):
        dosya.write(str(degerler[i])+'\t\t\t'+str(buldugumSonuclar[i])+'\t\t\t'+str(degerler[i]-buldugumSonuclar[i])+'\n')
    dosya.write('\n')
    dosya.close()
# In[6]:
import math
import numpy as np
# read the data file
file=open("veriler.txt")
degerler = []
degerler_temp = []
korelasyonlar=[]
for line in file.readlines():
line=line.rstrip('\n')
degerler.append(line)
degerler_temp.append(line)
file.close()
#convert the list values from string to integer
for i in range(0,len(degerler)):
degerler[i] = int(degerler[i])
degerler_temp[i] = int(degerler_temp[i])
#------------------------------------------------
n= len(degerler)
xlerin_karelerin_toplami ,xlerin_kupulerin_toplami,xlerin_4d_toplami = 0,0,0
xlerin_5d_toplami ,xlerin_6d_toplami, xlerin_7d_toplami= 0,0,0
xlerin_8d_toplami ,xlerin_9d_toplami ,xlerin_10d_toplami= 0,0,0
xlerin_11d_toplami ,xlerin_12d_toplami,x_y_carpim_toplami= 0,0,0
x_kare_y_carpim_toplami ,x_kupu_y_carpim_toplami,x4d_y_carpim_toplami=0,0,0
x5d_y_carpim_toplami ,x6d_y_carpim_toplami,Xler_toplami=0,0,0
#---------------------------
xler = [i+1 for i in range(n)]
Xler_toplami=sum(xler)
xler_ortalama = np.mean(xler)
Yler_toplami= sum(degerler)
Yler_ortalama = np.mean(degerler)
Yler_karelerin_toplami= sum([pow(degerler[i],2) for i in range(n)])
xlerin_karelerin_toplami= sum([pow(i+1,2) for i in range(n)])
xlerin_kupulerin_toplami= sum([pow(i+1,3) for i in range(n)])
xlerin_4d_toplami= sum([pow(i+1,4) for i in range(n)])
xlerin_5d_toplami= sum([pow(i+1,5) for i in range(n)])
xlerin_6d_toplami= sum([pow(i+1,6) for i in range(n)])
xlerin_7d_toplami= sum([pow(i+1,7) for i in range(n)])
xlerin_8d_toplami= sum([pow(i+1,8) for i in range(n)])
xlerin_9d_toplami= sum([pow(i+1,9) for i in range(n)])
xlerin_10d_toplami= sum([pow(i+1,10) for i in range(n)])
xlerin_11d_toplami= sum([pow(i+1,11) for i in range(n)])
xlerin_12d_toplami= sum([pow(i+1,12) for i in range(n)])
x_y_carpim_toplami= sum([(i+1)*degerler[i] for i in range(n)])
x_kare_y_carpim_toplami = sum([pow(i+1,2)*degerler[i] for i in range(n)])
x_kupu_y_carpim_toplami = sum([pow(i+1,3)*degerler[i] for i in range(n)])
x4d_y_carpim_toplami=sum([pow(i+1,4)*degerler[i] for i in range(n)])
x5d_y_carpim_toplami=sum([pow(i+1,5)*degerler[i] for i in range(n)])
x6d_y_carpim_toplami=sum([pow(i+1,6)*degerler[i] for i in range(n)])
#----------------------------------------------------------------------------
#compute the correlation: r = sqrt(|St - Sr| / St), with St the total and Sr the residual sum of squares
def Sr(degerler,buldugumDegerler):
Sr=0
for i in range(0,len(degerler)):
Sr+=(degerler[i]-buldugumDegerler[i])**2
return Sr
def St(degerler):
St=0
for y in degerler:
St+=(y-Yler_ortalama)**2
return St
def R_Kare(degerler,buldugumDegerler):
return ((abs((St(degerler)-Sr(degerler,buldugumDegerler)))/St(degerler)))**(1/2)
def en_iyi_korelasyonu_Bul(R_Kare_listesi):
r,derece=abs(1-R_Kare_listesi[0]),0
for i in range(1,len(R_Kare_listesi)):
if(abs(1-R_Kare_listesi[i]) < r):
r,derece=R_Kare_listesi[i],i
return (r,derece+1)
#---- first-degree equation ------------------------------------------------------
A1=(((n*x_y_carpim_toplami)-(Xler_toplami*Yler_toplami))/(n*xlerin_karelerin_toplami-(pow(Xler_toplami,2))))
A0= ((Yler_toplami/n) - A1*(Xler_toplami/n))
denk_1=str(A0)+" "+str(A1)+'x'
birinci_denkleminin_buldugum_sonuclari=[A0+A1*i for i in range(len(xler))]
R1=R_Kare(degerler,birinci_denkleminin_buldugum_sonuclari)
korelasyonlar.append(R1)
yazdir2(birinci_denkleminin_buldugum_sonuclari,denk_1,1,R1 ,1,len(degerler))
print("*Korelasyon_katsayısı:",R1,"\n")
print("-birinci dereceden denklemi ","y =",A0,A1,"x")
#----- second-degree equation ---------------------------------------------------------------
Matrix2= [[n,Xler_toplami,xlerin_karelerin_toplami],
[Xler_toplami,xlerin_karelerin_toplami,xlerin_kupulerin_toplami],
[xlerin_karelerin_toplami,xlerin_kupulerin_toplami,xlerin_4d_toplami]]
det = np.linalg.det(Matrix2)
Matrix_temp = [i[:] for i in Matrix2]
Matrix2[0][0],Matrix2[1][0],Matrix2[2][0]= Yler_toplami,x_y_carpim_toplami,x_kare_y_carpim_toplami
det1 = np.linalg.det(Matrix2)
Matrix2= [i[:] for i in Matrix_temp]
Matrix_temp = [i[:] for i in Matrix2]
Matrix_temp[0][1],Matrix_temp[1][1],Matrix_temp[2][1]= Yler_toplami,x_y_carpim_toplami,x_kare_y_carpim_toplami
det2 = np.linalg.det(Matrix_temp)
Matrix_temp = [i[:] for i in Matrix2]
Matrix_temp[0][2],Matrix_temp[1][2],Matrix_temp[2][2]= Yler_toplami,x_y_carpim_toplami,x_kare_y_carpim_toplami
det3 = np.linalg.det(Matrix_temp)
a20,a21,a22=(det1/det),(det2/det),(det3/det)
denk_2=str(a20)+"x^0 "+str(a21)+'x '+str(a22)+'x^2'
ikinci_denkleminin_buldugum_sonuclari =[a20+(a21*i)+(a22*pow(i,2)) for i in range(len(xler))]
R2=R_Kare(degerler,ikinci_denkleminin_buldugum_sonuclari)
korelasyonlar.append(R2)
yazdir2(ikinci_denkleminin_buldugum_sonuclari,denk_2,2,R2,1,len(degerler))
print("*Korelasyon_katsayısı:",R2,"\n")
print("\n-ikinci dereceden polinom denklemi=\n",a20,"x^0",a21,"x^1",a22,"x^2")
#----- third-degree equation--------------------------------------------------------------
Matrix3= [[n,Xler_toplami,xlerin_karelerin_toplami, xlerin_kupulerin_toplami],
[Xler_toplami,xlerin_karelerin_toplami,xlerin_kupulerin_toplami,xlerin_4d_toplami],
[xlerin_karelerin_toplami,xlerin_kupulerin_toplami,xlerin_4d_toplami,xlerin_5d_toplami],
[xlerin_kupulerin_toplami,xlerin_4d_toplami,xlerin_5d_toplami,xlerin_6d_toplami]]
det = np.linalg.det(Matrix3)
Matrix_temp = [i[:] for i in Matrix3]
Matrix3[0][0],Matrix3[1][0],Matrix3[2][0],Matrix3[3][0]= Yler_toplami,x_y_carpim_toplami,x_kare_y_carpim_toplami,x_kupu_y_carpim_toplami
det1 = np.linalg.det(Matrix3)
Matrix3= [i[:] for i in Matrix_temp]
Matrix3[0][1],Matrix3[1][1],Matrix3[2][1],Matrix3[3][1]= Yler_toplami,x_y_carpim_toplami,x_kare_y_carpim_toplami,x_kupu_y_carpim_toplami
det2 = np.linalg.det(Matrix3)
Matrix3= [i[:] for i in Matrix_temp]
Matrix3[0][2],Matrix3[1][2],Matrix3[2][2],Matrix3[3][2]= Yler_toplami,x_y_carpim_toplami,x_kare_y_carpim_toplami,x_kupu_y_carpim_toplami
det3 = np.linalg.det(Matrix3)
Matrix3= [i[:] for i in Matrix_temp]
Matrix3[0][3],Matrix3[1][3],Matrix3[2][3],Matrix3[3][3]= Yler_toplami,x_y_carpim_toplami,x_kare_y_carpim_toplami,x_kupu_y_carpim_toplami
det4 = np.linalg.det(Matrix3)
a30,a31,a32,a33=(det1/det),(det2/det),(det3/det),(det4/det)
denk_3=str(a30)+"x^0 "+str(a31)+'x '+str(a32)+'x^2 '+str(a33)+'x^3 '
ucuncu_denkleminin_buldugum_sonuclari =[a30+(a31*i)+(a32*pow(i,2))+(a33*pow(i,3)) for i in range(len(xler))]
R3=R_Kare(degerler,ucuncu_denkleminin_buldugum_sonuclari)
korelasyonlar.append(R3)
yazdir2(ikinci_denkleminin_buldugum_sonuclari,denk_3,3,R3,1,len(degerler))
print("*Korelasyon_katsayısı:",R3,"\n")
print("\n-üçüncü dereceden polinom denklemi=\n",a30,"x^0",a31,"x^1",a32,"x^2",a33,"x^3")
#----- fourth-degree equation--------------------------------------------------------------
Matrix4= [[n,Xler_toplami,xlerin_karelerin_toplami, xlerin_kupulerin_toplami, xlerin_4d_toplami],
[Xler_toplami,xlerin_karelerin_toplami,xlerin_kupulerin_toplami,xlerin_4d_toplami,xlerin_5d_toplami],
[xlerin_karelerin_toplami,xlerin_kupulerin_toplami,xlerin_4d_toplami,xlerin_5d_toplami,xlerin_6d_toplami],
[xlerin_kupulerin_toplami,xlerin_4d_toplami,xlerin_5d_toplami,xlerin_6d_toplami,xlerin_7d_toplami],
[xlerin_4d_toplami,xlerin_5d_toplami,xlerin_6d_toplami,xlerin_7d_toplami,xlerin_8d_toplami]]
det = np.linalg.det(Matrix4)
Matrix_temp = [i[:] for i in Matrix4]
Matrix4[0][0],Matrix4[1][0],Matrix4[2][0],Matrix4[3][0],Matrix4[4][0]= Yler_toplami,x_y_carpim_toplami,x_kare_y_carpim_toplami,x_kupu_y_carpim_toplami,x4d_y_carpim_toplami
det1 = np.linalg.det(Matrix4)
Matrix4= [i[:] for i in Matrix_temp]
Matrix4[0][1],Matrix4[1][1],Matrix4[2][1],Matrix4[3][1],Matrix4[4][1]= Yler_toplami,x_y_carpim_toplami,x_kare_y_carpim_toplami,x_kupu_y_carpim_toplami,x4d_y_carpim_toplami
det2 = np.linalg.det(Matrix4)
Matrix4= [i[:] for i in Matrix_temp]
Matrix4[0][2],Matrix4[1][2],Matrix4[2][2],Matrix4[3][2],Matrix4[4][2]= Yler_toplami,x_y_carpim_toplami,x_kare_y_carpim_toplami,x_kupu_y_carpim_toplami,x4d_y_carpim_toplami
det3 = np.linalg.det(Matrix4)
Matrix4= [i[:] for i in Matrix_temp]
Matrix4[0][3],Matrix4[1][3],Matrix4[2][3],Matrix4[3][3],Matrix4[4][3]= Yler_toplami,x_y_carpim_toplami,x_kare_y_carpim_toplami,x_kupu_y_carpim_toplami,x4d_y_carpim_toplami
det4 = np.linalg.det(Matrix4)
import sys
import csv
import numpy as np
import pandas as pd
import os
from pathlib import Path
import get_cmip_path_v2
import xarray as xr
import core_component as cc
## Read from shell script
ii = int(sys.argv[1])
mip=sys.argv[2]
expr=sys.argv[3]
##**********************************************************
## TEST code
## INPUT
#mip="CMIP" ##CMIP or ScenarioMIP
#ii = 1
#expr='historical' ## historical or ssp126 or ssp245 or ssp585
##**********************************************************
with open('file_list/file_list_'+expr+'.csv') as file:
path_list = file.readlines()
path_list = [line.rstrip() for line in path_list]
tpath = path_list[ii]
dum = tpath.split('/')
fname = 'gm_tas_'+mip+'_'+expr+'_'+dum[7]+'_'+dum[9]+'_'+dum[12]+'.csv'
file_list = []
for cp, cd, cf in os.walk(tpath):
    for f in cf:
        file_list.append(os.path.join(cp, f))  # join with the walked directory, not the root
flag = 0
for ff in file_list:
# flag = 0
ds = xr.open_dataset(ff)
n_time = len(ds['time'][:]) ## Number of time entry
    tavg_tmp = np.zeros(n_time)
import sys
import numpy as np
def coadd_cameras(flux_cam, wave_cam, ivar_cam, mask_cam=None):
"""Adds spectra from the three cameras as long as they have the same number of wavelength bins.
This is not a replacement for desispec.coaddition.coadd_cameras,
but a simpler (versatile and faster) implementation which uses only numpy.
This also assumes the input spectra grid are already aligned
(i.e. same wavelength grid in the overlapping regions),
This is likely the case if the spectra are from the official data releases.
Parameters
----------
flux_cam : dict
Dictionary containing the flux values from the three cameras
wave_cam : dict
Dictionary containing the wavelength values from the three cameras
ivar_cam : dict
Dictionary containing the inverse variance values from the three cameras
mask_cam : dict, optional
Dictionary containing the mask values from the three cameras
Returns
-------
Tuple
returns the combined flux, wavelength and inverse variance grids.
"""
sbands = np.array(["b", "r", "z"]) # bands sorted by inc. wavelength
# create wavelength array
wave = None
    tolerance = 0.0001  # wavelength matching tolerance [Angstrom]
shifts = {}
for b in sbands:
wave_camera = np.atleast_2d(wave_cam[b].copy())
if wave is None:
wave = wave_camera
else:
shifts[b] = np.sum(
np.all((wave + tolerance) < wave_camera[:, 0][:, None], axis=0)
)
wave = np.append(
wave,
wave_camera[
:, np.all(wave_camera > (wave[:, -1][:, None] + tolerance), axis=0)
],
axis=1,
)
nwave = wave.shape[1]
blue = sbands[0]
ntarget = len(flux_cam[blue])
flux = None
ivar = None
mask = None
for b in sbands:
flux_camera = np.atleast_2d(flux_cam[b].copy())
ivar_camera = np.atleast_2d(ivar_cam[b].copy())
ivar_camera[ivar_camera <= 0] = 0
if mask_cam is not None:
mask_camera = np.atleast_2d(mask_cam[b].astype(bool))
ivar_camera[mask_camera] = 0
if flux is None:
flux = np.zeros((ntarget, nwave), dtype=flux_cam[blue].dtype)
flux[:, : flux_camera.shape[1]] += flux_camera * ivar_camera
ivar = np.zeros((ntarget, nwave), dtype=flux_cam[blue].dtype)
ivar[:, : ivar_camera.shape[1]] += ivar_camera
            if mask_cam is not None:  # `mask` is still None here; test the input instead
                mask = np.ones((ntarget, nwave), dtype=mask_cam[blue].dtype)
                mask[:, : mask_camera.shape[1]] &= mask_camera
else:
flux[:, shifts[b] : (shifts[b] + flux_camera.shape[1])] += (
flux_camera * ivar_camera
)
ivar[:, shifts[b] : (shifts[b] + ivar_camera.shape[1])] += ivar_camera
if mask is not None:
mask[:, shifts[b] : (shifts[b] + mask_camera.shape[1])] &= mask_camera
    flux = flux / ivar
    flux[~np.isfinite(flux)] = 0  # bins with no coverage (ivar == 0) -> zero flux
    return flux, wave, ivar
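# Hedged usage sketch (added for illustration) with synthetic per-camera
# spectra; the grids below overlap on a common 10-Angstrom spacing the way the
# real b/r/z arms do, but all numbers are illustrative only.
def _demo_coadd():
    nspec = 2
    wave_cam = {"b": np.linspace(3600., 5800., 221),
                "r": np.linspace(5760., 7620., 187),
                "z": np.linspace(7520., 9820., 231)}
    flux_cam = {b: np.ones((nspec, len(w))) for b, w in wave_cam.items()}
    ivar_cam = {b: np.ones((nspec, len(w))) for b, w in wave_cam.items()}
    flux, wave, ivar = coadd_cameras(flux_cam, wave_cam, ivar_cam)
    return flux.shape, wave.shape   # (2, 623) and (1, 623) here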
import numpy
from .operator import Operator
class NetketOperatorWrapper(Operator):
def __init__(self, netket_operator, hilbert_state_shape,
max_number_of_local_connections=None, should_calc_unused=None):
assert numpy.prod(hilbert_state_shape) == netket_operator.hilbert.size
super(NetketOperatorWrapper, self).__init__(hilbert_state_shape)
self.netket_operator = netket_operator
self.should_calc_unused = should_calc_unused
self.max_number_of_local_connections = max_number_of_local_connections
self.estimated_number_of_local_connections = max_number_of_local_connections
self._check_if_old_netket()
if max_number_of_local_connections is None:
self.estimated_number_of_local_connections = self._calculate_num_of_local_connectios_from_netket_operator()
if self.should_calc_unused is None:
self.should_calc_unused = False
else:
if self.should_calc_unused is None:
self.should_calc_unused = True
def random_states(self, num_of_states):
import netket
random_engine = netket.utils.RandomEngine()
hilbert_space = self.netket_operator.hilbert
results = numpy.zeros((num_of_states, hilbert_space.size))
for i in range(num_of_states):
hilbert_space.random_vals(results[i, :], random_engine)
return numpy.reshape(results, (num_of_states, ) + self.hilbert_state_shape)
def _check_if_old_netket(self):
res = self.netket_operator.get_conn(numpy.array([1]*self.netket_operator.hilbert.size))
self.old_netket = len(res) == 3
def _calculate_num_of_local_connectios_from_netket_operator(self):
random_state = self.random_states(1)
res = self.netket_operator.get_conn(random_state.flatten())
if self.old_netket:
mel, _, _ = res
else:
_, mel = res
return len(mel)
def new_netket_find_conn(self, sample):
conn_list, mel_list = [], []
batch_size = sample.shape[0]
for i in range(batch_size):
flat_input = sample[i, ...].flatten()
sample_conn, sample_mel = self.netket_operator.get_conn(flat_input)
conn_list.append(sample_conn)
mel_list.append(sample_mel)
assert numpy.all(sample_conn[0,:] == flat_input)
self.estimated_number_of_local_connections = max(numpy.max([len(x) for x in mel_list]), self.estimated_number_of_local_connections)
all_conn = numpy.zeros((self.estimated_number_of_local_connections,) + sample.shape)
batch_mel = numpy.zeros((self.estimated_number_of_local_connections, sample.shape[0]), dtype=numpy.complex128)
for i in range(batch_size):
all_conn[:len(conn_list[i]), i, ...] = conn_list[i].reshape((-1,) + sample.shape[1:])
batch_mel[:len(mel_list[i]), i] = mel_list[i]
if self.should_calc_unused:
all_conn_use = batch_mel != 0.0
else:
all_conn_use = numpy.ones((self.estimated_number_of_local_connections, batch_size), dtype=numpy.bool)
all_conn_use[0, :] = True
return all_conn, batch_mel, all_conn_use
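    # The (all_conn, batch_mel) pair returned above is exactly what a
    # variational local-energy estimator consumes:
    #   E_loc(s) = sum_{s'} <s|H|s'> * psi(s') / psi(s),
    # with all_conn holding the connected configurations s' (s itself first)
    # and batch_mel the matrix elements. A hedged numpy sketch, assuming a
    # user-supplied `log_psi` that maps a batch of states to log-amplitudes:
    #   log_psi_conn = log_psi(all_conn.reshape((-1,) + sample.shape[1:]))
    #   log_psi_conn = log_psi_conn.reshape(all_conn.shape[:2])
    #   ratios = numpy.exp(log_psi_conn - log_psi_conn[0])   # psi(s')/psi(s)
    #   e_loc = numpy.sum(batch_mel * ratios * all_conn_use, axis=0)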
def old_netket_find_conn(self, sample):
all_conn = numpy.zeros((self.estimated_number_of_local_connections,) + sample.shape)
batch_mel = numpy.zeros((self.estimated_number_of_local_connections, sample.shape[0]), dtype=numpy.complex128)
batch_size = sample.shape[0]
for i in range(batch_size):
flat_input = sample[i, ...].flatten()
            # old-netket get_conn returns (mel, to_change_idx, to_change_vals)
            sample_mel, to_change_idx_list, to_change_vals_list = self.netket_operator.get_conn(flat_input)
if len(sample_mel) > self.estimated_number_of_local_connections:
print('wrong max_number_of_local_connections fixing and continue recursively')
self.estimated_number_of_local_connections = len(sample_mel)
return self.find_conn(sample)
sample_mel = sample_mel + [0.0] * (self.estimated_number_of_local_connections - len(sample_mel))
batch_mel[:, i] = numpy.array(sample_mel)
self_conn_idx = -1
for j, (to_change_idx, to_change_vals) in enumerate(zip(to_change_idx_list, to_change_vals_list)):
conn = flat_input.copy()
if len(to_change_idx) == 0:
self_conn_idx = j
for to_change_id, to_change_val in zip(to_change_idx, to_change_vals):
conn[to_change_id] = to_change_val
all_conn[j, i, ...] = conn.reshape(sample.shape[1:])
assert self_conn_idx >= 0
if self_conn_idx != 0:
temp_conn = all_conn[0, ...]
all_conn[0, ...] = all_conn[self_conn_idx, ...]
all_conn[self_conn_idx, ...] = temp_conn
tmp_mel = batch_mel[0, i]
batch_mel[0, i] = batch_mel[self_conn_idx, i]
batch_mel[self_conn_idx, i] = tmp_mel
if self.should_calc_unused:
all_conn_use = batch_mel != 0.0
else:
            all_conn_use = numpy.ones((self.estimated_number_of_local_connections, batch_size), dtype=bool)
# This module has been generated automatically from space group information
# obtained from the Computational Crystallography Toolbox
#
"""
Space groups
This module contains a list of all the 230 space groups that can occur in
a crystal. The variable space_groups contains a dictionary that maps
space group numbers and space group names to the corresponding space
group objects.
.. moduleauthor:: <NAME> <<EMAIL>>
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2013 The Mosaic Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file LICENSE.txt, distributed as part of this software.
#-----------------------------------------------------------------------------
import numpy as N
class SpaceGroup(object):
"""
Space group
All possible space group objects are created in this module. Other
modules should access these objects through the dictionary
space_groups rather than create their own space group objects.
"""
def __init__(self, number, symbol, transformations):
"""
:param number: the number assigned to the space group by
international convention
:type number: int
:param symbol: the Hermann-Mauguin space-group symbol as used
in PDB and mmCIF files
:type symbol: str
:param transformations: a list of space group transformations,
each consisting of a tuple of three
integer arrays (rot, tn, td), where
rot is the rotation matrix and tn/td
are the numerator and denominator of the
translation vector. The transformations
are defined in fractional coordinates.
:type transformations: list
"""
self.number = number
self.symbol = symbol
self.transformations = transformations
self.transposed_rotations = N.array([N.transpose(t[0])
for t in transformations])
self.phase_factors = N.exp(N.array([(-2j*N.pi*t[1])/t[2]
for t in transformations]))
def __repr__(self):
return "SpaceGroup(%d, %s)" % (self.number, repr(self.symbol))
def __len__(self):
"""
:return: the number of space group transformations
:rtype: int
"""
return len(self.transformations)
def symmetryEquivalentMillerIndices(self, hkl):
"""
:param hkl: a set of Miller indices
:type hkl: Scientific.N.array_type
:return: a tuple (miller_indices, phase_factor) of two arrays
of length equal to the number of space group
transformations. miller_indices contains the Miller
indices of each reflection equivalent by symmetry to the
reflection hkl (including hkl itself as the first element).
phase_factor contains the phase factors that must be applied
to the structure factor of reflection hkl to obtain the
structure factor of the symmetry equivalent reflection.
:rtype: tuple
"""
hkls = N.dot(self.transposed_rotations, hkl)
p = N.multiply.reduce(self.phase_factors**hkl, -1)
return hkls, p
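# Usage sketch (illustrative): for the centrosymmetric group 'P -1' defined
# below, hkl = (1, 2, 3) expands to itself plus (-1, -2, -3), both with unit
# phase factors, since the inversion carries no translation:
#   sg = space_groups['P -1']
#   hkls, phases = sg.symmetryEquivalentMillerIndices(N.array([1, 2, 3]))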
space_groups = {}
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(1, 'P 1', transformations)
space_groups[1] = sg
space_groups['P 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(2, 'P -1', transformations)
space_groups[2] = sg
space_groups['P -1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(3, 'P 1 2 1', transformations)
space_groups[3] = sg
space_groups['P 1 2 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(4, 'P 1 21 1', transformations)
space_groups[4] = sg
space_groups['P 1 21 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(5, 'C 1 2 1', transformations)
space_groups[5] = sg
space_groups['C 1 2 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(6, 'P 1 m 1', transformations)
space_groups[6] = sg
space_groups['P 1 m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(7, 'P 1 c 1', transformations)
space_groups[7] = sg
space_groups['P 1 c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(8, 'C 1 m 1', transformations)
space_groups[8] = sg
space_groups['C 1 m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(9, 'C 1 c 1', transformations)
space_groups[9] = sg
space_groups['C 1 c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(10, 'P 1 2/m 1', transformations)
space_groups[10] = sg
space_groups['P 1 2/m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(11, 'P 1 21/m 1', transformations)
space_groups[11] = sg
space_groups['P 1 21/m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(12, 'C 1 2/m 1', transformations)
space_groups[12] = sg
space_groups['C 1 2/m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(13, 'P 1 2/c 1', transformations)
space_groups[13] = sg
space_groups['P 1 2/c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(14, 'P 1 21/c 1', transformations)
space_groups[14] = sg
space_groups['P 1 21/c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(15, 'C 1 2/c 1', transformations)
space_groups[15] = sg
space_groups['C 1 2/c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(16, 'P 2 2 2', transformations)
space_groups[16] = sg
space_groups['P 2 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(17, 'P 2 2 21', transformations)
space_groups[17] = sg
space_groups['P 2 2 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(18, 'P 21 21 2', transformations)
space_groups[18] = sg
space_groups['P 21 21 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(19, 'P 21 21 21', transformations)
space_groups[19] = sg
space_groups['P 21 21 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(20, 'C 2 2 21', transformations)
space_groups[20] = sg
space_groups['C 2 2 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(21, 'C 2 2 2', transformations)
space_groups[21] = sg
space_groups['C 2 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(22, 'F 2 2 2', transformations)
space_groups[22] = sg
space_groups['F 2 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(23, 'I 2 2 2', transformations)
space_groups[23] = sg
space_groups['I 2 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(24, 'I 21 21 21', transformations)
space_groups[24] = sg
space_groups['I 21 21 21'] = sg
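# ---------------------------------------------------------------------------
# Note (illustrative): body-centred (I), base-centred (A/C) and face-centred
# (F) groups such as 'I 21 21 21' above simply repeat the point-group
# operations once per lattice-centring vector, e.g. (1/2, 1/2, 1/2) for I
# and (1/2, 1/2, 0) for C, which is why their `transformations` lists are
# two (or, for F, four) times as long as those of the corresponding
# primitive groups.  A sketch of how such centred lists could be derived;
# the helper name is an assumption, not part of this module:
def _add_centring(primitive_ops, centring_num, centring_den):
    # Append a copy of every primitive operation shifted by the centring
    # translation; fractions are combined over a common denominator and,
    # like the generated tables themselves, left unreduced.
    centred = list(primitive_ops)
    for rot, t_num, t_den in primitive_ops:
        num = t_num * centring_den + centring_num * t_den
        den = t_den * centring_den
        centred.append((rot, num, den))
    return centred
# ---------------------------------------------------------------------------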
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(25, 'P m m 2', transformations)
space_groups[25] = sg
space_groups['P m m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(26, 'P m c 21', transformations)
space_groups[26] = sg
space_groups['P m c 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(27, 'P c c 2', transformations)
space_groups[27] = sg
space_groups['P c c 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(28, 'P m a 2', transformations)
space_groups[28] = sg
space_groups['P m a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(29, 'P c a 21', transformations)
space_groups[29] = sg
space_groups['P c a 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(30, 'P n c 2', transformations)
space_groups[30] = sg
space_groups['P n c 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(31, 'P m n 21', transformations)
space_groups[31] = sg
space_groups['P m n 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(32, 'P b a 2', transformations)
space_groups[32] = sg
space_groups['P b a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(33, 'P n a 21', transformations)
space_groups[33] = sg
space_groups['P n a 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(34, 'P n n 2', transformations)
space_groups[34] = sg
space_groups['P n n 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(35, 'C m m 2', transformations)
space_groups[35] = sg
space_groups['C m m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(36, 'C m c 21', transformations)
space_groups[36] = sg
space_groups['C m c 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(37, 'C c c 2', transformations)
space_groups[37] = sg
space_groups['C c c 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(38, 'A m m 2', transformations)
space_groups[38] = sg
space_groups['A m m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(39, 'A b m 2', transformations)
space_groups[39] = sg
space_groups['A b m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(40, 'A m a 2', transformations)
space_groups[40] = sg
space_groups['A m a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(41, 'A b a 2', transformations)
space_groups[41] = sg
space_groups['A b a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(42, 'F m m 2', transformations)
space_groups[42] = sg
space_groups['F m m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(43, 'F d d 2', transformations)
space_groups[43] = sg
space_groups['F d d 2'] = sg
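# ---------------------------------------------------------------------------
# Note (illustrative): face-centred groups such as 'F d d 2' above carry
# three centring vectors, (0,1/2,1/2), (1/2,0,1/2) and (1/2,1/2,0), so each
# point-group operation appears four times.  The denominators of 4 in this
# group (e.g. trans_num/trans_den = (1/4, 1/4, 1/4)) are the quarter-cell
# translations characteristic of the diamond (d) glide.
# ---------------------------------------------------------------------------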
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(44, 'I m m 2', transformations)
space_groups[44] = sg
space_groups['I m m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(45, 'I b a 2', transformations)
space_groups[45] = sg
space_groups['I b a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(46, 'I m a 2', transformations)
space_groups[46] = sg
space_groups['I m a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(47, 'P m m m', transformations)
space_groups[47] = sg
space_groups['P m m m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(48, 'P n n n :2', transformations)
space_groups[48] = sg
space_groups['P n n n :2'] = sg
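# ---------------------------------------------------------------------------
# Note (illustrative): the ':2' suffix in symbols such as 'P n n n :2'
# denotes origin choice 2 (origin at an inversion centre) in the
# International Tables convention.  Negative numerators such as (0,-1,-1)
# above are fractional translations equivalent, modulo a full lattice
# vector, to their positive counterparts; a minimal sketch of reducing them
# into [0, 1), with an assumed helper name:
def _wrap_translation(trans_num, trans_den):
    # Map each component of the fractional translation into [0, 1).
    t = N.asarray(trans_num, dtype=float) / N.asarray(trans_den, dtype=float)
    return t - N.floor(t)
# e.g. _wrap_translation(N.array([0,-1,-1]), N.array([1,2,2])) gives
# (0.0, 0.5, 0.5).
# ---------------------------------------------------------------------------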
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(49, 'P c c m', transformations)
space_groups[49] = sg
space_groups['P c c m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(50, 'P b a n :2', transformations)
space_groups[50] = sg
space_groups['P b a n :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(51, 'P m m a', transformations)
space_groups[51] = sg
space_groups['P m m a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(52, 'P n n a', transformations)
space_groups[52] = sg
space_groups['P n n a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(53, 'P m n a', transformations)
space_groups[53] = sg
space_groups['P m n a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(54, 'P c c a', transformations)
space_groups[54] = sg
space_groups['P c c a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(55, 'P b a m', transformations)
space_groups[55] = sg
space_groups['P b a m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(56, 'P c c n', transformations)
space_groups[56] = sg
space_groups['P c c n'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(57, 'P b c m', transformations)
space_groups[57] = sg
space_groups['P b c m'] = sg
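# Space group 58: P n n m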
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(58, 'P n n m', transformations)
space_groups[58] = sg
space_groups['P n n m'] = sg
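# Space group 59: P m m n (origin choice 2)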
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(59, 'P m m n :2', transformations)
space_groups[59] = sg
space_groups['P m m n :2'] = sg
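# Space group 60: P b c n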
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(60, 'P b c n', transformations)
space_groups[60] = sg
space_groups['P b c n'] = sg
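# Space group 61: P b c a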
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(61, 'P b c a', transformations)
space_groups[61] = sg
space_groups['P b c a'] = sg
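# Space group 62: P n m a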
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(62, 'P n m a', transformations)
space_groups[62] = sg
space_groups['P n m a'] = sg
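# Space group 63: C m c m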
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(63, 'C m c m', transformations)
space_groups[63] = sg
space_groups['C m c m'] = sg
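# Space group 64: C m c a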
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(64, 'C m c a', transformations)
space_groups[64] = sg
space_groups['C m c a'] = sg
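# Space group 65: C m m m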
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(65, 'C m m m', transformations)
space_groups[65] = sg
space_groups['C m m m'] = sg
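# Space group 66: C c c m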
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(66, 'C c c m', transformations)
space_groups[66] = sg
space_groups['C c c m'] = sg
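# Space group 67: C m m a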
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(67, 'C m m a', transformations)
space_groups[67] = sg
space_groups['C m m a'] = sg
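# Space group 68: C c c a (origin choice 2)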
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(68, 'C c c a :2', transformations)
space_groups[68] = sg
space_groups['C c c a :2'] = sg
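# Space group 69: F m m m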
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(69, 'F m m m', transformations)
space_groups[69] = sg
space_groups['F m m m'] = sg
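# Space group 70: F d d d (origin choice 2)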
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,3,3])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,0,3])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,1,1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(70, 'F d d d :2', transformations)
space_groups[70] = sg
space_groups['F d d d :2'] = sg
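# Space group 71: I m m m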
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(71, 'I m m m', transformations)
space_groups[71] = sg
space_groups['I m m m'] = sg
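# Space group 72: I b a m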
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(72, 'I b a m', transformations)
space_groups[72] = sg
space_groups['I b a m'] = sg
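# Space group 73: I b c a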
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(73, 'I b c a', transformations)
space_groups[73] = sg
space_groups['I b c a'] = sg
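# Space group 74: I m m a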
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(74, 'I m m a', transformations)
space_groups[74] = sg
space_groups['I m m a'] = sg
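# The orthorhombic groups end with I m m a above; the tetragonal system
# begins here.
# Space group 75: P 4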
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(75, 'P 4', transformations)
space_groups[75] = sg
space_groups['P 4'] = sg
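# Space group 76: P 41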
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,3])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(76, 'P 41', transformations)
space_groups[76] = sg
space_groups['P 41'] = sg
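# Space group 77: P 42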
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(77, 'P 42', transformations)
space_groups[77] = sg
space_groups['P 42'] = sg
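# Space group 78: P 43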
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,3])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(78, 'P 43', transformations)
space_groups[78] = sg
space_groups['P 43'] = sg
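# Space group 79: I 4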
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(79, 'I 4', transformations)
space_groups[79] = sg
space_groups['I 4'] = sg
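# Space group 80: I 41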
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(80, 'I 41', transformations)
space_groups[80] = sg
space_groups['I 41'] = sg
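# Space group 81: P -4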
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(81, 'P -4', transformations)
space_groups[81] = sg
space_groups['P -4'] = sg
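# Space group 82: I -4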
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(82, 'I -4', transformations)
space_groups[82] = sg
space_groups['I -4'] = sg
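# Space group 83: P 4/m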
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(83, 'P 4/m', transformations)
space_groups[83] = sg
space_groups['P 4/m'] = sg
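# Space group 84: P 42/m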
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(84, 'P 42/m', transformations)
space_groups[84] = sg
space_groups['P 42/m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(85, 'P 4/n :2', transformations)
space_groups[85] = sg
space_groups['P 4/n :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(86, 'P 42/n :2', transformations)
space_groups[86] = sg
space_groups['P 42/n :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(87, 'I 4/m', transformations)
space_groups[87] = sg
space_groups['I 4/m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-3,-3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,5,5])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(88, 'I 41/a :2', transformations)
space_groups[88] = sg
space_groups['I 41/a :2'] = sg
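# Every group is registered twice: once under its International Tables
# number and once under its Hermann-Mauguin symbol, so both keys resolve
# to the same SpaceGroup object:
assert space_groups[88] is space_groups['I 41/a :2']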
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(89, 'P 4 2 2', transformations)
space_groups[89] = sg
space_groups['P 4 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(90, 'P 4 21 2', transformations)
space_groups[90] = sg
space_groups['P 4 21 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,3])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,3])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(91, 'P 41 2 2', transformations)
space_groups[91] = sg
space_groups['P 41 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(92, 'P 41 21 2', transformations)
space_groups[92] = sg
space_groups['P 41 21 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(93, 'P 42 2 2', transformations)
space_groups[93] = sg
space_groups['P 42 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(94, 'P 42 21 2', transformations)
space_groups[94] = sg
space_groups['P 42 21 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,3])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,3])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(95, 'P 43 2 2', transformations)
space_groups[95] = sg
space_groups['P 43 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(96, 'P 43 21 2', transformations)
space_groups[96] = sg
space_groups['P 43 21 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(97, 'I 4 2 2', transformations)
space_groups[97] = sg
space_groups['I 4 2 2'] = sg
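# In the body-centred (I) groups such as 'I 4 2 2' above, the second half of
# the operator list repeats the first half with the centring translation
# (1/2,1/2,1/2) added.  A hedged sketch of deriving the centred set from the
# primitive half (add_centring is hypothetical; note it yields fractions on
# a common denominator rather than the reduced forms written out literally):
def add_centring(ops):
    centred = list(ops)
    for rot, num, den in ops:
        # num/den + 1/2 == (2*num + den)/(2*den), kept exact in integers
        centred.append((rot, 2*num + den, 2*den))
    return centred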
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(98, 'I 41 2 2', transformations)
space_groups[98] = sg
space_groups['I 41 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(99, 'P 4 m m', transformations)
space_groups[99] = sg
space_groups['P 4 m m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(100, 'P 4 b m', transformations)
space_groups[100] = sg
space_groups['P 4 b m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(101, 'P 42 c m', transformations)
space_groups[101] = sg
space_groups['P 42 c m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(102, 'P 42 n m', transformations)
space_groups[102] = sg
space_groups['P 42 n m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(103, 'P 4 c c', transformations)
space_groups[103] = sg
space_groups['P 4 c c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(104, 'P 4 n c', transformations)
space_groups[104] = sg
space_groups['P 4 n c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(105, 'P 42 m c', transformations)
space_groups[105] = sg
space_groups['P 42 m c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(106, 'P 42 b c', transformations)
space_groups[106] = sg
space_groups['P 42 b c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(107, 'I 4 m m', transformations)
space_groups[107] = sg
space_groups['I 4 m m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(108, 'I 4 c m', transformations)
space_groups[108] = sg
space_groups['I 4 c m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(109, 'I 41 m d', transformations)
space_groups[109] = sg
space_groups['I 41 m d'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(110, 'I 41 c d', transformations)
space_groups[110] = sg
space_groups['I 41 c d'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(111, 'P -4 2 m', transformations)
space_groups[111] = sg
space_groups['P -4 2 m'] = sg
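# 'P -4 2 m' and the -4 groups that follow pair improper fourfold
# rotoinversions (rotation part with determinant -1) with proper twofold
# axes.  A hedged classification helper using an explicit 3x3 determinant,
# so no linear-algebra import is assumed (det3 is illustrative only):
def det3(m):
    return (m[0][0]*(m[1][1]*m[2][2] - m[1][2]*m[2][1])
            - m[0][1]*(m[1][0]*m[2][2] - m[1][2]*m[2][0])
            + m[0][2]*(m[1][0]*m[2][1] - m[1][1]*m[2][0]))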
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(112, 'P -4 2 c', transformations)
space_groups[112] = sg
space_groups['P -4 2 c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(113, 'P -4 21 m', transformations)
space_groups[113] = sg
space_groups['P -4 21 m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(114, 'P -4 21 c', transformations)
space_groups[114] = sg
space_groups['P -4 21 c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(115, 'P -4 m 2', transformations)
space_groups[115] = sg
space_groups['P -4 m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(116, 'P -4 c 2', transformations)
space_groups[116] = sg
space_groups['P -4 c 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(117, 'P -4 b 2', transformations)
space_groups[117] = sg
space_groups['P -4 b 2'] = sg
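# Space group 118: P -4 n 2 (8 operations).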
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(118, 'P -4 n 2', transformations)
space_groups[118] = sg
space_groups['P -4 n 2'] = sg
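# Space group 119: I -4 m 2 (body-centred; the 8 base operations are repeated
# with the (1/2,1/2,1/2) centring translation added, giving 16 in total).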
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(119, 'I -4 m 2', transformations)
space_groups[119] = sg
space_groups['I -4 m 2'] = sg
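# Space group 120: I -4 c 2 (body-centred, 16 operations).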
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(120, 'I -4 c 2', transformations)
space_groups[120] = sg
space_groups['I -4 c 2'] = sg
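# Space group 121: I -4 2 m (body-centred, 16 operations).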
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(121, 'I -4 2 m', transformations)
space_groups[121] = sg
space_groups['I -4 2 m'] = sg
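# Space group 122: I -4 2 d (body-centred, 16 operations).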
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(122, 'I -4 2 d', transformations)
space_groups[122] = sg
space_groups['I -4 2 d'] = sg
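# Space group 123: P 4/m m m (16 operations, centrosymmetric).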
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(123, 'P 4/m m m', transformations)
space_groups[123] = sg
space_groups['P 4/m m m'] = sg
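# Space group 124: P 4/m c c (16 operations).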
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(124, 'P 4/m c c', transformations)
space_groups[124] = sg
space_groups['P 4/m c c'] = sg
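# Space group 125: P 4/n b m, origin choice 2 (16 operations).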
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(125, 'P 4/n b m :2', transformations)
space_groups[125] = sg
space_groups['P 4/n b m :2'] = sg
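# Space group 126: P 4/n n c, origin choice 2 (16 operations).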
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(126, 'P 4/n n c :2', transformations)
space_groups[126] = sg
space_groups['P 4/n n c :2'] = sg
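# Space group 127: P 4/m b m (16 operations).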
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(127, 'P 4/m b m', transformations)
space_groups[127] = sg
space_groups['P 4/m b m'] = sg
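# Space group 128: P 4/m n c (16 operations).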
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(128, 'P 4/m n c', transformations)
space_groups[128] = sg
space_groups['P 4/m n c'] = sg
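# Space group 129: P 4/n m m, origin choice 2 (16 operations).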
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(129, 'P 4/n m m :2', transformations)
space_groups[129] = sg
space_groups['P 4/n m m :2'] = sg
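# Space group 130: P 4/n c c, origin choice 2 (16 operations).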
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(130, 'P 4/n c c :2', transformations)
space_groups[130] = sg
space_groups['P 4/n c c :2'] = sg
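# Space group 131: P 42/m m c (16 operations).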
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(131, 'P 42/m m c', transformations)
space_groups[131] = sg
space_groups['P 42/m m c'] = sg
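# Space group 132: P 42/m c m (16 operations).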
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(132, 'P 42/m c m', transformations)
space_groups[132] = sg
space_groups['P 42/m c m'] = sg
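# Space group 133: P 42/n b c, origin choice 2 (16 operations).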
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(133, 'P 42/n b c :2', transformations)
space_groups[133] = sg
space_groups['P 42/n b c :2'] = sg
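# Space group 134: P 42/n n m, origin choice 2 (16 operations).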
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(134, 'P 42/n n m :2', transformations)
space_groups[134] = sg
space_groups['P 42/n n m :2'] = sg
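# Space group 135: P 42/m b c (16 operations).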
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(135, 'P 42/m b c', transformations)
space_groups[135] = sg
space_groups['P 42/m b c'] = sg
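# Space group 136: P 42/m n m (16 operations).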
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(136, 'P 42/m n m', transformations)
space_groups[136] = sg
space_groups['P 42/m n m'] = sg
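# Space group 137: P 42/n m c, origin choice 2 (16 operations).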
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(137, 'P 42/n m c :2', transformations)
space_groups[137] = sg
space_groups['P 42/n m c :2'] = sg
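# Space group 138: P 42/n c m, origin choice 2 (16 operations).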
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(138, 'P 42/n c m :2', transformations)
space_groups[138] = sg
space_groups['P 42/n c m :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(139, 'I 4/m m m', transformations)
space_groups[139] = sg
space_groups['I 4/m m m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(140, 'I 4/m c m', transformations)
space_groups[140] = sg
space_groups['I 4/m c m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-3,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-3,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,5,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,5])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,5,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,3,5])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(141, 'I 41/a m d :2', transformations)
space_groups[141] = sg
space_groups['I 41/a m d :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-3,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-3,-3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,5,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,5])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,5,5])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(142, 'I 41/a c d :2', transformations)
space_groups[142] = sg
space_groups['I 41/a c d :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(143, 'P 3', transformations)
space_groups[143] = sg
space_groups['P 3'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(144, 'P 31', transformations)
space_groups[144] = sg
space_groups['P 31'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(145, 'P 32', transformations)
space_groups[145] = sg
space_groups['P 32'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(146, 'R 3 :H', transformations)
space_groups[146] = sg
space_groups['R 3 :H'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(147, 'P -3', transformations)
space_groups[147] = sg
space_groups['P -3'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(148, 'R -3 :H', transformations)
space_groups[148] = sg
space_groups['R -3 :H'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(149, 'P 3 1 2', transformations)
space_groups[149] = sg
space_groups['P 3 1 2'] = sg
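
# Hedged helper sketch (not part of the generated table above; assumes N is
# NumPy, as used throughout this module): apply one symmetry operation to a
# point in fractional coordinates. Each transformation stores its translation
# as an integer numerator/denominator pair, so the translation vector is
# trans_num/trans_den; the result is reduced back into the unit cell (mod 1).
def apply_symmetry_op(frac_xyz, rot, trans_num, trans_den):
    frac_xyz = N.asarray(frac_xyz, dtype=float)
    return (rot.dot(frac_xyz) + trans_num/trans_den) % 1.0
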
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = | N.array([0,0,0]) | numpy.array |
import numpy as np
import pandas as pd
import xarray as xr
from scipy import stats
from scipy.special import digamma
from statsrat import resp_fun
from . import kernel
# https://github.com/LukasNeugebauer/LCM_python/blob/master/LCM.py
# https://github.com/sjgershm/LCM
class model:
'''
Class for Bayesian latent cause learning models (Anderson, 1991; Gershman, Blei & Niv, 2010).
Attributes
----------
name: str
Model name.
kernel: function
Temporal kernel for distance dependent prior on latent causes.
par_names: list
Names of the model's free parameters (strings).
pars: dict
Information about model parameters (min, max, default, description).
Methods
-------
simulate(self, trials, par_val = None, n_z = 10, n_p = 50, random_resp = False, ident = 'sim', sim_type = 'local_vb')
Simulate a trial sequence once with known model parameters using
either the .local_vb() or .particle() method. This is just a wrapper
for those simulation methods.
local_vb(self, trials, par_val = None, n_z = 10, random_resp = False, ident = 'sim')
Simulate the model using a combination of local MAP and variational Bayes.
particle_filter(self, trials, par_val = None, n_z = 10, n_p = 50, random_resp = False, ident = 'sim')
Simulate the model using a particle filter algorithm.
Notes
-----
The local_vb method uses a local MAP approximation for two purposes:
approximating the prior on latent causes, and deciding when to add a
new latent cause. Everything else is done via streaming variational Bayes.
Currently both outcomes (y) and predictor stimuli (x) are drawn from independent
normal distributions with the following hyperpriors:
mu | sigma^2 ~ N(tau1/n, sigma^2/n)
1/sigma^2 ~ Gamma((n + 3)/2, (n tau2 - tau1^2)/(2 n))
In the future I may add other distribution options, e.g. multinomial or Bernoulli.
*** MODIFY THE MINIMUM FOR THE NUMERATOR TO THE PARTICLE FILTER METHOD ***
*** EXPLAIN THE NORMAL DISTRIBUTION PARAMETERIZATION ***
Relevant Papers
---------------
<NAME>. (1991).
The adaptive nature of human categorization.
Psychological Review, 98(3), 409.
<NAME>., <NAME>., <NAME>., <NAME>., & <NAME>. (2013).
Streaming variational Bayes.
ArXiv Preprint ArXiv:1307.6769.
<NAME>., <NAME>., & <NAME>. (2010).
Context, learning, and extinction.
Psychological Review, 117(1), 197–209.
<NAME>., & <NAME>. (2012).
Exploring a latent cause theory of classical conditioning.
Learning & Behavior, 40(3), 255–268.
<NAME>., & <NAME>. (2017). Streaming clustering with Bayesian nonparametric models.
Neurocomputing, 258, 52–62.
<NAME>., <NAME>., & <NAME>. (2010).
Rational approximations to rational models: Alternative algorithms
for category learning. Psychological Review, 117(4), 1144–1167.
<NAME>., <NAME>., & <NAME>. (n.d.).
Time-Sensitive Dirichlet Process Mixture Models.
'''
def __init__(self, name, kernel):
        '''
        Parameters
        ----------
        name: str
            Model name.
        kernel: function
            Temporal kernel for distance dependent prior on latent causes.
        '''
self.name = name
self.kernel = kernel
# determine the model's parameter space
self.par_names = kernel.par_names + ['prior_tau2_x', 'prior_nu_x', 'prior_tau2_y', 'prior_nu_y', 'stick', 'alpha', 'resp_scale']
self.pars = pars.loc[self.par_names]
def simulate(self, trials, par_val = None, n_z = 10, n_p = 50, random_resp = False, ident = 'sim', sim_type = 'local_vb'):
'''
Simulate a trial sequence once with known model parameters using
either the .local_vb() or .particle() method.
Parameters
----------
trials: dataset (xarray)
Time step level experimental data (cues, outcomes etc.).
par_val: list, optional
Learning model parameters (floats or ints).
n_z: int, optional
Maximum number of latent causes. Defaults to 10.
n_p: int, optional
Number of particles. Defaults to 50. Only relevant if using
the .particle() simulation methods (i.e. sim_type = 'particle').
        random_resp: bool, optional
Whether or not simulated responses should be random. Defaults
to false, in which case behavior (b) is identical to expected
behavior (b_hat); this saves some computation time. If true
and resp_type is 'choice', then discrete responses are selected
using b_hat as choice probabilities. If true and resp_type is
'exct' or 'supr' then a small amount of normally distributed
noise (sd = 0.01) is added to b_hat.
ident: str, optional
Individual participant identifier. Defaults to 'sim'.
sim_type: str, optional
Determines what kind of simulation to perform. The options are
'local_vb' (combination of local MAP and mean field variational
Bayes updates) and 'particle' (particle filter). Defaults to
'local_vb'.
Returns
-------
ds: dataset
Simulation data.
Notes
-----
The .simulate() method is just a wrapper for the .local_vb() and
        .particle() methods, with the choice between these methods indicated
by the sim_type argument. The .local_vb() and .particle() methods
can also be used on their own, without using the .simulate() method
as a wrapper. The .simulate() method is only present in latent cause
        models in order to interface with the rest of the statsrat package
(e.g. functions for performing model fitting and OATs).
Note on terminology: "cluster" = "latent cause"
'''
method_dict = {'local_vb': lambda par_val: self.local_vb(trials, par_val, n_z, random_resp, ident),
'particle': lambda par_val: self.particle_filter(trials, par_val, n_z, n_p, random_resp, ident)}
return method_dict[sim_type](par_val)
def local_vb(self, trials, par_val = None, n_z = 10, random_resp = False, ident = 'sim'):
'''
Simulate the model using a combination of local MAP and variational Bayes.
Parameters
----------
trials: dataset (xarray)
Time step level experimental data (cues, outcomes etc.).
par_val: list, optional
Learning model parameters (floats or ints).
n_z: int, optional
Maximum number of latent causes. Defaults to 10.
        random_resp: bool, optional
Whether or not simulated responses should be random. Defaults
to false, in which case behavior (b) is identical to expected
behavior (b_hat); this saves some computation time. If true
and resp_type is 'choice', then discrete responses are selected
using b_hat as choice probabilities. If true and resp_type is
'exct' or 'supr' then a small amount of normally distributed
noise (sd = 0.01) is added to b_hat.
ident: str, optional
Individual participant identifier. Defaults to 'sim'.
Returns
-------
ds: dataset
Simulation data.
Explanation of variables in ds
------------------------------
y_psb: indicator vector for outcomes (y) that are possible on the trial (from the learner's perspective)
y_lrn: indicator vector for outcomes (y) for which there is feedback and hence learning will occur
y_hat: outcome predictions
b_hat: expected value of behavioral response
b: vector representing actual behavioral response (identical to b_hat unless the random_resp argument is set to True)
est_mu_x: estimated mean of x
est_sigma_x: estimated standard deviation of x
est_precision_x: estimated precision of x
est_mu_y: estimated mean of y
est_sigma_y: estimated standard deviation of y
est_precision_y: estimated precision of y
n: estimated number of observations assigned to each latent cause
z: hard latent cause assignments
phi_x: posterior of latent causes after observing x, but before observing y
phi: posterior of latent causes after observing both x and y
N: estimated number of latent causes
E_log_prior: expected log-prior for latent causes
E_log_lik_x: expected log-likelihood of x for latent causes
E_log_lik_y: expected log-likelihood of y for latent causes
b_index: index of behavioral response (only present if response type is 'choice' and random_resp is True)
b_name: name of behavioral response (only present if response type is 'choice' and random_resp is True)
'''
# use default parameters unless others are given
if par_val is None:
sim_pars = self.pars['default']
else:
# check that parameter values are within acceptable limits; if so assemble into a pandas series
# for some reason, the optimization functions go slightly outside the specified bounds
abv_min = par_val >= self.pars['min'] - 0.0001
blw_max = par_val <= self.pars['max'] + 0.0001
all_ok = np.prod(abv_min & blw_max)
assert all_ok, 'par_val outside acceptable limits'
sim_pars = pd.Series(par_val, self.pars.index)
# set stuff up
x = np.array(trials['x'], dtype = 'float64')
y = np.array(trials['y'], dtype = 'float64')
y_psb = np.array(trials['y_psb'], dtype = 'float64')
y_lrn = np.array(trials['y_lrn'], dtype = 'float64')
x_names = list(trials.x_name.values)
y_names = list(trials.y_name.values)
n_t = x.shape[0] # number of time points
n_x = x.shape[1] # number of stimulus attributes
n_y = y.shape[1] # number of outcomes/response options
y_hat = np.zeros((n_t, n_y)) # outcome predictions
b_hat = np.zeros((n_t, n_y)) # expected behavior
time = trials['time'].values # real world time (in arbitrary units, starting at 0)
x_sofar = np.zeros(n_x) # keep track of cues (x) observed so far
# prior for x parameters
tau1_x = np.zeros((n_t + 1, n_z, n_x))
tau2_x = sim_pars['prior_tau2_x']*np.ones((n_t + 1, n_z, n_x))
nu_x = sim_pars['prior_nu_x']*np.ones((n_t + 1, n_z, n_x))
# prior for y parameters
tau1_y = np.zeros((n_t + 1, n_z, n_y))
tau2_y = sim_pars['prior_tau2_y']*np.ones((n_t + 1, n_z, n_y))
nu_y = sim_pars['prior_nu_y']*np.ones((n_t + 1, n_z, n_y))
E_log_prior = np.zeros((n_t, n_z))
E_log_lik_x = np.zeros((n_t, n_z))
E_log_lik_y = np.zeros((n_t, n_z))
est_mu_x = np.zeros((n_t, n_z, n_x))
prior_E_eta2_x = -(sim_pars['prior_nu_x']*(sim_pars['prior_nu_x'] + 3))/(2*sim_pars['prior_nu_x']*sim_pars['prior_tau2_x'])
est_sigma_x = (1/np.sqrt(-2*prior_E_eta2_x))* | np.ones((n_t, n_z, n_x)) | numpy.ones |
import torch
import torch.nn as nn
import torchvision as tv
from torchvision import transforms
from torchvision.datasets.folder import default_loader
from torchvision.ops import RoIPool, RoIAlign
import numpy as np
from pathlib import Path
from PIL import Image
use_cuda = torch.cuda.is_available()
device = torch.device("cuda" if use_cuda else "cpu")
print(device)
class AdaptiveConcatPool2d(nn.Module):
def __init__(self, sz=None):
super().__init__()
sz = sz or (1,1)
self.ap = nn.AdaptiveAvgPool2d(sz)
self.mp = nn.AdaptiveMaxPool2d(sz)
def forward(self, x): return torch.cat([self.mp(x), self.ap(x)], 1)
def get_idx(batch_size, n_output, device=None):
idx = torch.arange(float(batch_size), dtype=torch.float, device=device).view(1, -1)
idx = idx.repeat(n_output, 1, ).t()
idx = idx.contiguous().view(-1, 1)
return idx
def get_blockwise_rois(blk_size, img_size=None):
if img_size is None: img_size = [1, 1]
y = np.linspace(0, img_size[0], num=blk_size[0] + 1)
x = np.linspace(0, img_size[1], num=blk_size[1] + 1)
a = []
for n in range(len(y) - 1):
for m in range(len(x) - 1):
a += [x[m], y[n], x[m + 1], y[n + 1]]
return a
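
# Hedged sanity check for get_blockwise_rois (illustrative sizes): a 2x2 grid
# over a 64x64 image yields four [x0, y0, x1, y1] boxes, row by row.
def _blockwise_rois_example():
    rois = get_blockwise_rois((2, 2), [64, 64])
    expected = [0, 0, 32, 32,   32, 0, 64, 32,
                0, 32, 32, 64,  32, 32, 64, 64]
    assert np.allclose(rois, expected)
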
class RoIPoolModel(nn.Module):
rois = None
def __init__(self, backbone='resnet18', pretrained=False): # set to true if you need to train it
super().__init__()
        if backbone == 'resnet18':
            model = tv.models.resnet18(pretrained=pretrained)
cut = -2
spatial_scale = 1/32
self.model_type = self.__class__.__name__
self.body = nn.Sequential(*list(model.children())[:cut])
self.head = nn.Sequential(
AdaptiveConcatPool2d(),
nn.Flatten(),
nn.BatchNorm1d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True),
nn.Dropout(p=0.25, inplace=False),
nn.Linear(in_features=1024, out_features=512, bias=True),
nn.ReLU(inplace=True),
nn.BatchNorm1d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True),
nn.Dropout(p=0.5, inplace=False),
nn.Linear(in_features=512, out_features=1, bias=True)
)
self.roi_pool = RoIPool((2,2), spatial_scale)
def forward(self, x):
        # compatible with fastai models
if isinstance(x, list) or isinstance(x, tuple):
im_data, self.rois = x
else:
im_data = x
feats = self.body(im_data)
batch_size = im_data.size(0)
if self.rois is not None:
rois_data = self.rois.view(-1, 4)
n_output = int(rois_data.size(0) / batch_size)
idx = get_idx(batch_size, n_output, im_data.device)
indexed_rois = torch.cat((idx, rois_data), 1)
feats = self.roi_pool(feats, indexed_rois)
preds = self.head(feats)
return preds.view(batch_size, -1)
def input_block_rois(self, blk_size=(20, 20), img_size=(1, 1), batch_size=1, include_image=True, device=None):
a = [0, 0, img_size[1], img_size[0]] if include_image else []
a += get_blockwise_rois(blk_size, img_size)
t = torch.tensor(a).float().to(device)
self.rois = t.unsqueeze(0).repeat(batch_size, 1, 1).view(batch_size, -1).view(-1, 4)
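
# Hedged usage sketch (not part of the original model code; untrained weights,
# input shape, and the 20x20 grid are illustrative assumptions): score one
# image globally and blockwise with the pooled-ROI head defined above.
def example_blockwise_scores(image_tensor):
    # image_tensor: float tensor of shape (1, 3, H, W), with H and W >= 32
    m = RoIPoolModel(pretrained=False)
    m.eval()
    h, w = image_tensor.shape[-2:]
    m.input_block_rois(blk_size=(20, 20), img_size=(h, w), batch_size=1,
                       device=image_tensor.device)
    with torch.no_grad():
        preds = m(image_tensor)  # shape (1, 1 + 20*20): full image + blocks
    return preds[0, 0].item(), preds[0, 1:].view(20, 20)
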
def img_files_in(path):
IMAGE_EXTS = '.jpg', '.jpeg', '.bmp', '.png'
a = [f for f in Path(path).rglob('*.*') if f.name.lower().endswith(IMAGE_EXTS)]
return np.array(a) # a[mask]
def get_peak(data):
    from collections import Counter
    # cast each value to int before counting (int(data) on a sequence would fail)
    values = np.asarray(data).astype(int).tolist()
    most_common, num_most_common = Counter(values).most_common(1)[0]
    return most_common
def normalize(x, peak, std_left, std_right, N_std, new_peak=None):
if new_peak is None:
new_peak = peak
x = np.array(x)
left, right = x < peak, x >= peak
    x[left] = new_peak + new_peak*(x[left]-peak)/(N_std*std_left)
    x[right] = new_peak + (100-new_peak)*(x[right]-peak)/(N_std*std_right)
# x [x < 0] = 0
# x [x > 100] = 100
return x.tolist()
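
# Hedged sanity check for normalize() with illustrative numbers: the peak maps
# to itself (new_peak defaults to peak), and points N_std standard deviations
# to either side map to 0 and 100 respectively.
def _normalize_sanity_check():
    peak, s_left, s_right, n_std = 72, 7.8, 4.1, 3.5
    vals = normalize([peak - n_std*s_left, peak, peak + n_std*s_right],
                     peak, s_left, s_right, N_std=n_std)
    assert np.allclose(vals, [0.0, 72.0, 100.0])
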
class InferenceModel:
blk_size = 20, 20
categories = 'Bad', 'Poor', 'Fair', 'Good', 'Excellent'
def __init__(self, model, path_to_model_state: Path):
self.transform = transforms.ToTensor()
model_state = torch.load(path_to_model_state, map_location=lambda storage, loc: storage)
self.model = model
self.model.load_state_dict(model_state["model"])
self.model = self.model.to(device)
self.model.eval()
def predict_from_file(self, image_path: Path, render=False):
image = default_loader(image_path)
return self.predict(image)
def predict_from_pil_image(self, image: Image):
image = image.convert("RGB")
return self.predict(image)
# normalization
N_std = 3.5
new_peak = None
norm_params = 72, 7.798274017370107, 4.118047289170692
def normalize(self, x):
x = normalize(x, *self.norm_params,
N_std=self.N_std, new_peak=self.new_peak)
return np.clip(x, 0, 99.9) # 100//20==5 out-of-range
def adapt_from_dir(self, path):
from collections import Counter
        global_scores = [self.predict_from_file(f)['global_score'] for f in img_files_in(path)]
x = np.array(global_scores)
x_peak, _ = Counter(x.astype(int)).most_common(1)[0]
# get std based on the peak value
left, right = x < x_peak, x >= x_peak
std_left = np.concatenate([x[left], 2*x_peak-x[left]]).std() # reflection
std_right = | np.concatenate([x[right], 2*x_peak-x[right]]) | numpy.concatenate |
"""Gym environment for the Real Robot Challenge Phase 1 (Simulation)."""
import time
import pybullet as p
import numpy as np
import gym
import itertools
from scipy.spatial.transform import Rotation as R
from rrc_simulation import TriFingerPlatform
from rrc_simulation import camera
from rrc_simulation.code.utils import sample_cube_surface_points, apply_transform, VisualMarkers, is_valid_action, action_type_to
from pybullet_planning import plan_joint_motion
from pybullet_planning.interfaces.robots.collision import get_collision_fn
from rrc_simulation.gym_wrapper.envs import cube_env
from rrc_simulation.gym_wrapper.envs.cube_env import ActionType
from rrc_simulation import collision_objects
import cv2
import copy
import functools
from rrc_simulation.code.align_rotation import align_rotation
from rrc_simulation.code.const import EXCEP_MSSG
class FlatObservationWrapper(gym.ObservationWrapper):
def __init__(self, env):
super().__init__(env)
low = [
self.observation_space[name].low.flatten()
for name in self.observation_names
]
high = [
self.observation_space[name].high.flatten()
for name in self.observation_names
]
self.observation_space = gym.spaces.Box(
low=np.concatenate(low), high=np.concatenate(high)
)
def observation(self, obs):
observation = [obs[name].flatten() for name in self.observation_names]
observation = np.concatenate(observation)
return observation
class IKActionWrapper(gym.Wrapper):
def __init__(self, env):
super().__init__(env)
self.action_space = gym.spaces.Box(low=-np.ones(9), high=np.ones(9),
dtype=np.float32)
self._ob = None
self.frameskip = self.unwrapped.frameskip
def reset(self):
self._ob = self.env.reset()
self.dt = self.unwrapped.platform.simfinger.time_step_s
self.ik = self.unwrapped.platform.simfinger.pinocchio_utils.inverse_kinematics
return self._ob
def _solve_ik(self, action):
tips = self._ob['robot_tip_positions']
joints = self._ob['robot_position']
new_robot_pos = []
action = action * self.frameskip * self.dt
tips_desired = [tips[i] + action[3*i: 3*(i+1)] for i in range(3)]
for i in range(3):
new_joints = self.ik(i, tips_desired[i], joints)
new_robot_pos.append(new_joints[3*i:3*(i+1)])
new_pos = np.concatenate(new_robot_pos)
for i in range(3):
try:
self._verify_ik(new_pos, tips_desired[i], i)
except Exception as e:
print(e)
return np.clip(new_pos, self.env.action_space.low,
self.env.action_space.high)
def step(self, action):
self._ob, r, done, info = self.env.step(self._solve_ik(action))
return self._ob, r, done, info
def _verify_ik(self, j, x, finger_id, tol=0.001):
x_actual = self.unwrapped.platform.forward_kinematics(j)[finger_id]
dist = np.linalg.norm(x - x_actual)
if dist >= tol:
raise ValueError(f"IK Failed with error: {dist}!")
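
# Hedged usage sketch: composing the wrappers above around a base cube
# environment. Construction of `base_env` is omitted (hypothetical here);
# see rrc_simulation's cube_env module for the real constructor.
def make_ik_env(base_env):
    env = IKActionWrapper(base_env)
    obs = env.reset()
    # Actions are 9-D Cartesian fingertip displacements in [-1, 1]
    # (3 fingers x xyz), converted to joint targets via inverse kinematics.
    obs, reward, done, info = env.step(np.zeros(9, dtype=np.float32))
    return env
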
class JointConfInitializationWrapper(gym.Wrapper):
'''
    Every time the environment resets, samples three points on the cube
    and initializes the fingers so that the fingertips are on those points.
'''
def __init__(self, env, heuristic="pinch"):
super().__init__(env)
self.heuristic = heuristic
def reset(self):
obs = self.env.reset()
return self.set_init_pose(obs)
def set_init_pose(self, obs):
if self.env.visualization:
self.visual_markers = VisualMarkers()
if getattr(self.env, 'ik', None) is None:
self.ik = self.env.platform.simfinger.pinocchio_utils.inverse_kinematics
# get initial joint angles and apply
init_tips, init_joints = self.get_initial_conf(obs)
self.init_tip_positions = init_tips
self.env.platform.simfinger.reset_finger_positions_and_velocities(init_joints)
# modify the first observation
if "robot_position" in obs:
obs['robot_position'] = init_joints
if "robot_tip_positions" in obs:
obs["robot_tip_positions"] = init_tips
return obs
def get_initial_conf(self, obs):
tips = obs['robot_tip_positions']
joints = obs['robot_position']
org_joint_conf = self.get_joint_conf()
num_trials = 20000
retry = 0
while retry < num_trials:
            # reset joint configuration (necessary because the planner
            # messes up the pose)
self.env.platform.simfinger.reset_finger_positions_and_velocities(
org_joint_conf)
self.cube_tip_positions = sample_cube_surface_points(
cube_halfwidth=0.0500,
heuristic=self.heuristic,
shrink_region=0.5
)
target_tip_positions = apply_transform(obs['object_position'],
obs['object_orientation'],
self.cube_tip_positions)
            if self.heuristic == 'center_of_two':
self.used_finger_ids = self.select_two_fingers(obs['goal_object_position'] - obs['object_position'])
_, inds = self.assign_positions_to_fingers(
tips[self.used_finger_ids, :],
target_tip_positions[:len(self.used_finger_ids)]
)
inds = self.used_finger_ids[list(inds)].tolist()
inds = inds + [3 - sum(inds)]
target_tip_positions = target_tip_positions[inds, :]
else:
target_tip_positions, inds = self.assign_positions_to_fingers(
tips, target_tip_positions)
self.cube_tip_positions = self.cube_tip_positions[inds, :]
# Run IK to get the target joint configuration
target_joint_conf = self.solve_for_joint_conf(target_tip_positions, joints)
if target_joint_conf is None:
continue
# Validate that the joint conf is reachable (IK solver sometimes returns an infeasible solution)
if not is_valid_action(target_joint_conf, action_type='position'):
# print('(initial conf) IK solver returned infeasible joint conf:', target_joint_conf)
continue
# Run motion planning to test the feasibility.
# Without this, sometimes fingers are initialized to go through the cube
planned_motion = plan_joint_motion(
self.env.platform.simfinger.finger_id,
self.env.platform.simfinger.pybullet_link_indices,
target_joint_conf,
self_collisions=True,
obstacles=[self.env.platform.cube.block],
diagnosis=False
)
# Test if the end pose is in collision
if planned_motion is not None:
obstacle = self._create_dummy_goal_object(obs) # dummy object for collision check
collision_fn = get_collision_fn(
self.env.platform.simfinger.finger_id,
self.env.platform.simfinger.pybullet_link_indices,
obstacles=[obstacle.block],
self_collisions=True,
max_distance=0
)
endpose = self._get_endpose(obs)
# Validate if endpose is reachable (IK solver sometimes returns an infeasible solution)
if endpose is not None and is_valid_action(endpose, action_type='position'):
endpose_in_collision = collision_fn(endpose, diagnosis=False)
# if endpose_in_collision:
# print('endpose is in collision')
else:
# print('IK solver returned infeasible joint conf:', endpose)
endpose_in_collision = True
del obstacle
if not endpose_in_collision:
break
retry += 1
# print('motion planning failed. retrying...\tcount:', retry)
if planned_motion is None:
raise RuntimeError('No feasible path to the target position is found.')
        # reset joint configuration (necessary because the planner messes up the pose)
self.env.platform.simfinger.reset_finger_positions_and_velocities(org_joint_conf)
# visualize sampled points
if self.env.visualization:
self.visual_markers.add(target_tip_positions, color=(0, 1, 1, 0.5))
return target_tip_positions, target_joint_conf
def get_joint_conf(self):
robot_id = self.env.platform.simfinger.finger_id
link_indices = self.env.platform.simfinger.pybullet_link_indices
joint_conf = [joint_state[0] for joint_state in p.getJointStates(
robot_id, link_indices)]
return np.asarray(joint_conf)
def assign_positions_to_fingers(self, tips, goal_tips):
min_cost = 1000000
opt_tips = []
opt_inds = []
for v in itertools.permutations(range(len(tips))):
sorted_tips = goal_tips[v, :]
cost = | np.linalg.norm(sorted_tips - tips) | numpy.linalg.norm |
import numpy as np
from PIL import Image
from scipy import special
# PSF functions
def scalar_a(x):
if x == 0:
return 1.0
else:
return (special.jn(1,2*np.pi*x)/(np.pi*x))**2
a = np.vectorize(scalar_a)
def s_b(x, NA=0.8, n=1.33):
if x == 0:
return 0
else:
return (NA/n)**2*(special.jn(2,2*np.pi*x)/(np.pi*x))**2
b = np.vectorize(s_b)
def h00(x, NA=0.8, n=1.33):
return a(x) + 2*b(x, NA, n)
def h20(x, NA=0.8, n=1.33):
return (-a(x) + 4*b(x, NA, n))/np.sqrt(5)
# OTF functions
def myacos(x):
return np.nan_to_num(np.arccos(np.abs(x/2)))
def mysqrt(x):
return np.nan_to_num((np.abs(x/2))*np.sqrt(1 - (np.abs(x/2))**2))
def A(x):
return (2/np.pi)*(myacos(x) - mysqrt(x))
def B(x, NA=0.8, n=1.33):
N = (1/(np.pi))*((NA/n)**2)
poly = (3.0 - 2.0*(np.abs(x/2)**2))
return N*(myacos(x) - poly*mysqrt(x))
def H00(x, NA=0.8, n=1.33):
return (A(x) + 2*B(x, NA=NA, n=n))/(1 + (NA/n)**2)
def H20(x, NA=0.8, n=1.33):
return (-A(x) + 4*B(x, NA=NA, n=n))/(np.sqrt(5)*(1 + (NA/n)**2))
# File I/O
def save_tiff(image, filename):
im = Image.fromarray(image) # float32
im.save(filename, "TIFF")
def load_tiff(filename):
image = Image.open(filename, mode='r')
return np.array(image, dtype='float32')
def cs(arr):
    return arr[:, arr.shape[0] // 2]
# Fourier transform
def myfft(image, pad=1000):
N = image.shape[0]
padded_image = np.pad(image, pad_width=pad, mode='constant')
F = np.fft.fftshift(
np.fft.fftn(
np.fft.ifftshift(padded_image)
))
xF = np.fft.fftshift(np.fft.fftfreq(2*pad + N, 4/N))
return xF, | np.abs(F) | numpy.abs |
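
# Hedged numerical sketch (grid size, padding, and normalization are
# illustrative choices, not values from the original analysis): sample h00 on
# a 2-D grid consistent with the 4/N sample spacing assumed in myfft, Fourier
# transform it, and compare the central cross-section to the analytic OTF H00.
def check_h00_otf(N_px=128, pad=64, NA=0.8, n=1.33):
    x = np.linspace(-2, 2, N_px)
    X, Y = np.meshgrid(x, x)
    psf = h00(np.sqrt(X**2 + Y**2), NA, n)
    xF, F = myfft(psf, pad=pad)
    mid = F.shape[0] // 2
    numeric = F[mid] / F[mid].max()       # normalized numerical OTF slice
    analytic = H00(np.abs(xF), NA=NA, n=n)
    return xF, numeric, analytic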
#! /usr/bin/env python
import warnings
warnings.filterwarnings('ignore')
import numpy as np
import scipy.interpolate as spi
from astropy.io import fits as pf
import matplotlib.pyplot as plt
import multiprocessing as mp
from ..lib import manageevent as me
from ..lib import sort_nicely as sn
from ..lib import centroid, suntimecorr, utc_tt
import time, os, sys, shutil
import hst_scan as hst
from importlib import reload
#reload(hst)
def reduceWFC3(eventname, eventdir, madVariable=False, madVarSet=False, isplots=False):
'''
    Reduces data images and calculates optimal spectra.
Parameters
----------
isplots : Set True to produce plots
Returns
-------
None
Remarks
-------
    Requires eventname_params file to initialize event object
Steps
-----
1. Read in all data frames and header info
2. Record JD, scan direction, etc
3. Group images by frame, batch, and orbit number
4. Calculate centroid of direct image(s)
5. Calculate trace and 1D+2D wavelength solutions
6. Make flats, apply flat field correction
7. Manually mask regions
8. Apply light-time correction
9. Compute difference frames
10. Compute scan length
11. Perform outlier rejection of BG region
12. Background subtraction
13. Compute 2D drift, apply rough (integer-pixel) correction
14. Full-frame outlier rejection for time-series stack of NDRs
15. Apply sub-pixel 2D drift correction
16. Extract spectrum through summation
17. Compute median frame
18. Optimal spectral extraction
19. Save results, plot figures
History
-------
Written by <NAME> January 2017
'''
evpname = eventname + '_params'
#exec 'import ' + evpname + ' as evp' in locals()
#exec('import ' + evpname + ' as evp', locals())
exec('import ' + evpname + ' as evp', globals())
reload(evp)
t0 = time.time()
# Initialize event object
# All parameters are specified in this file
ev = evp.event_init()
try:
aux = evp.aux_init()
except:
print("Need to update event file to include auxiliary object.")
return
ev.eventdir = eventdir
# Create directories
if not os.path.exists(ev.eventdir):
os.makedirs(ev.eventdir)
if not os.path.exists(ev.eventdir+"/figs"):
os.makedirs(ev.eventdir+"/figs")
# Copy ev_params file
shutil.copyfile(evpname + '.py', ev.eventdir+'/'+evpname+'.py')
# Reset attribute for MAD variable (added by <NAME>)
if madVariable:
setattr(ev,madVariable,madVarSet)
ev.madVarStr = madVariable
ev.madVariable = madVarSet
# Object
ev.obj_list = [] #Do not rename ev.obj_list!
    if ev.objfile is None:
#Retrieve files within specified range
for i in range(ev.objstart,ev.objend):
ev.obj_list.append(ev.loc_sci + ev.filebase + str(i).zfill(4) + ".fits")
elif ev.objfile == 'all':
#Retrieve all files from science directory
for fname in os.listdir(ev.loc_sci):
ev.obj_list.append(ev.loc_sci +'/'+ fname)
ev.obj_list = sn.sort_nicely(ev.obj_list)
else:
#Retrieve filenames from list
files = np.genfromtxt(ev.objfile, dtype=str, comments='#')
for fname in files:
ev.obj_list.append(ev.loc_sci +'/'+ fname)
# handle = open(ev.objfile)
# for line in handle:
# print(line)
# ev.obj_list.append(ev.loc_sci + line)
# handle.close()
ev.n_files = len(ev.obj_list)
#Determine image size and filter/grism
hdulist = pf.open(ev.obj_list[0].rstrip())
nx = hdulist['SCI',1].header['NAXIS1']
ny = hdulist['SCI',1].header['NAXIS2']
ev.grism = hdulist[0].header['FILTER']
ev.detector = hdulist[0].header['DETECTOR']
ev.flatoffset = [[-1*hdulist['SCI',1].header['LTV2'], -1*hdulist['SCI',1].header['LTV1']]]
n_reads = hdulist['SCI',1].header['SAMPNUM']
hdulist.close()
# Record JD and exposure times
print('Reading data & headers, recording JD and exposure times...')
ywindow = ev.ywindow[0]
xwindow = ev.xwindow[0]
subny = ywindow[1] - ywindow[0]
subnx = xwindow[1] - xwindow[0]
subdata = np.zeros((ev.n_files,n_reads,subny,subnx))
suberr = np.zeros((ev.n_files,n_reads,subny,subnx))
data_mhdr = []
data_hdr = []
ev.jd = np.zeros(ev.n_files)
ev.exptime = np.zeros(ev.n_files)
for m in range(ev.n_files):
data, err, hdr, mhdr = hst.read(ev.obj_list[m].rstrip())
subdata[m] = data[0,:,ywindow[0]:ywindow[1],xwindow[0]:xwindow[1]]
suberr [m] = err [0,:,ywindow[0]:ywindow[1],xwindow[0]:xwindow[1]]
data_mhdr.append(mhdr[0])
data_hdr.append(hdr[0])
ev.jd[m] = 2400000.5 + 0.5*(data_mhdr[m]['EXPSTART'] + data_mhdr[m]['EXPEND'])
ev.exptime[m] = data_mhdr[m]['EXPTIME']
# Assign scan direction
ev.scandir = np.zeros(ev.n_files, dtype=int)
ev.n_scan0 = 0
ev.n_scan1 = 0
try:
scan0 = data_mhdr[0]['POSTARG2']
scan1 = data_mhdr[1]['POSTARG2']
for m in range(ev.n_files):
if data_mhdr[m]['POSTARG2'] == scan0:
ev.n_scan0 += 1
elif data_mhdr[m]['POSTARG2'] == scan1:
ev.scandir[m] = 1
ev.n_scan1 += 1
else:
print('WARNING: Unknown scan direction for file ' + str(m) + '.')
print("# of files in scan direction 0: " + str(ev.n_scan0))
print("# of files in scan direction 1: " + str(ev.n_scan1))
except:
ev.n_scan0 = ev.n_files
print("Unable to determine scan direction, assuming unidirectional.")
# Group frames into frame, batch, and orbit number
ev.framenum, ev.batchnum, ev.orbitnum = hst.groupFrames(ev.jd)
# Determine read noise and gain
ev.readNoise = np.mean((data_mhdr[0]['READNSEA'],
data_mhdr[0]['READNSEB'],
data_mhdr[0]['READNSEC'],
data_mhdr[0]['READNSED']))
print('Read noise: ' + str(ev.readNoise))
print('Gain: ' + str(ev.gain))
#ev.v0 = (ev.readNoise/ev.gain)**2 #Units of ADU
ev.v0 = ev.readNoise**2 #Units of electrons
# Calculate centroid of direct image(s)
ev.img_list = []
if isinstance(ev.directfile, str) and ev.directfile.endswith('.fits'):
ev.img_list.append(ev.loc_cal + ev.directfile)
else:
#Retrieve filenames from list
handle = open(ev.directfile)
for line in handle:
ev.img_list.append(ev.loc_cal + line)
handle.close()
ev.n_img = len(ev.img_list)
ev.centroid, ev.directim = hst.imageCentroid(ev.img_list, ev.centroidguess, ev.centroidtrim, ny, ev.obj_list[0])
"""
# Calculate theoretical centroids along spatial scan direction
ev.centroids = []
for j in range(ev.n_img):
ev.centroids.append([])
for i in range(ev.n_spec):
# Can assume that scan direction is only in y direction (no x component)
# because we will apply drift correction to make it so
ev.centroids[j].append([np.zeros(subny)+ev.centroid[j][0],ev.centroid[j][1]])
# Calculate trace
print("Calculating 2D trace and wavelength assuming " + ev.grism + " filter/grism...")
ev.xrange = []
for i in range(ev.n_spec):
ev.xrange.append(np.arange(ev.xwindow[i][0],ev.xwindow[i][1]))
ev.trace2d = []
ev.wave2d = []
for j in range(ev.n_img):
ev.trace2d.append([])
ev.wave2d.append([])
for i in range(ev.n_spec):
ev.trace2d[j].append(hst.calcTrace(ev.xrange[i], ev.centroids[j][i], ev.grism))
ev.wave2d[j].append(hst.calibrateLambda(ev.xrange[i], ev.centroids[j][i], ev.grism)/1e4) #wavelength in microns
if ev.detector == 'IR':
print("Calculating slit shift values using last frame...")
i = 0 #Use first spectrum
j = -1 #Use last image
#spectrum = subdata[j]
spectrum = pf.getdata(ev.obj_list[j])
ev.slitshift, ev.shift_values, ev.yfit = hst.calc_slitshift2(spectrum, ev.xrange[i], ev.ywindow[i], ev.xwindow[i])
ev.wavegrid = ev.wave2d
ev.wave = []
for j in range(ev.n_img):
ev.wave.append([])
for i in range(ev.n_spec):
ev.wave[j].append(np.mean(ev.wavegrid[j][i],axis=0))
else:
# Assume no slitshift for UVIS
ev.yfit = range(ev.ywindow[0][1] - ev.ywindow[0][0])
ev.slitshift = np.zeros(ev.ywindow[0][1] - ev.ywindow[0][0])
ev.shift_values = np.zeros(len(ev.yfit))
# Make list of master flat field frames
subflat = np.ones((ev.n_img,ev.n_spec,subny,subnx))
flatmask = np.ones((ev.n_img,ev.n_spec,subny,subnx))
if ev.flatfile == None:
print('No flat frames found.')
flat_hdr = None
flat_mhdr = None
else:
print('Loading flat frames...')
for j in range(ev.n_img):
tempflat, tempmask = hst.makeflats(ev.flatfile, ev.wavegrid[j], ev.xwindow, ev.ywindow, ev.flatoffset, ev.n_spec, ny, nx, sigma=ev.flatsigma)
for i in range(ev.n_spec):
subflat[j][i] = tempflat[i][ywindow[0]:ywindow[1],xwindow[0]:xwindow[1]]
flatmask[j][i] = tempmask[i][ywindow[0]:ywindow[1],xwindow[0]:xwindow[1]]
# Manually mask regions [specnum, colstart, colend]
if hasattr(ev, 'manmask'):
print("\rMasking manually identified bad pixels.")
for j in range(ev.n_img):
for i in range(len(ev.manmask)):
ind, colstart, colend, rowstart, rowend = ev.manmask[i]
n = ind % ev.n_spec
flatmask[j][n][rowstart:rowend,colstart:colend] = 0 #ev.window[:,ind][0]:ev.window[:,ind][1]
# Calculate reduced image
for m in range(ev.n_files):
#Select appropriate flat, mask, and slitshift
if ev.n_img == (np.max(ev.orbitnum)+1):
j = int(ev.orbitnum[m])
else:
j = 0
for n in range(n_reads):
subdata[m][n] /= subflat[j][0]
"""
# Read in drift2D from previous iteration
# np.save("drift2D.npy",ev.drift2D)
#try:
# drift2D = np.load("drift2D.npy")
#except:
# print("drift2D.npy not found.")
drift2D = np.zeros((ev.n_files,n_reads-1,2))
# Calculate centroids for each grism frame
ev.centroids = np.zeros((ev.n_files,n_reads-1,2))
for m in range(ev.n_files):
for n in range(n_reads-1):
ev.centroids[m,n] = np.array([ev.centroid[0][0]+drift2D[m,n,0],
ev.centroid[0][1]+drift2D[m,n,1]])
#ev.centroids[m,n] = np.array([np.zeros(subny)+ev.centroid[0][0]+drift2D[m,n,0],
# np.zeros(subnx)+ev.centroid[0][1]+drift2D[m,n,1]])
# Calculate trace
print("Calculating 2D trace and wavelength assuming " + ev.grism + " filter/grism...")
ev.xrange = np.arange(ev.xwindow[0][0],ev.xwindow[0][1])
trace2d = np.zeros((ev.n_files,n_reads-1,subny,subnx))
wave2d = np.zeros((ev.n_files,n_reads-1,subny,subnx))
for m in range(ev.n_files):
for n in range(n_reads-1):
trace2d[m,n] = hst.calcTrace(ev.xrange, ev.centroids[m,n], ev.grism)
wave2d[m,n] = hst.calibrateLambda(ev.xrange, ev.centroids[m,n], ev.grism)/1e4 #wavelength in microns
# Assume no slitshift
ev.yfit = range(ev.ywindow[0][1] - ev.ywindow[0][0])
ev.slitshift = np.zeros(ev.ywindow[0][1] - ev.ywindow[0][0])
ev.shift_values = np.zeros(len(ev.yfit))
ev.wave = np.mean(wave2d, axis=2)
print("Wavelength Range: %.3f - %.3f" % (np.min(ev.wave), np.max(ev.wave)))
#iwmax = np.where(ev.wave[0][0]>1.65)[0][0]
#print(ev.wave[0,0])
#print(ev.wave[0,1])
#print(ev.centroids)
# Make list of master flat field frames
subflat = np.ones((ev.n_files,subny,subnx))
flatmask = np.ones((ev.n_files,subny,subnx))
    if ev.flatfile is None:
print('No flat frames found.')
flat_hdr = None
flat_mhdr = None
else:
print('Loading flat frames...')
print(ev.flatfile)
for m in range(ev.n_files):
tempflat, tempmask = hst.makeflats(ev.flatfile, [np.mean(wave2d[m],axis=0)], ev.xwindow, ev.ywindow, ev.flatoffset, ev.n_spec, ny, nx, sigma=ev.flatsigma)
#tempflat = [pf.getdata(ev.flatfile)]
#tempmask = [np.ones(tempflat[0].shape)]
subflat[m] = tempflat[0][ywindow[0]:ywindow[1],xwindow[0]:xwindow[1]]
flatmask[m] = tempmask[0][ywindow[0]:ywindow[1],xwindow[0]:xwindow[1]]
# Manually mask regions [specnum, colstart, colend]
if hasattr(ev, 'manmask'):
print("\rMasking manually identified bad pixels.")
for m in range(ev.n_files):
for i in range(len(ev.manmask)):
ind, colstart, colend, rowstart, rowend = ev.manmask[i]
flatmask[m][rowstart:rowend,colstart:colend] = 0
#FINDME: Change flat field
#subflat[:,:,28] /= 1.015
#subflat[:,:,50] /= 1.015
#subflat[:,:,70] *= 1.01
"""
plt.figure(2)
plt.clf()
plt.imshow(np.copy(subdata[10,-1]),origin='lower',aspect='auto',
vmin=0,vmax=25000,cmap=plt.cm.RdYlBu_r)
plt.ylim(65,95)
plt.show()
"""
# Calculate reduced image
subdata /= subflat[:,np.newaxis]
#subdata /= np.mean(subflat,axis=0)[np.newaxis,np.newaxis]
"""
# FINDME
# Perform self flat field calibration
# drift2D_int = np.round(edrift2D,0)
# Identify frames outside SAA
iNoSAA = np.where(np.round(drift2D[:,0,0],0)==0)[0]
# Select subregion with lots of photons
normdata = np.copy(subdata[iNoSAA,-1,69:91,15:147])
normmask = flatmask[iNoSAA,69:91,15:147]
normdata[np.where(normmask==0)] = 0
# Normalize flux in each row to remove ramp/transit/variable scan rate
normdata /= np.sum(normdata,axis=2)[:,:,np.newaxis]
# Divide by mean spectrum to remove wavelength dependence
normdata /= np.mean(normdata,axis=(0,1))[np.newaxis,np.newaxis,:]
# Average frames to get flat-field correction
flat_norm = np.mean(normdata,axis=0)
flat_norm[np.where(np.mean(normmask,axis=0)<1)] = 1
'''
normdata /= np.mean(normdata,axis=(1,2))[:,np.newaxis,np.newaxis]
flat_window = np.median(normdata,axis=0)
medflat = np.median(flat_window, axis=0)
flat_window /= medflat
flat_window /= np.median(flat_window,axis=1)[:,np.newaxis]
flat_norm = flat_window/np.mean(flat_window)
'''
plt.figure(3)
plt.clf()
plt.imshow(np.copy(subdata[10,-1]),origin='lower',aspect='auto',
vmin=0,vmax=25000,cmap=plt.cm.RdYlBu_r)
plt.ylim(65,95)
ff = np.load('ff.npy')
subff = ff[ywindow[0]:ywindow[1],xwindow[0]:xwindow[1]]
#subdata[:,:,69:91,15:147] /= flat_norm
subdata /= subff
plt.figure(4)
plt.clf()
plt.imshow(subdata[10,-1],origin='lower',aspect='auto',vmin=0,vmax=25000,cmap=plt.cm.RdYlBu_r)
plt.ylim(65,95)
plt.figure(1)
plt.clf()
plt.imshow(flat_norm,origin='lower',aspect='auto')
plt.colorbar()
plt.tight_layout()
plt.pause(0.1)
ev.flat_norm = flat_norm
return ev
"""
"""
if isplots:
# Plot normalized flat fields
plt.figure(1000, figsize=(12,8))
plt.clf()
plt.suptitle('Master Flat Frames')
for i in range(ev.n_spec):
for j in range(ev.n_img):
#plt.subplot(ev.n_spec,ev.n_img,i*ev.n_img+j+1)
plt.subplot(2,np.ceil(ev.n_img/2.),i*ev.n_img+j+1)
plt.title(str(j) +','+ str(i))
plt.imshow(subflat[j][i], origin='lower')
plt.tight_layout()
plt.savefig(ev.eventdir + '/figs/fig1000-Flats.png')
# Plot masks
plt.figure(1001, figsize=(12,8))
plt.clf()
plt.suptitle('Mask Frames')
for i in range(ev.n_spec):
for j in range(ev.n_img):
#plt.subplot(ev.n_spec,ev.n_img,i*ev.n_img+j+1)
plt.subplot(2,np.ceil(ev.n_img/2.),i*ev.n_img+j+1)
plt.title(str(j) +','+ str(i))
plt.imshow(flatmask[j][i], origin='lower')
plt.tight_layout()
plt.savefig(ev.eventdir + '/figs/fig1001-Masks.png')
if ev.detector == 'IR':
# Plot Slit shift
plt.figure(1004, figsize=(12,8))
plt.clf()
plt.suptitle('Model Slit Tilts/Shifts')
plt.plot(ev.shift_values, ev.yfit, '.')
plt.plot(ev.slitshift, range(ev.ywindow[0][0],ev.ywindow[0][1]), 'r-', lw=2)
plt.xlim(-1,1)
plt.savefig(ev.eventdir + '/figs/fig1004-SlitTilt.png')
plt.pause(0.1)
"""
ev.ra = data_mhdr[0]['RA_TARG']*np.pi/180
ev.dec = data_mhdr[0]['DEC_TARG']*np.pi/180
    if ev.horizonsfile is not None:
# Apply light-time correction, convert to BJD_TDB
# Horizons file created for HST around time of observations
print("Converting times to BJD_TDB...")
ev.bjd_corr = suntimecorr.suntimecorr(ev.ra, ev.dec, ev.jd, ev.horizonsfile)
bjdutc = ev.jd + ev.bjd_corr/86400.
ev.bjdtdb = utc_tt.utc_tt(bjdutc,ev.leapdir)
print('BJD_corr range: ' + str(ev.bjd_corr[0]) + ', ' + str(ev.bjd_corr[-1]))
else:
print("No Horizons file found.")
ev.bjdtdb = ev.jd
if n_reads > 1:
ev.n_reads = n_reads
# Subtract pairs of subframes
diffdata = np.zeros((ev.n_files,ev.n_reads-1,subny,subnx))
differr = np.zeros((ev.n_files,ev.n_reads-1,subny,subnx))
for m in range(ev.n_files):
for n in range(n_reads-1):
#diffmask[m,n] = np.copy(flatmask[j][0])
#diffmask[m,n][np.where(suberr[m,n ] > diffthresh*np.std(suberr[m,n ]))] = 0
#diffmask[m,n][np.where(suberr[m,n+1] > diffthresh*np.std(suberr[m,n+1]))] = 0
diffdata[m,n] = subdata[m,n+1]-subdata[m,n]
differr [m,n] = np.sqrt(suberr[m,n+1]**2+suberr[m,n]**2)
else:
# FLT data has already been differenced
# FLT files subtract first from last, 2 reads
ev.n_reads = 2
diffdata = subdata
differr = suberr
diffmask = np.zeros((ev.n_files,ev.n_reads-1,subny,subnx))
guess = np.zeros((ev.n_files,ev.n_reads-1),dtype=int)
for m in range(ev.n_files):
#Select appropriate mask
#if ev.n_img == (np.max(ev.orbitnum)+1):
# j = int(ev.orbitnum[m])
#else:
# j = 0
for n in range(n_reads-1):
diffmask[m,n] = np.copy(flatmask[m][0])
try:
diffmask[m,n][ np.where(differr[m,n] > ev.diffthresh*
np.median(differr[m,n],axis=1)[:,np.newaxis])] = 0
#diffdata[m,n] *= diffmask[m,n]
except:
# May fail for FLT files
print("Diffthresh failed.")
foo = diffdata[m,n]*diffmask[m,n]
guess[m,n] = np.median(np.where(foo > np.mean(foo))[0]).astype(int)
# Guess may be skewed if first file is zeros
if guess[m,0] < 0 or guess[m,0] > subny:
guess[m,0] = guess[m,1]
# Compute full scan length
ev.scanHeight = np.zeros(ev.n_files)
for m in range(ev.n_files):
scannedData = np.sum(subdata[m,-1], axis=1)
xmin = np.min(guess[m])
xmax = np.max(guess[m])
scannedData/= np.median(scannedData[xmin:xmax+1])
scannedData-= 0.5
        #leftEdge = np.where(scannedData > 0)[0][0]
        #rightEdge = np.where(scannedData > 0)[0][-1]
#yrng = range(leftEdge-5, leftEdge+5, 1)
yrng = range(subny)
spline = spi.UnivariateSpline(yrng, scannedData[yrng], k=3, s=0)
roots = spline.roots()
try:
ev.scanHeight[m] = roots[1]-roots[0]
except:
pass
#Outlier rejection of sky background along time axis
print("Performing background outlier rejection...")
    from ..lib import sigrej, optspex  # assumed relative import, consistent with the header
for p in range(2):
iscan = np.where(ev.scandir == p)[0]
if len(iscan) > 0:
for n in range(ev.n_reads-1):
# Set limits on the sky background
x1 = (guess[iscan,n].min()-ev.fitbghw).astype(int)
x2 = (guess[iscan,n].max()+ev.fitbghw).astype(int)
bgdata1 = diffdata[iscan,n,:x1 ]
bgmask1 = diffmask[iscan,n,:x1 ]
bgdata2 = diffdata[iscan,n, x2:]
bgmask2 = diffmask[iscan,n, x2:]
bgerr1 = np.median(suberr[iscan,n,:x1 ])
bgerr2 = np.median(suberr[iscan,n, x2:])
estsig1 = [bgerr1 for j in range(len(ev.sigthresh))]
estsig2 = [bgerr2 for j in range(len(ev.sigthresh))]
diffmask[iscan,n,:x1 ] = sigrej.sigrej(bgdata1, ev.sigthresh, bgmask1, estsig1)
diffmask[iscan,n, x2:] = sigrej.sigrej(bgdata2, ev.sigthresh, bgmask2, estsig2)
# Write background
#global bg, diffmask
def writeBG(arg):
background, mask, m, n = arg
bg[m,n] = background
diffmask[m,n] = mask
return
# STEP 3: Fit sky background with out-of-spectra data
    # FINDME: parallelize bg subtraction
print("Performing background subtraction...")
x1 = np.zeros((ev.n_files,ev.n_reads-1), dtype=int)
x2 = np.zeros((ev.n_files,ev.n_reads-1), dtype=int)
bg = np.zeros((diffdata.shape))
if ev.ncpu == 1:
# Only 1 CPU
for m in range(ev.n_files):
for n in range(ev.n_reads-1):
x1[m,n] = (guess[m,n]-ev.fitbghw).astype(int)
x2[m,n] = (guess[m,n]+ev.fitbghw).astype(int)
writeBG(hst.fitbg(diffdata[m,n], diffmask[m,n], x1[m,n], x2[m,n],
ev.bgdeg, ev.p3thresh, isplots, m, n, ev.n_files))
else:
# Multiple CPUs
pool = mp.Pool(ev.ncpu)
for m in range(ev.n_files):
for n in range(ev.n_reads-1):
x1[m,n] = (guess[m,n]-ev.fitbghw).astype(int)
x2[m,n] = (guess[m,n]+ev.fitbghw).astype(int)
res = pool.apply_async(hst.fitbg, args=(diffdata[m,n], diffmask[m,n], x1[m,n], x2[m,n],
ev.bgdeg, ev.p3thresh, isplots, m, n, ev.n_files), callback=writeBG)
pool.close()
pool.join()
res.wait()
print(" Done.")
# STEP 2: Calculate variance
bgerr = np.std(bg, axis=2)/np.sqrt(np.sum(diffmask, axis=2))
bgerr[np.where(np.isnan(bgerr))] = 0.
ev.v0 += np.mean(bgerr**2)
variance = abs(diffdata) / ev.gain + ev.v0
#variance = abs(subdata*submask) / gain + v0
# Perform background subtraction
diffdata -= bg
#
'''
foo = np.sum(diffdata*diffmask, axis=2)
guess = []
for i in range(nreads-1):
guess.append(np.median(np.where(foo[i] > np.mean(foo[i]))[0]).astype(int))
guess = np.array(guess)
# Guess may be skewed if first file is zeros
if guess[0] < 0 or guess[0] > subnx:
guess[0] = guess[1]
'''
# Write drift2D
def writeDrift2D(arg):
drift2D, m, n = arg
# Assign to array of spectra and uncertainties
ev.drift2D[m,n] = drift2D
return
'''
# Calulate drift2D
def calcDrift2D():#im1, im2, m, n):
print("test")
drift2D = imr.chi2_shift(im1, im2, boundary='constant', nthreads=4,
zeromean=False, return_error=False)
return (drift2D, m, n)
'''
print("Calculating 2D drift...")
#FINDME: instead of calculating scanHeight, consider fitting stretch factor
ev.drift2D = np.zeros((ev.n_files, ev.n_reads-1, 2))
if ev.ncpu == 1:
# Only 1 CPU
for m in range(ev.n_files):
p = int(ev.scandir[m])
for n in range(ev.n_reads-1):
writeDrift2D(hst.calcDrift2D(diffdata[ev.iref[p],n]*diffmask[ev.iref[p],n],
diffdata[m,n]*diffmask[m,n], m, n, ev.n_files))
else:
# Multiple CPUs
pool = mp.Pool(ev.ncpu)
for m in range(ev.n_files):
p = int(ev.scandir[m])
for n in range(ev.n_reads-1):
#res = pool.apply_async(hst.calcDrift2D)
res = pool.apply_async(hst.calcDrift2D, args=(diffdata[ev.iref[p],n]*diffmask[ev.iref[p],n],
diffdata[m,n]*diffmask[m,n], m, n, ev.n_files), callback=writeDrift2D)
pool.close()
pool.join()
res.wait()
print(" Done.")
#np.save("drift2D.npy",ev.drift2D)
#global shiftdata, shiftmask
print("Performing rough, pixel-scale drift correction...")
import scipy.ndimage.interpolation as spni
ev.drift2D_int = np.round(ev.drift2D,0)
shiftdata = np.zeros(diffdata.shape)
shiftmask = np.zeros(diffmask.shape)
shiftvar = np.zeros(diffdata.shape)
shiftbg = np.zeros(diffdata.shape)
# Correct for drift by integer pixel numbers, no interpolation
for m in range(ev.n_files):
for n in range(ev.n_reads-1):
shiftdata[m,n] = spni.shift(diffdata[m,n], -1*ev.drift2D_int[m,n,::-1], order=0,
mode='constant', cval=0)
shiftmask[m,n] = spni.shift(diffmask[m,n], -1*ev.drift2D_int[m,n,::-1], order=0,
mode='constant', cval=0)
shiftvar [m,n] = spni.shift(variance[m,n], -1*ev.drift2D_int[m,n,::-1], order=0,
mode='constant', cval=0)
shiftbg [m,n] = spni.shift(bg [m,n], -1*ev.drift2D_int[m,n,::-1], order=0,
mode='constant', cval=0)
"""
# spni.shift does not handle constant boundaries correctly
if ev.drift2D_int[m,n,0] > 0:
shiftdata[m,n,:,-1*ev.drift2D_int[m,n,0]:] = 0
shiftmask[m,n,:,-1*ev.drift2D_int[m,n,0]:] = 0
shiftvar [m,n,:,-1*ev.drift2D_int[m,n,0]:] = 0
shiftbg [m,n,:,-1*ev.drift2D_int[m,n,0]:] = 0
elif ev.drift2D_int[m,n,0] < 0:
#print(m,n,-1*ev.drift2D_int[m,n,0])
shiftdata[m,n,:,:-1*ev.drift2D_int[m,n,0]] = 0
shiftmask[m,n,:,:-1*ev.drift2D_int[m,n,0]] = 0
shiftvar [m,n,:,:-1*ev.drift2D_int[m,n,0]] = 0
shiftbg [m,n,:,:-1*ev.drift2D_int[m,n,0]] = 0
"""
# Outlier rejection of full frame along time axis
print("Performing full-frame outlier rejection...")
for p in range(2):
iscan = np.where(ev.scandir == p)[0]
if len(iscan) > 0:
for n in range(ev.n_reads-1):
#y1 = guess[ev.iref,n] - ev.spec_width
#y2 = guess[ev.iref,n] + ev.spec_width
#estsig = [differr[ev.iref,n,y1:y2] for j in range(len(ev.sigthresh))]
shiftmask[iscan,n] = sigrej.sigrej(shiftdata[iscan,n], ev.sigthresh, shiftmask[iscan,n])#, estsig)
"""
# Replace bad pixels using 2D Gaussian kernal along x and time axes
def writeReplacePixels(arg):
shift, m, n, i, j = arg
shiftdata[m,n,i,j] = shift
return
#import smoothing
#reload(smoothing)
ny, nx, sy, sx = (2,2,1,1)
wherebad = np.array(np.where(shiftmask==0)).T
#smdata = np.copy(shiftdata)
print("Replacing " + str(len(wherebad)) + " bad pixels...")
k = 0
ktot = len(wherebad)
#FINDME: multiple CPUs is inefficient
if ev.ncpu >= 1:
# Only 1 CPU
for m,n,i,j in wherebad:
#sys.stdout.write('\r'+str(k+1)+'/'+str(len(wherebad)))
#sys.stdout.flush()
writeReplacePixels(hst.replacePixels(shiftdata[:,n,:,j], shiftmask[:,n,:,j], m, n, i, j, k, ktot, ny, nx, sy, sx))
#Pad image initially with zeros
#newim = np.zeros(np.array(shiftdata[:,n,:,j].shape) + 2*np.array((ny, nx)))
#newim[ny:-ny, nx:-nx] = shiftdata[:,n,:,j]
#Calculate kernel
#gk = smoothing.gauss_kernel_mask2((ny,nx), (sy,sx), (m,i), shiftmask[:,n,:,j])
#shiftdata[m,n,i,j] = np.sum(gk * newim[m:m+2*ny+1, i:i+2*nx+1])
k += 1
else:
# Multiple CPUs
pool = mp.Pool(ev.ncpu)
for m,n,i,j in wherebad:
res = pool.apply_async(hst.replacePixels, args=(shiftdata[:,n,:,j], shiftmask[:,n,:,j], m, n, i, j, k, ktot, ny, nx, sy, sx), callback=writeReplacePixels)
k += 1
pool.close()
pool.join()
res.wait()
print(" Done.")
"""
if isplots >= 3:
for m in range(ev.n_files):
for n in range(ev.n_reads-1):
plt.figure(1010)
plt.clf()
plt.suptitle(str(m) + "," + str(n))
plt.subplot(211)
plt.imshow(shiftdata[m,n]*shiftmask[m,n], origin='lower', aspect='auto', vmin=0, vmax=500)
plt.subplot(212)
#plt.imshow(submask[i], origin='lower', aspect='auto', vmax=1)
mean = np.median(shiftbg[m,n])
std = np.std(shiftbg[m,n])
plt.imshow(shiftbg[m,n], origin='lower', aspect='auto',vmin=mean-3*std,vmax=mean+3*std)
plt.savefig(ev.eventdir+'/figs/fig1010-'+str(m)+'-'+str(n)+'-Image+Background.png')
#plt.pause(0.1)
"""
apdata = np.zeros((ev.n_files,ev.n_reads-1,ev.spec_width*2,subnx))
apmask = np.zeros((ev.n_files,ev.n_reads-1,ev.spec_width*2,subnx))
apvar = np.zeros((ev.n_files,ev.n_reads-1,ev.spec_width*2,subnx))
apbg = np.zeros((ev.n_files,ev.n_reads-1,ev.spec_width*2,subnx))
for n in range(ev.n_reads-1):
y1 = guess[ev.iref,n] - ev.spec_width
y2 = guess[ev.iref,n] + ev.spec_width
apdata[:,n] = shiftdata[:,n,y1:y2]
apmask[:,n] = shiftmask[:,n,y1:y2]
apvar [:,n] = shiftvar [:,n,y1:y2]
apbg [:,n] = shiftbg [:,n,y1:y2]
"""
print("Performing sub-pixel drift correction...")
istart = 0
#corrdata = np.zeros(diffdata.shape)
#corrmask = np.zeros(diffdata.shape)
# Select aperture data
    apdata = np.zeros((ev.n_files,ev.n_reads-1,ev.spec_width*2,subnx))
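    # NOTE: assumed completion -- the source truncates here. The commented-out
    # block above shows the intended aperture extraction; it is reproduced here
    # (ev.iref indexing kept as in the source) so the ap* arrays are filled.
    apmask = np.zeros((ev.n_files,ev.n_reads-1,ev.spec_width*2,subnx))
    apvar  = np.zeros((ev.n_files,ev.n_reads-1,ev.spec_width*2,subnx))
    apbg   = np.zeros((ev.n_files,ev.n_reads-1,ev.spec_width*2,subnx))
    for n in range(ev.n_reads-1):
        y1 = guess[ev.iref,n] - ev.spec_width
        y2 = guess[ev.iref,n] + ev.spec_width
        apdata[:,n] = shiftdata[:,n,y1:y2]
        apmask[:,n] = shiftmask[:,n,y1:y2]
        apvar [:,n] = shiftvar [:,n,y1:y2]
        apbg  [:,n] = shiftbg  [:,n,y1:y2]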
from math import sqrt
from numba import njit
import numpy as np
import flare.kernels.cutoffs as cf
from flare.kernels.kernels import coordination_number, q_value_mc
@njit
def get_2_body_arrays(
positions,
atom: int,
cell,
r_cut,
cutoff_2,
species,
sweep,
nspecie,
species_mask,
twobody_mask,
):
"""Returns distances, coordinates, species of atoms, and indices of neighbors
in the 2-body local environment. This method is implemented outside
the AtomicEnvironment class to allow for njit acceleration with Numba.
:param positions: Positions of atoms in the structure.
:type positions: np.ndarray
:param atom: Index of the central atom of the local environment.
:type atom: int
:param cell: 3x3 array whose rows are the Bravais lattice vectors of the
cell.
:type cell: np.ndarray
:param cutoff_2: 2-body cutoff radius.
:type cutoff_2: np.ndarray
:param species: Numpy array of species represented by their atomic numbers.
:type species: np.ndarray
:param nspecie: number of atom types to define bonds
:type: int
:param species_mask: mapping from atomic number to atom types
:type: np.ndarray
:param twobody_mask: mapping from the types of end atoms to bond types
:type: np.ndarray
:return: Tuple of arrays describing pairs of atoms in the 2-body local
environment.
bond_array_2: Array containing the distances and relative
coordinates of atoms in the 2-body local environment. First column
contains distances, remaining columns contain Cartesian coordinates
divided by the distance (with the origin defined as the position of the
central atom). The rows are sorted by distance from the central atom.
bond_positions_2: Coordinates of atoms in the 2-body local environment.
etypes: Species of atoms in the 2-body local environment represented by
their atomic number.
bond_indices: Structure indices of atoms in the local environment.
:rtype: np.ndarray, np.ndarray, np.ndarray, np.ndarray
"""
noa = len(positions)
pos_atom = positions[atom]
super_count = sweep.shape[0] ** 3
coords = np.zeros((noa, 3, super_count), dtype=np.float64)
dists = np.zeros((noa, super_count), dtype=np.float64)
cutoff_count = 0
vec1 = cell[0]
vec2 = cell[1]
vec3 = cell[2]
sepcut = False
bcn = 0
if nspecie > 1 and cutoff_2 is not None:
sepcut = True
bc = species_mask[species[atom]]
bcn = nspecie * bc
# record distances and positions of images
for n in range(noa):
diff_curr = positions[n] - pos_atom
im_count = 0
if sepcut and (species_mask is not None) and (cutoff_2 is not None):
bn = species_mask[species[n]]
r_cut = cutoff_2[twobody_mask[bn + bcn]]
for s1 in sweep:
for s2 in sweep:
for s3 in sweep:
im = diff_curr + s1 * vec1 + s2 * vec2 + s3 * vec3
dist = sqrt(im[0] * im[0] + im[1] * im[1] + im[2] * im[2])
if (dist < r_cut) and (dist != 0):
dists[n, im_count] = dist
coords[n, :, im_count] = im
cutoff_count += 1
im_count += 1
# create 2-body bond array
bond_indices = np.zeros(cutoff_count, dtype=np.int8)
bond_array_2 = np.zeros((cutoff_count, 4), dtype=np.float64)
bond_positions_2 = np.zeros((cutoff_count, 3), dtype=np.float64)
etypes = np.zeros(cutoff_count, dtype=np.int8)
bond_count = 0
for m in range(noa):
spec_curr = species[m]
if sepcut and (species_mask is not None) and (cutoff_2 is not None):
bm = species_mask[species[m]]
r_cut = cutoff_2[twobody_mask[bm + bcn]]
for im_count in range(super_count):
dist_curr = dists[m, im_count]
if (dist_curr < r_cut) and (dist_curr != 0):
coord = coords[m, :, im_count]
bond_array_2[bond_count, 0] = dist_curr
bond_array_2[bond_count, 1:4] = coord / dist_curr
bond_positions_2[bond_count, :] = coord
etypes[bond_count] = spec_curr
bond_indices[bond_count] = m
bond_count += 1
# sort by distance
sort_inds = bond_array_2[:, 0].argsort()
bond_array_2 = bond_array_2[sort_inds]
bond_positions_2 = bond_positions_2[sort_inds]
bond_indices = bond_indices[sort_inds]
etypes = etypes[sort_inds]
return bond_array_2, bond_positions_2, etypes, bond_indices
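# Example (illustrative, hypothetical values): a 2-atom cubic cell, a single
# species, and no per-species cutoff masks (nspecie=1 keeps sepcut False):
# >>> cell = np.eye(3) * 4.0
# >>> positions = np.array([[0.0, 0.0, 0.0], [1.0, 1.0, 1.0]])
# >>> species = np.array([1, 1])
# >>> sweep = np.arange(-1, 2)
# >>> out = get_2_body_arrays(positions, 0, cell, 3.0, None, species, sweep,
# ...                         1, np.zeros(118, dtype=np.int8), None)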
@njit
def get_3_body_arrays(
bond_array_2,
bond_positions_2,
ctype,
etypes,
r_cut,
cutoff_3,
nspecie,
species_mask,
cut3b_mask,
):
"""Returns distances and coordinates of triplets of atoms in the
3-body local environment.
:param bond_array_2: 2-body bond array.
:type bond_array_2: np.ndarray
:param bond_positions_2: Coordinates of atoms in the 2-body local
environment.
:type bond_positions_2: np.ndarray
:param ctype: atomic number of the center atom
:type: int
:param cutoff_3: 3-body cutoff radius.
:type cutoff_3: np.ndarray
:param nspecie: number of atom types to define bonds
:type: int
:param species_mask: mapping from atomic number to atom types
:type: np.ndarray
:param cut3b_mask: mapping from the types of end atoms to bond types
:type: np.ndarray
:return: Tuple of 4 arrays describing triplets of atoms in the 3-body local
environment.
bond_array_3: Array containing the distances and relative
coordinates of atoms in the 3-body local environment. First column
contains distances, remaining columns contain Cartesian coordinates
divided by the distance (with the origin defined as the position of the
central atom). The rows are sorted by distance from the central atom.
cross_bond_inds: Two dimensional array whose row m contains the indices
of atoms n > m that are within a distance cutoff_3 of both atom n and the
central atom.
cross_bond_dists: Two dimensional array whose row m contains the
distances from atom m of atoms n > m that are within a distance cutoff_3
of both atom n and the central atom.
triplet_counts: One dimensional array of integers whose entry m is the
number of atoms that are within a distance cutoff_3 of atom m.
:rtype: (np.ndarray, np.ndarray, np.ndarray, np.ndarray)
"""
sepcut = False
if nspecie > 1 and cutoff_3 is not None:
bc = species_mask[ctype]
bcn = nspecie * bc
r_cut = np.max(cutoff_3)
sepcut = True
# get 3-body bond array
ind_3_l = np.where(bond_array_2[:, 0] > r_cut)[0]
if ind_3_l.shape[0] > 0:
ind_3 = ind_3_l[0]
else:
ind_3 = bond_array_2.shape[0]
bond_array_3 = bond_array_2[0:ind_3, :]
bond_positions_3 = bond_positions_2[0:ind_3, :]
cut_m = r_cut
cut_n = r_cut
cut_mn = r_cut
# get cross bond array
cross_bond_inds = np.zeros((ind_3, ind_3), dtype=np.int8) - 1
cross_bond_dists = np.zeros((ind_3, ind_3), dtype=np.float64)
    triplet_counts = np.zeros(ind_3, dtype=np.int8)
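    # NOTE: assumed completion -- the source truncates here. The canonical
    # FLARE implementation fills the cross-bond arrays by testing every pair
    # (m, n) of 3-body neighbors against the triplet cutoffs; the mask
    # indexing below is an assumption kept consistent with the 2-body code.
    for m in range(ind_3):
        pos1 = bond_positions_3[m]
        count = m + 1
        trips = 0
        if sepcut and (species_mask is not None) and (cut3b_mask is not None):
            bm = species_mask[etypes[m]]
            cut_m = cutoff_3[cut3b_mask[bm + bcn]]
        for n in range(m + 1, ind_3):
            if sepcut and (species_mask is not None) and (cut3b_mask is not None):
                bn = species_mask[etypes[n]]
                cut_n = cutoff_3[cut3b_mask[bn + bcn]]
                cut_mn = cutoff_3[cut3b_mask[bm * nspecie + bn]]
            pos2 = bond_positions_3[n]
            diff = pos2 - pos1
            dist_curr = sqrt(diff[0] ** 2 + diff[1] ** 2 + diff[2] ** 2)
            # a triplet counts only if all three legs are inside their cutoffs
            if (dist_curr < cut_mn
                    and bond_array_2[m, 0] < cut_m
                    and bond_array_2[n, 0] < cut_n):
                cross_bond_inds[m, count] = n
                cross_bond_dists[m, count] = dist_curr
                count += 1
                trips += 1
        triplet_counts[m] = trips
    return bond_array_3, cross_bond_inds, cross_bond_dists, triplet_counts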
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Dec 18 11:59:13 2019
@author: matthew
"""
#%%
def fastica_MEG(X, n_comp=None,
algorithm="parallel", whiten=True, fun="logcosh", fun_prime='',
fun_args={}, maxit=200, tol=1e-04, w_init=None, verbose = True):
"""Perform Fast Independent Component Analysis.
Parameters
----------
X : (p, n) array
Array with n observations (statistical units) measured on p variables.
n_comp : int, optional
Number of components to extract. If None no dimension reduction
is performed.
algorithm : {'parallel','deflation'}
Apply an parallel or deflational FASTICA algorithm.
whiten: boolean, optional
If true perform an initial whitening of the data. Do not set to
false unless the data is already white, as you will get incorrect
results.
If whiten is true, the data is assumed to have already been
preprocessed: it should be centered, normed and white.
fun : String or Function
The functional form of the G function used in the
approximation to neg-entropy. Could be either 'logcosh', 'exp',
or 'cube'.
You can also provide your own function but in this case, its
derivative should be provided via argument fun_prime
fun_prime : Empty string ('') or Function
See fun.
    fun_args : Optional dictionary
If empty and if fun='logcosh', fun_args will take value
{'alpha' : 1.0}
maxit : int
Maximum number of iterations to perform
tol : float
A positive scalar giving the tolerance at which the
un-mixing matrix is considered to have converged
w_init : (n_comp,n_comp) array
Initial un-mixing array of dimension (n.comp,n.comp).
If None (default) then an array of normal r.v.'s is used
    Returns
-------
K : (n_comp, p) array
        pre-whitening matrix that projects data onto the first n_comp
principal components. Returned only if whiten is True
W : (n_comp, n_comp) array
estimated un-mixing matrix
The mixing matrix can be obtained by::
w = np.asmatrix(W) * K.T
A = w.T * (w * w.T).I
S : (n_comp, n) array
estimated source matrix
Notes
-----
The data matrix X is considered to be a linear combination of
non-Gaussian (independent) components i.e. X = SA where columns of S
contain the independent components and A is a linear mixing
matrix. In short ICA attempts to `un-mix' the data by estimating an
un-mixing matrix W where S = W K X.
Implemented using FastICA:
<NAME> and <NAME>, Independent Component Analysis:
Algorithms and Applications, Neural Networks, 13(4-5), 2000,
pp. 411-430
2017/07/19 | Merged into one function by MEG and included a PCA function for whitening
2017/07/20 | fixed bug when giving the function whitened data
2018/02/22 | Return a boolean flag describing if algorithm converged or not (only works with symetric estimation)
"""
import numpy as np
from scipy import linalg
def _ica_def(X, tol, g, gprime, fun_args, maxit, w_init):
"""Deflationary FastICA using fun approx to neg-entropy function
Used internally by FastICA.
"""
def _gs_decorrelation(w, W, j):
""" Gram-Schmidt-like decorrelation. """
t = np.zeros_like(w)
for u in range(j):
t = t + np.dot(w, W[u]) * W[u]
w -= t
return w
n_comp = w_init.shape[0]
W = np.zeros((n_comp, n_comp), dtype=float)
# j is the index of the extracted component
for j in range(n_comp):
w = w_init[j, :].copy()
w /= np.sqrt((w**2).sum())
n_iterations = 0
# we set lim to tol+1 to be sure to enter at least once in next while
lim = tol + 1
while ((lim > tol) & (n_iterations < (maxit-1))):
wtx = np.dot(w.T, X)
gwtx = g(wtx, fun_args)
g_wtx = gprime(wtx, fun_args)
w1 = (X * gwtx).mean(axis=1) - g_wtx.mean() * w
_gs_decorrelation(w1, W, j)
w1 /= np.sqrt((w1**2).sum())
lim = np.abs(np.abs((w1 * w).sum()) - 1)
w = w1
n_iterations = n_iterations + 1
W[j, :] = w
return W # XXXX for deflation, a converged term isn't returned
def _ica_par(X, tol, g, gprime, fun_args, maxit, w_init):
"""Parallel FastICA.
Used internally by FastICA.
2017/05/10 | edit to?
"""
def _sym_decorrelation(W):
""" Symmetric decorrelation """
K = W @ W.T
s, u = linalg.eigh(K)
# u (resp. s) contains the eigenvectors (resp. square roots of
# the eigenvalues) of W * W.T
u, W = [np.asmatrix(e) for e in (u, W)]
W = (u * np.diag(1.0/np.sqrt(s)) * u.T) * W # W = (W * W.T) ^{-1/2} * W
return np.asarray(W)
n, p = X.shape
W = _sym_decorrelation(w_init)
# we set lim to tol+1 to be sure to enter at least once in next while
lim = tol + 1
it = 0
hist_lim = np.zeros((1, maxit)) #initiate array for history of change of W
        hist_W = np.zeros((w_init.size, maxit))  # history of unmixing matrices
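        # NOTE: assumed completion -- the source truncates here. This is the
        # standard symmetric (parallel) FastICA fixed-point iteration.
        while (lim > tol) and (it < (maxit - 1)):
            wtx = np.dot(W, X)
            gwtx = g(wtx, fun_args)
            g_wtx = gprime(wtx, fun_args)
            W1 = _sym_decorrelation(np.dot(gwtx, X.T) / p
                                    - np.dot(np.diag(g_wtx.mean(axis=1)), W))
            lim = np.max(np.abs(np.abs(np.diag(np.dot(W1, W.T))) - 1))
            hist_lim[0, it] = lim        # record the convergence history
            hist_W[:, it] = W1.ravel()
            W = W1
            it += 1
        converged = lim < tol            # flag described in the 2018/02/22 note
        return W, converged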
# -*- coding: utf-8 -*-
"""Classes for elastic backscatter calculation"""
from ELDAmwl.bases.factory import BaseOperation
from ELDAmwl.bases.factory import BaseOperationFactory
from ELDAmwl.component.registry import registry
from ELDAmwl.errors.exceptions import NoValidDataPointsForCalibration
from ELDAmwl.rayleigh import RayleighLidarRatio
from ELDAmwl.utils.constants import NC_FILL_INT
from ELDAmwl.utils.numerical import closest_bin
from ELDAmwl.utils.numerical import integral_profile
import numpy as np
import xarray as xr
class CalcBscProfileKF(BaseOperation):
"""
calculates elast backscatter profile with Klett-Fernald method
uses equations and symbols from Althausen et al. JOTECH 2000
(https://journals.ametsoc.org/view/journals/atot/17/11/1520-0426_2000_017_1469_swcal_2_0_co_2.xml)
"""
name = 'CalcBscProfileKF'
elast_sig = None
rayl_scat = None
range_axis = None
error_params = None
calibration = None
def run(self, **kwargs):
"""calculates elast backscatter profile with Klett-Fernald method
Keyword Args:
elast_sig (xarray.DataSet):
already smoothed elastic signal with \
variables 'data', 'error', 'qf', 'altitude',
'binres', 'mol_extinction', 'mol_bckscatter', 'assumed_particle_lidar_ratio'
range_axis (xarray.DataArray): range axis of the elast_signal with variable 'data'
error_params (addict.Dict):
with keys 'lowrange' and 'highrange' =
maximum allowable relative statistical error
calibration (addict.Dict):
with keys 'cal_first_lev',
'cal_last_lev', and 'calibr_value'.
calibr_value is the assumed backscatter ratio at calibration level
Returns:
bsc (xarray.DataSet) with variables
'data' (particle backscatter coefficient),
'error' (the uncertainty contains only the uncertainty of the calibration,
not the uncertainty of signal noise nor uncertainty of lidar ratio estimation),
'qf' (same as in elast_sig),
'binres' (same as in elast_sig),
'calibration_bin' (altitude bin where the KF integration starts.
backward integration below, forward integration above)
"""
assert 'elast_sig' in kwargs
assert 'error_params' in kwargs
assert 'calibration' in kwargs
# prepare
elast_sig = kwargs['elast_sig']
calibration = kwargs['calibration']
error_params = kwargs['error_params']
rayl_lr = RayleighLidarRatio()(wavelength=elast_sig.emission_wavelength).run()
rayl_bsc = elast_sig.mol_backscatter
if 'range_axis' in kwargs:
range_axis = kwargs['range_axis']
else:
range_axis = elast_sig.altitude
num_times = elast_sig.dims['time']
# calculate difference profile between particle and Rayleigh lidar ratio
lidar_ratio = elast_sig.assumed_particle_lidar_ratio
lr_diff = lidar_ratio - rayl_lr
# prepare empty arrays
calibr_factor = np.ones(num_times) * np.nan
calibr_bin = np.ones(num_times, dtype=int) * NC_FILL_INT
calibr_factor_err = np.ones(num_times) * np.nan
# sqr_rel_calibr_err = np.ones(times) * np.nan
M = np.full(rayl_bsc.shape, np.nan)
A = np.full(rayl_bsc.shape, np.nan)
A_int = np.full(rayl_bsc.shape, np.nan)
B = np.full(rayl_bsc.shape, np.nan)
B_err = np.full(rayl_bsc.shape, np.nan)
# 1) calculate calibration factor
for t in range(num_times):
# convert elast_sig.ds (xr.Dataset) into pd.Dataframe for easier selection of calibration window
df_sig = elast_sig.data.isel(
{'level': range(calibration['cal_first_lev'][t],
calibration['cal_last_lev'][t] + 1),
'time': t})\
.to_dataframe()
mean_sig = df_sig.data.mean()
sem_sig = df_sig.data.sem()
rel_sem_sig = sem_sig / mean_sig
df_rayl = rayl_bsc.isel({'level':
range(calibration['cal_first_lev'][t],
calibration['cal_last_lev'][t] + 1),
'time': t})\
.to_dataframe()
mean_rayl_bsc = df_rayl.mol_backscatter.mean()
# assume that rayleigh backscatter has no uncertainty
if rel_sem_sig > error_params.err_threshold.highrange:
self.logger.error('relative error of signal in calibration window is larger than error threshold')
raise NoValidDataPointsForCalibration
else:
calibr_factor[t] = mean_sig / mean_rayl_bsc / calibration.calibr_value.value
calibr_factor_err[t] = calibr_factor[t] * \
np.sqrt(np.square(rel_sem_sig) + np.square(calibration.calibr_value.rel_error))
# 2) find signal bin which has the value closest to the mean of the calibration window
calibr_bin[t] = closest_bin(
elast_sig.data[t].values,
elast_sig.err[t].values,
first_bin=calibration['cal_first_lev'][t],
last_bin=calibration['cal_last_lev'][t],
search_value=mean_sig)
if calibr_bin[t] is None:
self.logger.error('cannot find altitude bin close enough to mean signal within calibration window')
raise NoValidDataPointsForCalibration
# 3) calculate M, A, A_int, B, and B_err
M[t, calibr_bin[t]:] = integral_profile(rayl_bsc[t].values,
range_axis=range_axis[t].values,
first_bin=calibr_bin[t])
M[t, :calibr_bin[t] + 1] = integral_profile(rayl_bsc[t].values,
range_axis=range_axis[t].values,
first_bin=calibr_bin[t],
last_bin=0)
M[t, calibr_bin[t]] = 0
            A[t] = elast_sig.data[t] * np.exp(-2 * lr_diff[t] * M[t])
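            # NOTE: assumed continuation -- the source truncates here. With
            # M = 0 at the calibration bin, A(z_c) equals the signal there and
            # the Klett-Fernald denominator reduces to
            #   B = calibr_factor - 2 * LR_p * int_{z_c}^{z} A dz'
            # (backward integration below the calibration bin, forward above).
            A_int[t, calibr_bin[t]:] = integral_profile(
                A[t], range_axis=range_axis[t].values, first_bin=calibr_bin[t])
            A_int[t, :calibr_bin[t] + 1] = integral_profile(
                A[t], range_axis=range_axis[t].values,
                first_bin=calibr_bin[t], last_bin=0)
            A_int[t, calibr_bin[t]] = 0
            B[t] = calibr_factor[t] - 2 * np.asarray(lidar_ratio[t]) * A_int[t]
            B_err[t] = calibr_factor_err[t]
        # Total backscatter A/B minus the Rayleigh profile gives the particle
        # part; the error keeps only the calibration contribution (docstring).
        bsc = xr.Dataset({
            'data': (('time', 'level'), A / B - rayl_bsc.values),
            'error': (('time', 'level'), np.abs(A / np.square(B)) * B_err),
            'qf': elast_sig.qf,
            'binres': elast_sig.binres,
            'calibration_bin': (('time',), calibr_bin)})
        return bsc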
# -*- coding: utf-8 -*-
import copy
import os
import numpy as np
from scipy import misc
from skimage.transform import resize
from scipy import ndimage
################################################################################
#
# Utility functions for performing object localization.
#
################################################################################
def prepareCAM(snet):
''' Prepares the network for generating Class Activation Mappings '''
# Adds the output for heatmap generation
snet.getStage(1).model.add_output(name='GAP/conv', input='CAM_conv/relu')
snet.getStage(1).setOptimizer()
# Get weights (position 0 -> no food, positions 1 -> food)
W = snet.getStage(1).model.get_weights()[-2]
b = snet.getStage(1).model.get_weights()[-1] # recover bias although it will not be used
return W
def loadImagesDataset(ds, init, final, load_original=True):
'''
Loads a list of images and their pre-processed representations "X" ready for applying a forward pass.
The images loaded are stored in the Dataset object "test" division.
'''
X = ds.getX('test', init, final, normalization=False, meanSubstraction=True, dataAugmentation=False)
if (load_original):
images = np.transpose(
ds.getX('test', init, final, normalization=False, meanSubstraction=False, dataAugmentation=False),
(0, 2, 3, 1))
images_ = copy.copy(images)
images[:, :, :, 0] = images_[:, :, :, 2]
images[:, :, :, 2] = images_[:, :, :, 0]
return [images, X]
return X
def loadImagesExternal(ds, list_imgs, load_original=True):
'''
Loads a list of images and their pre-processed representations "X" ready for applying a forward pass.
The images loaded are external to the Dataset object.
'''
X = ds.loadImages(list_imgs, False, True, False, external=True)
if (load_original):
images = np.transpose(ds.loadImages(list_imgs, False, False, False, external=True), (0, 2, 3, 1))
images_ = copy.copy(images)
images[:, :, :, 0] = images_[:, :, :, 2]
images[:, :, :, 2] = images_[:, :, :, 0]
return [images, X]
return X
def applyForwardPass(snet, X):
'''
Applies a forward pass through the GAP network on the pre-processed "X" images.
'''
# Apply forward pass
# X = snet.forwardUntilStage(X,1)['inception_4e']
X = snet.forwardUntilStage(X, 1)[snet._Staged_Network__inNames[1]]
predictions = np.argmax(snet.getStage(1).predictOnBatch(X, out_name='GAP/softmax'), axis=1)
X = snet.getStage(1).predictOnBatch(X, out_name='GAP/conv')
return [X, predictions]
# def computeCAM(snet, X, W, reshape_size=[256, 256]):
# '''
# Applies a forward pass of the pre-processed samples "X" in the GAP net "snet" and generates the resulting
# CAM "maps" using the GAP weights "W" with the defined size "reshape_size".
# '''
#
# # Apply forward pass in GAP model
# [X, predictions] = applyForwardPass(snet, X)
#
# # Compute heatmaps (CAMs) for each class [n_samples, n_classes, height, width]
# maps = np.zeros((X.shape[0], W.shape[1], reshape_size[0], reshape_size[1]))
# for s in range(X.shape[0]):
# weighted_activation = np.dot(np.transpose(W), np.reshape(X[s], (W.shape[0], X.shape[2]*X.shape[3])))
# map = np.reshape(weighted_activation, (W.shape[1], X.shape[2], X.shape[3]))
# maps[s] = resize(map, tuple([W.shape[1]]+reshape_size), order=1, preserve_range=True)
#
# return [maps, predictions]
def computeCAM(snet, X, W, reshape_size=[256, 256], n_top_convs=20):
'''
Applies a forward pass of the pre-processed samples "X" in the GAP net "snet" and generates the resulting
CAM "maps" using the GAP weights "W" with the defined size "reshape_size".
Additionally, it returns the best "n_top_convs" convolutional features for each of the classes. The ranking is
computed considering the weight Wi assigned to the i-th feature map.
'''
# Apply forward pass in GAP model
[X, predictions] = applyForwardPass(snet, X)
# Get indices of best convolutional features for each class
ind_best = np.zeros((W.shape[1], n_top_convs))
for c in range(W.shape[1]):
ind_best[c, :] = np.argsort(W[:, c])[::-1][:n_top_convs]
# Compute heatmaps (CAMs) for each class [n_samples, n_classes, height, width]
maps = np.zeros((X.shape[0], W.shape[1], reshape_size[0], reshape_size[1]))
# Store top convolutional features
convs = np.zeros((X.shape[0], W.shape[1], n_top_convs, reshape_size[0], reshape_size[1]))
for s in range(X.shape[0]):
weighted_activation = np.dot(np.transpose(W), np.reshape(X[s], (W.shape[0], X.shape[2] * X.shape[3])))
map = np.reshape(weighted_activation, (W.shape[1], X.shape[2], X.shape[3]))
maps[s] = resize(map, tuple([W.shape[1]] + reshape_size), order=1, preserve_range=True)
for c in range(W.shape[1]):
for enum_conv, i_conv in enumerate(ind_best[c]):
convs[s, c, enum_conv] = resize(X[s, i_conv], reshape_size, order=1, preserve_range=True)
return [maps, predictions, convs]
# def getBestConvFeatures(snet, X, W, reshape_size=[256, 256], n_top_convs=20):
# '''
# Returns the best "n_top_convs" convolutional features for each of the classes. The ranking is
# computed considering the weight Wi assigned to the i-th feature map.
# '''
# # Apply forward pass in GAP model
# [X, predictions] = applyForwardPass(snet, X)
#
# # Get indices of best convolutional features for each class
# ind_best = np.zeros((W.shape[1], n_top_convs))
# for c in range(W.shape[1]):
# ind_best[c,:] = np.argsort(W[:,c])[::-1][:20]
#
# # Store top convolutional features
# convs = np.zeros((X.shape[0], W.shape[1], n_top_convs, reshape_size[0], reshape_size[1]))
# for s in range(X.shape[0]):
# for c in range(W.shape[1]):
# for enum_conv, i_conv in enumerate(ind_best[c]):
# convs[s,c,enum_conv] = resize(X[s,i_conv], reshape_size, order=1, preserve_range=True)
#
# return convs
def bbox(img, mode='width_height'):
'''
Returns a bounding box covering all the non-zero area in the image.
"mode" : "width_height" returns width in [2] and height in [3], "max" returns xmax in [2] and ymax in [3]
'''
rows = np.any(img, axis=1)
cols = np.any(img, axis=0)
y, ymax = np.where(rows)[0][[0, -1]]
x, xmax = np.where(cols)[0][[0, -1]]
if (mode == 'width_height'):
return x, y, xmax - x, ymax - y
elif (mode == 'max'):
return x, y, xmax, ymax
def computeIoU(GT, pred):
'''
    Calculates the Intersection over Union (IoU) of two bounding boxes.
'''
intersection = max(0, min(GT[2], pred[2]) - max(GT[0], pred[0])) * max(0, min(GT[3], pred[3]) - max(GT[1], pred[1]))
gt_area = (GT[2] - GT[0]) * float((GT[3] - GT[1]))
pred_area = (pred[2] - pred[0]) * float((pred[3] - pred[1]))
union = gt_area + pred_area - intersection
return intersection / union
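# Example: identical boxes give IoU = 1; partial overlap scales accordingly.
# >>> computeIoU([0, 0, 10, 10], [0, 0, 10, 10])
# 1.0
# >>> computeIoU([0, 0, 10, 10], [5, 5, 15, 15])   # 25 / 175
# 0.14285714285714285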
def getBBoxesFromCAMs(CAMs, reshape_size=[256, 256], percentage_heat=0.4, size_restriction=0.1, box_expansion=0.2,
use_gpu=True):
'''
Reference:
Bolaños, Marc, and <NAME>. "Simultaneous Food Localization and Recognition." arXiv preprint arXiv:1604.07953 (2016).
Description:
Extracts a set of bounding boxes from the generated CAMs which contain food instances.
This functions should only be called if the current image has been predicted as Food by the GAP FoodvsNon-food detector!
Arguments:
:param CAMs: list of class activation maps generated by the CAM network
:param reshape_size: reshape proportions used for transorming the CAM for extracting bounding boxes
:param percentage_heat: minimum percentage allowed for considering a detection (aka 't' in reference paper)
:param size_restriction: remove all regions covering less than a certain percentage size of the original image (aka 's' in reference paper)
:param box_expansion: expand the bounding boxes by a certain percentage (aka 'e' in reference paper)
:param use_gpu: boolean indicating if we want to use the GPU for applying NMS
:return: [predicted_bboxes, predicted_scores], containing a list of bboxes coordinates on the first position
and a list of their corresponding scores on the second position
'''
try:
from nms.gpu_nms import gpu_nms
from nms.cpu_nms import cpu_nms
except:
raise Exception(
"Cython is required for running this function:\npip install cython\nRun the following command inside "
"kernel_wrapper/extra/nms after its installation:\npython setup.py build_ext --inplace")
predicted_bboxes = []
predicted_scores = []
# Get all computed maps (if we are also using convolutional features)
all_maps = CAMs
for map in all_maps:
# map = misc.imread(maps_dir[dataset]+'/'+samples_detection[dataset]['all_ids'][s]+'_CAM.jpg') # CAM only
# map = misc.imread(map_path) # CAM and convolutional features
new_reshape_size = reshape_size
# Resize map to original size
map = resize(map, tuple(new_reshape_size), order=1, preserve_range=True)
# Detect regions above a certain percentage of the max heat
bb_thres = np.max(map) * percentage_heat
# Compute binary selected region
binary_heat = map
binary_heat = np.where(binary_heat > bb_thres, 255, 0)
# Get biggest connected component
min_size = new_reshape_size[0] * new_reshape_size[1] * size_restriction
labeled, nr_objects = ndimage.label(binary_heat) # get connected components
[objects, counts] = np.unique(labeled, return_counts=True) # count occurrences
biggest_components = np.argsort(counts[1:])[::-1]
selected_components = [1 if counts[i + 1] >= min_size else 0 for i in
biggest_components] # check minimum size restriction
biggest_components = biggest_components[:min([np.sum(selected_components), 9999])] # get all bboxes
# Extract each component (which will become a bbox prediction)
map = map / 255.0 # normalize map
# Get bboxes
for selected, comp in zip(selected_components, biggest_components):
if (selected):
max_heat = np.where(labeled == comp + 1, 255, 0) # get the biggest
# Draw bounding box on original image
box = list(bbox(max_heat))
# expand box before final detection
x_exp = box[2] * box_expansion
y_exp = box[3] * box_expansion
box[0] = max([0, box[0] - x_exp / 2])
box[1] = max([0, box[1] - y_exp / 2])
# change width and height by xmax and ymax
box[2] += box[0]
box[3] += box[1]
box[2] = min([new_reshape_size[1] - 1, box[2] + x_exp])
box[3] = min([new_reshape_size[0] - 1, box[3] + y_exp])
predicted_bboxes.append(box)
# Get score for current bbox
                score = np.mean(map[int(box[1]):int(box[3]), int(box[0]):int(box[2])])
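                predicted_scores.append(score)
    # NOTE: assumed completion -- the source truncates here. The reference
    # pipeline ends by applying non-maximum suppression over all candidates;
    # the 0.3 overlap threshold below is an assumption.
    if predicted_bboxes:
        dets = np.hstack([np.array(predicted_bboxes, dtype=np.float32),
                          np.array(predicted_scores, dtype=np.float32)[:, None]])
        keep = gpu_nms(dets, 0.3) if use_gpu else cpu_nms(dets, 0.3)
        predicted_bboxes = [predicted_bboxes[i] for i in keep]
        predicted_scores = [predicted_scores[i] for i in keep]
    return [predicted_bboxes, predicted_scores]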
from SciDataTool.Functions import UnitError
from SciDataTool.Functions.fft_functions import (
comp_fft_freqs,
comp_fft_time,
comp_nthoctave_axis,
)
from numpy import (
pi,
log10,
sqrt,
square,
column_stack,
exp,
real,
imag,
cos,
sin,
ndarray,
nan,
isnan,
abs as np_abs,
sum as np_sum,
angle as np_angle,
where,
argwhere,
array,
take,
zeros,
floor,
ones,
)
# List of the unit symbols, their normalizing value and their dimensions "MLTTempAngleCurrent"
unit_symbols = [
("dimless", 1.0, (0, 0, 0, 0, 0, 0)), # dimensionless
("degC", 1.0, (0, 0, 0, 1, 0, 0)), # degree Celsius
("rad", 1.0, (0, 0, 0, 0, 1, 0)), # radians
("°", pi / 180, (0, 0, 0, 0, 1, 0)), # degree (angle)
("rpm", 2 * pi / 60, (0, 0, -1, 0, 1, 0)), # rotation per minute
("min", 60.0, (0, 0, 1, 0, 0, 0)), # minute
("Ohm", 1.0, (1, 2, -3, 0, 0, -2)), # Ohm
("At", 1.0, (0, 0, 0, 0, 0, 1)), # Ampere-tour
("Wb", 1.0, (1, 2, -2, 0, 0, -1)), # Weber
("Mx", 1.0e-8, (1, 2, -2, 0, 0, -1)), # Maxwell
("Hz", 1.0, (0, 0, -1, 0, 0, 0)), # Herz
("Pa", 1.0, (1, -1, -2, 0, 0, 0)), # Pascal
("g", 1.0e-3, (0, 1, 0, 0, 0, 0)), # gram
("s", 1.0, (0, 0, 1, 0, 0, 0)), # second
("h", 3600.0, (0, 0, 1, 0, 0, 0)), # hour
# ("K", 1.0, (0,0,0,1,0,0)), # Kelvin
("A", 1.0, (0, 0, 0, 0, 0, 1)), # Ampere
("J", 1.0, (1, 2, -2, 0, 0, 0)), # Joule
("W", 1.0, (1, 2, -3, 0, 0, 0)), # Watt
("N", 1.0, (1, 1, -2, 0, 0, 0)), # Newton
("C", 1.0, (0, 0, 1, 0, 0, 1)), # Coulomb
("T", 1.0, (1, 0, -2, 0, 0, -1)), # Tesla
("G", 1.0e-4, (1, 0, -2, 0, 0, -1)), # Gauss
("V", 1.0, (1, 2, -3, 0, 0, -1)), # Volt
("F", 1.0, (-1, -2, 4, 0, 0, 2)), # Farrad
("H", 1.0, (1, -2, -2, 0, 0, -2)), # Henry
("m", 1.0, (1, 0, 0, 0, 0, 0)), # meter
]
# Dictionnary of the prefixes and their values
unit_prefixes = {
"Y": 1e24,
"Z": 1e21,
"E": 1e18,
"P": 1e15,
"T": 1e12,
"G": 1e9,
"M": 1e6,
"k": 1e3,
"h": 1e2,
"da": 1e1,
"": 1.0,
"d": 1e-1,
"c": 1e-2,
"m": 1e-3,
"µ": 1e-6, # ('MICRO SIGN' U+00B5)
"u": 1e-6,
"μ": 1e-6, # ('GREEK SMALL LETTER MU' U+03BC)
"n": 1e-9,
"p": 1e-12,
"f": 1e-15,
"a": 1e-18,
"z": 1e-21,
"y": 1e-24,
}
def get_dim_prefix(unit_str):
if unit_str == "":
unit_str = "dimless"
p = 1 # power of the unit
dim = None
for key in unit_symbols:
if key[0] in unit_str:
if unit_str.rsplit(key[0], 1)[1].isdigit():
p = int(unit_str.rsplit(key[0], 1)[1])
dim = [p * d for d in key[2]]
prefix_str = unit_str.rsplit(key[0], 1)[0]
if prefix_str in unit_prefixes.keys():
prefix = (unit_prefixes.get(prefix_str) * key[1]) ** p
else:
raise UnitError("Prefix " + prefix_str + " unknown")
break
if not dim:
raise UnitError("Unit " + unit_str + " unknown")
return (dim, prefix)
def get_unit_derivate(unit_str, axis_unit):
unit_str = unit_str.replace("*", "").replace(" ", "").replace("^", "")
if axis_unit == "Hz":
axis_unit = "s"
if axis_unit == "rad":
axis_unit = "m"
p = 0
if "/" in unit_str:
unit_num = unit_str.split("/")[0]
unit_denom = unit_str.split("/")[1]
else:
unit_num = unit_str
unit_denom = ""
if axis_unit in unit_num:
p = 1
if unit_num.rsplit(axis_unit, 1)[1].isdigit():
p = int(unit_num.rsplit(axis_unit, 1)[1])
p_new = p - 1
# Case axis_unit must be withdrawn
if p_new == 0:
unit_str = unit_str.replace(axis_unit, "")
elif p_new == 1:
unit_str = unit_str.replace(axis_unit + str(p), axis_unit)
else:
unit_str = unit_str.replace(axis_unit + str(p), axis_unit + str(p_new))
elif axis_unit in unit_denom:
p = 1
if unit_denom.rsplit(axis_unit, 1)[1].isdigit():
p = int(unit_denom.rsplit(axis_unit, 1)[1])
p_new = p + 1
# Case p was 1
if p_new == 2:
unit_str = unit_str.replace(axis_unit, axis_unit + "2")
elif p_new == 1:
unit_str = unit_str.replace(axis_unit + str(p), axis_unit)
else:
unit_str = unit_str.replace(axis_unit + str(p), axis_unit + str(p_new))
else:
# Case axis_unit was not in unit_str
if unit_denom != "":
unit_str += axis_unit
else:
unit_str += "/" + axis_unit
return unit_str
def get_unit_integrate(unit_str, axis_unit):
unit_str = unit_str.replace("*", "").replace(" ", "").replace("^", "")
if axis_unit == "Hz":
axis_unit = "s"
if axis_unit == "rad":
axis_unit = "m"
p = 0
if "/" in unit_str:
unit_num = unit_str.split("/")[0]
unit_denom = unit_str.split("/")[1]
else:
unit_num = unit_str
unit_denom = ""
if axis_unit in unit_num:
p = 1
if unit_num.rsplit(axis_unit, 1)[1].isdigit():
p = int(unit_num.rsplit(axis_unit, 1)[1])
p_new = p + 1
# Case p was 1
if p_new == 2:
unit_str = unit_str.replace(axis_unit, axis_unit + "2")
elif p_new == 1:
unit_str = unit_str.replace(axis_unit + str(p), axis_unit)
else:
unit_str = unit_str.replace(axis_unit + str(p), axis_unit + str(p_new))
elif axis_unit in unit_denom:
p = 1
if unit_denom.rsplit(axis_unit, 1)[1].isdigit():
p = int(unit_denom.rsplit(axis_unit, 1)[1])
p_new = p - 1
# Case axis_unit must be withdrawn
if p_new == 0:
unit_denom = unit_denom.replace(axis_unit, "")
if unit_denom == "":
unit_str = unit_num
else:
unit_str = unit_str.replace(axis_unit, "")
elif p_new == 1:
unit_str = unit_str.replace(axis_unit + str(p), axis_unit)
else:
unit_str = unit_str.replace(axis_unit + str(p), axis_unit + str(p_new))
else:
# Case axis_unit was not in unit_str
if unit_denom != "":
unit_str = unit_num + axis_unit + "/" + unit_denom
else:
unit_str += axis_unit
return unit_str
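# Examples: differentiating w.r.t. time divides the unit by "s"; integration
# multiplies it back.
# >>> get_unit_derivate("m/s", "s")
# 'm/s2'
# >>> get_unit_integrate("m/s2", "s")
# 'm/s'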
def convert(values, unit1, unit2):
"""Converts values from unit1 to unit2
Parameters
----------
values: ndarray
Values of the field to convert
unit1: str
start unit
unit2: str
final unit
Returns
-------
ndarray of the converted field
"""
# Critical band to frequency conversion (for acoustic loudness)
# <NAME>. (1990). "Analytical expressions for the tonotopic sensory scale".
# The Journal of the Acoustical Society of America. 88: 97. doi:10.1121/1.399849
if unit1 == "Bark" and unit2 == "Hz":
return 1960 * (values + 0.53) / (26.28 - values)
elif unit1 == "Hz" and unit2 == "Bark":
return (26.81 * values / (1960 + values)) - 0.53
elif unit1 == "dimless" and unit2 == "":
return values
elif unit1 == "" and unit2 == "dimless":
return values
# Generic conversion
else:
unit1_save = unit1
unit2_save = unit2
# Format the strings
unit1 = unit1.replace("*", "").replace(" ", "").replace("^", "")
unit2 = unit2.replace("*", "").replace(" ", "").replace("^", "")
# Unit1 parsing
if "/" in unit1:
dim1_denom, prefix1_denom = get_dim_prefix(unit1.split("/")[1])
unit1 = unit1.split("/")[0]
else:
dim1_denom = [0, 0, 0, 0, 0, 0]
prefix1_denom = 1.0
dim1_num, prefix1_num = get_dim_prefix(unit1)
# Unit2 parsing
if "/" in unit2:
dim2_denom, prefix2_denom = get_dim_prefix(unit2.split("/")[1])
unit2 = unit2.split("/")[0]
else:
dim2_denom = [0, 0, 0, 0, 0, 0]
prefix2_denom = 1.0
dim2_num, prefix2_num = get_dim_prefix(unit2)
# Check compatibility
dim1 = [i - j for i, j in zip(dim1_num, dim1_denom)]
dim2 = [i - j for i, j in zip(dim2_num, dim2_denom)]
if dim1 != dim2:
raise UnitError(
"Units " + unit1_save + " and " + unit2_save + " do not match"
)
else:
return (
values * (prefix1_num / prefix1_denom) / (prefix2_num / prefix2_denom)
)
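# Examples (dimensionally consistent conversions):
# >>> convert(1.0, "km", "m")
# 1000.0
# >>> convert(3600.0, "s", "h")
# 1.0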
def to_dB(values, unit, ref_value=1.0):
"""Converts values into dB normalized with ref_value
Parameters
----------
values: ndarray
Values of the field to convert
unit: str
Unit
ref_value: float
Reference value
Returns
-------
ndarray of the converted field
"""
if ref_value != 1:
if isinstance(values, ndarray):
values[values < ref_value] = ref_value
else:
if values < ref_value:
values = ref_value
mask = values != 0
try:
convert(values, unit, "W")
        values_dB = 10.0 * where(mask, log10(values / ref_value, where=mask), 0)
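        return values_dB
    except UnitError:
        # assumed fallback: non-power quantities use the 20*log10 convention
        values_dB = 20.0 * where(mask, log10(values / ref_value, where=mask), 0)
        return values_dB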
from keras.utils import np_utils
from keras.datasets import mnist
import numpy as np
def load_mnist(digits = None, conv = False):
# Get MNIST test data
#X_train, Y_train, X_test, Y_test = data_mnist()
(X_train, y_train), (X_test, y_test) = mnist.load_data()
X_train = X_train.reshape(60000, 784)
X_test = X_test.reshape(10000, 784)
X_train = X_train.astype('float32')
X_test = X_test.astype('float32')
X_train /= 255.
X_test /= 255.
# convert class vectors to binary class matrices
Y_train = np_utils.to_categorical(y_train, 10)
Y_test = np_utils.to_categorical(y_test, 10)
# collect the corresponding digits
if digits is not None:
ind_train = []
ind_test = []
for i in digits:
ind_train = ind_train + list(np.where(Y_train[:, i] == 1)[0])
            ind_test = ind_test + list(np.where(Y_test[:, i] == 1)[0])
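        # Assumed continuation (the original is truncated here): keep only the
        # selected digits and return the filtered splits. Handling of the
        # `conv` flag (e.g. reshaping to (N, 28, 28, 1)) is omitted in this sketch.
        X_train, Y_train = X_train[ind_train], Y_train[ind_train]
        X_test, Y_test = X_test[ind_test], Y_test[ind_test]
    return X_train, Y_train, X_test, Y_test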
#!/usr/bin/python
#-*- coding:utf-8 -*-
import sys
import struct
import numpy as np
from torch import tensor
from torch.nn import functional as fn
def convolution3d_f32():
para = []
# init the input data and parameters
batch = int(np.random.randint(1, high=4, size=1))
in_channel = int(np.random.randint(2, high=8, size=1))
in_depth = int(np.random.randint(16, high=128, size=1))
in_height = int(np.random.randint(16, high=128, size=1))
in_width = int(np.random.randint(16, high=128, size=1))
out_channel = int(np.random.randint(8, high=16, size=1))
stride_d = int(np.random.randint(1, high=5, size=1))
stride_h = int(np.random.randint(1, high=5, size=1))
stride_w = int(np.random.randint(1, high=5, size=1))
kernel_d = int(np.random.randint(stride_d, high=8, size=1))
    kernel_h = int(np.random.randint(stride_h, high=8, size=1))
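    # Assumed continuation (the original is truncated here), mirroring the
    # kernel_d / kernel_h initialisation above:
    kernel_w = int(np.random.randint(stride_w, high=8, size=1))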
import numpy as np
import keras
import tensorflow as tf
from matplotlib import pyplot as plt
import pandas as pd
from keras.layers.embeddings import Embedding
from keras.layers import concatenate, Lambda
import os, sys
from weather_model import Seq2Seq_MVE_subnets_swish, weather_conv1D, CausalCNN, RNN_builder, Seq2Seq, Seq2Seq_MVE, Seq2Seq_MVE_subnets
from keras.models import load_model, model_from_json
#from utils import random_sine, plot_prediction
#learning_rate = 0.01
#decay = 0 # Learning rate decay
model_save_path = '../models/'
#loss = "mse" # Other loss functions are possible, see Keras documentation.
# Regularisation isn't really needed for this application
#lambda_regulariser = None #0.000001 # Will not be used if regulariser is None
#regulariser = None # Possible regulariser: keras.regularizers.l2(lambda_regulariser)
#steps_per_epoch = 200 # batch_size * steps_per_epoch = total number of training examples
#num_signals = 2 # The number of random sine waves the compose the signal. The more sine waves, the harder the problem.
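# `renorm` is used throughout this module but defined elsewhere (probably the
# project's utils, whose import is commented out above). A minimal sketch
# consistent with every call site below, mapping a [0, 1]-normalised array
# back to the physical range [vmin, vmax]:
def renorm(norm_value, vmin, vmax):
    return norm_value * (vmax - vmin) + vmin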
def crop(dimension, start, end):
# Crops (or slices) a Tensor on a given dimension from start to end
# example : to crop tensor x[:, :, 5:10]
    # call crop(2, 5, 10), i.e. slice along dimension index 2
def func(x):
if dimension == 0:
return x[start: end]
if dimension == 1:
return x[:, start: end]
if dimension == 2:
return x[:, :, start: end]
if dimension == 3:
return x[:, :, :, start: end]
if dimension == 4:
return x[:, :, :, :, start: end]
return Lambda(func)
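# Illustrative usage of crop() (sketch): take channels 3..5 of a 3-D tensor,
# equivalent to x[:, :, 3:6]. This is exactly how _mve_loss in
# Seq2Seq_Class.build_graph below splits the mean and variance channels:
#     mean_part = crop(2, 0, 3)(y_pred)
#     var_part = crop(2, 3, 6)(y_pred)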
class WeatherConv1D:
def __init__(self, regulariser=None,
lr=0.001, decay=0, loss="mse",
layers=[35, 35], batch_size=256,
input_len=37, input_features=29,
strides_len=3, kernel_size=5):
self.regulariser = regulariser
self.layers = layers
self.lr = lr
self.decay = decay
self.loss = loss
self.pred_result = None
self.batch_size = batch_size
self.input_len = input_len
self.input_features = input_features
self.kernel_strides = strides_len
self.kernel_size = kernel_size
print('Initialized!')
def build_graph(self):
keras.backend.clear_session() # clear session/graph
self.model = weather_conv1D(self.layers, self.lr,
self.decay, self.loss, self.input_len,
self.input_features, self.kernel_strides, self.kernel_size)
print(self.model.summary())
def sample_batch(self, data_inputs, ground_truth, ruitu_inputs, batch_size, certain_id=None, certain_feature=None):
max_i, _, max_j, _ = data_inputs.shape # Example: (1148, 37, 10, 9)-(sample_ind, timestep, sta_id, features)
if certain_id == None and certain_feature == None:
id_ = np.random.randint(max_j, size=batch_size)
i = np.random.randint(max_i, size=batch_size)
batch_ouputs = ground_truth[i,:,id_,:]
batch_ruitu = ruitu_inputs[i,:,id_,:]
elif certain_id != None:
pass
return batch_ruitu, batch_ouputs
def order_batch(self, data_inputs, ground_truth, ruitu_inputs, batch_size, certain_id=None, certain_feature=None):
pass #TODO:
def fit(self, train_input_ruitu, train_labels,
val_input_ruitu, val_labels, batch_size,
iterations=300, validation=True):
self.optimizer = keras.optimizers.Adam(lr=self.lr, decay=self.decay)
self.model.compile(optimizer = self.optimizer, loss=self.loss)
print('Train batch size: {}'.format(batch_size))
print('Validation on data size of {};'.format(val_input_ruitu.shape[0]))
for i in range(iterations):
batch_ruitu, batch_labels = self.sample_batch(train_input_ruitu, train_labels,
train_input_ruitu, batch_size=batch_size)
loss_ = self.model.train_on_batch(x=[batch_ruitu],
y=[batch_labels])
if (i+1)%50 == 0:
print('Iteration:{}/{}. Training batch loss:{}'.
format(i+1, iterations, loss_))
if validation :
self.evaluate(val_input_ruitu, val_labels, each_station_display=False)
print('###'*10)
print('Train finish! Total validation loss:')
self.evaluate(val_input_ruitu, val_labels, each_station_display=True)
def evaluate(self, data_input_ruitu, data_labels, each_station_display=False):
all_loss=[]
for i in range(10): # iterate for each station. (sample_ind, timestep, staionID, features)
val_loss= self.model.evaluate(x=[data_input_ruitu[:,:,i,:]],
y=[data_labels[:,:,i,:]], verbose=False)
all_loss.append(val_loss)
if each_station_display:
print('\tFor station 9000{}, evaluated loss: {}'.format(i+1, val_loss))
print('Mean evaluated loss on all stations:', np.mean(all_loss))
#return np.mean(all_loss)
def predict(self, batch_ruitu):
pred_result_list = []
for i in range(10):
#print('Predict for station: 9000{}'.format(i+1))
result = self.model.predict(x=[batch_ruitu[:,:,i,:]])
result = np.squeeze(result, axis=0)
#all_pred[i] = result
pred_result_list.append(result)
#pass
pred_result = np.stack(pred_result_list, axis=0)
#return all_pred, pred_result
print('Predict shape (10,37,3) means (stationID, timestep, features). Features include: t2m, rh2m and w10m')
self.pred_result = pred_result
return pred_result
def renorm_for_submit(self, pred_mean, pred_var):
if self.pred_result is None:
print('You must run self.predict(batch_inputs, batch_ruitu) firstly!!')
else:
df_empty = pd.DataFrame(columns=['FORE_data', 't2m', 'rh2m', 'w10m'])
target_list=['t2m','rh2m','w10m']
self.obs_range_dic={'t2m':[-30,42], # Official value: [-20,42]
'rh2m':[0.0,100.0],
'w10m':[0.0, 30.0]}
for j, target_v in enumerate(self.target_list):
series_ids = pd.Series()
series_targets = pd.Series()
renorm_value = renorm(self.pred_result[:,:,j], self.obs_range_dic[target_v][0], self.obs_range_dic[target_v][1])
for i in range(10):
if i != 9:
id_num = '0'+str(i+1)
else:
id_num = str(10)
sta_name_time = '900'+id_num+'_'
time_str_list=[]
for t in range(37):
if t < 10:
time_str= sta_name_time + '0'+ str(t)
else:
time_str = sta_name_time + str(t)
time_str_list.append(time_str)
series_id = pd.Series(time_str_list)
series_target = pd.Series(renorm_value[i])
series_ids = pd.concat([series_ids, series_id])
series_targets = pd.concat([series_targets, series_target])
df_empty['FORE_data'] = series_ids
df_empty[target_v] = series_targets
return df_empty
class CausalCNN_Class(WeatherConv1D):
def __init__(self, regulariser,lr, decay, loss,
n_filters, strides_len, kernel_size, seq_len,
input_features, output_features, dilation_rates):
self.regulariser=regulariser
self.n_filters=n_filters
self.lr=lr
self.decay=decay
self.loss=loss
self.seq_len=seq_len
self.input_features=input_features
self.output_features = output_features
self.strides_len=strides_len
self.kernel_size=kernel_size
self.dilation_rates=dilation_rates
def build_graph(self):
keras.backend.clear_session() # clear session/graph
self.model = CausalCNN(self.n_filters, self.lr,
self.decay, self.loss,
self.seq_len, self.input_features,
self.strides_len, self.kernel_size,
self.dilation_rates)
print(self.model.summary())
class FNN(WeatherConv1D):
def __init__(self, regulariser,lr, decay, loss,
layers, batch_size, seq_len, input_features, output_features):
self.regulariser=regulariser
self.layers=layers
self.lr=lr
self.decay=decay
self.loss=loss
self.seq_len=seq_len
self.input_features=input_features
self.output_features = output_features
def build_graph(self):
keras.backend.clear_session() # clear session/graph
self.model = weather_fnn(self.layers, self.lr,
self.decay, self.loss, self.seq_len,
self.input_features, self.output_features)
print(self.model.summary())
class Enc_Dec:
def __init__(self, num_input_features, num_output_features, num_decoder_features,
input_sequence_length, target_sequence_length,
num_steps_to_predict, regulariser = None,
lr=0.001, decay=0, loss = "mse",
layers=[35, 35]):
self.num_input_features = num_input_features
self.num_output_features = num_output_features
self.num_decoder_features = num_decoder_features
self.input_sequence_length = input_sequence_length
self.target_sequence_length = target_sequence_length
self.num_steps_to_predict = num_steps_to_predict
self.regulariser = regulariser
self.layers = layers
self.lr = lr
self.decay = decay
self.loss = loss
self.pred_result = None
self.train_loss=[]
self.target_list=['t2m','rh2m','w10m']
self.obs_range_dic={'t2m':[-30,42], # Official value: [-20,42]
'rh2m':[0.0,100.0],
'w10m':[0.0, 30.0]}
print('Initialized!')
def build_graph(self):
keras.backend.clear_session() # clear session/graph
self.optimiser = keras.optimizers.Adam(lr=self.lr, decay=self.decay)
# Define an input sequence.
encoder_inputs = keras.layers.Input(shape=(None, self.num_input_features), name='encoder_inputs')
# Create a list of RNN Cells, these are then concatenated into a single layer
# with the RNN layer.
encoder_cells = []
for hidden_neurons in self.layers:
encoder_cells.append(keras.layers.GRUCell(hidden_neurons,
kernel_regularizer = self.regulariser,
recurrent_regularizer = self.regulariser,
bias_regularizer = self.regulariser))
encoder = keras.layers.RNN(encoder_cells, return_state=True)
encoder_outputs_and_states = encoder(encoder_inputs)
# Discard encoder outputs and only keep the states.
encoder_states = encoder_outputs_and_states[1:]
# Define a decoder sequence.
decoder_inputs = keras.layers.Input(shape=(None, self.num_decoder_features), name='decoder_inputs')
decoder_cells = []
for hidden_neurons in self.layers:
decoder_cells.append(keras.layers.GRUCell(hidden_neurons,
kernel_regularizer = self.regulariser,
recurrent_regularizer = self.regulariser,
bias_regularizer = self.regulariser))
decoder = keras.layers.RNN(decoder_cells, return_sequences=True, return_state=True)
# Set the initial state of the decoder to be the ouput state of the encoder.
decoder_outputs_and_states = decoder(decoder_inputs, initial_state=encoder_states)
# Only select the output of the decoder (not the states)
decoder_outputs = decoder_outputs_and_states[0]
# Apply a dense layer with linear activation to set output to correct dimension
        # and scale (tanh is the default GRU activation in Keras; the target values can be larger than 1)
decoder_dense1 = keras.layers.Dense(units=64,
activation='tanh',
kernel_regularizer = self.regulariser,
bias_regularizer = self.regulariser, name='dense_tanh')
output_dense = keras.layers.Dense(self.num_output_features,
activation='sigmoid',
kernel_regularizer = self.regulariser,
bias_regularizer = self.regulariser, name='output_sig')
#densen1=decoder_dense1(decoder_outputs)
decoder_outputs = output_dense(decoder_outputs)
# Create a model using the functional API provided by Keras.
self.model = keras.models.Model(inputs=[encoder_inputs, decoder_inputs], outputs=decoder_outputs)
print(self.model.summary())
def sample_batch(self, data_inputs, ground_truth, ruitu_inputs, batch_size, certain_id=None, certain_feature=None):
max_i, _, max_j, _ = data_inputs.shape # Example: (1148, 37, 10, 9)-(sample_ind, timestep, sta_id, features)
if certain_id == None and certain_feature == None:
id_ = np.random.randint(max_j, size=batch_size)
i = np.random.randint(max_i, size=batch_size)
batch_inputs = data_inputs[i,:,id_,:]
batch_ouputs = ground_truth[i,:,id_,:]
batch_ruitu = ruitu_inputs[i,:,id_,:]
elif certain_id != None:
pass
return batch_inputs, batch_ruitu, batch_ouputs
def fit(self, train_input_obs, train_input_ruitu, train_labels,
val_input_obs, val_input_ruitu, val_labels, batch_size,
iterations=300, validation=True):
self.model.compile(optimizer = self.optimiser, loss=self.loss)
print('Train batch size: {}'.format(batch_size))
print('Validation on data size of {};'.format(val_input_obs.shape[0]))
for i in range(iterations):
batch_inputs, batch_ruitu, batch_labels = self.sample_batch(train_input_obs, train_labels,
train_input_ruitu, batch_size=batch_size)
loss_ = self.model.train_on_batch(x=[batch_inputs, batch_ruitu],
y=[batch_labels])
if (i+1)%50 == 0:
print('Iteration:{}/{}. Training batch loss:{}'.
format(i+1, iterations, loss_))
if validation :
self.evaluate(val_input_obs, val_input_ruitu, val_labels, each_station_display=False)
print('###'*10)
print('Train finish! Total validation loss:')
self.evaluate(val_input_obs, val_input_ruitu, val_labels, each_station_display=True)
def evaluate(self, data_input_obs, data_input_ruitu, data_labels, each_station_display=False):
assert data_input_ruitu.shape[0] == data_input_obs.shape[0] == data_labels.shape[0], 'Shape Error'
#assert data_input_obs.shape[1] == 28 and data_input_obs.shape[2] == 10 and data_input_obs.shape[3] == 9, 'Error! Obs input shape must be (None, 28,10,9)'
assert data_input_ruitu.shape[1] == 37 and data_input_ruitu.shape[2] == 10 and data_input_ruitu.shape[3] == 29, 'Error! Ruitu input shape must be (None, 37,10,29)'
        assert data_labels.shape[1] == 37 and data_labels.shape[2] == 10 and data_labels.shape[3] == 3, 'Error! Labels shape must be (None, 37, 10, 3)'
all_loss=[]
for i in range(10): # iterate for each station. (sample_ind, timestep, staionID, features)
val_loss= self.model.evaluate(x=[data_input_obs[:,:,i,:], data_input_ruitu[:,:,i,:]],
y=[data_labels[:,:,i,:]], verbose=False)
all_loss.append(val_loss)
if each_station_display:
print('\tFor station 9000{}, evaluated loss: {}'.format(i+1, val_loss))
print('Mean evaluated loss on all stations:', np.mean(all_loss))
def predict(self, batch_inputs, batch_ruitu):
assert batch_ruitu.shape[0] == batch_inputs.shape[0], 'Shape Error'
assert batch_inputs.shape[1] == 28 and batch_inputs.shape[2] == 10 and batch_inputs.shape[3] == 9, 'Error! Obs input shape must be (None, 28,10,9)'
assert batch_ruitu.shape[1] == 37 and batch_ruitu.shape[2] == 10 and batch_ruitu.shape[3] == 29, 'Error! Ruitu input shape must be (None, 37,10, 29)'
#all_pred={}
pred_result_list = []
for i in range(10):
#print('Predict for station: 9000{}'.format(i+1))
result = self.model.predict(x=[batch_inputs[:,:,i,:], batch_ruitu[:,:,i,:]])
result = np.squeeze(result, axis=0)
#all_pred[i] = result
pred_result_list.append(result)
#pass
pred_result = np.stack(pred_result_list, axis=0)
#return all_pred, pred_result
print('Predict shape (10,37,3) means (stationID, timestep, features). Features include: t2m, rh2m and w10m')
self.pred_result = pred_result
return pred_result
def renorm_for_submit(self, pred_mean, pred_var=None):
'''
# TODO: Add three strategies for output
'''
assert self.pred_result is not None, 'You must run self.predict(batch_inputs, batch_ruitu) firstly!!'
        assert pred_mean.shape == (10, 37, 3), 'Error! This function ONLY works for one data sample with shape (10, 37, 3); a batched shape (None, 10, 37, 3) will lead to this error!'
df_empty = pd.DataFrame(columns=['FORE_data', 't2m', 'rh2m', 'w10m'])
for j, target_v in enumerate(self.target_list):
series_ids = pd.Series()
series_targets = pd.Series()
renorm_value = renorm(pred_mean[:,:,j], self.obs_range_dic[target_v][0], self.obs_range_dic[target_v][1])
for i in range(10):
if i != 9:
id_num = '0'+str(i+1)
else:
id_num = str(10)
sta_name_time = '900'+id_num+'_'
time_str_list=[]
for t in range(37):
if t < 10:
time_str= sta_name_time + '0'+ str(t)
else:
time_str = sta_name_time + str(t)
time_str_list.append(time_str)
series_id = pd.Series(time_str_list)
series_target = pd.Series(renorm_value[i])
series_ids = pd.concat([series_ids, series_id])
series_targets = pd.concat([series_targets, series_target])
df_empty['FORE_data'] = series_ids
df_empty[target_v] = series_targets
return df_empty
#pass
def plot_prediction(self, x, y_true, y_pred, input_ruitu=None):
"""Plots the predictions.
Arguments
---------
x: Input sequence of shape (input_sequence_length,
dimension_of_signal)
        y_true: True output sequence of shape (target_sequence_length,
            dimension_of_signal)
        y_pred: Predicted output sequence of shape (target_sequence_length,
            dimension_of_signal)
input_ruitu: Ruitu output sequence
"""
plt.figure(figsize=(12, 3))
output_dim = x.shape[-1]# feature dimension
for j in range(output_dim):
past = x[:, j]
true = y_true[:, j]
pred = y_pred[:, j]
if input_ruitu is not None:
ruitu = input_ruitu[:, j]
label1 = "Seen (past) values" if j==0 else "_nolegend_"
label2 = "True future values" if j==0 else "_nolegend_"
label3 = "Predictions" if j==0 else "_nolegend_"
label4 = "Ruitu values" if j==0 else "_nolegend_"
plt.plot(range(len(past)), past, "o-g",
label=label1)
plt.plot(range(len(past),
len(true)+len(past)), true, "x--g", label=label2)
plt.plot(range(len(past), len(pred)+len(past)), pred, "o--y",
label=label3)
if input_ruitu is not None:
plt.plot(range(len(past), len(ruitu)+len(past)), ruitu, "o--r",
label=label4)
plt.legend(loc='best')
plt.title("Predictions v.s. true values v.s. Ruitu")
plt.show()
class RNN_Class(WeatherConv1D):
def __init__(self, num_output_features, num_decoder_features,
target_sequence_length,
num_steps_to_predict, regulariser = None,
lr=0.001, decay=0, loss = "mse",
layers=[35, 35]):
self.num_output_features = num_output_features
self.num_decoder_features = num_decoder_features
self.target_sequence_length = target_sequence_length
self.num_steps_to_predict = num_steps_to_predict
self.regulariser = regulariser
self.layers = layers
self.lr = lr
self.decay = decay
self.loss = loss
self.pred_result = None
#self.batch_size = batch_size
print('Initialized!')
def build_graph(self):
keras.backend.clear_session() # clear session/graph
self.model = RNN_builder(self.num_output_features, self.num_decoder_features,
self.target_sequence_length,
self.num_steps_to_predict, self.regulariser,
self.lr, self.decay, self.loss, self.layers)
print(self.model.summary())
class Seq2Seq_Class(Enc_Dec):
def __init__(self, id_embd, time_embd,
num_input_features, num_output_features, num_decoder_features,
input_sequence_length, target_sequence_length,
num_steps_to_predict, regulariser = None,
lr=0.001, decay=0, loss = "mse",
layers=[35, 35], model_save_path='../models',
model_structure_name='seq2seq_model.json', model_weights_name='seq2seq_model_weights.h5'):
super().__init__(num_input_features, num_output_features, num_decoder_features,
input_sequence_length, target_sequence_length,
num_steps_to_predict, regulariser = None,
lr=lr, decay=decay, loss = loss,
layers=layers)
self.id_embd = id_embd
self.time_embd = time_embd
self.val_loss_list=[]
self.train_loss_list=[]
self.current_mean_val_loss = None
self.early_stop_limit = 10 # with the unit of Iteration Display
self.EARLY_STOP=False
self.pred_var_result = []
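        # Two-sided Gaussian interval multipliers: for confidence level p the
        # half-width of the interval is z * sigma (e.g. P(|Z| <= 1.96) ~ 0.95).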
self.pi_dic={0.95:1.96, 0.9:1.645, 0.8:1.28, 0.68:1.}
self.target_list=['t2m','rh2m','w10m']
self.obs_range_dic={'t2m':[-30,42], # Official value: [-20,42]
'rh2m':[0.0,100.0],
'w10m':[0.0, 30.0]}
self.obs_and_output_feature_index_map = {'t2m':0,'rh2m':1,'w10m':2}
self.ruitu_feature_index_map = {'t2m':1,'rh2m':3,'w10m':4}
self.model_save_path = model_save_path
self.model_structure_name=model_structure_name
self.model_weights_name=model_weights_name
def build_graph(self):
#keras.backend.clear_session() # clear session/graph
self.optimizer = keras.optimizers.Adam(lr=self.lr, decay=self.decay)
self.model = Seq2Seq_MVE_subnets_swish(id_embd=True, time_embd=True,
lr=self.lr, decay=self.decay,
num_input_features=self.num_input_features, num_output_features=self.num_output_features,
num_decoder_features=self.num_decoder_features, layers=self.layers,
loss=self.loss, regulariser=self.regulariser)
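        # The custom loss below is the Gaussian negative log-likelihood used in
        # mean-variance estimation (MVE): for predicted mean mu and variance
        # sigma^2, NLL = 0.5*log(sigma^2) + 0.5*(y - mu)^2 / sigma^2 (+ const).
        # Channels 0:3 of y_pred carry mu; channels 3:6 are treated as the
        # variance here (note other methods in this file apply np.exp first,
        # treating the same channels as log-variance).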
def _mve_loss(y_true, y_pred):
pred_u = crop(2,0,3)(y_pred)
pred_sig = crop(2,3,6)(y_pred)
print(pred_sig)
#exp_sig = tf.exp(pred_sig) # avoid pred_sig is too small such as zero
#precision = 1./exp_sig
precision = 1./pred_sig
#log_loss= 0.5*tf.log(exp_sig)+0.5*precision*((pred_u-y_true)**2)
log_loss= 0.5*tf.log(pred_sig)+0.5*precision*((pred_u-y_true)**2)
log_loss=tf.reduce_mean(log_loss)
return log_loss
print(self.model.summary())
self.model.compile(optimizer = self.optimizer, loss=_mve_loss)
def sample_batch(self, data_inputs, ground_truth, ruitu_inputs, batch_size, certain_id=None, certain_feature=None):
max_i, _, max_j, _ = data_inputs.shape # Example: (1148, 37, 10, 9)-(sample_ind, timestep, sta_id, features)
id_ = np.random.randint(max_j, size=batch_size)
i = np.random.randint(max_i, size=batch_size)
batch_inputs = data_inputs[i,:,id_,:]
batch_ouputs = ground_truth[i,:,id_,:]
batch_ruitu = ruitu_inputs[i,:,id_,:]
# id used for embedding
if self.id_embd and (not self.time_embd):
expd_id = np.expand_dims(id_,axis=1)
batch_ids = np.tile(expd_id,(1,37))
return batch_inputs, batch_ruitu, batch_ouputs, batch_ids
elif (not self.id_embd) and (self.time_embd):
time_range = np.array(range(37))
batch_time = np.tile(time_range,(batch_size,1))
#batch_time = np.expand_dims(batch_time, axis=-1)
return batch_inputs, batch_ruitu, batch_ouputs, batch_time
elif (self.id_embd) and (self.time_embd):
expd_id = np.expand_dims(id_,axis=1)
batch_ids = np.tile(expd_id,(1,37))
time_range = np.array(range(37))
batch_time = np.tile(time_range,(batch_size,1))
#batch_time = np.expand_dims(batch_time, axis=-1)
return batch_inputs, batch_ruitu, batch_ouputs, batch_ids, batch_time
elif (not self.id_embd) and (not self.time_embd):
return batch_inputs, batch_ruitu, batch_ouputs
def fit(self, train_input_obs, train_input_ruitu, train_labels,
val_input_obs, val_input_ruitu, val_labels, val_ids, val_times, batch_size,
iterations=300, validation=True):
print('Train batch size: {}'.format(batch_size))
print('Validation on data size of {};'.format(val_input_obs.shape[0]))
early_stop_count = 0
for i in range(iterations):
batch_inputs, batch_ruitu, batch_labels, batch_ids, batch_time = self.sample_batch(train_input_obs, train_labels,
train_input_ruitu, batch_size=batch_size)
#batch_placeholders = np.zeros_like(batch_labels)
loss_ = self.model.train_on_batch(x=[batch_inputs, batch_ruitu, batch_ids, batch_time],
y=[batch_labels])
if (i+1)%50 == 0:
print('Iteration:{}/{}. Training batch MLE loss:{}'.
format(i+1, iterations, loss_))
if validation :
self.evaluate(val_input_obs, val_input_ruitu, val_labels, val_ids, val_times, each_station_display=False)
if len(self.val_loss_list) >0: # Early stopping
                        if self.current_mean_val_loss <= min(self.val_loss_list): # current loss ties the best so far (the list already includes it)
early_stop_count = 0
model_json = self.model.to_json()
with open(self.model_save_path+self.model_structure_name, "w") as json_file:
json_file.write(model_json)
self.model.save_weights(self.model_save_path+self.model_weights_name)
else:
early_stop_count +=1
print('Early-stop counter:', early_stop_count)
if early_stop_count == self.early_stop_limit:
self.EARLY_STOP=True
break
print('###'*10)
if self.EARLY_STOP:
print('Loading the best model before early-stop ...')
self.model.load_weights(self.model_save_path+self.model_weights_name)
print('Training finished! Detailed val MLE loss:')
self.evaluate(val_input_obs, val_input_ruitu, val_labels, val_ids, val_times, each_station_display=True)
def evaluate(self, data_input_obs, data_input_ruitu, data_labels, data_ids, data_time, each_station_display=False):
all_loss=[]
for i in range(10): # iterate for each station. (sample_ind, timestep, staionID, features)
#batch_placeholders = np.zeros_like(data_labels[:,:,i,:])
val_loss= self.model.evaluate(x=[data_input_obs[:,:,i,:], data_input_ruitu[:,:,i,:], data_ids[:,:,i], data_time],
y=[data_labels[:,:,i,:]], verbose=False)
all_loss.append(val_loss)
if each_station_display:
print('\tFor station 9000{}, val MLE loss: {}'.format(i+1, val_loss))
self.current_mean_val_loss = np.mean(all_loss)
print('Mean val MLE loss:', self.current_mean_val_loss)
self.val_loss_list.append(self.current_mean_val_loss)
def predict(self, batch_inputs, batch_ruitu, batch_ids, batch_times):
'''
Input:
Output:
pred_result (mean value) : (None, 10,37,3). i.e., (sample_nums, stationID, timestep, features)
pred_var_result (var value) : (None, 10,37,3)
'''
pred_result_list = []
pred_var_list = []
#pred_std_list =[]
for i in range(10):
result = self.model.predict(x=[batch_inputs[:,:,i,:], batch_ruitu[:,:,i,:], batch_ids[:,:,i], batch_times])
var_result = result[:,:,3:6] # Variance
result = result[:,:,0:3] # Mean
#result = np.squeeze(result, axis=0)
pred_result_list.append(result)
#var_result = np.squeeze(var_result, axis=0)
pred_var_list.append(var_result)
pred_result = np.stack(pred_result_list, axis=1)
pred_var_result = np.stack(pred_var_list, axis=1)
print('Predictive shape (None, 10,37,3) means (sample_nums, stationID, timestep, features). \
Features include: t2m, rh2m and w10m')
self.pred_result = pred_result
self.pred_var_result = pred_var_result
#self.pred_std_result = np.sqrt(np.exp(self.pred_var_result[:,:,i,j])) # Calculate standard deviation
return pred_result, pred_var_result
def renorm_for_visualization(self, obs_inputs, ruitu_inputs, pred_mean_result, pred_var_result, ground_truth=None):
'''
obs_inputs: (None, 28, 10, 9)
ruitu_inputs: (None, 37, 10, 29)
pred_mean_result: (None, 10, 37, 3)
pred_var_result: (None, 10, 37, 3)
ground_truth: (None, 37, 10, 3)
#self.target_list=['t2m','rh2m','w10m']
#self.obs_range_dic={'t2m':[-30,42],
# 'rh2m':[0.0,100.0],
# 'w10m':[0.0, 30.0]}
#self.obs_and_output_feature_index_map = {'t2m':0,'rh2m':1,'w10m':2}
#self.ruitu_feature_index_map = {'t2m':1,'rh2m':3,'w10m':4}
#TODO:
'''
for target_v in self.target_list:
temp1 = obs_inputs[:,:,:,self.obs_and_output_feature_index_map[target_v]]
temp2 = ruitu_inputs[:,:,:,self.ruitu_feature_index_map[target_v]]
temp3 = pred_mean_result[:,:,:,self.obs_and_output_feature_index_map[target_v]]
#temp4 = pred_var_result[:,:,:,self.obs_and_output_feature_index_map[target_v]]
obs_inputs[:,:,:,self.obs_and_output_feature_index_map[target_v]] = renorm(temp1, self.obs_range_dic[target_v][0], self.obs_range_dic[target_v][1])
ruitu_inputs[:,:,:,self.ruitu_feature_index_map[target_v]] = renorm(temp2, self.obs_range_dic[target_v][0], self.obs_range_dic[target_v][1])
pred_mean_result[:,:,:,self.obs_and_output_feature_index_map[target_v]] = renorm(temp3, self.obs_range_dic[target_v][0], self.obs_range_dic[target_v][1])
#pred_var_result[:,:,:,self.obs_and_output_feature_index_map[target_v]] = renorm(temp4, self.obs_range_dic[target_v][0], self.obs_range_dic[target_v][1])
if ground_truth is not None:
temp5 = ground_truth[:,:,:,self.obs_and_output_feature_index_map[target_v]]
ground_truth[:,:,:,self.obs_and_output_feature_index_map[target_v]] = renorm(temp5, self.obs_range_dic[target_v][0], self.obs_range_dic[target_v][1])
if ground_truth is not None:
return obs_inputs, ruitu_inputs, pred_mean_result, pred_var_result, ground_truth
else:
return obs_inputs, ruitu_inputs, pred_mean_result, pred_var_result
def calc_uncertainty_info(self, verbose=False):
'''
Verbose: Display uncertainty for each feature i.e., (t2m, rh2m, w10m)
#TODO: Refactor the double 'for' part.
'''
assert len(self.pred_var_result)>0, 'Error! You must run predict() before running calc_uncertainty_info()'
print('The uncertainty info are calculated on {} predicted samples with shape {}'
.format(len(self.pred_var_result), self.pred_var_result.shape))
#
if verbose:
assert self.target_list == ['t2m','rh2m','w10m'], 'ERROR, list changed!'
for j, target_v in enumerate(['t2m','rh2m','w10m']):
print('For feature {}:'.format(target_v))
for i in range(37):
                    unctt_var = np.exp(self.pred_var_result[:,:,i,j])
                    unctt_std = np.sqrt(unctt_var)
unctt_mean_std = np.mean(unctt_std)
unctt_mean_var = np.mean(unctt_var)
#renorm_unctt_mean_std = renorm(unctt_mean_std, self.obs_range_dic[target_v][0], self.obs_range_dic[target_v][1])
print('\tTime:{}-Variance:{:.4f}; Std:{:.4f};'.
format(i+1, unctt_mean_var, unctt_mean_std))
else:
for i in range(37):
unctt_var = np.exp(self.pred_var_result[:,:,i,:])
unctt_std = np.sqrt(unctt_var)
unctt_mean_std = np.mean(unctt_std)
unctt_mean_var = np.mean(unctt_var)
#renorm_unctt_mean_std = 0
print('Time:{}-Variance:{:.4f}; Std:{:.4f};'.
format(i+1, unctt_mean_var, unctt_mean_std))
def minus_plus_std_strategy(self, pred_mean, pred_var, feature_name,\
timestep_to_ensemble=21, alpha=0):
'''
        This strategy shifts the predicted mean by alpha standard deviations
        after a specific timestep (timestep_to_ensemble):
            pred_mean + alpha * pred_std
pred_mean: (10, 37, 3)
pred_var: (10, 37, 3)
timestep_to_ensemble: int32 (From 0 to 36)
'''
        print('Using minus_plus_std_strategy with alpha {}'.format(alpha))
assert 0<=timestep_to_ensemble<=36 , 'Please ensure 0<=timestep_to_ensemble<=36!'
        assert -0.3 <= alpha <= 0.3, 'Please ensure -0.3 <= alpha <= 0.3!'
        assert pred_mean.shape == (10, 37, 3), 'Error! This function ONLY works for \
        one data sample with shape (10, 37, 3); a batched shape (None, 10, 37, 3) will lead to this error!'
pred_std = np.sqrt(np.exp(pred_var))
print('alpha:',alpha)
pred_mean[:,timestep_to_ensemble:,self.obs_and_output_feature_index_map[feature_name]] = \
pred_mean[:,timestep_to_ensemble:,self.obs_and_output_feature_index_map[feature_name]] + \
alpha * pred_std[:,timestep_to_ensemble:,self.obs_and_output_feature_index_map[feature_name]]
return pred_mean
def linear_ensemble_strategy(self, pred_mean, pred_var, ruitu_inputs, feature_name,\
timestep_to_ensemble=21, alpha=1):
'''
        This strategy computes a linear weighting at a specific timestep
        (timestep_to_ensemble) between the prediction and Ruitu:
(alpha)*pred_mean + (1-alpha)*ruitu_inputs
pred_mean: (10, 37, 3)
pred_var: (10, 37, 3)
        ruitu_inputs: (37,10,29). Axes are swapped to (10,37,29) below FIRST!
timestep_to_ensemble: int32 (From 0 to 36)
'''
assert 0<= alpha <=1, 'Please ensure 0<= alpha <=1 !'
        assert pred_mean.shape == (10, 37, 3), 'Error! This function ONLY works for \
        one data sample with shape (10, 37, 3); a batched shape (None, 10, 37, 3) will lead to this error!'
#pred_std = np.sqrt(np.exp(pred_var))
ruitu_inputs = np.swapaxes(ruitu_inputs,0,1)
print('alpha:',alpha)
pred_mean[:,timestep_to_ensemble:,self.obs_and_output_feature_index_map[feature_name]] = \
(alpha)*pred_mean[:,timestep_to_ensemble:,self.obs_and_output_feature_index_map[feature_name]] + \
(1-alpha)*ruitu_inputs[:,timestep_to_ensemble:, self.ruitu_feature_index_map[feature_name]]
print('Corrected pred_mean shape:', pred_mean.shape)
return pred_mean
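    # Illustrative call (sketch; names match this class's API): trust the NWP
    # (Ruitu) input more than the network for the late timesteps, e.g.
    #     pred_mean = self.linear_ensemble_strategy(pred_mean, pred_var,
    #         ruitu_inputs, 't2m', timestep_to_ensemble=21, alpha=0.7)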
def fuzzy_ensemble_strategy(self, pred_mean, pred_var, feature_name,\
timestep_to_ensemble=21, alpha=0):
'''
        This strategy nudges the predicted mean by a fuzzy-membership-weighted
        multiple of the predicted std after a specific timestep (timestep_to_ensemble):
            pred_mean + fuzzy_degree * alpha * pred_std
pred_mean: (10, 37, 3)
pred_var: (10, 37, 3)
timestep_to_ensemble: int32 (From 0 to 36)
'''
print('Using fuzzy_ensemble_strategy with alpha {}'.format(alpha))
assert 0<=timestep_to_ensemble<=36 , 'Please ensure 0<=timestep_to_ensemble<=36!'
assert -0.4<= alpha <=0.4, 'Please ensure -0.4<= alpha <=0.4 !'
        assert pred_mean.shape == (10, 37, 3), 'Error! This function ONLY works for \
        one data sample with shape (10, 37, 3); a batched shape (None, 10, 37, 3) will lead to this error!'
pred_std = np.sqrt(np.exp(pred_var))
#print('normalizing for Std. after timestep:', timestep_to_ensemble)
temp_std = pred_std[:,timestep_to_ensemble:,self.obs_and_output_feature_index_map[feature_name]]
norm_std = temp_std / np.max(temp_std)
#print('norm_std shape', norm_std.shape)
dim_0, dim_1 = norm_std.shape
reshaped_std = norm_std.reshape(-1)
from skfuzzy import trimf
fuzzy_degree = trimf(reshaped_std, [0., 1, 1.2])
fuzzy_degree = fuzzy_degree.reshape(dim_0, dim_1)
#print('fuzzy_degree shape:',fuzzy_degree.shape)
#print('temp_std shape:',temp_std.shape)
pred_mean[:,timestep_to_ensemble:,self.obs_and_output_feature_index_map[feature_name]] = \
pred_mean[:,timestep_to_ensemble:,self.obs_and_output_feature_index_map[feature_name]] + \
fuzzy_degree*alpha*temp_std
#pred_mean[:,timestep_to_ensemble:,self.obs_and_output_feature_index_map[feature_name]] + \
#alpha * pred_std[:,timestep_to_ensemble:,self.obs_and_output_feature_index_map[feature_name]]
#print('pred_mean.shape',pred_mean.shape)
        return pred_mean
def renorm_for_submit(self, pred_mean, pred_var, ruitu_inputs, timestep_to_ensemble=21, alpha=1):
'''
Overwrite for Seq2Seq_MVE Class
pred_mean: shape of (10, 37, 3)
pred_var: shape of (10, 37, 3)
ruitu_inputs: shape of (10, 37, 3)
timestep_to_ensemble: int32 (From 0 to 36)
# TODO: Add three strategies for output
'''
assert self.pred_result is not None, 'You must run self.predict(batch_inputs, batch_ruitu) firstly!!'
        assert pred_mean.shape == (10, 37, 3), 'Error! This function ONLY works for one data sample with shape (10, 37, 3); a batched shape (None, 10, 37, 3) will lead to this error!'
df_empty = pd.DataFrame(columns=['FORE_data', 't2m', 'rh2m', 'w10m'])
for j, target_v in enumerate(self.target_list):
series_ids = pd.Series()
series_targets = pd.Series()
#print('Feature {}, timestep_to_ensemble: {}, weighted alpha: {}'.
# format(target_v, timestep_to_ensemble, alpha))
#pred_mean = self.linear_ensemble_strategy(pred_mean, pred_var,
# ruitu_inputs, target_v, timestep_to_ensemble, alpha)
#pred_mean =self.minus_plus_std_strategy(pred_mean, pred_var, target_v,\
# timestep_to_ensemble, alpha)
#pred_mean = self.fuzzy_ensemble_strategy(pred_mean, pred_var, target_v,\
# timestep_to_ensemble, alpha=0.)
renorm_value = renorm(pred_mean[:,:,j], self.obs_range_dic[target_v][0], self.obs_range_dic[target_v][1])
for i in range(10):
if i != 9:
id_num = '0'+str(i+1)
else:
id_num = str(10)
sta_name_time = '900'+id_num+'_'
time_str_list=[]
for t in range(37):
if t < 10:
time_str= sta_name_time + '0'+ str(t)
else:
time_str = sta_name_time + str(t)
time_str_list.append(time_str)
series_id = pd.Series(time_str_list)
series_target = pd.Series(renorm_value[i])
series_ids = pd.concat([series_ids, series_id])
series_targets = pd.concat([series_targets, series_target])
df_empty['FORE_data'] = series_ids
df_empty[target_v] = series_targets
return df_empty
def plot_prediction(self, x, y_true, y_pred, intervals=None, input_ruitu=None, pi_degree=0.8, renorm_flag=False):
"""Plots the predictions.
Arguments
---------
x: Input sequence of shape (input_sequence_length,
dimension_of_signal) E.g. (28, 1)
        y_true: True output sequence of shape (target_sequence_length,
            dimension_of_signal) E.g. (35, 1)
        y_pred: Predicted output sequence of shape (target_sequence_length,
            dimension_of_signal) E.g. (35, 1)
input_ruitu: Ruitu output sequence E.g. (35, 1)
pi_degree: Confident Level such as 0.95, 0.9, 0.8, and 0.68 etc.
"""
plt.figure(figsize=(12, 3))
output_dim = x.shape[-1]# feature dimension
for j in range(output_dim):
past = x[:, j]
true = y_true[:, j]
pred = y_pred[:, j]
if input_ruitu is not None:
ruitu = input_ruitu[:, j]
if intervals is not None:
pi_var = intervals[:, j]
pi_var = np.sqrt(np.exp(pi_var))
label1 = "Seen (past) values" if j==0 else "_nolegend_"
label2 = "True future values" if j==0 else "_nolegend_"
label3 = "Predictions" if j==0 else "_nolegend_"
label4 = "Ruitu values" if j==0 else "_nolegend_"
label5 = "Lower-Upper bound" if j==0 else "_nolegend_"
plt.plot(range(len(past)), past, "o-g",
label=label1)
plt.plot(range(len(past),
len(true)+len(past)), true, "x--g", label=label2)
plt.plot(range(len(past), len(pred)+len(past)), pred, ".--b",
label=label3)
if input_ruitu is not None:
plt.plot(range(len(past), len(ruitu)+len(past)), ruitu, ".--r",
label=label4)
if intervals is not None:
#print(intervals.shape)
print(pi_var.shape)
up_bound = pred + self.pi_dic[pi_degree]*pi_var
low_bound = pred - self.pi_dic[pi_degree]*pi_var
plt.fill_between(range(len(past), len(ruitu)+len(past)),
up_bound, low_bound, facecolor='blue', alpha=0.1)
plt.legend(loc='best')
plt.title("Predictions v.s. true values v.s. Ruitu")
plt.show()
class Enc_Dec_Embd(Enc_Dec):
def build_graph(self):
keras.backend.clear_session() # clear session/graph
self.optimiser = keras.optimizers.Adam(lr=self.lr, decay=self.decay)
# Define an input sequence.
encoder_inputs = keras.layers.Input(shape=(None, self.num_input_features), name='encoder_inputs')
# Create a list of RNN Cells, these are then concatenated into a single layer
# with the RNN layer.
encoder_cells = []
for hidden_neurons in self.layers:
encoder_cells.append(keras.layers.GRUCell(hidden_neurons,
kernel_regularizer = self.regulariser,
recurrent_regularizer = self.regulariser,
bias_regularizer = self.regulariser))
encoder = keras.layers.RNN(encoder_cells, return_state=True)
encoder_outputs_and_states = encoder(encoder_inputs)
# Discard encoder outputs and only keep the states.
encoder_states = encoder_outputs_and_states[1:]
# Define a decoder sequence.
decoder_inputs = keras.layers.Input(shape=(None, self.num_decoder_features), name='decoder_inputs')
decoder_inputs_id = keras.layers.Input(shape=(None,), name='id_inputs')
decoder_inputs_id_embd = Embedding(input_dim=10, output_dim=2, name='id_embedding')(decoder_inputs_id)
#decoder_inputs_time = keras.layers.Input(shape=(None,), name='time_inputs')
#decoder_inputs_time_embd = Embedding(input_dim=37, output_dim=2, name='time_embedding')(decoder_inputs_time)
decoder_concat = concatenate([decoder_inputs, decoder_inputs_id_embd], axis=-1)
decoder_cells = []
for hidden_neurons in self.layers:
decoder_cells.append(keras.layers.GRUCell(hidden_neurons,
kernel_regularizer = self.regulariser,
recurrent_regularizer = self.regulariser,
bias_regularizer = self.regulariser))
decoder = keras.layers.RNN(decoder_cells, return_sequences=True, return_state=True)
decoder_outputs_and_states = decoder(decoder_concat, initial_state=encoder_states)
decoder_outputs = decoder_outputs_and_states[0]
#decoder_dense1 = keras.layers.Dense(units=32,
# activation='relu',
# kernel_regularizer = self.regulariser,
# bias_regularizer = self.regulariser, name='dense_relu')
output_dense = keras.layers.Dense(self.num_output_features,
activation='sigmoid',
kernel_regularizer = self.regulariser,
bias_regularizer = self.regulariser, name='output_sig')
#densen1=decoder_dense1(decoder_outputs)
decoder_outputs = output_dense(decoder_outputs)
# Create a model using the functional API provided by Keras.
self.model = keras.models.Model(inputs=[encoder_inputs, decoder_inputs, decoder_inputs_id], outputs=decoder_outputs)
self.model.compile(optimizer = self.optimiser, loss=self.loss)
print(self.model.summary())
def sample_batch(self, data_inputs, ground_truth, ruitu_inputs, batch_size, certain_id=None, certain_feature=None):
max_i, _, max_j, _ = data_inputs.shape # Example: (1148, 37, 10, 9)-(sample_ind, timestep, sta_id, features)
if certain_id == None and certain_feature == None:
            id_ = np.random.randint(max_j, size=batch_size)
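            # Assumed continuation (the original is truncated here), mirroring
            # Enc_Dec.sample_batch plus the id batch needed by the embedding:
            i = np.random.randint(max_i, size=batch_size)
            batch_inputs = data_inputs[i, :, id_, :]
            batch_ouputs = ground_truth[i, :, id_, :]
            batch_ruitu = ruitu_inputs[i, :, id_, :]
            batch_ids = np.tile(np.expand_dims(id_, axis=1), (1, 37))
            return batch_inputs, batch_ruitu, batch_ouputs, batch_ids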
import pandas
import numpy as np
from cornellGrading import cornellQualtrics
import os
def genReadingAssignments(infile, outfile):
# generate reading assignments
# infile must be xlsx with two sheets (Readers & Canddiates)
# grab all input data
if isinstance(infile, str):
tmp = pandas.ExcelFile(infile, engine="openpyxl")
readers = tmp.parse("Readers")
candidates = tmp.parse("Candidates")
tmp.close()
readers = readers["Reader Names"].values
candidates = candidates["Candidate Names"].values
else:
readers = infile[0]
candidates = infile[1]
# Each person needs to be read by 2 readers
nperreader = int(np.round(len(candidates) * 2 / len(readers)))
# shuffle candidates and split by readers
clist = np.hstack((candidates.copy(), candidates.copy()))
np.random.shuffle(clist)
out = {}
for reader in readers:
tmp = clist[:nperreader]
while np.unique(tmp).size != tmp.size:
np.random.shuffle(clist)
tmp = clist[:nperreader]
out[reader] = tmp
clist = clist[nperreader:]
# check for unassigned
if len(clist) > 0:
for c in clist:
r = np.random.choice(readers, size=1)[0]
while c in out[r]:
r = np.random.choice(readers, size=1)[0]
out[r] = np.hstack((out[r], c))
# final consistency check
asslist = []
for key, val in out.items():
assert np.unique(val).size == val.size, "{} has non-unique list.".format(key)
        asslist = np.hstack((asslist, val))
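    # Assumed continuation (the original is truncated here): every candidate
    # should end up assigned to exactly two readers.
    vals, counts = np.unique(asslist, return_counts=True)
    assert np.all(counts == 2), "Some candidate is not read exactly twice."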
import tensorflow as tf
from tensorflow.keras.applications.resnet50 import ResNet50
from tensorflow.keras.layers import GlobalAveragePooling2D, Dense
from tensorflow.keras.models import Model
from tensorflow.keras.callbacks import Callback, History
import tensorflow.keras.backend as K
from keras.objectives import mean_squared_error
from PIL import Image
import numpy as np
import pickle, glob, random, os, zipfile
from tensorflow.contrib.tpu.python.tpu import keras_support
def enumerate_layers():
# 確認用。サマリーとレイヤー名とindexの対応を調べる
resnet = ResNet50(include_top=False, weights="imagenet", input_shape=(224, 224, 3))
resnet.summary()
for i, layer in enumerate(resnet.layers):
print(i, layer.name)
def create_resnet():
# 転移学習用モデル
resnet = ResNet50(include_top=False, weights="imagenet", input_shape=(224, 224, 3))
for i in range(82):
# res4a_branch2a(82)から訓練させる
resnet.layers[i].trainable=False
x = GlobalAveragePooling2D()(resnet.output)
# ランドマーク9×2点
x = Dense(18, activation="sigmoid")(x)
model = Model(resnet.inputs, x)
return model
class CatGenerator:
def __init__(self):
with open("cats-dataset/cat_annotation.dat", "rb") as fp:
self.annotation_data = pickle.load(fp)
def flow_from_directory(self, batch_size, train=True, shuffle=True, use_data_augmentation=True):
source_dir = "cats-dataset/train" if train else "cats-dataset/test"
images = glob.glob(source_dir+"/*.jpg")
X_cache, y_cache = [], []
while True:
if shuffle:
np.random.shuffle(images)
for img_path in images:
with Image.open(img_path) as img:
width, height = img.size
img_array = np.asarray(img.resize((224, 224), Image.BILINEAR))
basename = os.path.basename(img_path)
data = self.annotation_data[basename]
# アノテーションを0~1に変換
annotation = np.zeros((9,2), dtype=np.float32)
annotation[:, 0] = data[2][:, 0] / width
annotation[:, 1] = data[2][:, 1] / height
annotation = np.clip(annotation, 0.0, 1.0)
if train and use_data_augmentation:
# 水平反転
if random.random() >= 0.5:
img_array = img_array[:, ::-1, :]
annotation[:, 0] = 1 - annotation[:, 0]
# 左目と右目の反転
annotation[0, :], annotation[1, :] = annotation[1, :], annotation[0, :].copy()
# 左耳と右耳の反転
annotation[3:6, :], annotation[6:9, :] = annotation[6:9, :], annotation[3:6, :].copy()
# PCA Color Augmentation
img_array = self.pca_color_augmentation(img_array)
X_cache.append(img_array)
y_cache.append(np.ravel(annotation))
if len(X_cache) == batch_size:
X_batch = np.asarray(X_cache, dtype=np.float32) / 255.0
y_batch = np.asarray(y_cache, dtype=np.float32)
X_cache, y_cache = [], []
yield X_batch, y_batch
def pca_color_augmentation(self, image_array_input):
assert image_array_input.ndim == 3 and image_array_input.shape[2] == 3
assert image_array_input.dtype == np.uint8
img = image_array_input.reshape(-1, 3).astype(np.float32)
img = (img - np.mean(img, axis=0)) / np.std(img, axis=0)
cov = np.cov(img, rowvar=False)
lambd_eigen_value, p_eigen_vector = np.linalg.eig(cov)
rand = np.random.randn(3) * 0.1
delta = np.dot(p_eigen_vector, rand*lambd_eigen_value)
delta = (delta * 255.0).astype(np.int32)[np.newaxis, np.newaxis, :]
        img_out = np.clip(image_array_input + delta, 0, 255)
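        # Assumed continuation (the original is truncated here): cast back to
        # uint8 so the augmented image has the same dtype as the input.
        return img_out.astype(np.uint8)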
#!/usr/bin/env python
# coding: utf-8
# # Numpy
# NumPy is a library for the Python programming language, adding support for large, multi-dimensional arrays and matrices, along with a large collection of high-level mathematical functions to operate on these arrays.
# In[ ]:
import numpy as np
# In[ ]:
arr = np.array([1,2])
# In[ ]:
print(arr)
print(type(arr))
print(arr.shape)
print(arr.size)
# In[ ]:
arr2 = np.array([[1,2,3],[4,5,6]])
# In[ ]:
print(arr2)
print(type(arr2))
print(arr2.shape)
print(arr2.size)
# In[ ]:
arr2[:,0:2]
# In[ ]:
arr2[0,:]
# In[ ]:
arr2
# ### Built in function
# In[ ]:
arr = np.zeros((4,3))
arr
# In[ ]:
arr = np.ones((4,4))
arr
# In[ ]:
arr = np.random.random((4,4))
arr
# In[ ]:
arr = np.full((4,4),6)
arr
# ### Reshaping an array
# In[ ]:
arr.reshape(2,2,4)
# ### Flatten an array
# In[ ]:
arr = np.random.random((2,4))
arr
# In[ ]:
arr.flatten()
# # Operators
# ### Unary Operator
# In[ ]:
arr = np.array([[1,2,3],
[4,5,6]])
print(arr.min()) # finding min
print(arr.max()) # finding max
print(arr.sum()) # finding sum
# ### Binary operators
# In[ ]:
arr1 = np.array([1,2])
arr2 = np.array([3,4])
print(arr1 + arr2) #sum of two array elementwise
print(arr1 * arr2) #multiply of two array elementwise
print(arr1.dot(arr2)) #sum of two array elementwise
# ## Matrix Multiplication
# In[ ]:
m1 = np.array([[1,2],[3,4]])
m2 = np.array([[2,4],[4,4]])
np.matmul(m1,m2)
# ## Sorting array
# In[ ]:
arr = np.random.random((4,3))
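# In[ ]:
# Assumed continuation of the truncated sorting demo:
np.sort(arr, axis=0)   # returns a copy with each column sorted
# In[ ]:
np.argsort(arr, axis=1)  # indices that would sort each row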
import numpy as np
import matplotlib
# matplotlib.use("Agg")
import matplotlib.pyplot as plt
import os
# customized
from scipy.optimize import minimize, basinhopping
from scipy.signal import find_peaks
from Bayesian import FitParameters, Priors, Likelihoods, Posteriors
from Fitter import FitModes, PTSampler, ESSampler, LSSampler
def echelle(x, y, period, fmin=None, fmax=None, echelletype="single", offset=0.0):
'''
Generate a z-map for echelle plotting.
Input:
x: array-like[N,]
y: array-like[N,]
period: the large separation,
fmin: the lower boundary
fmax: the upper boundary
echelletype: single/replicated
offset: the horizontal shift
Output:
x, y:
two 1-d arrays.
z:
a 2-d array.
Exemplary call:
import matplotlib.pyplot as plt
fig = plt.figure(figsize=(6,8))
ax1 = fig.add_subplot(111)
echx, echy, echz = echelle(tfreq,tpowers_o,dnu,numax-9.0*dnu,numax+9.0*dnu,echelletype="single",offset=offset)
levels = np.linspace(np.min(echz),np.max(echz),500)
ax1.contourf(echx,echy,echz,cmap="gray_r",levels=levels)
ax1.axis([np.min(echx),np.max(echx),np.min(echy),np.max(echy)])
if offset > 0.0:
ax1.set_xlabel("(Frequency - "+str("{0:.2f}").format(offset)+ ") mod "+str("{0:.2f}").format(dnu) + " ($\mu$Hz)")
if offset < 0.0:
ax1.set_xlabel("(Frequency + "+str("{0:.2f}").format(np.abs(offset))+ ") mod "+str("{0:.2f}").format(dnu) + " ($\mu$Hz)")
if offset == 0.0:
ax1.set_xlabel("Frequency mod "+str("{0:.2f}").format(dnu) + " ($\mu$Hz)")
plt.savefig("echelle.png")
'''
if not echelletype in ["single", "replicated"]:
raise ValueError("echelletype is on of 'single', 'replicated'.")
if len(x) != len(y):
raise ValueError("x and y must have equal size.")
if fmin is None: fmin=0.
if fmax is None: fmax=np.nanmax(x)
fmin = fmin - offset
fmax = fmax - offset
x = x - offset
if fmin <= 0.0:
fmin = 0.0
else:
fmin = fmin - (fmin % period)
# first interpolate
samplinginterval = np.median(x[1:-1] - x[0:-2]) * 0.1
xp = np.arange(fmin,fmax+period,samplinginterval)
yp = np.interp(xp, x, y)
n_stack = int((fmax-fmin)/period)
n_element = int(period/samplinginterval)
#print(n_stack,n_element,len())
morerow = 2
arr = np.arange(1,n_stack) * period # + period/2.0
arr2 = np.array([arr,arr])
yn = np.reshape(arr2,len(arr)*2,order="F")
yn = np.insert(yn,0,0.0)
yn = np.append(yn,n_stack*period) + fmin #+ offset
if echelletype == "single":
xn = np.arange(1,n_element+1)/n_element * period
z = np.zeros([n_stack*morerow,n_element])
for i in range(n_stack):
for j in range(i*morerow,(i+1)*morerow):
z[j,:] = yp[n_element*(i):n_element*(i+1)]
if echelletype == "replicated":
xn = np.arange(1,2*n_element+1)/n_element * period
z = np.zeros([n_stack*morerow,2*n_element])
for i in range(n_stack):
for j in range(i*morerow,(i+1)*morerow):
z[j,:] = np.concatenate([yp[n_element*(i):n_element*(i+1)],yp[n_element*(i+1):n_element*(i+2)]])
return xn, yn, z
class SolarlikePeakbagging:
"""docstring for SolarlikePeakbagging"""
def __init__(self, starname, outputdir, fnyq, numax):
"""
        Set up paths and the approximate global seismic parameters: fnyq is the
        Nyquist frequency (muHz) and numax the initial guess of nu_max (muHz).
"""
# super(SolarlikePeakbagging, self).__init__()
self._sep = "\\" if os.name=="nt" else "/"
self._starname = starname
self._outputdir = outputdir # "with a / in the end"
assert outputdir.endswith(self._sep), "outputdir should end with "+self._sep
self._fnyq = fnyq # in microHz (muHz)
# numax and dnu are only approximations
self._numax0 = numax # in microHz (muHz)
self._dnu0 = (self._numax0/3050)**0.77 * 135.1 # Stello+2009
# nu_max and delta_nu are accruate values
return
def parse_power_spectrum(self, freq, power, trimUpperLimitInDnu=None,
trimLowerLimitInDnu=None, ifSmooth=False):
"""
Pass the power spectrum in.
Input:
freq: np.array
frequency in muHz.
power: np.array
the background divided power spectrum (so now is s/b instead).
        Optional input:
            trimUpperLimitInDnu / trimLowerLimitInDnu: float
                trim the spectrum around numax, in units of dnu.
            ifSmooth: bool
                if True, also compute a smoothed copy (stored as self.powers).
"""
assert len(freq) == len(power), "len(freq) != len(power)"
idx = np.array(np.zeros(len(freq))+1, dtype=bool)
freq = np.array(freq)
power = np.array(power)
if not trimUpperLimitInDnu is None:
idx = (idx) & (freq<=self._numax0+trimUpperLimitInDnu*self._dnu0)
if not trimLowerLimitInDnu is None:
idx = (idx) & (freq>=self._numax0-trimLowerLimitInDnu*self._dnu0)
self.freq = freq[idx]
self.power = power[idx]
        if ifSmooth: self._smooth_power()  # sets self.powers
return
def _trim_power_spectrum(self, freq, power, powers=None, trimUpperLimitInDnu=None,
trimLowerLimitInDnu=None):
"""
Trim the power spectrum.
Input:
freq: np.array
frequency in muHz.
power: np.array
the background divided power spectrum (so now is s/b instead).
        Optional input:
            trimUpperLimitInDnu / trimLowerLimitInDnu: float
                trim the spectrum around numax, in units of dnu.
"""
idx = np.array(np.zeros(len(freq))+1, dtype=bool)
freq = np.array(freq)
power = np.array(power)
if not trimUpperLimitInDnu is None:
idx = (idx) & (freq<=self._numax0+trimUpperLimitInDnu*self._dnu0)
if not trimLowerLimitInDnu is None:
idx = (idx) & (freq>=self._numax0-trimLowerLimitInDnu*self._dnu0)
if powers is None:
return freq[idx], power[idx]
else:
return freq[idx], power[idx], powers[idx]
def _smooth_power(self, period=None):
if period is None: period = self._dnu0/15.0 # microHz
self.powers = self._smooth_wrapper(self.freq, self.power, period, "bartlett")
return
def _smooth_wrapper(self, x, y, period, windowtype, samplinginterval=None):
if samplinginterval is None: samplinginterval = np.median(x[1:-1] - x[0:-2])
if not windowtype in ["flat", "hanning", "hamming", "bartlett", "blackman"]:
raise ValueError("Window is one of 'flat', 'hanning', 'hamming', 'bartlett', 'blackman'")
xp = np.arange(np.min(x), np.max(x), samplinginterval)
yp = np.interp(xp, x, y)
window_len = int(period/samplinginterval)
if window_len % 2 == 0:
window_len = window_len + 1
if windowtype == "flat":
w = np.ones(window_len,"d")
else:
w = eval("np."+windowtype+"(window_len)")
ys = np.convolve(w/w.sum(),yp,mode="same")
yf = np.interp(x, xp, ys)
return yf
def guess_ppara(self, fixDnu=None):
"""
        Guess the p-mode pattern parameters (dnu02, dnu, eps) by matching a
        comb template against the smoothed power spectrum.
"""
# smooth the power spectrum
self._smooth_power()
# slice the power spectrum
freq, power, powers = self._trim_power_spectrum(self.freq, self.power,
powers=self.powers, trimUpperLimitInDnu=3., trimLowerLimitInDnu=3.)
def ppara_model(ppara):
# initialize
x = freq
ymodel = np.zeros(len(freq))
if fixDnu is None:
dnu02, dnu, eps = ppara
else:
dnu02, eps = ppara
dnu = fixDnu
_xmin, _xmax = np.min(x), np.max(x)
# print(_xmin, _xmax, eps)
n_p = np.arange(int(_xmin/dnu-eps-1), int(_xmax/dnu-eps+1), 1)
nu_l0 = dnu*(n_p+eps)
nu_l2 = dnu*(n_p+eps)-dnu02
nu_l1 = dnu*(n_p+eps)+0.5*dnu
# l=0 template
for inu in nu_l0:
lw, center, maxima = 0.04*dnu, inu, 1.0
idx = (x>inu-lw) & (x<inu+lw)
ymodel[idx] = -(1.0/lw**2.0)*(x[idx] - center)**2.0 + maxima
# l=2 template
for inu in nu_l2:
lw, center, maxima = 0.03*dnu, inu, 0.6
idx = (x>inu-lw) & (x<inu+lw)
ymodel[idx] = -(1.0/lw**2.0)*(x[idx] - center)**2.0 + maxima
# l=1 template
for inu in nu_l1:
lw, center, maxima = 0.03*dnu, inu, 1.0
idx = (x>inu-lw) & (x<inu+lw)
ymodel[idx] = -(1.0/lw**2.0)*(x[idx] - center)**2.0 + maxima
ymodel[ymodel<0] = 0.
return ymodel
def corr_ppara(ppara):
ymodel = ppara_model(ppara)
y = (powers-1)/np.max(powers-1)
return -np.log(np.sum(ymodel*y)/np.sum(ymodel))*10.0
# set free parameters
if fixDnu is None:
init = [self._dnu0/10., self._dnu0, 0.5]
bounds = [[self._dnu0/20., self._dnu0/8.],
[self._dnu0*0.8, self._dnu0*1.2],
[0.0001, 1.-0.0001]]
names = ["dnu02", "dnu", "eps"]
else:
init = [self._dnu0/10., 0.5]
bounds = [[self._dnu0/20., self._dnu0/8.],
[0.0001, 1.-0.0001]]
names = ["dnu02", "eps"]
minimizer_kwargs = {"bounds":bounds}
res = basinhopping(corr_ppara, init, minimizer_kwargs=minimizer_kwargs)
ppara = res.x
if fixDnu is None:
self._dnu02, self.dnu, self.eps = ppara
else:
self._dnu02, self.eps = ppara
self.dnu = fixDnu
self.print_ppara()
self.print_ppara_tofile()
# plot - power spectrum
fig = plt.figure(figsize=(8,5))
ax1 = fig.add_subplot(111)
ymodel = ppara_model(ppara)
yobs = (powers-1)/np.max(powers-1)
ax1.plot(freq, ymodel, color="green")
ax1.plot(freq, yobs, color="black")
plt.savefig(self._outputdir+"ppara.png")
return
def set_ppara_fromfile(self, inputfile=None):
"""
        Read (dnu02, dnu, eps) from a comma-separated text file.
"""
if inputfile is None: inputfile = self._outputdir + "ppara.txt"
self._dnu02, self.dnu, self.eps = np.loadtxt(inputfile, delimiter=",")
return
def set_ppara(self, dnu02, dnu, eps):
"""
        Set (dnu02, dnu, eps) directly.
"""
self._dnu02, self.dnu, self.eps = dnu02, dnu, eps
return
def print_ppara(self):
"""
        Print (dnu02, dnu, eps).
"""
print("dnu02 = ", self._dnu02)
print("dnu = ", self.dnu)
print("eps = ", self.eps)
return
def print_ppara_tofile(self):
"""
        Write (dnu02, dnu, eps) to ppara.txt in the output directory.
"""
outputfile = self._outputdir + "ppara.txt"
print("Writing ppara to "+outputfile)
np.savetxt(outputfile, np.array([[self._dnu02, self.dnu, self.eps]]),
header="dnu02, dnu, eps", delimiter=",")
return
def guess_modeid(self, trimLowerLimitInDnu=9.0, trimUpperLimitInDnu=9.0,
height=2.0, prominence=1.5):
"""
An initial guess for all mode frequencies in the power spectrum.
After running this function, you should visually check the power
spectrum and see if the identified modes generated from the code
are correct (matched with your expectations).
Input:
Optional input:
trimLowerLimitInDnu: float, default: 9.0
the lower boundary of the power spectrum slice, in unit of dnu.
trimUpperLimitInDnu: float, default: 9.0
the upper boundary of the power spectrum slice, in unit of dnu.
height: float, default: 2.0
the minimum height for a peak to be recognised, in unit of power.
prominence: float, default: 1.5
the minimum prominence for a peak to be recognised, in unit of power.
Output:
Files containing necessary outputs.
1. table frequencyGuess.csv
Under development:
1. Improvement to mode identification - slide with spectrum and define probs.
"""
# smooth the power spectrum
self._smooth_power(period=self.dnu/50.)
# slice the power spectrum
freq, power, powers = self._trim_power_spectrum(self.freq, self.power,
powers=self.powers, trimUpperLimitInDnu=trimLowerLimitInDnu,
trimLowerLimitInDnu=trimUpperLimitInDnu)
dnu02, dnu, eps, numax = self._dnu02, self.dnu, self.eps, self._numax0
samplinginterval = np.median(freq[1:]-freq[:-1])
# assign l=0,1,2 region to the power spectrum
rfreq = freq/dnu % 1.0
lowc = [-dnu02/dnu/2.0, +0.10, -dnu02/dnu-0.05]
highc = [+0.10, 1.0-dnu02/dnu-0.05, -dnu02/dnu/2.0]
idx_l = []
for l in range(3):
dum1 = (rfreq>=eps+lowc[l]) & (rfreq<eps+highc[l])
dum2 = (rfreq>=eps+lowc[l]-1) & (rfreq<eps+highc[l]-1)
dum3 = (rfreq>=eps+lowc[l]+1) & (rfreq<eps+highc[l]+1)
idx_l.append((dum1|dum2|dum3))
# slice power spectrum into blocks
n_blocks = int(trimLowerLimitInDnu+trimUpperLimitInDnu)+1
# label_echx, label_echy, label_text = [[] for i in range(3)]
rfreq_init = (numax/dnu)%1.0
if rfreq_init-eps < 0.0: freq_init = numax-dnu*trimLowerLimitInDnu-dnu+np.abs(rfreq_init-eps)*dnu-dnu02-0.05*dnu
if rfreq_init-eps >=0.0: freq_init = numax-dnu*trimLowerLimitInDnu-np.abs(rfreq_init-eps)*dnu-dnu02-0.05*dnu
mode_l, mode_freq = [], []
# find peaks in each dnu range
for iblock in range(n_blocks):
freq_low, freq_high = freq_init+iblock*dnu, freq_init+(iblock+1)*dnu
idx_norder = np.all(np.array([freq>=freq_low,freq<freq_high]),axis=0)
# find peaks in each l range
tidx_l, tmode_freq, tmode_l = [], [], []
for l in range(3):
tidx_l.append(np.all(np.array([freq>=freq_low,freq<freq_high,idx_l[l]]),axis=0))
if len(freq[tidx_l[l]])==0: continue
tfreq, tpower, tpowers = freq[tidx_l[l]], power[tidx_l[l]], powers[tidx_l[l]]
meanlevel = np.median(tpowers)
# find the highest peak in this range as a guess for the radial mode
idx_peaks, properties = find_peaks(tpowers, height=(height,None),
distance=int(dnu02/samplinginterval/5.0), prominence=(prominence,None))
Npeaks = len(idx_peaks)
if Npeaks != 0:
if l != 1:
idx_maxpeak = idx_peaks[properties["peak_heights"] == properties["peak_heights"].max()]
tmode_freq.append(tfreq[idx_maxpeak[0]])
tmode_l.append(l)
else:
for ipeak in range(Npeaks):
tmode_freq.append(tfreq[idx_peaks[ipeak]])
tmode_l.append(l)
tmode_freq, tmode_l = np.array(tmode_freq), np.array(tmode_l)
mode_freq.append(tmode_freq)
mode_l.append(tmode_l)
# save a table
# but first let's associate each mode with a group number
mode_freq, mode_l = np.concatenate(mode_freq), np.concatenate(mode_l)
mode_freq_group, mode_l_group, mode_group = [], [], np.array([])
idx = np.argsort(mode_freq)
mode_freq, mode_l = mode_freq[idx], mode_l[idx]
dist = mode_freq[1:] - mode_freq[:-1]
        group_idx = np.where(dist>=0.2*dnu)[0] + 1  # each element marks where a new group starts
Ngroups = len(group_idx) + 1
group_idx = np.insert(group_idx,0,0)
group_idx = np.append(group_idx,len(mode_freq))
# just sort a bit
for igroup in range(Ngroups):
tmode_freq = mode_freq[group_idx[igroup]:group_idx[igroup+1]]
tmode_l = mode_l[group_idx[igroup]:group_idx[igroup+1]]
mode_freq_group.append(tmode_freq)
mode_l_group.append(tmode_l)
elements = group_idx[igroup+1] - group_idx[igroup]
for j in range(elements):
mode_group = np.append(mode_group,igroup)
mode_group = np.array(mode_group, dtype=int)
mode_freq = | np.concatenate(mode_freq_group) | numpy.concatenate |
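# Hedged usage sketch for the mode-identification step above (object and
# constructor names are assumptions, not taken from this file):
#   star = SolarlikeStar(inputdir, outputdir)   # hypothetical constructor
#   star.set_ppara(dnu02=9.0, dnu=103.0, eps=1.2)
#   star.guess_modeid(height=2.0, prominence=1.5)
#   # then visually check frequencyGuess.csv against the power spectrum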
import numpy as np
import os
from sklearn.preprocessing import MinMaxScaler
from sklearn.cluster import KMeans
from sklearn.metrics import accuracy_score
import matplotlib.pyplot as plt
from scipy import stats
from scipy.spatial import distance
import math
import pickle
from sklearn.neighbors import KNeighborsClassifier
import joblib  # sklearn.externals.joblib was removed in scikit-learn 0.23
from sklearn.svm import SVC
from collections import Counter
import time
def get_data(path):
assert isinstance(path, str)
    assert 'pickle' in path or 'pkl' in path
return pickle.load(open(path,'rb'))
def barcode_to_names(barcode_file):
assert isinstance(barcode_file, str)
barcode_dict = {}
with open(barcode_file,'r') as f:
data = f.readlines()
data = [x.split(',') for x in data]
for x in data:
barcode_dict[x[0]] = x[1].strip('\n')
return barcode_dict
def get_name_given_barcode(barcode, barcode_file):
assert isinstance(barcode_file, str)
assert isinstance(barcode, str)
barcode_dict = barcode_to_names(barcode_file)
return barcode_dict[barcode]
def compute_kmeans(data, k, max_iter=1000, random_state=42):
    assert isinstance(data, np.ndarray)
    assert isinstance(k, int)
    assert k > 0
    assert isinstance(max_iter, int)
    assert isinstance(random_state, int)
    kmeans = KMeans(n_clusters=k, max_iter=max_iter, random_state=random_state).fit(data)
    return kmeans
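# Minimal usage sketch (hypothetical feature matrix): the driver code further
# below picks k from the number of classes via a log2 heuristic.
#   n_classes = len(set(train_Y.tolist()))
#   k = math.ceil(math.log(n_classes, 2))
#   kmeans = compute_kmeans(train_X, k=k)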
def save_model(model, path, model_file_name):
assert isinstance(path, str)
assert isinstance(model_file_name, str)
joblib.dump(model, os.path.join(path, model_file_name))
print(f"Model saved at: {os.path.join(path, model_file_name)}")
def load_model(path):
assert isinstance(path, str)
return joblib.load(path)
def get_data_given_class(idx, X, Y):
assert isinstance(idx, int)
assert isinstance(X, np.ndarray)
assert isinstance(Y, np.ndarray)
indices = [i for i, x in enumerate(Y) if x == idx]
return X[indices]
def get_class_wise_data_dict(class_labels, X, Y):
assert isinstance(class_labels, list)
assert isinstance(X, np.ndarray)
assert isinstance(Y, np.ndarray)
data_dict = {}
for label in class_labels:
label = int(label)
data_dict[label] = get_data_given_class(label, X, Y)
return data_dict
def save_class_wise_stats(save_file, num_classes, gt, preds, barcodes_file, barcode_to_names_file):
    assert isinstance(save_file, str)
    assert isinstance(num_classes, int)
    assert isinstance(gt, np.ndarray)
    assert isinstance(preds, np.ndarray)
    assert isinstance(barcodes_file, str)
    assert isinstance(barcode_to_names_file, str)
    barcodes = get_data(barcodes_file)
    with open(os.path.join(save_file), 'w') as f:
        header = 'barcode\tclass_name\tnum_clusters'
        f.write(header + '\n')
        for i in range(num_classes):
            indices = np.where(gt == i)
            p = preds[indices]
            mode = stats.mode(p)[0][0]
            barcode = barcodes[i]
            name = get_name_given_barcode(barcode, barcode_to_names_file)
            num_clusters = len(np.unique(p))
            f.write(str(barcode) + '\t' + str(name) + '\t' + str(num_clusters) + '\n')
def save_cluster_wise_stats(save_file, num_clusters, gt, preds, barcode_file, barcode_to_name_file):
assert isinstance(save_file, str)
assert isinstance(num_clusters, int)
assert isinstance(gt, np.ndarray)
assert isinstance(preds, np.ndarray)
assert isinstance(barcode_file, str)
assert isinstance(barcode_to_name_file, str)
barcodes = get_data(barcode_file)
with open(save_file, 'w') as f:
header = 'cluster_id\tclass_mode(barcode)\tclass_mode(name)\tmode\ttotal\tcluster_purity\tnum_unique\tclass_count'
f.write(header + '\n')
cluster_dict = {}
        for i in range(num_clusters):
            indices = np.where(preds == i)
            cluster_dict[i] = gt[indices]
            unique_objects = np.unique(gt[indices])
            class_counters = Counter(gt[indices])
            top_3 = class_counters.most_common(3)
            top_3 = [(get_name_given_barcode(barcodes[x[0]], barcode_to_name_file)
                      ,x[1]) for x in top_3]
            mode = stats.mode(gt[indices])[0][0]  # most common true class in this cluster
            class_name = get_name_given_barcode(barcodes[mode], barcode_to_name_file)
            cluster_purity = class_counters[mode] / preds[indices].shape[0]
            f.write(str(i) + '\t' + str(int(barcodes[mode])) + '\t'+ str(class_name) +'\t'+
                    str(top_3[0][1]) +'\t' + str(len(gt[indices])) + '\t' +
                    str(round(cluster_purity, 2)) + '\t' + str(len(unique_objects)) + '\t' + str(top_3) + '\n')
def get_mean_vectors(class_wise_data_dict):
assert isinstance(class_wise_data_dict, dict)
mean_vectors = []
    for key, value in class_wise_data_dict.items():
mean_vector = np.mean(value, axis=0)
mean_vectors.append(mean_vector)
return np.array(mean_vectors)
def infer_using_mean_vector(feature_vector, mean_vectors):
assert isinstance(feature_vector, np.ndarray)
assert isinstance(mean_vectors, np.ndarray)
dists = []
for c in mean_vectors:
dst = distance.euclidean(feature_vector, c)
dists.append(dst)
return np.argmin(dists)
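# A hedged alternative sketch: the same nearest-class-mean rule, computed in
# one call with scipy's cdist instead of a Python loop over mean vectors.
def infer_using_mean_vector_vectorized(feature_vector, mean_vectors):
    assert isinstance(feature_vector, np.ndarray)
    assert isinstance(mean_vectors, np.ndarray)
    # pairwise euclidean distances between the sample and every class mean
    dists = distance.cdist(feature_vector.reshape(1, -1), mean_vectors)
    return int(np.argmin(dists))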
def get_all_categories_models(path, num_category):
assert isinstance(path, str)
assert isinstance(num_category, int)
assert num_category > 0
models = []
for i in range(num_category):
model_path = os.path.join(path, 'category' + str(i+1) +'_meanvect_model.pkl')
models.append(load_model(model_path))
return models
def get_barcode_labels(path):
assert isinstance(path, str)
num_labels = len(os.listdir(path))
category_labels = []
for i in range(num_labels):
labels_file_path = os.path.join(path, 'category_' + str(i+1) + '.txt')
with open(labels_file_path, 'r') as f:
labels = f.readlines()
labels = [x.strip('\n') for x in labels]
category_labels.append(labels)
return category_labels
def predict_single(data_vector, aisle_model, category_models, barcode_labels):
assert isinstance(data_vector, np.ndarray)
assert isinstance(category_models, list)
assert isinstance(barcode_labels, list)
aisle_pred = infer_using_mean_vector(data_vector, aisle_model)
    aisle_pred = 0  # NOTE: overrides the prediction above and forces category 0 (debug leftover?)
category_model = category_models[aisle_pred]
class_pred = infer_using_mean_vector(data_vector, category_model)
barcode_pred = barcode_labels[aisle_pred][class_pred]
return barcode_pred
def predict_end_to_end(X, aisle_model, category_models, barcode_labels):
assert isinstance(X, np.ndarray)
assert isinstance(category_models, list)
assert isinstance(barcode_labels, list)
preds = []
for i in range(X.shape[0]):
pred = predict_single(X[i], aisle_model, category_models, barcode_labels)
preds.append(pred)
return preds
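# Note: predict_single/predict_end_to_end implement a two-stage nearest-class-mean
# pipeline: an aisle-level model picks the coarse category first, then that
# category's model picks the class, which is mapped to a product barcode.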
def convert_labels_to_barcodes(Y, category, barcode_labels):
assert isinstance(Y, (np.ndarray, list))
assert isinstance(category, int)
assert category > 0
assert isinstance(barcode_labels, list)
labels = barcode_labels[category - 1]
barcodes = []
for y in Y:
barcodes.append(labels[y])
return barcodes
def evaluate(preds, gt):
assert isinstance(preds, list)
assert isinstance(gt, list)
assert len(preds) == len(gt)
total = len(gt)
correct = 0
for y, pred in zip(gt, preds):
if y == pred:
correct += 1
return correct, (correct / total)
category_names = ['Laundry','Biscuits','Cereals and Tea','Snacks and Kitchen Items','Hair Products', 'Beauty Products', 'Soaps','toothbrush_and_toothpaste']
correct = 0
total = 0
for i in range(len(category_names)):
category = i + 1
category_name = category_names[i]
print(f'{category}: {category_name}')
train_path = 'data/train_features'
valid_path = 'data/valid_features'
logs_root = 'logs/'
model_save_path = 'models'
barcode_file_path = 'data/barcodes.txt'
labels_file = 'data/labels.pkl'
ncm_model_path = 'ncm_models'
barcode_labels_path = 'Labels'
train_data_path = os.path.join(train_path, 'category' + str(category) + '_X.pickle')
train_labels_path = os.path.join(train_path, 'category' + str(category) + '_Y.pickle')
valid_data_path = os.path.join(valid_path, 'category' + str(category) + '_X.pickle')
valid_labels_path = os.path.join(valid_path, 'category' + str(category) + '_Y.pickle')
# train_data_path = 'data/features_8_aisle/train_X.pickle'
# train_labels_path = 'data/features_8_aisle/train_Y.pickle'
# valid_data_path = 'data/features_8_aisle/valid_X.pickle'
# valid_labels_path = 'data/features_8_aisle/valid_Y.pickle'
train_X = get_data(train_data_path)
train_Y = get_data(train_labels_path)
valid_X = get_data(valid_data_path)
valid_Y = get_data(valid_labels_path)
unique_labels = list(set(train_Y.tolist()))
print(f"No. of unique labels: {len(unique_labels)}")
barcode_to_name_dict = barcode_to_names(barcode_file_path)
barcodes = get_data(labels_file)
# kmeans = load_model(os.path.join(model_save_path, 'category' + str(category) + '_model.pkl'))
k = math.ceil(math.log(len(unique_labels), 2))
start = time.time()
    kmeans = compute_kmeans(train_X, k=k)
print(f'Training took {time.time()-start}s')
save_model(kmeans, model_save_path, 'category' + str(category) + '_model.pkl')
    continue  # NOTE: experiment toggle, skips the NCM evaluation below after saving the model
train_data_dict = get_class_wise_data_dict(unique_labels, train_X, train_Y)
valid_data_dict = get_class_wise_data_dict(unique_labels, valid_X, valid_Y)
aisle_model = load_model(os.path.join(ncm_model_path, '8_aisle_meanvect_model.pkl'))
category_models = get_all_categories_models(ncm_model_path, len(category_names))
barcode_labels = get_barcode_labels(barcode_labels_path)
train_barcode_Y = convert_labels_to_barcodes(train_Y, category,barcode_labels)
train_preds = predict_end_to_end(train_X, aisle_model, category_models, barcode_labels)
    correct, accuracy = evaluate(train_preds, train_barcode_Y)  # renamed to avoid shadowing sklearn's accuracy_score
    print(correct)
    print(accuracy)
    assert False  # debug stop: halts after the first category
continue
# evaluate_end_to_end(trainX, trainY, aisle_model, category_models, category, barcode_labels)
# predict_end_to_end(trainX, train_Y, aisle_model, category_models)
print(f'Training took {time.time() - start} ms')
# save_model(mean_vectors, 'ncm_models', 'category' + str(category) + '_meanvect_model.pkl')
# save_model(mean_vectors, 'ncm_models', '8_aisle_' + '_meanvect_model.pkl')
train_correct = 0
start = time.time()
for key, value in train_data_dict.items():
correct = 0
for v in value:
pred = infer_using_mean_vector(v, mean_vectors)
if pred == key:
correct += 1
train_correct += correct
print(f'Evaluation on train data took {(time.time() - start)}s')
print(f"Correct: {train_correct}, Total:{train_Y.shape[0]}")
print(f"Train Accuracy: {(train_correct/ train_Y.shape[0])*100}")
valid_correct = 0
start = time.time()
for key, value in valid_data_dict.items():
correct = 0
for v in value:
pred = infer_using_mean_vector(v, mean_vectors)
if pred == key:
correct += 1
valid_correct += correct
print(f'Evaluation on valid data took {(time.time() - start)}s')
print(f"Correct: {valid_correct}, Total:{valid_Y.shape[0]}")
print(f"Valid Accuracy: {(valid_correct / valid_Y.shape[0])*100}")
continue
# get predictions from clusters
train_preds = kmeans.predict(train_X)
cluster_centers = kmeans.cluster_centers_
# save_class_wise_stats(os.path.join(logs_root,'470_items_class_statistics.csv'),
# len(unique_labels), train_Y, train_preds, barcodes, barcode_to_name_dict)
# dists = []
# for c in cluster_centers:
# dst = distance.euclidean(mean_vector, c)
# dists.append(dst)
# cluster = np.argmin(dists)
# labels_dict[key] = cluster
train_correct = 0
for key, value in train_data_dict.items():
correct = 0
for v in value:
dists = []
for c in mean_vectors:
dst = distance.euclidean(v, c)
dists.append(dst)
cluster = | np.argmin(dists) | numpy.argmin |
# -*- coding: utf-8 -*-
"""
Created on Thu Mar 26 11:21:14 2015
@author: noore
"""
import numpy as np
import scipy
class LINALG(object):
@staticmethod
def svd(A):
        # scipy.linalg.svd returns U, s, Vh such that
        # A = U * diag(s) * Vh
        # however, MATLAB and Octave return U, S, V such that
        # V needs to be conjugate transposed when multiplied:
        # A = U * S * V.H
        # we would like to stick to the latter standard, so we return
        # the transpose of Vh (i.e. V itself, assuming it is real)
U, s, V = scipy.linalg.svd(A, full_matrices=True)
S = np.matrix(np.zeros(A.shape))
np.fill_diagonal(S, s)
U = np.matrix(U)
V = np.matrix(V)
return U, S, V.T
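    @staticmethod
    def _check_svd_convention(n=4, m=3, tol=1e-10):
        """Hedged sanity sketch (not part of the original API): with the
        convention above, A should be recovered as U * S * V.T."""
        A = np.matrix(np.random.rand(n, m))
        U, S, V = LINALG.svd(A)
        return np.allclose(A, U * S * V.T, atol=tol)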
@staticmethod
def _zero_pad_S(S, cids_orig, cids_joined):
"""
takes a stoichiometric matrix with a given list of IDs 'cids' and adds
0-rows so that the list of IDs will be 'cids_joined'
"""
if not set(cids_orig).issubset(cids_joined):
raise Exception('The full list is missing some IDs in "cids"')
full_S = np.zeros((len(cids_joined), S.shape[1]))
for i, cid in enumerate(cids_orig):
S_row = S[i, :]
full_S[cids_joined.index(cid), :] = S_row
return np.matrix(full_S)
@staticmethod
def _invert_project(A, eps=1e-10):
n, m = A.shape
U, S, V = LINALG.svd(A)
inv_A = V * | np.linalg.pinv(S) | numpy.linalg.pinv |
import numpy as np
from scipy.integrate import odeint
class MorrisLecar:
"""
Creates a MorrisLecar model.
"""
def __init__(self, C=20, VL=-60, VCa=120, VK=-84, gL=2, gCa=4, gK=8,
V1=-1.2, V2=18, V3=12, V4=17.4, phi=0.06):
"""
Initializes the model.
Args:
C (int, float): Capacitance of the membrane.
VL (int, float): Potential L.
VCa (int, float): Potential Ca.
VK (int, float): Potential K.
gL (int, float): Conductance L.
gCa (int, float): Conductance Ca.
gK (int, float): Conductance K.
V1 (int, float): Potential at which Mss converges.
V2 (int, float): Reciprocal of slope of Mss.
V3 (int, float): Potential at which Nss converges.
V4 (int, float): Reciprocal of slope of Nss.
phi (int, float): Time scale recovery.
"""
self.C = C
self.VL = VL
self.VCa = VCa
self.VK = VK
self.gL = gL
self.gCa = gCa
self.gK = gK
self.V1 = V1
self.V2 = V2
self.V3 = V3
self.V4 = V4
self.phi = phi
self.t = None
self.dt = None
self.tvec = None
self.V = None
self.N = None
def __repr__(self):
"""
Visualize model parameters when printing.
"""
return (f'MorrisLecar(C={self.C}, VL={self.VL}, VCa={self.VCa}, VK={self.VK}, '
f'gL={self.gL}, gCa={self.gCa}, gK={self.gK}, V1={self.V1}, V2={self.V2}, '
f'V3={self.V3}, V4={self.V4}, phi={self.phi})')
def _system_equations(self, X, t, current):
"""
Defines the equations of the dynamical system for integration.
"""
Mss = (1 + | np.tanh((X[0] - self.V1) / self.V2) | numpy.tanh |
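# For reference, a hedged sketch of the full Morris-Lecar right-hand side that
# the method above is building (the standard formulation; an illustration, not
# necessarily this file's exact continuation):
#   Mss = (1 + tanh((V - V1) / V2)) / 2
#   Nss = (1 + tanh((V - V3) / V4)) / 2
#   tau_N = 1 / cosh((V - V3) / (2 * V4))
#   dV/dt = (I - gL*(V - VL) - gCa*Mss*(V - VCa) - gK*N*(V - VK)) / C
#   dN/dt = phi * (Nss - N) / tau_N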
"""
main script for running the code to get sc-gmc nearest neighbors
2021-01-04
"""
import glob
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import aplpy as ap
from astropy.io import fits, ascii
from astropy.coordinates import SkyCoord, search_around_sky
from astropy.table import Table
from astropy.wcs import WCS
import astropy.units as u
import sys
sys.path.append('/cherokee1/turner/phangs/cf/utils')
from utils import *
import matplotlib
# non-interactive plots
matplotlib.use('agg')
# set path of the data dir
data_dir = '/cherokee1/turner/phangs/cf/data/'
# read in list of galaxies
master_galaxy_list = ascii.read('master_galaxy.list')
galaxy_list = ascii.read('galaxy.list')
gal_id = galaxy_list['id']
gal_alt_id = galaxy_list['alt_id']
gal_dist = galaxy_list['dist']
mkhist = True
# loop through all the galaxies in the list
for i in range(len(galaxy_list)):
# galaxy props
gal_name = gal_id[i]
dist = gal_dist[i]
print('')
print(gal_name)
# read in star cluster catalog [class 1 and 2 only for now]
sc_cat = fits.open(data_dir + '%s/hst/%s_phangshst_base_catalog.class12.fits'%(gal_name, gal_name))[1].data
# grab star cluster positions
	sc_x, sc_y = sc_cat['PHANGS_X'], sc_cat['PHANGS_Y']
sc_ra, sc_dec = sc_cat['PHANGS_RA'], sc_cat['PHANGS_DEC']
# grab star cluster ages
sc_age = sc_cat['PHANGS_AGE_MINCHISQ']
# read in GMC catalog
gmc_cat = fits.open(data_dir + '%s/alma/%s_12m+7m+tp_co21_nativeres_nativenoise_props.fits'%(gal_name, gal_name))[1].data
# grab center positions of the GMCs
gmc_ra, gmc_dec = gmc_cat['XCTR_DEG'], gmc_cat['YCTR_DEG']
# read in the overlap mask
mask_hdu = fits.open(data_dir + '%s/%s_hst_alma_overlap_mask.fits'%(gal_name, gal_name))
mask = mask_hdu[0].data
mask_header = mask_hdu[0].header
# convert star cluster x,y postions to integer pixels
sc_x_int = np.array([int(np.round(x)) for x in sc_x ])
sc_y_int = np.array([int(np.round(y)) for y in sc_y ])
# check if the clusters are within the overlap mask
sc_in_mask = np.array([True if mask[y,x] == 1 else False for y,x in zip(sc_y_int, sc_x_int)])
wfalse = np.where(sc_in_mask == False)[0]
# drop clusters outside of the mask
sc_ra = np.delete(sc_ra, wfalse)
sc_dec = np.delete(sc_dec, wfalse)
sc_x = | np.delete(sc_x, wfalse) | numpy.delete |
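	# Hedged sketch of the nearest-neighbour step this script builds toward:
	# astropy can pair every star cluster with its closest GMC on the sky, and
	# the angular separation converts to a projected distance via dist (Mpc).
	#   sc_coords = SkyCoord(ra=sc_ra * u.deg, dec=sc_dec * u.deg)
	#   gmc_coords = SkyCoord(ra=gmc_ra * u.deg, dec=gmc_dec * u.deg)
	#   idx, sep2d, _ = sc_coords.match_to_catalog_sky(gmc_coords)
	#   sep_pc = (sep2d.to(u.rad).value * dist * u.Mpc).to(u.pc)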
import pandas as pd
import numpy as np
import sys
import matplotlib.pyplot as plt
import seaborn as sns
def plot_conservation(out_path):
"""
Plotting the fraction of conserved binding sites for Brn2, Ebf2 and
Onecut2, based on multiGPS and edgeR results from Aydin et al., 2019
    (Nature Neuroscience: PMID 31086315)
Parameters:
out_path: Filepath prefix for output bar plots (Manuscript Fig. 6A)
Returns: None
"""
# Defining the dataFrames using multiGPS and edgeR results \
# from Aydin et al., (2019) Nat. Neuroscience.
# Brn2
brn2 = pd.DataFrame([['shared', 6776], ['iA>iN', 2432], ['iN>iA', 1242]],
columns=['category', '#'])
brn2['#'] = brn2['#']/np.sum(brn2['#'])
# Ebf2
ebf2 = pd.DataFrame([['shared', 23331], ['iA>iN', 10687], ['iN>iA', 7921]],
columns=['category', '#'])
ebf2['#'] = ebf2['#']/np.sum(ebf2['#'])
# Onecut2
onecut2 = pd.DataFrame([['shared', 45416], ['iA>iN', 4622], ['iN>iA', 2965]],
columns=['category', '#'])
onecut2['#'] = onecut2['#']/np.sum(onecut2['#'])
# plot bar plots
sns.set_style('ticks')
fig, ax = plt.subplots()
plt.subplot(1, 3, 1)
plt.bar([0, 1, 2], onecut2['#'], width=0.5, color='#687466')
plt.yticks(fontsize=12)
plt.ylim(0, 1)
#
plt.subplot(1, 3, 2)
plt.bar([0, 1, 2], brn2['#'], width=0.5, color='#cd8d7b')
plt.yticks(fontsize=12)
plt.ylim(0, 1)
#
plt.subplot(1, 3, 3)
plt.bar([0, 1, 2], ebf2['#'], width=0.5, color='#fbc490')
plt.yticks(fontsize=12)
plt.ylim(0, 1)
#
sns.despine()
fig.tight_layout()
fig.set_size_inches(6, 4)
plt.savefig(out_path + 'Fig_6a.pdf')
def plot_embeddings(data_path, outpath):
"""
Plot 2-D latent embeddings for Brn2, Ebf2 and Onecut2.
Parameters:
data_path: Input file paths (N rows * 2 columns) storing the 2-D co-ordinates
for each binding site in the latent space. The embeddings must be derived
using latent_embeddings/get_latent_embeddings.py
Note: This function assumes that the files are saved with an \
".embedding.txt" extension. Provide only the prefix as an argument.
For example, if the 2-D embedding is stored in "~/Prefix/Oct4.embedding.txt",
call function as: plot_embeddings("~/Prefix/Oct4")
outpath: Output file path.
Returns: None
"""
transcription_factors = ['Brn2', 'Ebf2', 'Onecut2']
for tf in transcription_factors:
dat = | np.loadtxt(data_path + tf + '.embedding.txt') | numpy.loadtxt |
"""
Set of tools to postprocess the DEMs: statistics calculations, coregistration, filtering etc
"""
from __future__ import annotations
import concurrent.futures
import multiprocessing as mp
import os
import threading
from glob import glob
from typing import Callable
import cv2
import geoutils as gu
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import xdem
from skimage.morphology import disk
from tqdm import tqdm
# Turn off imshow's interpolation to avoid gaps spread in plots
plt.rcParams["image.interpolation"] = "none"
def calculate_stats(ddem, roi_mask, stable_mask):
"""
Returns main statistics of ddem:
fraction of coverage over glacier, number of obs, NMAD and median dh over stable terrain
Warning: ddems may contain NaNs on top of nodata values.
"""
# Get array of valid data (no nodata, no nan) and mask
data, mask = gu.spatial_tools.get_array_and_mask(ddem)
# Make sure input masks are 2D
roi_mask = roi_mask.squeeze()
stable_mask = stable_mask.squeeze()
# Calculate coverage over glaciers
nobs = np.sum(~mask[roi_mask])
ntot = np.sum(roi_mask)
roi_coverage = nobs / ntot
# Calculate statistics in stable terrain
nstable = np.sum(~mask[stable_mask])
nmad_stable = xdem.spatialstats.nmad(data[stable_mask])
med_stable = np.nanmedian(data[stable_mask])
return roi_coverage, nstable, med_stable, nmad_stable
def nmad_filter(
dh_array: np.ndarray, inlier_mask: np.ndarray, nmad_factor: float = 5, max_iter: int = 20, verbose: bool = False
) -> np.ndarray:
"""
Iteratively remove pixels where the elevation difference (dh_array) in stable terrain (inlier_mask) is larger \
than nmad_factor * NMAD.
Iterations will stop either when the NMAD change is less than 0.1, or after max_iter iterations.
:params dh_array: 2D array of elevation difference.
:params inlier_mask: 2D boolean array of areas to include in the analysis (inliers=True).
:param nmad_factor: The factor by which the stable dh NMAD has to be multiplied to calculate the outlier threshold
:param max_iter: Maximum number of iterations (normally not reached, just for safety)
:param verbose: set to True to print some statistics to screen.
:returns: 2D boolean array with updated inliers set to True
"""
# Mask unstable terrain
dh_stable = dh_array.copy()
dh_stable.mask[~inlier_mask] = True
nmad_before = xdem.spatialstats.nmad(dh_stable)
if verbose:
print(f"NMAD before: {nmad_before:.2f}")
print("Iteratively remove large outliers")
# Iteratively remove large outliers
for i in range(max_iter):
outlier_threshold = nmad_factor * nmad_before
dh_stable.mask[np.abs(dh_stable) > outlier_threshold] = True
nmad_after = xdem.spatialstats.nmad(dh_stable)
if verbose:
print(f"Remove pixels where abs(value) > {outlier_threshold:.2f} -> New NMAD: {nmad_after:.2f}")
        # If the NMAD change is lower than a set threshold, stop iterating; otherwise stop after max_iter
if nmad_before - nmad_after < 0.1:
break
nmad_before = nmad_after
return ~dh_stable.mask
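# Minimal usage sketch (hypothetical inputs): reject stable-terrain pixels
# beyond 5 NMAD, then recompute statistics with the updated mask.
#   dh = gu.spatial_tools.get_array_and_mask(ddem)[0]
#   inliers = nmad_filter(np.ma.masked_invalid(dh), stable_mask, nmad_factor=5)
#   roi_cov, nstable, med, nmad = calculate_stats(ddem, roi_mask, inliers)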
def spatial_filter_ref(ref_dem: np.ndarray, src_dem: np.ndarray, radius_pix: float, dh_thresh: float) -> np.ndarray:
"""
Masks all values where src_dem < min_ref - dh_thresh & src_dem > max_ref + dh_thresh.
where min_ref and max_ref are the min/max elevation of ref_dem within radius.
:param ref_dem: 2D array containing the reference elevation.
:param src_dem: 2D array containing the DEM to be filtered, of same size as ref_dem.
:param radius_pix: the radius of the disk where to calculate min/max ref elevation.
:param dh_thresh: the second elevation can be this far below/above the min/max ref elevation.
:returns: a boolean 2D array set to True for pixels to be masked
"""
# Sanity check
assert ref_dem.shape == src_dem.shape, "Input arrays have different shape"
assert | np.ndim(ref_dem) | numpy.ndim |
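# Hedged sketch of how the body of spatial_filter_ref (truncated above) could
# be implemented: scipy.ndimage's min/max filters with a disk footprint give
# the local reference envelope, and src_dem values outside
# [min_ref - dh_thresh, max_ref + dh_thresh] are flagged for masking.
#   from scipy import ndimage
#   footprint = disk(radius_pix)  # skimage.morphology.disk, imported above
#   min_ref = ndimage.minimum_filter(ref_dem, footprint=footprint)
#   max_ref = ndimage.maximum_filter(ref_dem, footprint=footprint)
#   return (src_dem < min_ref - dh_thresh) | (src_dem > max_ref + dh_thresh)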
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for running legacy optimizer code with DistributionStrategy."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.distribute import combinations
from tensorflow.python.distribute import reduce_util
from tensorflow.python.distribute import strategy_combinations
from tensorflow.python.distribute import strategy_test_lib
from tensorflow.python.distribute.single_loss_example import batchnorm_example
from tensorflow.python.distribute.single_loss_example import minimize_loss_example
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.keras.distribute import optimizer_combinations
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import control_flow_v2_toggles
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables as variables_lib
from tensorflow.python.ops.losses import losses_impl
from tensorflow.python.platform import test
VAR_MAP_V1 = {
"GradientDescent": ("dense/kernel", "dense/bias"),
"Adagrad": ("dense/kernel/Adagrad", "dense/kernel", "dense/bias/Adagrad",
"dense/bias"),
"Ftrl": ("dense/kernel/Ftrl", "dense/kernel", "dense/bias/Ftrl",
"dense/bias", "dense/kernel/Ftrl_1", "dense/bias/Ftrl_1"),
"RMSProp": ("dense/kernel", "dense/bias/RMSProp", "dense/bias/RMSProp_1",
"dense/bias", "dense/kernel/RMSProp_1", "dense/kernel/RMSProp")
}
VAR_MAP_V2 = {
"SGD": ("dense/bias", "SGD/learning_rate", "SGD/decay", "SGD/iter",
"dense/kernel", "SGD/momentum"),
"Adagrad":
("Adagrad/iter", "dense/bias", "dense/kernel", "Adagrad/learning_rate",
"Adagrad/decay", "Adagrad/dense/kernel/accumulator",
"Adagrad/dense/bias/accumulator")
}
class MinimizeLossStepTest(test.TestCase, parameterized.TestCase):
def _get_iterator(self, strategy, input_fn):
iterator = strategy.make_input_fn_iterator(lambda _: input_fn())
self.evaluate(iterator.initializer)
return iterator
@combinations.generate(
combinations.times(
optimizer_combinations.distributions_and_v1_optimizers(),
combinations.combine(mode=["graph"], use_callable_loss=[True, False])
+ combinations.combine(mode=["eager"], use_callable_loss=[True])) +
combinations.times(
optimizer_combinations.distributions_and_v2_optimizers(),
combinations.combine(
mode=["graph", "eager"], use_callable_loss=[True])) +
combinations.combine(
distribution=[strategy_combinations.tpu_strategy],
optimizer_fn=optimizer_combinations.optimizers_v2,
mode=["graph"],
use_callable_loss=[True]) + combinations.combine(
distribution=[strategy_combinations.tpu_strategy],
optimizer_fn=optimizer_combinations.optimizers_v1,
mode=["graph"],
use_callable_loss=[True, False]))
def testTrainNetwork(self, distribution, optimizer_fn, use_callable_loss):
with distribution.scope():
optimizer = optimizer_fn()
model_fn, dataset_fn, layer = minimize_loss_example(
optimizer, use_bias=True, use_callable_loss=use_callable_loss)
def step_fn(ctx, inputs):
del ctx # Unused
return distribution.group(
distribution.extended.call_for_each_replica(
model_fn, args=(inputs,)))
iterator = self._get_iterator(distribution, dataset_fn)
def run_step():
return distribution.extended.experimental_run_steps_on_iterator(
step_fn, iterator, iterations=2).run_op
if not context.executing_eagerly():
with self.cached_session() as sess:
run_step = sess.make_callable(run_step())
self.evaluate(variables_lib.global_variables_initializer())
weights, biases = [], []
for _ in range(5):
run_step()
weights.append(self.evaluate(layer.kernel))
biases.append(self.evaluate(layer.bias))
error = abs(numpy.add(numpy.squeeze(weights), numpy.squeeze(biases)) - 1)
is_not_increasing = all(y <= x for x, y in zip(error, error[1:]))
self.assertTrue(is_not_increasing)
@combinations.generate(
combinations.times(
optimizer_combinations.distributions_and_v1_optimizers(),
combinations.combine(mode=["graph"], use_callable_loss=[True, False])
+ combinations.combine(mode=["eager"], use_callable_loss=[True])) +
combinations.times(
optimizer_combinations.distributions_and_v2_optimizers(),
combinations.combine(
mode=["graph", "eager"], use_callable_loss=[True])))
def testTrainNetworkByCallForEachReplica(self, distribution, optimizer_fn,
use_callable_loss):
with distribution.scope():
optimizer = optimizer_fn()
model_fn, dataset_fn, layer = minimize_loss_example(
optimizer, use_bias=True, use_callable_loss=use_callable_loss)
iterator = self._get_iterator(distribution, dataset_fn)
def run_step():
return distribution.group(
distribution.extended.call_for_each_replica(
model_fn, args=(iterator.get_next(),)))
if not context.executing_eagerly():
with self.cached_session() as sess:
run_step = sess.make_callable(run_step())
self.evaluate(variables_lib.global_variables_initializer())
weights, biases = [], []
for _ in range(10):
run_step()
weights.append(self.evaluate(layer.kernel))
biases.append(self.evaluate(layer.bias))
error = abs(numpy.add(numpy.squeeze(weights), | numpy.squeeze(biases) | numpy.squeeze |
# ============================================================================
# Chapter 8: Cogeneration equipment
# Ver.12 (Energy Consumption Performance Calculation Program (residential edition) Ver.02.04-)
# ============================================================================
import numpy as np
import pyhees.section4_7 as hwh
import pyhees.section7_1 as dhw
import pyhees.section8_a as spec
import pyhees.section8_d as bb_dhw
import pyhees.section8_e as bb_hwh
from pyhees.section11_1 import load_outdoor, get_Theta_ex
# ============================================================================
# 5. Gas consumption
# ============================================================================
def calc_E_G_CG_d_t(bath_function, CG, E_E_dmd_d_t,
L_dashdash_k_d_t, L_dashdash_w_d_t, L_dashdash_s_d_t, L_dashdash_b1_d_t, L_dashdash_b2_d_t,
L_dashdash_ba1_d_t,
L_dashdash_ba2_d_t,
H_HS, H_MR, H_OR, A_A, A_MR, A_OR, region, mode_MR, mode_OR, L_T_H_rad):
"""1時間当たりのコージェネレーション設備のガス消費量 (1)
Args:
bath_function(str): ふろ機能の種類
CG(dict): コージェネレーション設備の仕様
E_E_dmd_d_t(ndarray): 1時間当たりの電力需要 (kWh/h)
L_dashdash_k_d_t(ndarray): 1時間当たりの台所水栓における太陽熱補正給湯熱負荷 (MJ/h)
L_dashdash_w_d_t(ndarray): 1時間当たりの洗面水栓における太陽熱補正給湯熱負荷 (MJ/h)
L_dashdash_s_d_t(ndarray): 1時間当たりの浴室シャワー水栓における太陽熱補正給湯熱負荷 (MJ/h)
L_dashdash_b1_d_t(ndarray): 1時間当たりの浴槽水栓湯はり時における太陽熱補正給湯熱負荷 (MJ/h)
L_dashdash_b2_d_t(ndarray): 1時間当たりの浴槽自動追焚時における太陽熱補正給湯熱負荷 (MJ/h)
L_dashdash_ba1_d_t(ndarray): 1時間当たりの浴槽水栓さし湯における太陽熱補正給湯熱負荷 (MJ/h)
L_dashdash_ba2_d_t(ndarray): 1時間当たりの浴槽追焚の太陽熱補正給湯熱負荷 (MJ/h)
H_HS: param H_MR:
H_OR: param A_A:
A_MR: param A_OR:
region: param mode_MR:
mode_OR: param L_T_H_rad:
H_MR:
A_A:
A_OR:
mode_MR:
L_T_H_rad:
Returns:
tuple: 1時間当たりのコージェネレーション設備の一次エネルギー消費量及び1時間当たりのコージェネレーション設備による発電量
"""
    # ----- Parameter acquisition -----
    if 'CG_category' in CG:
        # Use of exhaust heat for hot water heating
        exhaust = spec.get_exhaust(CG['CG_category'])
        # Exhaust heat utilization method
        exhaust_priority = spec.get_exhaust_priority(CG['CG_category'])
        # Efficiency of the backup boiler (hot water supply)
        e_rtd_DHW_BB = spec.get_e_rtd_BB_DHW(CG['CG_category'])
        # Type of the backup boiler (hot water supply / hot water heating)
        type_BB_HWH = spec.get_type_BB_HWH(CG['CG_category'])
        # Rated efficiency of the backup boiler (hot water heating)
        e_rtd_BB_HWH = spec.get_e_rtd_BB_HWH(CG['CG_category'])
        # Rated capacity of the backup boiler (hot water heating) (W)
        q_rtd_BB_HWH = spec.get_q_rtd_BB_HWH(CG['CG_category'])
        # Hot-water-supply exhaust heat utilization rate of the power generation unit
        r_DHW_gen_PU_d = spec.get_r_DHW_gen_PU_d(CG['CG_category'])
        # Hot-water-heating exhaust heat utilization rate of the power generation unit
        r_HWH_gen_PU_d = spec.get_r_HWH_gen_PU_d(CG['CG_category'])
        # Power generation method of the power generation unit
        PU_type = spec.get_PU_type(CG['CG_category'])
        # Parameters of the virtual power generation used when estimating the unit's output: a_PU, a_DHW, a_HWH, b, c
        param_E_E_gen_PU_Evt_d = spec.get_param_E_E_gen_PU_EVt_d(CG['CG_category'])
        # Coefficients for the virtual fuel consumption used when estimating the unit's exhaust heat
        param_E_F_PU_HVt_d = spec.get_param_E_F_PU_HVt_d(CG['CG_category'])
        # Coefficients for the upper-limit ratio of the virtual exhaust heat when estimating exhaust heat: a_DHW, a_HWH, b
        param_r_H_gen_PU_HVt_d = spec.get_param_r_H_gen_PU_HVt_d(CG['CG_category'])
        # Coefficients for the unit's daily average power generation efficiency: a_PU, a_DHW, a_HWH, b, upper limit, lower limit
        param_e_E_PU_d = spec.get_param_e_E_PU_d(CG['CG_category'])
        # Coefficients for the unit's daily average exhaust heat efficiency: a_PU, a_DHW, a_HWH, b, upper limit, lower limit
        param_e_H_PU_d = spec.get_param_e_H_PU_d(CG['CG_category'])
        # Rated power generation output (W)
        P_rtd_PU = spec.get_P_rtd_PU(CG['CG_category'])
        # Auxiliary power consumption of the tank unit (hot water supply)
        P_TU_aux_DHW = spec.get_P_TU_aux_DHW(CG['CG_category'])
        # Auxiliary power consumption of the tank unit (hot water heating)
        P_TU_aux_HWH = spec.get_P_TU_aux_HWH(CG['CG_category'])
        # Evaluation of reverse power flow
        has_CG_reverse = CG['reverse'] if 'reverse' in CG else False
    else:
        # Use of exhaust heat for hot water heating
        exhaust = CG['exhaust']
        # Exhaust heat utilization method
        exhaust_priority = CG['exhaust_priority']
        # Type of the backup boiler (hot water supply / hot water heating)
        type_BB_HWH = CG['type_BB_HWH']
        # From Appendices D and E
        if type_BB_HWH == 'ガス従来型' or type_BB_HWH == 'G_NEJ':
            # Efficiency of the backup boiler (hot water supply)
            e_rtd_DHW_BB = 0.782
            # Rated efficiency of the backup boiler (hot water heating)
            e_rtd_BB_HWH = 0.82
            # Rated capacity of the backup boiler (hot water heating) (W)
            q_rtd_BB_HWH = 17400
        elif type_BB_HWH == 'ガス潜熱回収型' or type_BB_HWH == 'G_EJ':
            # Efficiency of the backup boiler (hot water supply)
            e_rtd_DHW_BB = 0.905
            # Rated efficiency of the backup boiler (hot water heating)
            e_rtd_BB_HWH = 0.87
            # Rated capacity of the backup boiler (hot water heating) (W)
            q_rtd_BB_HWH = 17400
        else:
            raise ValueError(type_BB_HWH)
        # Hot-water-supply exhaust heat utilization rate of the power generation unit
        r_DHW_gen_PU_d = CG['r_DHW_gen_PU_d']
        # Hot-water-heating exhaust heat utilization rate of the power generation unit
        r_HWH_gen_PU_d = CG['r_HWH_gen_PU_d']
        # Power generation method of the power generation unit
        if 'PU_type' in CG:
            # Power generation method of the power generation unit
            PU_type = CG['PU_type']
        else:
            # Appendix A: Specification of the cogeneration equipment
            if CG['CG_category_param'] == 'PEFC':
                CG_category = 'PEFC2'
            elif CG['CG_category_param'] == 'SOFC':
                CG_category = 'SOFC1'
            elif CG['CG_category_param'] == 'GEC':
                CG_category = 'GEC1'
            else:
                raise ValueError(CG['CG_category_param'])
            PU_type = spec.get_PU_type(CG_category)
        # Parameters of the virtual power generation used when estimating the unit's output: a_PU, a_DHW, a_HWH, b, c
        param_E_E_gen_PU_Evt_d = CG['param_E_E_gen_PU_Evt_d']
        # Coefficients for the virtual fuel consumption used when estimating the unit's exhaust heat
        if 'param_E_F_PU_HVt_d' in CG:
            param_E_F_PU_HVt_d = CG['param_E_F_PU_HVt_d']
        # Coefficients for the upper-limit ratio of the virtual exhaust heat when estimating exhaust heat: a_DHW, a_HWH, b
        if 'param_r_H_gen_PU_HVt_d' in CG:
            param_r_H_gen_PU_HVt_d = CG['param_r_H_gen_PU_HVt_d']
        # Coefficients for the unit's daily average power generation efficiency: a_PU, a_DHW, a_HWH, b, upper limit, lower limit
        param_e_E_PU_d = CG['param_e_E_PU_d']
        # Coefficients for the unit's daily average exhaust heat efficiency: a_PU, a_DHW, a_HWH, b, upper limit, lower limit
        param_e_H_PU_d = CG['param_e_H_PU_d']
        # Rated power generation output (W)
        P_rtd_PU = CG['P_rtd_PU']
        # Auxiliary power consumption of the tank unit (hot water supply)
        P_TU_aux_DHW = CG['P_TU_aux_DHW']
        # Auxiliary power consumption of the tank unit (hot water heating)
        P_TU_aux_HWH = CG['P_TU_aux_HWH']
        # Evaluation of reverse power flow
        has_CG_reverse = CG['reverse'] if 'reverse' in CG else False
    # ----- Calculation of the load on the hot-water-heating heat source and of the hot water supply operation rate -----
    if H_HS is not None and H_HS['type'] == 'コージェネレーションを使用する':
        # Convert the radiators, specified per main/other living room, into an array per heating zone
        rad_list = hwh.get_rad_list(H_MR, H_OR)
        # Supply water temperature of the hot-water-heating heat source
        Theta_SW_hs_op = hwh.get_Theta_SW_hs_op(type_BB_HWH)
        p_hs = hwh.calc_p_hs_d_t(Theta_SW_hs_op, rad_list, L_T_H_rad, A_A, A_MR, A_OR, region, mode_MR, mode_OR)
        Theta_SW_d_t = hwh.get_Theta_SW_d_t(Theta_SW_hs_op, p_hs)
        # Hourly heat load of hot water heating (MJ/h)
        L_HWH_d_t = hwh.calc_Q_dmd_H_hs_d_t(rad_list, H_HS['pipe_insulation'], H_HS['underfloor_pipe_insulation'],
                                            Theta_SW_d_t, A_A, A_MR, A_OR, region,
                                            mode_MR, mode_OR, L_T_H_rad)
        # Processed (delivered) heating load
        Q_T_H_rad = np.zeros((5, 24 * 365))
        for i in [1, 3, 4, 5]:
            if rad_list[i - 1] is None:
                continue
            # Maximum hourly heating output of the radiator installed in heating/cooling zone i
            A_HCZ = hwh.calc_A_HCZ_i(i, A_A, A_MR, A_OR)
            R_type = '主たる居室' if i == 1 else 'その他の居室'
            mode = mode_MR if i == 1 else mode_OR
            Q_max_H_rad_d_t_i = hwh.calc_Q_max_H_rad_d_t_i(rad_list[i - 1], A_HCZ, Theta_SW_d_t, region, mode, R_type)
            # Hourly processed heating load of the radiator installed in heating/cooling zone i
            Q_T_H_rad[i - 1, :] = hwh.calc_Q_T_H_rad_d_t_i(Q_max_H_rad_d_t_i, L_T_H_rad[i - 1])
        # Hot water supply operation rate of the hot-water-heating heat source
        r_WS_HWH_d_t = hwh.calc_r_WS_hs_d_t(rad_list, L_HWH_d_t, Q_T_H_rad, Theta_SW_d_t, region, A_A, A_MR, A_OR,
                                            mode_MR)
        # Return water temperature (9)
        Theta_RW_hs = hwh.calc_Theta_RW_hs_d_t(Theta_SW_d_t, rad_list, H_HS['pipe_insulation'],
                                               H_HS['underfloor_pipe_insulation'], A_A, A_MR, A_OR, region,
                                               mode_MR, mode_OR,
                                               L_T_H_rad)
        # Parameter acquisition for the rated capacity calculation
        rad_types = hwh.get_rad_type_list()
        has_MR_hwh = H_MR['type'] in rad_types
        if H_OR is not None:
            has_OR_hwh = H_OR['type'] in rad_types
        else:
            has_OR_hwh = False
    else:
        L_HWH_d_t = np.zeros(24 * 365)
P_TU_aux_HWH = | np.zeros(24 * 365) | numpy.zeros |
from typing import Text, Dict, List, Optional
import numpy as np
import pytest
from rasa.core.featurizers.single_state_featurizer import SingleStateFeaturizer
from rasa.core.featurizers.single_state_featurizer import (
IntentTokenizerSingleStateFeaturizer,
)
from rasa.core.featurizers.tracker_featurizers import (
TrackerFeaturizer as TrackerFeaturizer,
)
from rasa.core.featurizers.tracker_featurizers import MaxHistoryTrackerFeaturizer
from rasa.core.featurizers.tracker_featurizers import IntentMaxHistoryTrackerFeaturizer
from rasa.core.featurizers.tracker_featurizers import FullDialogueTrackerFeaturizer
from rasa.shared.core.domain import Domain
from tests.core.utilities import user_uttered
from rasa.shared.nlu.training_data.features import Features
from rasa.shared.nlu.constants import INTENT, ACTION_NAME
from rasa.shared.core.constants import (
ACTION_LISTEN_NAME,
ACTION_UNLIKELY_INTENT_NAME,
USER,
PREVIOUS_ACTION,
)
from rasa.shared.core.events import ActionExecuted
from rasa.shared.core.trackers import DialogueStateTracker
from rasa.utils.tensorflow.constants import LABEL_PAD_ID
from rasa.core.exceptions import InvalidTrackerFeaturizerUsageError
def test_fail_to_load_non_existent_featurizer():
assert TrackerFeaturizer.load("non_existent_class") is None
def test_persist_and_load_tracker_featurizer(tmp_path: Text, moodbot_domain: Domain):
state_featurizer = SingleStateFeaturizer()
state_featurizer.prepare_for_training(moodbot_domain)
tracker_featurizer = MaxHistoryTrackerFeaturizer(state_featurizer)
tracker_featurizer.persist(tmp_path)
loaded_tracker_featurizer = TrackerFeaturizer.load(tmp_path)
assert loaded_tracker_featurizer is not None
assert loaded_tracker_featurizer.state_featurizer is not None
def test_convert_action_labels_to_ids(domain: Domain):
trackers_as_actions = [
["utter_greet", "utter_channel"],
["utter_greet", "utter_default", "utter_goodbye"],
]
tracker_featurizer = TrackerFeaturizer()
actual_output = tracker_featurizer._convert_labels_to_ids(
trackers_as_actions, domain
)
expected_output = np.array(
[
np.array(
[
domain.action_names_or_texts.index("utter_greet"),
domain.action_names_or_texts.index("utter_channel"),
],
),
np.array(
[
domain.action_names_or_texts.index("utter_greet"),
domain.action_names_or_texts.index("utter_default"),
domain.action_names_or_texts.index("utter_goodbye"),
],
),
],
)
assert expected_output.size == actual_output.size
for expected_array, actual_array in zip(expected_output, actual_output):
assert np.all(expected_array == actual_array)
def test_convert_intent_labels_to_ids(domain: Domain):
trackers_as_intents = [
["next_intent", "nlu_fallback", "out_of_scope", "restart"],
["greet", "hello", "affirm"],
]
tracker_featurizer = IntentMaxHistoryTrackerFeaturizer()
actual_labels = tracker_featurizer._convert_labels_to_ids(
trackers_as_intents, domain
)
expected_labels = np.array(
[
[
domain.intents.index("next_intent"),
domain.intents.index("nlu_fallback"),
domain.intents.index("out_of_scope"),
domain.intents.index("restart"),
],
[
domain.intents.index("greet"),
domain.intents.index("hello"),
domain.intents.index("affirm"),
LABEL_PAD_ID,
],
],
)
assert expected_labels.size == actual_labels.size
assert expected_labels.shape == actual_labels.shape
assert np.all(expected_labels == actual_labels)
def test_featurize_trackers_raises_on_missing_state_featurizer(domain: Domain):
tracker_featurizer = TrackerFeaturizer()
with pytest.raises(InvalidTrackerFeaturizerUsageError):
tracker_featurizer.featurize_trackers([], domain, precomputations=None)
def compare_featurized_states(
states1: List[Dict[Text, List[Features]]], states2: List[Dict[Text, List[Features]]]
) -> bool:
"""Compares two lists of featurized states and returns True if they
are identical and False otherwise.
"""
if len(states1) != len(states2):
return False
for state1, state2 in zip(states1, states2):
if state1.keys() != state2.keys():
return False
for key in state1.keys():
for feature1, feature2 in zip(state1[key], state2[key]):
if np.any((feature1.features != feature2.features).toarray()):
return False
if feature1.origin != feature2.origin:
return False
if feature1.attribute != feature2.attribute:
return False
if feature1.type != feature2.type:
return False
return True
def test_featurize_trackers_with_full_dialogue_tracker_featurizer(
moodbot_tracker: DialogueStateTracker,
moodbot_domain: Domain,
moodbot_features: Dict[Text, Dict[Text, Features]],
):
state_featurizer = SingleStateFeaturizer()
tracker_featurizer = FullDialogueTrackerFeaturizer(state_featurizer)
actual_features, actual_labels, entity_tags = tracker_featurizer.featurize_trackers(
[moodbot_tracker], moodbot_domain, precomputations=None,
)
expected_features = [
[
{},
{
ACTION_NAME: [moodbot_features["actions"][ACTION_LISTEN_NAME]],
INTENT: [moodbot_features["intents"]["greet"]],
},
{ACTION_NAME: [moodbot_features["actions"]["utter_greet"]]},
{
ACTION_NAME: [moodbot_features["actions"][ACTION_LISTEN_NAME]],
INTENT: [moodbot_features["intents"]["mood_unhappy"]],
},
{ACTION_NAME: [moodbot_features["actions"]["utter_cheer_up"]]},
{ACTION_NAME: [moodbot_features["actions"]["utter_did_that_help"]]},
{
ACTION_NAME: [moodbot_features["actions"][ACTION_LISTEN_NAME]],
INTENT: [moodbot_features["intents"]["deny"]],
},
]
]
assert actual_features is not None
assert len(actual_features) == len(expected_features)
for actual, expected in zip(actual_features, expected_features):
assert compare_featurized_states(actual, expected)
expected_labels = np.array([[0, 16, 0, 13, 14, 0, 15]])
assert actual_labels is not None
assert len(actual_labels) == 1
for actual, expected in zip(actual_labels, expected_labels):
assert np.all(actual == expected)
# moodbot doesn't contain e2e entities
assert not any([any(turn_tags) for turn_tags in entity_tags])
def test_trackers_ignore_action_unlikely_intent_with_full_dialogue_tracker_featurizer(
moodbot_domain: Domain, moodbot_features: Dict[Text, Dict[Text, Features]],
):
tracker = DialogueStateTracker.from_events(
"default",
[
ActionExecuted(ACTION_LISTEN_NAME),
user_uttered("greet"),
ActionExecuted(ACTION_UNLIKELY_INTENT_NAME),
ActionExecuted("utter_greet"),
ActionExecuted(ACTION_LISTEN_NAME),
user_uttered("mood_unhappy"),
ActionExecuted(ACTION_UNLIKELY_INTENT_NAME),
ActionExecuted("utter_cheer_up"),
ActionExecuted("utter_did_that_help"),
ActionExecuted(ACTION_LISTEN_NAME),
user_uttered("deny"),
ActionExecuted(ACTION_UNLIKELY_INTENT_NAME),
ActionExecuted("utter_goodbye"),
],
domain=moodbot_domain,
)
state_featurizer = SingleStateFeaturizer()
tracker_featurizer = FullDialogueTrackerFeaturizer(state_featurizer)
actual_features, actual_labels, entity_tags = tracker_featurizer.featurize_trackers(
[tracker],
moodbot_domain,
precomputations=None,
ignore_action_unlikely_intent=True,
)
expected_features = [
[
{},
{
ACTION_NAME: [moodbot_features["actions"][ACTION_LISTEN_NAME]],
INTENT: [moodbot_features["intents"]["greet"]],
},
{ACTION_NAME: [moodbot_features["actions"]["utter_greet"]]},
{
ACTION_NAME: [moodbot_features["actions"][ACTION_LISTEN_NAME]],
INTENT: [moodbot_features["intents"]["mood_unhappy"]],
},
{ACTION_NAME: [moodbot_features["actions"]["utter_cheer_up"]]},
{ACTION_NAME: [moodbot_features["actions"]["utter_did_that_help"]]},
{
ACTION_NAME: [moodbot_features["actions"][ACTION_LISTEN_NAME]],
INTENT: [moodbot_features["intents"]["deny"]],
},
]
]
assert actual_features is not None
assert len(actual_features) == len(expected_features)
for actual, expected in zip(actual_features, expected_features):
assert compare_featurized_states(actual, expected)
expected_labels = np.array([[0, 16, 0, 13, 14, 0, 15]])
assert actual_labels is not None
assert len(actual_labels) == 1
for actual, expected in zip(actual_labels, expected_labels):
assert | np.all(actual == expected) | numpy.all |
import os, fnmatch, sys, traceback, re
import dill as pickle
import scipy.interpolate as interp
import scipy.optimize as opti
import scipy.constants as constants
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.mlab as mlab
import bead_util as bu
import configuration as config
import transfer_func_util as tf
import iminuit
plt.rcParams.update({'font.size': 14})
file_dict = {}
arr = [] ### FIRST BEAD ON ATTRACTOR
arr.append('/data/20190122/bead1/weigh/high_pressure_neg_0.5Hz_4pp')
arr.append('/data/20190122/bead1/weigh/low_pressure_neg_0.5Hz_4pp')
arr.append(['/data/20190122/bead1/weigh/low_pressure_pos_0.5Hz_4pp_2', \
'/data/20190122/bead1/weigh/low_pressure_pos_0.5Hz_4pp_3'])
file_dict['20190122'] = (arr, 5, 7)
arr = [] ### SECOND BEAD ON ATTRACTOR
arr.append('/data/20190123/bead2/weigh/high_pressure_neg_0.5Hz_4pp')
arr.append('/data/20190123/bead2/weigh/low_pressure_neg_0.5Hz_4pp')
arr.append('/data/20190123/bead2/weigh/low_pressure_pos_0.5Hz_4pp')
file_dict['20190123'] = (arr, 5, 7)
arr = [] ### THIRD BEAD ON ATTRACTOR
arr.append('/data/20190124/bead2/weigh/high_pressure_neg_0.5Hz_4pp')
arr.append('/data/20190124/bead2/weigh/low_pressure_neg_0.5Hz_4pp')
arr.append('/data/20190124/bead2/weigh/low_pressure_pos_0.5Hz_4pp')
file_dict['20190124'] = (arr, 5, 7)
arr = [] ###
arr.append('/data/old_trap/20200307/gbead1/weigh_2/4Vpp_lowp_0')
# arr.append('/data/old_trap/20200307/gbead1/weigh/6Vpp_lowp_0')
file_dict['20200307'] = (arr, 1, 0)
arr = [] ###
arr.append('/data/old_trap/20200322/gbead1/weigh/4Vpp_neg')
arr.append('/data/old_trap/20200322/gbead1/weigh/6Vpp_neg')
arr.append('/data/old_trap/20200322/gbead1/weigh/8Vpp_neg')
file_dict['20200322'] = (arr, 1, 0)
arr = [] ###
# arr.append('/data/old_trap/20200327/gbead1/weigh/4Vpp_neg_lowp')
# arr.append('/data/old_trap/20200327/gbead1/weigh/6Vpp_neg_lowp')
arr.append('/data/old_trap/20200327/gbead1/weigh/8Vpp_neg_lowp')
file_dict['20200327'] = (arr, 1, 0)
arr = [] ###
# arr.append('/data/old_trap/20200330/gbead3/weigh/6Vpp_neg_lowp')
arr.append('/data/old_trap/20200330/gbead3/weigh/8Vpp_neg_lowp')
file_dict['20200330'] = (arr, 1, 0)
arr = [] ###
arr.append('/data/old_trap/20200721/bead2/weigh/4Vpp_lowp_neg_1')
arr.append('/data/old_trap/20200721/bead2/weigh/6Vpp_lowp_neg_1')
file_dict['20200721'] = (arr, 1, 0)
arr = [] ###
arr.append('/data/old_trap/20200727/bead1/weigh/4Vpp_lowp_neg')
arr.append('/data/old_trap/20200727/bead1/weigh/6Vpp_lowp_neg')
file_dict['20200727'] = (arr, 1, 0)
file_dict = {'20200727': (arr, 1, 0)}
arr = [] ###
arr.append('/data/old_trap/20200924/bead1/weigh/4Vpp_lowp_neg')
arr.append('/data/old_trap/20200924/bead1/weigh/6Vpp_lowp_neg')
file_dict['20200924'] = (arr, 2, 1)
file_dict = {'20200924': (arr, 2, 1)}
arr = [] ###
arr.append('/data/old_trap/20201030/bead1/weigh/6Vpp_lowp_neg')
file_dict['20201030'] = (arr, 2, 1)
file_dict = {'20201030': (arr, 2, 1)}
arr = [] ###
arr.append('/data/old_trap/20201222/gbead1/weigh/8Vpp_lowp_neg')
file_dict['20201222'] = (arr, 2, 0)
file_dict = {'20201222': (arr, 2, 0)}
# xlim = (-10, 100)
xlim = (-40, 450)
# arr = [] ###
# arr.append('/data/new_trap/20200320/Bead1/Mass/derp')
# file_dict['20200320'] = (arr, 1, 0)
# file_dict = {'20200320': (arr, 1, 0)}
manual_charge = 0
# manual_charge = 25
# Noise data
#chopper = True
noise = False
n_noise = 7
noise_dirs = ['/data/20181211/bead2/weigh/noise/no_charge_0.5Hz_4pp', \
'/data/20181211/bead2/weigh/noise/no_drive', \
'/data/20181213/bead1/weigh/noise/no_charge_0.5Hz_4pp', \
'/data/20181213/bead1/weigh/noise/no-bead_0.5Hz_4pp', \
'/data/20181213/bead1/weigh/noise/no-bead_0.5Hz_4pp_pd-blocked', \
'/data/20181213/bead1/weigh/noise/no-bead_zfb-inject', \
'/data/20181213/bead1/weigh/noise/no-bead_zfb-inject_pd-blocked']
# new_trap = True
new_trap = False
#r_divider = 50000.0 / (3000.0 + 50000.0)
r_divider = 1.0
mon_fac = r_divider**(-1) * 100.0 # Tabor amplifier monitor is 100:1
sign = -1.0
# sign = 1.0
trans_gain = 100e3 # V/A
pd_gain = 0.25 # A/W
# line_filter_trans = 0.45
line_filter_trans = 1
# bs_fac = 0.01
bs_fac = 0.09
maxfiles = 1000 # Many more than necessary
lpf = 2500 # Hz
file_inds = (0, 500)
userNFFT = 2**12
diag = False
save = False
fullNFFT = False
correct_phase_shift = False
save_mass = False
print_res = True
plot = True
save_example = False
example_filename = '/home/cblakemore/plots/weigh_beads/example_extrapolation.png'
# upper_outlier = 95e-15 # in kg
# upper_outlier = 95e-13
upper_outlier = 600e-15
lower_outlier = 70e-15
# lower_outlier = 1e-15
try:
allres_dict = pickle.load(open('./allres.p', 'rb'))
except:
allres_dict = {}
try:
overall_mass_dict = pickle.load(open('./overall_masses.p', 'rb'))
except:
overall_mass_dict = {}
###########################################################
def line(x, a, b):
return a * x + b
def weigh_bead_efield(files, elec_ind, pow_ind, colormap='plasma', sort='time',\
file_inds=(0,10000), plot=True, print_res=False, pos=False, \
save_mass=False, new_trap=False, correct_phase_shift=False):
    '''Loops over a list of file names, loads each file, and fits the
       measured optical power against the applied electrostatic force on a
       charged bead to extract the bead's mass.
       INPUTS: files, list of file names to extract data from
               elec_ind, index of the electrode monitor within other_data
               pow_ind, index of the transmitted-power monitor within other_data
               plot, boolean specifying whether to plot the fit results
               print_res, boolean specifying whether to print per-file results
       OUTPUTS: none explicitly; prints, plots, and optionally saves the mass
    '''
date = re.search(r"\d{8,}", files[0])[0]
suffix = files[0].split('/')[-2]
if new_trap:
trap_str = 'new_trap'
else:
trap_str = 'old_trap'
charge_file = '/data/{:s}_processed/calibrations/charges/'.format(trap_str) + date
save_filename = '/data/{:s}_processed/calibrations/masses/'.format(trap_str) \
+ date + '_' + suffix + '.mass'
bu.make_all_pardirs(save_filename)
if pos:
charge_file += '_recharge.charge'
else:
charge_file += '.charge'
try:
nq = np.load(charge_file)[0]
found_charge = True
except:
found_charge = False
if not found_charge or manual_charge:
user_nq = input('No charge file or manual requested. Guess q: ')
nq = int(user_nq)
if correct_phase_shift:
print('Correcting anomalous phase-shift during analysis.')
# nq = -16
print('qbead: {:d} e'.format(int(nq)))
q_bead = nq * constants.elementary_charge
run_index = 0
masses = []
nfiles = len(files)
if not print_res:
print("Processing %i files..." % nfiles)
all_eforce = []
all_power = []
all_param = []
mass_vec = []
p_ac = []
p_dc = []
e_ac = []
e_dc = []
pressure_vec = []
zamp_avg = 0
zphase_avg = 0
zamp_N = 0
zfb_avg = 0
zfb_N = 0
power_avg = 0
power_N = 0
Nbad = 0
powpsd = []
for fil_ind, fil in enumerate(files):# 15-65
# 4
# if fil_ind == 16 or fil_ind == 4:
# continue
bu.progress_bar(fil_ind, nfiles)
# Load data
df = bu.DataFile()
try:
if new_trap:
df.load_new(fil)
else:
df.load(fil, load_other=True)
except Exception:
traceback.print_exc()
continue
try:
# df.calibrate_stage_position()
df.calibrate_phase()
except Exception:
traceback.print_exc()
continue
if ('20181129' in fil) and ('high' in fil):
pressure_vec.append(1.5)
else:
try:
pressure_vec.append(df.pressures['pirani'])
except Exception:
pressure_vec.append(0.0)
### Extract electrode data
if new_trap:
top_elec = df.electrode_data[1]
bot_elec = df.electrode_data[2]
else:
top_elec = mon_fac * df.other_data[elec_ind]
bot_elec = mon_fac * df.other_data[elec_ind+1]
fac = 1.0
if np.std(top_elec) < 0.5 * np.std(bot_elec) \
or np.std(bot_elec) < 0.5 * np.std(top_elec):
print('Adjusting electric field since only one electrode was digitized.')
fac = 2.0
nsamp = len(top_elec)
zeros = np.zeros(nsamp)
voltages = [zeros, top_elec, bot_elec, zeros, \
zeros, zeros, zeros, zeros]
efield = bu.trap_efield(voltages, new_trap=new_trap)
eforce2 = fac * sign * efield[2] * q_bead
tarr = np.arange(0, df.nsamp/df.fsamp, 1.0/df.fsamp)
# fig, axarr = plt.subplots(2,1,sharex=True,figsize=(10,8))
# axarr[0].plot(tarr, top_elec, label='Top elec.')
# axarr[0].plot(tarr, bot_elec, label='Bottom elec.')
# axarr[0].set_ylabel('Apparent Voltages [V]')
# axarr[0].legend(fontsize=12, loc='upper right')
# axarr[1].plot(tarr, efield[2])
# axarr[1].set_xlabel('Time [s]')
# axarr[1].set_ylabel('Apparent Electric Field [V/m]')
# fig.tight_layout()
# plt.show()
# input()
freqs = np.fft.rfftfreq(df.nsamp, d=1.0/df.fsamp)
drive_ind = np.argmax(np.abs(np.fft.rfft(eforce2)))
drive_freq = freqs[drive_ind]
zamp = np.abs( np.fft.rfft(df.zcal) * bu.fft_norm(df.nsamp, df.fsamp) * \
np.sqrt(freqs[1] - freqs[0]) )
zamp *= (1064.0e-9 / 2.0) * (1.0 / (2.9 * np.pi))
zphase = np.angle( np.fft.rfft(df.zcal) )
zamp_avg += zamp[drive_ind]
zamp_N += 1
#plt.loglog(freqs, zamp)
#plt.scatter(freqs[drive_ind], zamp[drive_ind], s=10, color='r')
#plt.show()
zfb = np.abs(np.fft.rfft(df.pos_fb[2]) * bu.fft_norm(df.nsamp, df.fsamp) * \
np.sqrt(freqs[1] - freqs[0]) )
zfb_avg += zfb[drive_ind]
zfb_N += 1
#eforce2 = (top_elec * e_top_func(0.0) + bot_elec * e_bot_func(0.0)) * q_bead
if noise:
e_dc.append(np.mean(eforce2))
e_ac_val = np.abs(np.fft.rfft(eforce2))[drive_ind]
e_ac.append(e_ac_val * bu.fft_norm(df.nsamp, df.fsamp) \
* np.sqrt(freqs[1] - freqs[0]) )
zphase_avg += (zphase[drive_ind] - np.angle(eforce2)[drive_ind])
if np.sum(df.power) == 0.0:
current = np.abs(df.other_data[pow_ind]) / trans_gain
else:
fac = 1e-6
current = fac * df.power / trans_gain
power = current / pd_gain
power = power / line_filter_trans
power = power / bs_fac
power_avg += np.mean(power)
power_N += 1
if noise:
p_dc.append(np.mean(power))
p_ac_val = np.abs(np.fft.rfft(power))[drive_ind]
p_ac.append(p_ac_val * bu.fft_norm(df.nsamp, df.fsamp) \
* np.sqrt(freqs[1] - freqs[0]) )
fft1 = np.fft.rfft(power)
fft2 = np.fft.rfft(df.pos_fb[2])
if not len(powpsd):
powpsd = np.abs(fft1)
Npsd = 1
else:
powpsd += np.abs(fft1)
Npsd += 1
# freqs = np.fft.rfftfreq(df.nsamp, d=1.0/df.fsamp)
# plt.loglog(freqs, np.abs(np.fft.rfft(eforce2)))
# plt.loglog(freqs, np.abs(np.fft.rfft(power)))
# plt.show()
# input()
# fig, axarr = plt.subplots(2,1,sharex=True,figsize=(10,8))
# axarr[0].plot(tarr, power)
# axarr[0].set_ylabel('Measured Power [Arb.]')
# axarr[1].plot(tarr, power)
# axarr[1].set_xlabel('Time [s]')
# axarr[1].set_ylabel('Measured Power [Arb.]')
# bot, top = axarr[1].get_ylim()
# axarr[1].set_ylim(1.05*bot, 0)
# fig.tight_layout()
# plt.show()
# input()
bins, dat, errs = bu.spatial_bin(eforce2, power, nbins=200, width=0.0, #width=0.05, \
dt=1.0/df.fsamp, harms=[1], \
add_mean=True, verbose=False, \
correct_phase_shift=correct_phase_shift, \
grad_sign=0)
dat = dat / np.mean(dat)
#plt.plot(bins, dat, 'o')
#plt.show()
popt, pcov = opti.curve_fit(line, bins*1.0e13, dat, \
absolute_sigma=False, maxfev=10000)
test_vals = np.linspace(np.min(eforce2*1.0e13), np.max(eforce2*1.0e13), 100)
fit = line(test_vals, *popt)
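        # The fit is (normalized power) = a*F + b with the force F in units of
        # 1e-13 N; the zero crossing -b/a is the electrostatic force that fully
        # supports the bead, so dividing by g yields the bead's mass.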
lev_force = -popt[1] / (popt[0] * 1.0e13)
mass = lev_force / (9.806)
#umass = ulev_force / 9.806
#lmass = llev_force / 9.806
if mass > upper_outlier or mass < lower_outlier:
print('Crazy mass: {:0.2f} pg.... ignoring'.format(mass*1e15))
# fig, axarr = plt.subplots(3,1,sharex=True)
# axarr[0].plot(eforce2)
# axarr[1].plot(power)
# axarr[2].plot(df.pos_data[2])
# ylims = axarr[1].get_ylim()
# axarr[1].set_ylim(ylims[0], 0)
# plt.show()
continue
all_param.append(popt)
all_eforce.append(bins)
all_power.append(dat)
mass_vec.append(mass)
if noise:
print('DC power: ', np.mean(p_dc), np.std(p_dc))
print('AC power: ', np.mean(p_ac), np.std(p_ac))
print('DC field: ', np.mean(e_dc), np.std(e_dc))
print('AC field: ', np.mean(e_ac), np.std(e_ac))
return
#plt.plot(mass_vec)
mean_popt = np.mean(all_param, axis=0)
mean_lev = | np.mean(mass_vec) | numpy.mean |
#!/usr/bin/env python
# Copyright 2021
# author: <NAME> <<EMAIL>>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from scipy.stats import ttest_ind
import netCDF4 as nc
import pickle
import os
from PIL import Image as PIL_Image
import sys
import shutil
import glob
import datetime
import time
import calendar
from numpy import genfromtxt
from scipy.optimize import curve_fit
from scipy.cluster.vq import kmeans,vq
from scipy.interpolate import interpn, interp1d
from math import e as e_constant
import math
import matplotlib.dates as mdates
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.cm as cm
from matplotlib.collections import LineCollection
from matplotlib.ticker import (MultipleLocator, NullFormatter, ScalarFormatter)
from matplotlib.colors import ListedColormap, BoundaryNorm
import matplotlib.pyplot as plt
from matplotlib.animation import FuncAnimation
import matplotlib
import warnings
warnings.filterwarnings("ignore")
plt.style.use('classic')
# font size
# font_size = 14
# matplotlib.rc('font', **{'family': 'serif', 'serif': ['Arial'], 'size': font_size})
# matplotlib.rc('font', weight='bold')
p_progress_writing = False
SMALL_SIZE = 8
MEDIUM_SIZE = 10
BIGGER_SIZE = 12
plt.rc('font', size=SMALL_SIZE) # controls default text sizes
plt.rc('axes', titlesize=SMALL_SIZE) # fontsize of the axes title
plt.rc('axes', labelsize=MEDIUM_SIZE) # fontsize of the x and y labels
plt.rc('xtick', labelsize=SMALL_SIZE) # fontsize of the tick labels
plt.rc('ytick', labelsize=SMALL_SIZE) # fontsize of the tick labels
plt.rc('legend', fontsize=SMALL_SIZE) # legend fontsize
plt.rc('figure', titlesize=BIGGER_SIZE) # fontsize of the figure title
time_format = '%d-%m-%Y_%H:%M'
time_format_khan = '%Y%m%d.0%H'
time_format_mod = '%Y-%m-%d_%H:%M:%S'
time_format_twolines = '%H:%M\n%d-%m-%Y'
time_format_twolines_noYear_noMin_intMonth = '%H\n%d-%m'
time_format_twolines_noYear = '%H:%M\n%d-%b'
time_format_twolines_noYear_noMin = '%H\n%d-%b'
time_format_date = '%Y-%m-%d'
time_format_time = '%H:%M:%S'
time_format_parsivel = '%Y%m%d%H%M'
time_format_parsivel_seconds = '%Y%m%d%H%M%S'
time_str_formats = [
time_format,
time_format_mod,
time_format_twolines,
time_format_twolines_noYear,
time_format_date,
time_format_time,
time_format_parsivel
]
default_cm = cm.jet
cm_vir = cm.viridis
listed_cm_colors_list = ['silver', 'red', 'green', 'yellow', 'blue', 'black']
listed_cm = ListedColormap(listed_cm_colors_list, 'indexed')
colorbar_tick_labels_list_cloud_phase = ['Clear', 'Water', 'SLW', 'Mixed', 'Ice', 'Unknown']
listed_cm_colors_list_cloud_phase = ['white', 'red', 'green', 'yellow', 'blue', 'purple']
listed_cm_cloud_phase = ListedColormap(listed_cm_colors_list_cloud_phase, 'indexed')
avogadros_ = 6.022140857E+23 # molecules/mol
gas_const = 83144.598 # cm3 mbar k-1 mol-1
gas_const_2 = 8.3144621 # J mol-1 K-1
gas_const_water = 461 # J kg-1 K-1
gas_const_dry = 287 # J kg-1 K-1
boltzmann_ = gas_const / avogadros_ # cm3 mbar / k molecules
gravity_ = 9.80665                          # m/s^2
poisson_ = 2/7 # for dry air (k)
latent_heat_v = 2.501E+6 # J/kg
latent_heat_f = 3.337E+5 # J/kg
latent_heat_s = 2.834E+6 # J/kg
heat_capacity__Cp = 1005.7 # J kg-1 K-1 dry air
heat_capacity__Cv = 719 # J kg-1 K-1 water vapor
Rs_da = 287.05 # Specific gas const for dry air, J kg^{-1} K^{-1}
Rs_v = 461.51 # Specific gas const for water vapour, J kg^{-1} K^{-1}
Cp_da = 1004.6 # Specific heat at constant pressure for dry air
Cv_da = 719. # Specific heat at constant volume for dry air
Cp_v = 1870. # Specific heat at constant pressure for water vapour
Cv_v = 1410. # Specific heat at constant volume for water vapour
Cp_lw = 4218 # Specific heat at constant pressure for liquid water
Epsilon = 0.622 # Epsilon=Rs_da/Rs_v; The ratio of the gas constants
degCtoK = 273.15 # Temperature offset between K and C (deg C)
rho_w = 1000. # Liquid Water density kg m^{-3}
grav = 9.80665 # Gravity, m s^{-2}
Lv = 2.5e6 # Latent Heat of vaporisation
boltzmann = 5.67e-8  # Stefan-Boltzmann constant, W m^{-2} K^{-4}
mv = 18.0153e-3 # Mean molar mass of water vapor(kg/mol)
m_a = 28.9644e-3 # Mean molar mass of air(kg/mol)
Rstar_a = 8.31432 # Universal gas constant for air (N m /(mol K))
path_output = '/g/data/k10/la6753/'
# Misc
class Object_create(object):
pass
def list_files_recursive(path_, filter_str=None):
# create list of raw spectra files
file_list = []
# r=root, d=directories, f = files
if filter_str is None:
for r, d, f in os.walk(path_):
for file in f:
file_list.append(os.path.join(r, file))
else:
for r, d, f in os.walk(path_):
for file in f:
if filter_str in file:
file_list.append(os.path.join(r, file))
return file_list
def list_files(path_, filter_str='*'):
file_list = sorted(glob.glob(str(path_ + filter_str)))
return file_list
def coincidence(arr_1,arr_2):
# only coincidences
check_ = arr_1 * arr_2
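# NaN != NaN, so the next line sets every finite entry to 1 and leaves NaNs in place as a mask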
check_[check_ == check_] = 1
arr_1_checked = arr_1 * check_
arr_2_checked = arr_2 * check_
return arr_1_checked[~np.isnan(arr_1_checked)], arr_2_checked[~np.isnan(arr_2_checked)]
def array_2d_fill_gaps_by_interpolation_linear(array_):
rows_ = array_.shape[0]
cols_ = array_.shape[1]
output_array_X = np.zeros((rows_, cols_), dtype=float)
output_array_Y = np.zeros((rows_, cols_), dtype=float)
row_sum = np.sum(array_, axis=1)
col_index = np.arange(array_.shape[1])
col_sum = np.sum(array_, axis=0)
row_index = np.arange(array_.shape[0])
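# a NaN anywhere in a row (or column) makes its sum NaN, flagging that row/column for interpolation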
for r_ in range(array_.shape[0]):
if row_sum[r_] != row_sum[r_]:
# get X direction interpolation
coin_out = coincidence(col_index, array_[r_, :])
output_array_X[r_, :][np.isnan(array_[r_, :])] = np.interp(
col_index[np.isnan(array_[r_, :])], coin_out[0], coin_out[1])
for c_ in range(array_.shape[1]):
if col_sum[c_] != col_sum[c_]:
# get Y direction interpolation
coin_out = coincidence(row_index, array_[:, c_])
output_array_Y[:, c_][np.isnan(array_[:, c_])] = np.interp(
row_index[np.isnan(array_[:, c_])], coin_out[0], coin_out[1])
output_array = np.array(array_)
output_array[np.isnan(array_)] = 0
return output_array + ((output_array_X + output_array_Y)/2)
def array_2d_fill_gaps_by_interpolation_cubic(array_):
rows_ = array_.shape[0]
cols_ = array_.shape[1]
output_array_X = np.zeros((rows_, cols_), dtype=float)
output_array_Y = np.zeros((rows_, cols_), dtype=float)
row_sum = np.sum(array_, axis=1)
col_index = np.arange(array_.shape[1])
col_sum = np.sum(array_, axis=0)
row_index = np.arange(array_.shape[0])
for r_ in range(array_.shape[0]):
if row_sum[r_] != row_sum[r_]:
# get X direction interpolation
coin_out = coincidence(col_index, array_[r_, :])
interp_function = interp1d(coin_out[0], coin_out[1], kind='cubic')
output_array_X[r_, :][np.isnan(array_[r_, :])] = interp_function(col_index[np.isnan(array_[r_, :])])
for c_ in range(array_.shape[1]):
if col_sum[c_] != col_sum[c_]:
# get Y direction interpolation
coin_out = coincidence(row_index, array_[:, c_])
interp_function = interp1d(coin_out[0], coin_out[1], kind='cubic')
output_array_Y[:, c_][np.isnan(array_[:, c_])] = interp_function(row_index[np.isnan(array_[:, c_])])
output_array = np.array(array_)
output_array[np.isnan(array_)] = 0
return output_array + ((output_array_X + output_array_Y)/2)
def combine_2_time_series(time_1_reference, data_1, time_2, data_2,
forced_time_step=None, forced_start_time=None, forced_stop_time=None,
cumulative_var_1=False, cumulative_var_2=False):
"""
takes two data sets with their respective time series and returns the coincident stamps from both data sets.
It does this by applying mean_discrete() to both sets with the same start stamp and averaging period; the
averaging period is forced_time_step
:param time_1_reference: 1D array, same units as time_2; this series defines the returned time step reference
:param data_1: can be a 1D or 2D array; the first dimension must match time_1_reference
:param time_2: 1D array, same units as time_1_reference
:param data_2: can be a 1D or 2D array; the first dimension must match time_2
:param forced_time_step: if None, the median of the differences of time_1_reference will be used
:param forced_start_time: if not None, the returned series will start at this time stamp
:param forced_stop_time: if not None, the returned series will stop at this time stamp
:param cumulative_var_1: True if you want the variable to be accumulated instead of averaged; only for 1D data
:param cumulative_var_2: True if you want the variable to be accumulated instead of averaged; only for 1D data
:return: Index_averaged_1: 1D array, smallest coincident time, without time stamp gaps
:return: Values_mean_1: same shape as data_1, aligned to Index_averaged_1 times
:return: Values_mean_2: same shape as data_2, aligned to Index_averaged_1 times
"""
# define forced_time_step
if forced_time_step is None:
forced_time_step = np.median(np.diff(time_1_reference))
# find time period
if forced_start_time is None:
first_time_stamp = max(np.nanmin(time_1_reference), np.nanmin(time_2))
else:
first_time_stamp = forced_start_time
if forced_stop_time is None:
last_time_stamp = min(np.nanmax(time_1_reference), np.nanmax(time_2))
else:
last_time_stamp = forced_stop_time
# do the averaging
print('starting averaging of data 1')
if cumulative_var_1:
Index_averaged_1, Values_mean_1 = mean_discrete(time_1_reference, data_1, forced_time_step,
first_time_stamp, last_index=last_time_stamp,
cumulative_parameter_indx=0)
else:
Index_averaged_1, Values_mean_1 = mean_discrete(time_1_reference, data_1, forced_time_step,
first_time_stamp, last_index=last_time_stamp)
print('starting averaging of data 2')
if cumulative_var_2:
Index_averaged_2, Values_mean_2 = mean_discrete(time_2, data_2, forced_time_step,
first_time_stamp, last_index=last_time_stamp,
cumulative_parameter_indx=0)
else:
Index_averaged_2, Values_mean_2 = mean_discrete(time_2, data_2, forced_time_step,
first_time_stamp, last_index=last_time_stamp)
# check that averaged indexes are the same
if np.nansum(np.abs(Index_averaged_1 - Index_averaged_2)) != 0:
print('error during averaging of series: times do not match')
return None, None, None
# return the combined, trimmed data
return Index_averaged_1, Values_mean_1, Values_mean_2
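# minimal usage sketch (hypothetical inputs): align two series onto a common 60 s grid
# t_avg, v1, v2 = combine_2_time_series(time_a, data_a, time_b, data_b, forced_time_step=60.0)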
def split_str_chunks(s, n):
"""Produce `n`-character chunks from `s`."""
out_list = []
for start in range(0, len(s), n):
out_list.append(s[start:start+n])
return out_list
def coincidence_multi(array_list):
# only coincidences
parameters_list = array_list
check_ = parameters_list[0]
for param_ in parameters_list[1:]:
check_ = check_ * param_
check_[check_ == check_] = 1
# keep only rows where every input is non-NaN (check_ is NaN at any missing position)
list_list = []
for param_ in parameters_list:
t_list = []
for i in range(check_.shape[0]):
if check_[i] == check_[i]:
t_list.append(param_[i])
list_list.append(t_list)
# concatenate
ar_list = []
for ii in range(len(parameters_list)):
ar_list.append(np.array(list_list[ii]))
return ar_list
def coincidence_zero(arr_1,arr_2):
# only coincidences
check_ = arr_1 * arr_2
# delete empty rows_
list_1 = []
list_2 = []
for i in range(check_.shape[0]):
if check_[i] != 0:
list_1.append(arr_1[i])
list_2.append(arr_2[i])
return np.array(list_1), np.array(list_2)
import numpy as np
def unit_vector(vector):
""" Returns the unit vector of the vector. """
return vector / np.linalg.norm(vector)
def angle_between(v1, v2) -> float:
""" Returns the angle in radians between vectors 'v1' and 'v2' """
v1_u = unit_vector(v1)
v2_u = unit_vector(v2)
return np.arccos(np.clip(np.dot(v1_u, v2_u), -1.0, 1.0))
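# example: angle_between((1, 0, 0), (0, 1, 0)) returns pi/2 (~1.5708)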
from copy import copy
from itertools import cycle, islice
import numpy as np
import pandas as pd
import pytest
from napari._tests.utils import check_layer_world_data_extent
from napari.layers import Shapes
from napari.layers.utils._text_constants import TextMode
from napari.utils.colormaps.standardize_color import transform_color
def _make_cycled_properties(values, length):
"""Helper function to make property values
Parameters
----------
values
The values to be cycled.
length : int
The length of the resulting property array
Returns
-------
cycled_properties : np.ndarray
The property array comprising the cycled values.
"""
cycled_properties = np.array(list(islice(cycle(values), 0, length)))
return cycled_properties
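# e.g. _make_cycled_properties(['A', 'B'], 5) -> array(['A', 'B', 'A', 'B', 'A'])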
def test_empty_shapes():
shp = Shapes()
assert shp.ndim == 2
properties_array = {'shape_type': _make_cycled_properties(['A', 'B'], 10)}
properties_list = {'shape_type': list(_make_cycled_properties(['A', 'B'], 10))}
@pytest.mark.parametrize("properties", [properties_array, properties_list])
def test_properties(properties):
shape = (10, 4, 2)
np.random.seed(0)
data = 20 * np.random.random(shape)
layer = Shapes(data, properties=copy(properties))
np.testing.assert_equal(layer.properties, properties)
current_prop = {'shape_type': np.array(['B'])}
assert layer.current_properties == current_prop
# test removing shapes
layer.selected_data = {0, 1}
layer.remove_selected()
remove_properties = properties['shape_type'][2::]
assert len(layer.properties['shape_type']) == (shape[0] - 2)
assert np.all(layer.properties['shape_type'] == remove_properties)
# test selection of properties
layer.selected_data = {0}
selected_annotation = layer.current_properties['shape_type']
assert len(selected_annotation) == 1
assert selected_annotation[0] == 'A'
# test adding shapes with properties
new_data = np.random.random((1, 4, 2))
new_shape_type = ['rectangle']
layer.add(new_data, shape_type=new_shape_type)
add_properties = np.concatenate((remove_properties, ['A']), axis=0)
assert np.all(layer.properties['shape_type'] == add_properties)
# test copy/paste
layer.selected_data = {0, 1}
layer._copy_data()
assert np.all(layer._clipboard['properties']['shape_type'] == ['A', 'B'])
layer._paste_data()
paste_properties = np.concatenate((add_properties, ['A', 'B']), axis=0)
assert np.all(layer.properties['shape_type'] == paste_properties)
# test updating a property
layer.mode = 'select'
layer.selected_data = {0}
new_property = {'shape_type': np.array(['B'])}
layer.current_properties = new_property
updated_properties = layer.properties
assert updated_properties['shape_type'][0] == 'B'
@pytest.mark.parametrize("attribute", ['edge', 'face'])
def test_adding_properties(attribute):
"""Test adding properties to an existing layer"""
shape = (10, 4, 2)
np.random.seed(0)
data = 20 * np.random.random(shape)
layer = Shapes(data)
# add properties
properties = {'shape_type': _make_cycled_properties(['A', 'B'], shape[0])}
layer.properties = properties
np.testing.assert_equal(layer.properties, properties)
# add properties as a dataframe
properties_df = pd.DataFrame(properties)
layer.properties = properties_df
np.testing.assert_equal(layer.properties, properties)
# add properties as a dictionary with list values
properties_list = {
'shape_type': list(_make_cycled_properties(['A', 'B'], shape[0]))
}
layer.properties = properties_list
assert isinstance(layer.properties['shape_type'], np.ndarray)
# removing a property that was the _*_color_property should give a warning
setattr(layer, f'_{attribute}_color_property', 'shape_type')
properties_2 = {
'not_shape_type': _make_cycled_properties(['A', 'B'], shape[0])
}
with pytest.warns(RuntimeWarning):
layer.properties = properties_2
def test_data_setter_with_properties():
"""Test layer data on a layer with properties via the data setter"""
shape = (10, 4, 2)
np.random.seed(0)
data = 20 * np.random.random(shape)
properties = {'shape_type': _make_cycled_properties(['A', 'B'], shape[0])}
layer = Shapes(data, properties=properties)
# test setting to data with fewer shapes
n_new_shapes = 4
new_data = 20 * np.random.random((n_new_shapes, 4, 2))
layer.data = new_data
assert len(layer.properties['shape_type']) == n_new_shapes
# test setting to data with more shapes
n_new_shapes_2 = 6
new_data_2 = 20 * np.random.random((n_new_shapes_2, 4, 2))
layer.data = new_data_2
assert len(layer.properties['shape_type']) == n_new_shapes_2
# test setting to data with same shapes
new_data_3 = 20 * np.random.random((n_new_shapes_2, 4, 2))
layer.data = new_data_3
assert len(layer.properties['shape_type']) == n_new_shapes_2
def test_properties_dataframe():
"""Test if properties can be provided as a DataFrame"""
shape = (10, 4, 2)
np.random.seed(0)
data = 20 * np.random.random(shape)
properties = {'shape_type': _make_cycled_properties(['A', 'B'], shape[0])}
properties_df = pd.DataFrame(properties)
properties_df = properties_df.astype(properties['shape_type'].dtype)
layer = Shapes(data, properties=properties_df)
np.testing.assert_equal(layer.properties, properties)
def test_empty_layer_with_text_properties():
"""Test initializing an empty layer with text defined"""
default_properties = {'shape_type': np.array([1.5], dtype=float)}
text_kwargs = {'text': 'shape_type', 'color': 'red'}
layer = Shapes(
properties=default_properties,
text=text_kwargs,
)
assert layer.text._mode == TextMode.PROPERTY
assert layer.text.values.size == 0
np.testing.assert_allclose(layer.text.color, [1, 0, 0, 1])
# add a shape and check that the appropriate text value was added
layer.add(np.random.random((1, 4, 2)))
np.testing.assert_equal(layer.text.values, ['1.5'])
np.testing.assert_allclose(layer.text.color, [1, 0, 0, 1])
def test_empty_layer_with_text_formatted():
"""Test initializing an empty layer with text defined"""
default_properties = {'shape_type': np.array([1.5], dtype=float)}
layer = Shapes(
properties=default_properties,
text='shape_type: {shape_type:.2f}',
)
assert layer.text._mode == TextMode.FORMATTED
assert layer.text.values.size == 0
# add a shape and check that the appropriate text value was added
layer.add(np.random.random((1, 4, 2)))
np.testing.assert_equal(layer.text.values, ['shape_type: 1.50'])
@pytest.mark.parametrize("properties", [properties_array, properties_list])
def test_text_from_property_value(properties):
"""Test setting text from a property value"""
shape = (10, 4, 2)
np.random.seed(0)
data = 20 * np.random.random(shape)
layer = Shapes(data, properties=copy(properties), text='shape_type')
np.testing.assert_equal(layer.text.values, properties['shape_type'])
@pytest.mark.parametrize("properties", [properties_array, properties_list])
def test_text_from_property_fstring(properties):
"""Test setting text with an f-string from the property value"""
shape = (10, 4, 2)
np.random.seed(0)
data = 20 * np.random.random(shape)
layer = Shapes(
data, properties=copy(properties), text='type: {shape_type}'
)
expected_text = ['type: ' + v for v in properties['shape_type']]
np.testing.assert_equal(layer.text.values, expected_text)
# test updating the text
layer.text = 'type-ish: {shape_type}'
expected_text_2 = ['type-ish: ' + v for v in properties['shape_type']]
np.testing.assert_equal(layer.text.values, expected_text_2)
# copy/paste
layer.selected_data = {0}
layer._copy_data()
layer._paste_data()
expected_text_3 = expected_text_2 + ['type-ish: A']
np.testing.assert_equal(layer.text.values, expected_text_3)
# add shape
layer.selected_data = {0}
new_shape = np.random.random((1, 4, 2))
layer.add(new_shape)
expected_text_4 = expected_text_3 + ['type-ish: A']
np.testing.assert_equal(layer.text.values, expected_text_4)
@pytest.mark.parametrize("properties", [properties_array, properties_list])
def test_set_text_with_kwarg_dict(properties):
text_kwargs = {
'text': 'type: {shape_type}',
'color': [0, 0, 0, 1],
'rotation': 10,
'translation': [5, 5],
'anchor': 'upper_left',
'size': 10,
'visible': True,
}
shape = (10, 4, 2)
np.random.seed(0)
data = 20 * np.random.random(shape)
layer = Shapes(data, properties=copy(properties), text=text_kwargs)
expected_text = ['type: ' + v for v in properties['shape_type']]
np.testing.assert_equal(layer.text.values, expected_text)
for property, value in text_kwargs.items():
if property == 'text':
continue
layer_value = getattr(layer._text, property)
np.testing.assert_equal(layer_value, value)
@pytest.mark.parametrize("properties", [properties_array, properties_list])
def test_text_error(properties):
"""creating a layer with text as the wrong type should raise an error"""
shape = (10, 4, 2)
np.random.seed(0)
data = 20 * np.random.random(shape)
# try adding text as the wrong type
with pytest.raises(TypeError):
Shapes(data, properties=copy(properties), text=123)
def test_refresh_text():
"""Test refreshing the text after setting new properties"""
shape = (10, 4, 2)
np.random.seed(0)
data = 20 * np.random.random(shape)
properties = {'shape_type': ['A'] * shape[0]}
layer = Shapes(data, properties=copy(properties), text='shape_type')
new_properties = {'shape_type': ['B'] * shape[0]}
layer.properties = new_properties
np.testing.assert_equal(layer.text.values, new_properties['shape_type'])
def test_nd_text():
"""Test slicing of text coords with nD shapes"""
shapes_data = [
[[0, 10, 10, 10], [0, 10, 20, 20], [0, 10, 10, 20], [0, 10, 20, 10]],
[[1, 20, 30, 30], [1, 20, 50, 50], [1, 20, 50, 30], [1, 20, 30, 50]],
]
properties = {'shape_type': ['A', 'B']}
text_kwargs = {'text': 'shape_type', 'anchor': 'center'}
layer = Shapes(shapes_data, properties=properties, text=text_kwargs)
assert layer.ndim == 4
layer._slice_dims(point=[0, 10, 0, 0], ndisplay=2)
np.testing.assert_equal(layer._indices_view, [0])
np.testing.assert_equal(layer._view_text_coords[0], [[15, 15]])
layer._slice_dims(point=[1, 0, 0, 0], ndisplay=3)
np.testing.assert_equal(layer._indices_view, [1])
np.testing.assert_equal(layer._view_text_coords[0], [[20, 40, 40]])
@pytest.mark.parametrize("properties", [properties_array, properties_list])
def test_data_setter_with_text(properties):
"""Test layer data on a layer with text via the data setter"""
shape = (10, 4, 2)
np.random.seed(0)
data = 20 * np.random.random(shape)
layer = Shapes(data, properties=copy(properties), text='shape_type')
# test setting to data with fewer shapes
n_new_shapes = 4
new_data = 20 * np.random.random((n_new_shapes, 4, 2))
layer.data = new_data
assert len(layer.text.values) == n_new_shapes
# test setting to data with more shapes
n_new_shapes_2 = 6
new_data_2 = 20 * np.random.random((n_new_shapes_2, 4, 2))
layer.data = new_data_2
assert len(layer.text.values) == n_new_shapes_2
# test setting to data with same shapes
new_data_3 = 20 * np.random.random((n_new_shapes_2, 4, 2))
layer.data = new_data_3
assert len(layer.text.values) == n_new_shapes_2
def test_rectangles():
"""Test instantiating Shapes layer with a random 2D rectangles."""
# Test a single four corner rectangle
shape = (1, 4, 2)
np.random.seed(0)
data = 20 * np.random.random(shape)
layer = Shapes(data)
assert layer.nshapes == shape[0]
assert np.all(layer.data[0] == data[0])
assert layer.ndim == shape[2]
assert np.all([s == 'rectangle' for s in layer.shape_type])
# Test multiple four corner rectangles
shape = (10, 4, 2)
data = 20 * np.random.random(shape)
layer = Shapes(data)
assert layer.nshapes == shape[0]
assert np.all([np.all(ld == d) for ld, d in zip(layer.data, data)])
assert layer.ndim == shape[2]
assert np.all([s == 'rectangle' for s in layer.shape_type])
# Test a single two corner rectangle, which gets converted into four
# corner rectangle
shape = (1, 2, 2)
data = 20 * np.random.random(shape)
layer = Shapes(data)
assert layer.nshapes == 1
assert len(layer.data[0]) == 4
assert layer.ndim == shape[2]
assert np.all([s == 'rectangle' for s in layer.shape_type])
# Test multiple two corner rectangles
shape = (10, 2, 2)
data = 20 * np.random.random(shape)
layer = Shapes(data)
assert layer.nshapes == shape[0]
assert np.all([len(ld) == 4 for ld in layer.data])
assert layer.ndim == shape[2]
assert np.all([s == 'rectangle' for s in layer.shape_type])
def test_rectangles_with_shape_type():
"""Test instantiating rectangles with shape_type in data"""
# Test (rectangle, shape_type) tuple
shape = (1, 4, 2)
np.random.seed(0)
vertices = 20 * np.random.random(shape)
data = (vertices, "rectangle")
layer = Shapes(data)
assert layer.nshapes == shape[0]
assert np.all(layer.data[0] == data[0])
assert layer.ndim == shape[2]
assert np.all([s == 'rectangle' for s in layer.shape_type])
# Test (list of rectangles, shape_type) tuple
shape = (10, 4, 2)
vertices = 20 * np.random.random(shape)
data = (vertices, "rectangle")
layer = Shapes(data)
assert layer.nshapes == shape[0]
assert np.all([np.all(ld == d) for ld, d in zip(layer.data, vertices)])
assert layer.ndim == shape[2]
assert np.all([s == 'rectangle' for s in layer.shape_type])
# Test list of (rectangle, shape_type) tuples
data = [(vertices[i], "rectangle") for i in range(shape[0])]
layer = Shapes(data)
assert layer.nshapes == shape[0]
assert np.all([np.all(ld == d) for ld, d in zip(layer.data, vertices)])
assert layer.ndim == shape[2]
assert np.all([s == 'rectangle' for s in layer.shape_type])
def test_rectangles_roundtrip():
"""Test a full roundtrip with rectangles data."""
shape = (10, 4, 2)
np.random.seed(0)
data = 20 * np.random.random(shape)
layer = Shapes(data)
new_layer = Shapes(layer.data)
assert np.all([nd == d for nd, d in zip(new_layer.data, layer.data)])
def test_integer_rectangle():
"""Test instantiating rectangles with integer data."""
shape = (10, 2, 2)
np.random.seed(1)
data = np.random.randint(20, size=shape)
layer = Shapes(data)
assert layer.nshapes == shape[0]
assert np.all([len(ld) == 4 for ld in layer.data])
assert layer.ndim == shape[2]
assert np.all([s == 'rectangle' for s in layer.shape_type])
def test_negative_rectangle():
"""Test instantiating rectangles with negative data."""
shape = (10, 4, 2)
np.random.seed(0)
data = 20 * np.random.random(shape) - 10
layer = Shapes(data)
assert layer.nshapes == shape[0]
assert np.all([np.all(ld == d) for ld, d in zip(layer.data, data)])
assert layer.ndim == shape[2]
assert np.all([s == 'rectangle' for s in layer.shape_type])
def test_empty_rectangle():
"""Test instantiating rectangles with empty data."""
shape = (0, 0, 2)
np.random.seed(0)
data = 20 * np.random.random(shape)
layer = Shapes(data)
assert layer.nshapes == shape[0]
assert np.all([np.all(ld == d) for ld, d in zip(layer.data, data)])
assert layer.ndim == shape[2]
assert np.all([s == 'rectangle' for s in layer.shape_type])
def test_3D_rectangles():
"""Test instantiating Shapes layer with 3D planar rectangles."""
# Test a single four corner rectangle
np.random.seed(0)
planes = np.tile(np.arange(10).reshape((10, 1, 1)), (1, 4, 1))
corners = np.random.uniform(0, 10, size=(10, 4, 2))
data = np.concatenate((planes, corners), axis=2)
layer = Shapes(data)
assert layer.nshapes == len(data)
assert np.all([np.all(ld == d) for ld, d in zip(layer.data, data)])
assert layer.ndim == 3
assert np.all([s == 'rectangle' for s in layer.shape_type])
def test_ellipses():
"""Test instantiating Shapes layer with a random 2D ellipses."""
# Test a single four corner ellipses
shape = (1, 4, 2)
np.random.seed(0)
data = 20 * np.random.random(shape)
layer = Shapes(data, shape_type='ellipse')
assert layer.nshapes == shape[0]
assert np.all(layer.data[0] == data[0])
assert layer.ndim == shape[2]
assert np.all([s == 'ellipse' for s in layer.shape_type])
# Test multiple four corner ellipses
shape = (10, 4, 2)
np.random.seed(0)
data = 20 * np.random.random(shape)
layer = Shapes(data, shape_type='ellipse')
assert layer.nshapes == shape[0]
assert np.all([np.all(ld == d) for ld, d in zip(layer.data, data)])
assert layer.ndim == shape[2]
assert np.all([s == 'ellipse' for s in layer.shape_type])
# Test a single ellipse center radii, which gets converted into four
# corner ellipse
shape = (1, 2, 2)
np.random.seed(0)
data = 20 * np.random.random(shape)
layer = Shapes(data, shape_type='ellipse')
assert layer.nshapes == 1
assert len(layer.data[0]) == 4
assert layer.ndim == shape[2]
assert np.all([s == 'ellipse' for s in layer.shape_type])
# Test multiple center radii ellipses
shape = (10, 2, 2)
np.random.seed(0)
data = 20 * np.random.random(shape)
layer = Shapes(data, shape_type='ellipse')
assert layer.nshapes == shape[0]
assert np.all([len(ld) == 4 for ld in layer.data])
assert layer.ndim == shape[2]
assert np.all([s == 'ellipse' for s in layer.shape_type])
def test_ellipses_with_shape_type():
"""Test instantiating ellipses with shape_type in data"""
# Test single four corner (vertices, shape_type) tuple
shape = (1, 4, 2)
np.random.seed(0)
vertices = 20 * np.random.random(shape)
data = (vertices, "ellipse")
layer = Shapes(data)
assert layer.nshapes == shape[0]
assert np.all(layer.data[0] == data[0])
assert layer.ndim == shape[2]
assert np.all([s == 'ellipse' for s in layer.shape_type])
# Test multiple four corner (list of vertices, shape_type) tuple
shape = (10, 4, 2)
np.random.seed(0)
vertices = 20 * np.random.random(shape)
data = (vertices, "ellipse")
layer = Shapes(data)
assert layer.nshapes == shape[0]
assert np.all([np.all(ld == d) for ld, d in zip(layer.data, vertices)])
assert layer.ndim == shape[2]
assert np.all([s == 'ellipse' for s in layer.shape_type])
# Test list of four corner (vertices, shape_type) tuples
shape = (10, 4, 2)
np.random.seed(0)
vertices = 20 * np.random.random(shape)
data = [(vertices[i], "ellipse") for i in range(shape[0])]
layer = Shapes(data)
assert layer.nshapes == shape[0]
assert np.all([np.all(ld == d) for ld, d in zip(layer.data, vertices)])
assert layer.ndim == shape[2]
assert np.all([s == 'ellipse' for s in layer.shape_type])
# Test single (center-radii, shape_type) ellipse
shape = (1, 2, 2)
np.random.seed(0)
data = (20 * np.random.random(shape), "ellipse")
layer = Shapes(data)
assert layer.nshapes == 1
assert len(layer.data[0]) == 4
assert layer.ndim == shape[2]
assert np.all([s == 'ellipse' for s in layer.shape_type])
# Test (list of center-radii, shape_type) tuple
shape = (10, 2, 2)
np.random.seed(0)
center_radii = 20 * np.random.random(shape)
data = (center_radii, "ellipse")
layer = Shapes(data)
assert layer.nshapes == shape[0]
assert np.all([len(ld) == 4 for ld in layer.data])
assert layer.ndim == shape[2]
assert np.all([s == 'ellipse' for s in layer.shape_type])
# Test list of (center-radii, shape_type) tuples
shape = (10, 2, 2)
np.random.seed(0)
center_radii = 20 * np.random.random(shape)
data = [(center_radii[i], "ellipse") for i in range(shape[0])]
layer = Shapes(data)
assert layer.nshapes == shape[0]
assert np.all([len(ld) == 4 for ld in layer.data])
assert layer.ndim == shape[2]
assert np.all([s == 'ellipse' for s in layer.shape_type])
def test_4D_ellipse():
"""Test instantiating Shapes layer with 4D planar ellipse."""
# Test a single 4D ellipse
np.random.seed(0)
data = [
[
[3, 5, 108, 108],
[3, 5, 108, 148],
[3, 5, 148, 148],
[3, 5, 148, 108],
]
]
layer = Shapes(data, shape_type='ellipse')
assert layer.nshapes == len(data)
assert np.all([np.all(ld == d) for ld, d in zip(layer.data, data)])
assert layer.ndim == 4
assert np.all([s == 'ellipse' for s in layer.shape_type])
def test_ellipses_roundtrip():
"""Test a full roundtrip with ellipss data."""
shape = (10, 4, 2)
np.random.seed(0)
data = 20 * np.random.random(shape)
layer = Shapes(data, shape_type='ellipse')
new_layer = Shapes(layer.data, shape_type='ellipse')
assert np.all([nd == d for nd, d in zip(new_layer.data, layer.data)])
def test_lines():
"""Test instantiating Shapes layer with a random 2D lines."""
# Test a single two end point line
shape = (1, 2, 2)
np.random.seed(0)
data = 20 * np.random.random(shape)
layer = Shapes(data, shape_type='line')
assert layer.nshapes == shape[0]
assert np.all(layer.data[0] == data[0])
assert layer.ndim == shape[2]
assert np.all([s == 'line' for s in layer.shape_type])
# Test multiple lines
shape = (10, 2, 2)
np.random.seed(0)
data = 20 * np.random.random(shape)
layer = Shapes(data, shape_type='line')
assert layer.nshapes == shape[0]
assert np.all([np.all(ld == d) for ld, d in zip(layer.data, data)])
assert layer.ndim == shape[2]
assert np.all([s == 'line' for s in layer.shape_type])
def test_lines_with_shape_type():
"""Test instantiating lines with shape_type"""
# Test (single line, shape_type) tuple
shape = (1, 2, 2)
np.random.seed(0)
end_points = 20 * np.random.random(shape)
data = (end_points, 'line')
layer = Shapes(data)
assert layer.nshapes == shape[0]
assert np.all(layer.data[0] == end_points[0])
assert layer.ndim == shape[2]
assert np.all([s == 'line' for s in layer.shape_type])
# Test (multiple lines, shape_type) tuple
shape = (10, 2, 2)
np.random.seed(0)
end_points = 20 * np.random.random(shape)
data = (end_points, "line")
layer = Shapes(data)
assert layer.nshapes == shape[0]
assert np.all([np.all(ld == d) for ld, d in zip(layer.data, end_points)])
assert layer.ndim == shape[2]
assert np.all([s == 'line' for s in layer.shape_type])
# Test list of (line, shape_type) tuples
shape = (10, 2, 2)
np.random.seed(0)
end_points = 20 * np.random.random(shape)
data = [(end_points[i], "line") for i in range(shape[0])]
layer = Shapes(data)
assert layer.nshapes == shape[0]
assert np.all([np.all(ld == d) for ld, d in zip(layer.data, end_points)])
assert layer.ndim == shape[2]
assert np.all([s == 'line' for s in layer.shape_type])
def test_lines_roundtrip():
"""Test a full roundtrip with line data."""
shape = (10, 2, 2)
np.random.seed(0)
data = 20 * np.random.random(shape)
layer = Shapes(data, shape_type='line')
new_layer = Shapes(layer.data, shape_type='line')
assert np.all([nd == d for nd, d in zip(new_layer.data, layer.data)])
def test_paths():
"""Test instantiating Shapes layer with a random 2D paths."""
# Test a single path with 6 points
shape = (1, 6, 2)
np.random.seed(0)
data = 20 * np.random.random(shape)
layer = Shapes(data, shape_type='path')
assert layer.nshapes == shape[0]
assert np.all(layer.data[0] == data[0])
assert layer.ndim == shape[2]
assert np.all([s == 'path' for s in layer.shape_type])
# Test multiple paths with different numbers of points
data = [
20 * np.random.random((np.random.randint(2, 12), 2)) for i in range(10)
]
layer = Shapes(data, shape_type='path')
assert layer.nshapes == len(data)
assert np.all([np.all(ld == d) for ld, d in zip(layer.data, data)])
assert layer.ndim == 2
assert np.all([s == 'path' for s in layer.shape_type])
def test_paths_with_shape_type():
"""Test instantiating paths with shape_type in data"""
# Test (single path, shape_type) tuple
shape = (1, 6, 2)
np.random.seed(0)
path_points = 20 * np.random.random(shape)
data = (path_points, "path")
layer = Shapes(data)
assert layer.nshapes == shape[0]
assert np.all(layer.data[0] == path_points[0])
assert layer.ndim == shape[2]
assert np.all([s == 'path' for s in layer.shape_type])
# Test (list of paths, shape_type) tuple
path_points = [
20 * np.random.random((np.random.randint(2, 12), 2)) for i in range(10)
]
data = (path_points, "path")
layer = Shapes(data)
assert layer.nshapes == len(path_points)
assert np.all([np.all(ld == d) for ld, d in zip(layer.data, path_points)])
assert layer.ndim == 2
assert np.all([s == 'path' for s in layer.shape_type])
# Test list of (path, shape_type) tuples
data = [(path_points[i], "path") for i in range(len(path_points))]
layer = Shapes(data)
assert layer.nshapes == len(data)
assert np.all([np.all(ld == d) for ld, d in zip(layer.data, path_points)])
assert layer.ndim == 2
assert np.all([s == 'path' for s in layer.shape_type])
def test_paths_roundtrip():
"""Test a full roundtrip with path data."""
np.random.seed(0)
data = [
20 * np.random.random((np.random.randint(2, 12), 2)) for i in range(10)
]
layer = Shapes(data, shape_type='path')
new_layer = Shapes(layer.data, shape_type='path')
assert np.all(
[np.all(nd == d) for nd, d in zip(new_layer.data, layer.data)]
)
def test_polygons():
"""Test instantiating Shapes layer with a random 2D polygons."""
# Test a single polygon with 6 points
shape = (1, 6, 2)
np.random.seed(0)
data = 20 * np.random.random(shape)
layer = Shapes(data, shape_type='polygon')
assert layer.nshapes == shape[0]
assert np.all(layer.data[0] == data[0])
assert layer.ndim == shape[2]
assert np.all([s == 'polygon' for s in layer.shape_type])
# Test multiple polygons with different numbers of points
data = [
20 * np.random.random((np.random.randint(2, 12), 2)) for i in range(10)
]
layer = Shapes(data, shape_type='polygon')
assert layer.nshapes == len(data)
assert np.all([np.all(ld == d) for ld, d in zip(layer.data, data)])
assert layer.ndim == 2
assert np.all([s == 'polygon' for s in layer.shape_type])
def test_polygons_with_shape_type():
"""Test 2D polygons with shape_type in data"""
# Test single (polygon, shape_type) tuple
shape = (1, 6, 2)
np.random.seed(0)
vertices = 20 * np.random.random(shape)
data = (vertices, 'polygon')
layer = Shapes(data)
assert layer.nshapes == shape[0]
assert np.all(layer.data[0] == vertices[0])
assert layer.ndim == shape[2]
assert np.all([s == 'polygon' for s in layer.shape_type])
# Test (list of polygons, shape_type) tuple
polygons = [
20 * np.random.random((np.random.randint(2, 12), 2)) for i in range(10)
]
data = (polygons, 'polygon')
layer = Shapes(data)
assert layer.nshapes == len(polygons)
assert np.all([np.all(ld == d) for ld, d in zip(layer.data, polygons)])
assert layer.ndim == 2
assert np.all([s == 'polygon' for s in layer.shape_type])
# Test list of (polygon, shape_type) tuples
data = [(polygons[i], 'polygon') for i in range(len(polygons))]
layer = Shapes(data)
assert layer.nshapes == len(polygons)
assert np.all([np.all(ld == d) for ld, d in zip(layer.data, polygons)])
assert layer.ndim == 2
assert np.all([s == 'polygon' for s in layer.shape_type])
def test_polygon_roundtrip():
"""Test a full roundtrip with polygon data."""
np.random.seed(0)
data = [
20 * np.random.random((np.random.randint(2, 12), 2)) for i in range(10)
]
layer = Shapes(data, shape_type='polygon')
new_layer = Shapes(layer.data, shape_type='polygon')
assert np.all(
[np.all(nd == d) for nd, d in zip(new_layer.data, layer.data)]
)
def test_mixed_shapes():
"""Test instantiating Shapes layer with a mix of random 2D shapes."""
# Test multiple polygons with different numbers of points
np.random.seed(0)
shape_vertices = [
20 * np.random.random((np.random.randint(2, 12), 2)) for i in range(5)
] + list(np.random.random((5, 4, 2)))
shape_type = ['polygon'] * 5 + ['rectangle'] * 3 + ['ellipse'] * 2
layer = Shapes(shape_vertices, shape_type=shape_type)
assert layer.nshapes == len(shape_vertices)
assert np.all(
[np.all(ld == d) for ld, d in zip(layer.data, shape_vertices)]
)
assert layer.ndim == 2
assert np.all([s == so for s, so in zip(layer.shape_type, shape_type)])
# Test roundtrip with mixed data
new_layer = Shapes(layer.data, shape_type=layer.shape_type)
assert np.all(
[np.all(nd == d) for nd, d in zip(new_layer.data, layer.data)]
)
assert np.all(
[ns == s for ns, s in zip(new_layer.shape_type, layer.shape_type)]
)
def test_mixed_shapes_with_shape_type():
"""Test adding mixed shapes with shape_type in data"""
np.random.seed(0)
shape_vertices = [
20 * np.random.random((np.random.randint(2, 12), 2)) for i in range(5)
] + list(np.random.random((5, 4, 2)))
shape_type = ['polygon'] * 5 + ['rectangle'] * 3 + ['ellipse'] * 2
# Test multiple (shape, shape_type) tuples
data = list(zip(shape_vertices, shape_type))
layer = Shapes(data)
assert layer.nshapes == len(shape_vertices)
assert np.all(
[np.all(ld == d) for ld, d in zip(layer.data, shape_vertices)]
)
assert layer.ndim == 2
assert np.all([s == so for s, so in zip(layer.shape_type, shape_type)])
def test_data_shape_type_overwrites_meta():
"""Test shape type passed through data property overwrites metadata shape type"""
shape = (10, 4, 2)
np.random.seed(0)
vertices = 20 * np.random.random(shape)
data = (vertices, "ellipse")
layer = Shapes(data, shape_type='rectangle')
assert np.all([s == 'ellipse' for s in layer.shape_type])
data = [(vertices[i], "ellipse") for i in range(shape[0])]
layer = Shapes(data, shape_type='rectangle')
assert np.all([s == 'ellipse' for s in layer.shape_type])
def test_changing_shapes():
"""Test changing Shapes data."""
shape_a = (10, 4, 2)
shape_b = (20, 4, 2)
np.random.seed(0)
vertices_a = 20 * np.random.random(shape_a)
vertices_b = 20 * np.random.random(shape_b)
layer = Shapes(vertices_a)
assert layer.nshapes == shape_a[0]
layer.data = vertices_b
assert layer.nshapes == shape_b[0]
assert np.all([np.all(ld == d) for ld, d in zip(layer.data, vertices_b)])
assert layer.ndim == shape_b[2]
assert np.all([s == 'rectangle' for s in layer.shape_type])
# setting data with shape type
data_a = (vertices_a, "ellipse")
layer.data = data_a
assert layer.nshapes == shape_a[0]
assert np.all([np.all(ld == d) for ld, d in zip(layer.data, vertices_a)])
assert layer.ndim == shape_a[2]
assert np.all([s == 'ellipse' for s in layer.shape_type])
# setting data with fewer shapes
smaller_data = vertices_a[:5]
current_edge_color = layer._data_view.edge_color
current_edge_width = layer._data_view.edge_widths
current_face_color = layer._data_view.face_color
current_z = layer._data_view.z_indices
layer.data = smaller_data
assert layer.nshapes == smaller_data.shape[0]
assert np.allclose(layer._data_view.edge_color, current_edge_color[:5])
assert np.allclose(layer._data_view.face_color, current_face_color[:5])
assert np.allclose(layer._data_view.edge_widths, current_edge_width[:5])
assert np.allclose(layer._data_view.z_indices, current_z[:5])
# setting data with added shapes
current_edge_color = layer._data_view.edge_color
current_edge_width = layer._data_view.edge_widths
current_face_color = layer._data_view.face_color
current_z = layer._data_view.z_indices
bigger_data = vertices_b
layer.data = bigger_data
assert layer.nshapes == bigger_data.shape[0]
assert np.allclose(layer._data_view.edge_color[:5], current_edge_color)
assert np.allclose(layer._data_view.face_color[:5], current_face_color)
assert np.allclose(layer._data_view.edge_widths[:5], current_edge_width)
assert np.allclose(layer._data_view.z_indices[:5], current_z)
def test_changing_shape_type():
"""Test changing shape type"""
np.random.seed(0)
rectangles = 20 * np.random.random((10, 4, 2))
layer = Shapes(rectangles, shape_type='rectangle')
layer.shape_type = "ellipse"
assert np.all([s == 'ellipse' for s in layer.shape_type])
def test_adding_shapes():
"""Test adding shapes."""
# Start with polygons with different numbers of points
np.random.seed(0)
data = [
20 * np.random.random((np.random.randint(2, 12), 2)) for i in range(5)
]
# shape_type = ['polygon'] * 5 + ['rectangle'] * 3 + ['ellipse'] * 2
layer = Shapes(data, shape_type='polygon')
new_data = np.random.random((5, 4, 2))
new_shape_type = ['rectangle'] * 3 + ['ellipse'] * 2
layer.add(new_data, shape_type=new_shape_type)
all_data = data + list(new_data)
all_shape_type = ['polygon'] * 5 + new_shape_type
assert layer.nshapes == len(all_data)
assert np.all([np.all(ld == d) for ld, d in zip(layer.data, all_data)])
assert layer.ndim == 2
assert np.all([s == so for s, so in zip(layer.shape_type, all_shape_type)])
# test adding data with shape_type
new_vertices = np.random.random((5, 4, 2))
new_shape_type2 = ['ellipse'] * 3 + ['rectangle'] * 2
new_data2 = list(zip(new_vertices, new_shape_type2))
layer.add(new_data2)
all_vertices = all_data + list(new_vertices)
all_shape_type = all_shape_type + new_shape_type2
assert layer.nshapes == len(all_vertices)
assert np.all([np.all(ld == d) for ld, d in zip(layer.data, all_vertices)])
assert layer.ndim == 2
assert np.all([s == so for s, so in zip(layer.shape_type, all_shape_type)])
def test_adding_shapes_to_empty():
"""Test adding shapes to empty."""
data = np.empty((0, 0, 2))
np.random.seed(0)
layer = Shapes(data)
assert len(layer.data) == 0
data = [
20 * np.random.random((np.random.randint(2, 12), 2)) for i in range(5)
] + list(np.random.random((5, 4, 2)))
shape_type = ['path'] * 5 + ['rectangle'] * 3 + ['ellipse'] * 2
layer.add(data, shape_type=shape_type)
assert layer.nshapes == len(data)
assert np.all([np.all(ld == d) for ld, d in zip(layer.data, data)])
assert layer.ndim == 2
assert np.all([s == so for s, so in zip(layer.shape_type, shape_type)])
def test_selecting_shapes():
"""Test selecting shapes."""
np.random.seed(0)
data = 20 * np.random.random((10, 4, 2))
layer = Shapes(data)
layer.selected_data = {0, 1}
assert layer.selected_data == {0, 1}
layer.selected_data = {9}
assert layer.selected_data == {9}
layer.selected_data = set()
assert layer.selected_data == set()
def test_removing_all_shapes_empty_list():
"""Test removing all shapes with an empty list."""
np.random.seed(0)
data = 20 * np.random.random((10, 4, 2))
layer = Shapes(data)
assert layer.nshapes == 10
layer.data = []
assert layer.nshapes == 0
def test_removing_all_shapes_empty_array():
"""Test removing all shapes with an empty list."""
data = 20 * np.random.random((10, 4, 2))
np.random.seed(0)
layer = Shapes(data)
assert layer.nshapes == 10
layer.data = np.empty((0, 2))
assert layer.nshapes == 0
def test_removing_selected_shapes():
"""Test removing selected shapes."""
np.random.seed(0)
data = [
20 * np.random.random((np.random.randint(2, 12), 2)) for i in range(5)
] + list(np.random.random((5, 4, 2)))
shape_type = ['polygon'] * 5 + ['rectangle'] * 3 + ['ellipse'] * 2
layer = Shapes(data, shape_type=shape_type)
# With nothing selected no points should be removed
layer.remove_selected()
assert len(layer.data) == len(data)
# Select three shapes and remove them
layer.selected_data = {1, 7, 8}
layer.remove_selected()
keep = [0] + list(range(2, 7)) + [9]
data_keep = [data[i] for i in keep]
shape_type_keep = [shape_type[i] for i in keep]
assert len(layer.data) == len(data_keep)
assert len(layer.selected_data) == 0
assert np.all([np.all(ld == d) for ld, d in zip(layer.data, data_keep)])
assert layer.ndim == 2
assert np.all(
[s == so for s, so in zip(layer.shape_type, shape_type_keep)]
)
def test_changing_modes():
"""Test changing modes."""
np.random.seed(0)
data = 20 * np.random.random((10, 4, 2))
layer = Shapes(data)
assert layer.mode == 'pan_zoom'
assert layer.interactive is True
layer.mode = 'select'
assert layer.mode == 'select'
assert layer.interactive is False
layer.mode = 'direct'
assert layer.mode == 'direct'
assert layer.interactive is False
layer.mode = 'vertex_insert'
assert layer.mode == 'vertex_insert'
assert layer.interactive is False
layer.mode = 'vertex_remove'
assert layer.mode == 'vertex_remove'
assert layer.interactive is False
layer.mode = 'add_rectangle'
assert layer.mode == 'add_rectangle'
assert layer.interactive is False
layer.mode = 'add_ellipse'
assert layer.mode == 'add_ellipse'
assert layer.interactive is False
layer.mode = 'add_line'
assert layer.mode == 'add_line'
assert layer.interactive is False
layer.mode = 'add_path'
assert layer.mode == 'add_path'
assert layer.interactive is False
layer.mode = 'add_polygon'
assert layer.mode == 'add_polygon'
assert layer.interactive is False
layer.mode = 'pan_zoom'
assert layer.mode == 'pan_zoom'
assert layer.interactive is True
def test_name():
"""Test setting layer name."""
np.random.seed(0)
data = 20 * np.random.random((10, 4, 2))
layer = Shapes(data)
assert layer.name == 'Shapes'
layer = Shapes(data, name='random')
assert layer.name == 'random'
layer.name = 'shps'
assert layer.name == 'shps'
def test_visibility():
"""Test setting layer visibility."""
np.random.seed(0)
data = 20 * np.random.random((10, 4, 2))
layer = Shapes(data)
assert layer.visible is True
layer.visible = False
assert layer.visible is False
layer = Shapes(data, visible=False)
assert layer.visible is False
layer.visible = True
assert layer.visible is True
def test_opacity():
"""Test setting opacity."""
shape = (10, 4, 2)
np.random.seed(0)
data = 20 * np.random.random(shape)
layer = Shapes(data)
# Check default opacity value of 0.7
assert layer.opacity == 0.7
# Select data and change opacity of selection
layer.selected_data = {0, 1}
assert layer.opacity == 0.7
layer.opacity = 0.5
assert layer.opacity == 0.5
# Add new shape and test its width
new_shape = np.random.random((1, 4, 2))
layer.selected_data = set()
layer.add(new_shape)
assert layer.opacity == 0.5
# Instantiate with custom opacity
layer2 = Shapes(data, opacity=0.2)
assert layer2.opacity == 0.2
# Check removing data shouldn't change opacity
layer2.selected_data = {0, 2}
layer2.remove_selected()
assert len(layer2.data) == shape[0] - 2
assert layer2.opacity == 0.2
def test_blending():
"""Test setting layer blending."""
np.random.seed(0)
data = 20 * np.random.random((10, 4, 2))
layer = Shapes(data)
assert layer.blending == 'translucent'
layer.blending = 'additive'
assert layer.blending == 'additive'
layer = Shapes(data, blending='additive')
assert layer.blending == 'additive'
layer.blending = 'opaque'
assert layer.blending == 'opaque'
@pytest.mark.filterwarnings("ignore:elementwise comparison fail:FutureWarning")
@pytest.mark.parametrize("attribute", ['edge', 'face'])
def test_switch_color_mode(attribute):
"""Test switching between color modes"""
shape = (10, 4, 2)
np.random.seed(0)
data = 20 * np.random.random(shape)
# create a continuous property with a known value in the last element
continuous_prop = np.random.random((shape[0],))
continuous_prop[-1] = 1
properties = {
'shape_truthiness': continuous_prop,
'shape_type': _make_cycled_properties(['A', 'B'], shape[0]),
}
initial_color = [1, 0, 0, 1]
color_cycle = ['red', 'blue']
color_kwarg = f'{attribute}_color'
colormap_kwarg = f'{attribute}_colormap'
color_cycle_kwarg = f'{attribute}_color_cycle'
args = {
color_kwarg: initial_color,
colormap_kwarg: 'gray',
color_cycle_kwarg: color_cycle,
}
layer = Shapes(data, properties=properties, **args)
layer_color_mode = getattr(layer, f'{attribute}_color_mode')
layer_color = getattr(layer, f'{attribute}_color')
assert layer_color_mode == 'direct'
np.testing.assert_allclose(
layer_color, np.repeat([initial_color], shape[0], axis=0)
)
# there should not be an edge_color_property
color_property = getattr(layer, f'_{attribute}_color_property')
assert color_property == ''
# transitioning to colormap should raise a warning
# because there isn't an edge color property yet and
# the first property in shapes.properties is being automatically selected
with pytest.warns(UserWarning):
setattr(layer, f'{attribute}_color_mode', 'colormap')
color_property = getattr(layer, f'_{attribute}_color_property')
assert color_property == next(iter(properties))
layer_color = getattr(layer, f'{attribute}_color')
np.testing.assert_allclose(layer_color[-1], [1, 1, 1, 1])
# switch to color cycle
setattr(layer, f'{attribute}_color_mode', 'cycle')
setattr(layer, f'{attribute}_color', 'shape_type')
color = getattr(layer, f'{attribute}_color')
layer_color = transform_color(color_cycle * int(shape[0] / 2))
np.testing.assert_allclose(color, layer_color)
# switch back to direct, edge_colors shouldn't change
setattr(layer, f'{attribute}_color_mode', 'direct')
new_edge_color = getattr(layer, f'{attribute}_color')
np.testing.assert_allclose(new_edge_color, color)
@pytest.mark.parametrize("attribute", ['edge', 'face'])
def test_color_direct(attribute: str):
"""Test setting face/edge color directly."""
shape = (10, 4, 2)
np.random.seed(0)
data = 20 * np.random.random(shape)
layer_kwargs = {f'{attribute}_color': 'black'}
layer = Shapes(data, **layer_kwargs)
color_array = transform_color(['black'] * shape[0])
current_color = getattr(layer, f'current_{attribute}_color')
layer_color = getattr(layer, f'{attribute}_color')
assert current_color == 'black'
assert len(layer_color) == shape[0]
np.testing.assert_allclose(color_array, layer_color)
# With no data selected changing color has no effect
setattr(layer, f'current_{attribute}_color', 'blue')
current_color = getattr(layer, f'current_{attribute}_color')
assert current_color == 'blue'
np.testing.assert_allclose(color_array, layer_color)
# Select data and change edge color of selection
selected_data = {0, 1}
layer.selected_data = {0, 1}
current_color = getattr(layer, f'current_{attribute}_color')
assert current_color == 'black'
setattr(layer, f'current_{attribute}_color', 'green')
colorarray_green = transform_color(['green'] * len(layer.selected_data))
color_array[list(selected_data)] = colorarray_green
layer_color = getattr(layer, f'{attribute}_color')
np.testing.assert_allclose(color_array, layer_color)
# Add new shape and test its color
new_shape = np.random.random((1, 4, 2))
layer.selected_data = set()
setattr(layer, f'current_{attribute}_color', 'blue')
layer.add(new_shape)
color_array = np.vstack([color_array, transform_color('blue')])
layer_color = getattr(layer, f'{attribute}_color')
assert len(layer_color) == shape[0] + 1
np.testing.assert_allclose(color_array, layer_color)
# Check removing data adjusts colors correctly
layer.selected_data = {0, 2}
layer.remove_selected()
assert len(layer.data) == shape[0] - 1
layer_color = getattr(layer, f'{attribute}_color')
assert len(layer_color) == shape[0] - 1
np.testing.assert_allclose(
layer_color,
np.vstack((color_array[1], color_array[3:])),
)
# set the color directly
setattr(layer, f'{attribute}_color', 'black')
color_array = np.tile([[0, 0, 0, 1]], (len(layer.data), 1))
layer_color = getattr(layer, f'{attribute}_color')
np.testing.assert_allclose(color_array, layer_color)
@pytest.mark.parametrize("attribute", ['edge', 'face'])
def test_single_shape_properties(attribute):
"""Test creating single shape with properties"""
shape = (4, 2)
np.random.seed(0)
data = 20 * np.random.random(shape)
layer_kwargs = {f'{attribute}_color': 'red'}
layer = Shapes(data, **layer_kwargs)
layer_color = getattr(layer, f'{attribute}_color')
assert len(layer_color) == 1
np.testing.assert_allclose([1, 0, 0, 1], layer_color[0])
color_cycle_str = ['red', 'blue']
color_cycle_rgb = [[1, 0, 0], [0, 0, 1]]
color_cycle_rgba = [[1, 0, 0, 1], [0, 0, 1, 1]]
@pytest.mark.parametrize("attribute", ['edge', 'face'])
@pytest.mark.parametrize(
"color_cycle",
[color_cycle_str, color_cycle_rgb, color_cycle_rgba],
)
def test_color_cycle(attribute, color_cycle):
"""Test setting edge/face color with a color cycle list"""
# create Shapes using list color cycle
shape = (10, 4, 2)
np.random.seed(0)
data = 20 * np.random.random(shape)
properties = {'shape_type': _make_cycled_properties(['A', 'B'], shape[0])}
shapes_kwargs = {
'properties': properties,
f'{attribute}_color': 'shape_type',
f'{attribute}_color_cycle': color_cycle,
}
layer = Shapes(data, **shapes_kwargs)
assert layer.properties == properties
color_array = transform_color(
list(islice(cycle(color_cycle), 0, shape[0]))
)
layer_color = getattr(layer, f'{attribute}_color')
np.testing.assert_allclose(layer_color, color_array)
# Add new shape and test its color
new_shape = np.random.random((1, 4, 2))
layer.selected_data = {0}
layer.add(new_shape)
layer_color = getattr(layer, f'{attribute}_color')
assert len(layer_color) == shape[0] + 1
np.testing.assert_allclose(
layer_color,
np.vstack((color_array, transform_color('red'))),
)
# Check removing data adjusts colors correctly
layer.selected_data = {0, 2}
layer.remove_selected()
assert len(layer.data) == shape[0] - 1
layer_color = getattr(layer, f'{attribute}_color')
assert len(layer_color) == shape[0] - 1
np.testing.assert_allclose(
layer_color,
np.vstack((color_array[1], color_array[3:], transform_color('red'))),
)
# refresh colors
layer.refresh_colors(update_color_mapping=True)
# test adding a shape with a new property value
layer.selected_data = set()
current_properties = layer.current_properties
current_properties['shape_type'] = np.array(['new'])
layer.current_properties = current_properties
new_shape_2 = np.random.random((1, 4, 2))
layer.add(new_shape_2)
color_cycle_map = getattr(layer, f'{attribute}_color_cycle_map')
assert 'new' in color_cycle_map
np.testing.assert_allclose(
color_cycle_map['new'], np.squeeze(transform_color(color_cycle[0]))
)
@pytest.mark.parametrize("attribute", ['edge', 'face'])
def test_add_color_cycle_to_empty_layer(attribute):
"""Test adding a shape to an empty layer when edge/face color is a color cycle
See: https://github.com/napari/napari/pull/1069
"""
default_properties = {'shape_type': np.array(['A'])}
color_cycle = ['red', 'blue']
shapes_kwargs = {
'properties': default_properties,
f'{attribute}_color': 'shape_type',
f'{attribute}_color_cycle': color_cycle,
}
layer = Shapes(**shapes_kwargs)
# verify the current_edge_color is correct
expected_color = transform_color(color_cycle[0])
current_color = getattr(layer, f'_current_{attribute}_color')
np.testing.assert_allclose(current_color, expected_color)
# add a shape
np.random.seed(0)
new_shape = 20 * np.random.random((1, 4, 2))
layer.add(new_shape)
props = {'shape_type': np.array(['A'])}
from typing import Tuple, Dict, Any, Union, Callable
import numpy as np
import scipy.ndimage as ndi
from common.exceptionmanager import catch_error_exception
from common.functionutil import ImagesUtil
from preprocessing.imagegenerator import ImageGenerator
_epsilon = 1e-6
class TransformRigidImages(ImageGenerator):
def __init__(self,
size_image: Union[Tuple[int, int, int], Tuple[int, int]],
is_normalize_data: bool = False,
type_normalize_data: str = 'samplewise',
is_zca_whitening: bool = False,
is_inverse_transform: bool = False,
rescale_factor: float = None,
preprocessing_function: Callable[[np.ndarray], np.ndarray] = None
) -> None:
super(TransformRigidImages, self).__init__(size_image, num_images=1)
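# 'featurewise' normalizes with dataset-wide statistics (requires a prior fit);
# 'samplewise' uses per-image statistics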
if is_normalize_data:
if type_normalize_data == 'featurewise':
self._featurewise_center = True
self._featurewise_std_normalization = True
self._samplewise_center = False
self._samplewise_std_normalization = False
else:
# type_normalize_data == 'samplewise'
self._featurewise_center = False
self._featurewise_std_normalization = False
self._samplewise_center = True
self._samplewise_std_normalization = True
else:
self._featurewise_center = False
self._featurewise_std_normalization = False
self._samplewise_center = False
self._samplewise_std_normalization = False
self._is_zca_whitening = is_zca_whitening
self._zca_epsilon = 1e-6
self._rescale_factor = rescale_factor
self._preprocessing_function = preprocessing_function
self._mean = None
self._std = None
self._principal_components = None
self._is_inverse_transform = is_inverse_transform
self._initialize_gendata()
def update_image_data(self, in_shape_image: Tuple[int, ...]) -> None:
# self._num_images = in_shape_image[0]
pass
def _initialize_gendata(self) -> None:
self._transform_matrix = None
self._transform_params = None
self._count_trans_in_images = 0
def _update_gendata(self, **kwargs) -> None:
seed = kwargs['seed']
(self._transform_matrix, self._transform_params) = self._calc_gendata_random_transform(seed)
self._count_trans_in_images = 0
def _get_image(self, in_image: np.ndarray) -> np.ndarray:
is_type_input_image = (self._count_trans_in_images == 0)
self._count_trans_in_images += 1
return self._get_transformed_image(in_image, is_type_input_image=is_type_input_image)
def _get_transformed_image(self, in_image: np.ndarray, is_type_input_image: bool = False) -> np.ndarray:
if ImagesUtil.is_without_channels(self._size_image, in_image.shape):
in_image = np.expand_dims(in_image, axis=-1)
is_reshape_input_image = True
else:
is_reshape_input_image = False
in_image = self._calc_transformed_image(in_image, is_type_input_image=is_type_input_image)
if is_type_input_image:
in_image = self._standardize(in_image)
if is_reshape_input_image:
in_image = np.squeeze(in_image, axis=-1)
return in_image
def _get_inverse_transformed_image(self, in_image: np.ndarray, is_type_input_image: bool = False) -> np.ndarray:
if ImagesUtil.is_without_channels(self._size_image, in_image.shape):
in_image = np.expand_dims(in_image, axis=-1)
is_reshape_input_image = True
else:
is_reshape_input_image = False
if is_type_input_image:
in_image = self._standardize_inverse(in_image)
in_image = self._calc_inverse_transformed_image(in_image, is_type_input_image=is_type_input_image)
if is_reshape_input_image:
in_image = np.squeeze(in_image, axis=-1)
return in_image
def _calc_transformed_image(self, in_array: np.ndarray, is_type_input_image: bool = False) -> np.ndarray:
raise NotImplementedError
def _calc_inverse_transformed_image(self, in_array: np.ndarray, is_type_input_image: bool = False) -> np.ndarray:
raise NotImplementedError
def _calc_gendata_random_transform(self, seed: int = None) -> Tuple[np.ndarray, Dict[str, Any]]:
raise NotImplementedError
def _calc_gendata_inverse_random_transform(self, seed: int = None) -> Tuple[np.ndarray, Dict[str, Any]]:
raise NotImplementedError
def _standardize(self, in_image: np.ndarray) -> np.ndarray:
if self._preprocessing_function:
in_image = self._preprocessing_function(in_image)
if self._rescale_factor:
in_image *= self._rescale_factor
if self._samplewise_center:
in_image -= np.mean(in_image, keepdims=True)
if self._samplewise_std_normalization:
in_image /= (np.std(in_image, keepdims=True) + _epsilon)
template_message_error = 'This ImageDataGenerator specifies \'%s\', but it hasn\'t been fit on any ' \
'training data. Fit it first by calling \'fit(numpy_data)\'.'
if self._featurewise_center:
if self._mean is not None:
in_image -= self._mean
else:
message = template_message_error % ('featurewise_center')
catch_error_exception(message)
if self._featurewise_std_normalization:
if self._std is not None:
in_image /= (self._std + _epsilon)
else:
message = template_message_error % ('featurewise_std_normalization')
                catch_error_exception(message)
if self._is_zca_whitening:
if self._principal_components is not None:
flatx = np.reshape(in_image, (-1, np.prod(in_image.shape[-3:])))
whitex = np.dot(flatx, self._principal_components)
in_image = np.reshape(whitex, in_image.shape)
else:
message = template_message_error % ('zca_whitening')
catch_error_exception(message)
return in_image
def _standardize_inverse(self, in_image: np.ndarray) -> np.ndarray:
template_message_error = 'This ImageDataGenerator specifies \'%s\', but it hasn\'t been fit on any ' \
'training data. Fit it first by calling \'fit(numpy_data)\'.'
if self._is_zca_whitening:
if self._principal_components is not None:
flatx = np.reshape(in_image, (-1, np.prod(in_image.shape[-3:])))
inverse_principal_componens = np.divide(1.0, self._principal_components)
whitex = np.dot(flatx, inverse_principal_componens)
in_image = np.reshape(whitex, in_image.shape)
else:
message = template_message_error % ('zca_whitening')
catch_error_exception(message)
if self._featurewise_std_normalization:
if self._std is not None:
in_image *= self._std
else:
message = template_message_error % ('featurewise_std_normalization')
catch_error_exception(message)
if self._featurewise_center:
if self._mean is not None:
in_image += self._mean
else:
message = template_message_error % ('featurewise_center')
catch_error_exception(message)
if self._samplewise_std_normalization:
in_image *= np.std(in_image, keepdims=True)
if self._samplewise_center:
in_image += np.mean(in_image, keepdims=True)
if self._rescale_factor:
in_image /= self._rescale_factor
if self._preprocessing_function:
catch_error_exception('Not implemented inverse preprocessing function')
return in_image
@staticmethod
def _flip_axis(in_image: np.ndarray, axis: int) -> np.ndarray:
in_image = np.asarray(in_image).swapaxes(axis, 0)
in_image = in_image[::-1, ...]
in_image = in_image.swapaxes(0, axis)
return in_image
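    # Note (added): swapping the target axis to the front and reversing it is
    # equivalent to np.flip(in_image, axis) for any ndarray.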
@staticmethod
def _apply_channel_shift(in_image: np.ndarray, intensity: int, channel_axis: int = 0) -> np.ndarray:
in_image = np.rollaxis(in_image, channel_axis, 0)
min_x, max_x = np.min(in_image), np.max(in_image)
channel_images = [np.clip(x_channel + intensity, min_x, max_x) for x_channel in in_image]
in_image = np.stack(channel_images, axis=0)
in_image = np.rollaxis(in_image, 0, channel_axis + 1)
return in_image
def _apply_brightness_shift(self, in_image: np.ndarray, brightness: int) -> np.ndarray:
catch_error_exception('Not implemented brightness shifting option...')
# in_image = array_to_img(in_image)
# in_image = imgenhancer_Brightness = ImageEnhance.Brightness(in_image)
# in_image = imgenhancer_Brightness.enhance(brightness)
# in_image = img_to_array(in_image)
def get_text_description(self) -> str:
raise NotImplementedError
class TransformRigidImages2D(TransformRigidImages):
_img_row_axis = 0
_img_col_axis = 1
_img_channel_axis = 2
def __init__(self,
size_image: Tuple[int, int],
is_normalize_data: bool = False,
type_normalize_data: str = 'samplewise',
is_zca_whitening: bool = False,
rotation_range: float = 0.0,
width_shift_range: float = 0.0,
height_shift_range: float = 0.0,
brightness_range: Tuple[float, float] = None,
shear_range: float = 0.0,
zoom_range: Union[float, Tuple[float, float]] = 0.0,
channel_shift_range: float = 0.0,
fill_mode: str = 'nearest',
cval: float = 0.0,
horizontal_flip: bool = False,
vertical_flip: bool = False,
rescale_factor: float = None,
preprocessing_function: Callable[[np.ndarray], np.ndarray] = None
) -> None:
self._rotation_range = rotation_range
self._width_shift_range = width_shift_range
self._height_shift_range = height_shift_range
self._brightness_range = brightness_range
self._shear_range = shear_range
self._channel_shift_range = channel_shift_range
self._fill_mode = fill_mode
self._cval = cval
self._horizontal_flip = horizontal_flip
self._vertical_flip = vertical_flip
if np.isscalar(zoom_range):
self._zoom_range = (1 - zoom_range, 1 + zoom_range)
elif len(zoom_range) == 2:
self._zoom_range = (zoom_range[0], zoom_range[1])
else:
message = '\'zoom_range\' should be a float or a tuple of two floats. Received %s' % (str(zoom_range))
catch_error_exception(message)
if self._brightness_range is not None:
if len(self._brightness_range) != 2:
message = '\'brightness_range\' should be a tuple of two floats. Received %s' % (str(brightness_range))
catch_error_exception(message)
super(TransformRigidImages2D, self).__init__(size_image,
is_normalize_data=is_normalize_data,
type_normalize_data=type_normalize_data,
is_zca_whitening=is_zca_whitening,
rescale_factor=rescale_factor,
preprocessing_function=preprocessing_function)
def _calc_transformed_image(self, in_image: np.ndarray, is_type_input_image: bool = False) -> np.ndarray:
# Apply: 1st: rigid transformations
# 2nd: channel shift intensity / flipping
if self._transform_matrix is not None:
in_image = self._apply_transform(in_image, self._transform_matrix,
channel_axis=self._img_channel_axis,
fill_mode=self._fill_mode, cval=self._cval)
if is_type_input_image and (self._transform_params.get('channel_shift_intensity') is not None):
in_image = self._apply_channel_shift(in_image, self._transform_params['channel_shift_intensity'],
channel_axis=self._img_channel_axis)
if self._transform_params.get('flip_horizontal', False):
in_image = self._flip_axis(in_image, axis=self._img_col_axis)
if self._transform_params.get('flip_vertical', False):
in_image = self._flip_axis(in_image, axis=self._img_row_axis)
if is_type_input_image and (self._transform_params.get('brightness') is not None):
in_image = self._apply_brightness_shift(in_image, self._transform_params['brightness'])
return in_image
def _calc_inverse_transformed_image(self, in_image: np.ndarray, is_type_input_image: bool = False) -> np.ndarray:
# Apply: 1st: channel shift intensity / flipping
# 2nd: rigid transformations
if is_type_input_image and (self._transform_params.get('brightness') is not None):
in_image = self._apply_brightness_shift(in_image, self._transform_params['brightness'])
if self._transform_params.get('flip_vertical', False):
in_image = self._flip_axis(in_image, axis=self._img_row_axis)
if self._transform_params.get('flip_horizontal', False):
in_image = self._flip_axis(in_image, axis=self._img_col_axis)
if is_type_input_image and (self._transform_params.get('channel_shift_intensity') is not None):
in_image = self._apply_channel_shift(in_image, self._transform_params['channel_shift_intensity'],
channel_axis=self._img_channel_axis)
if self._transform_matrix is not None:
in_image = self._apply_transform(in_image, self._transform_matrix,
channel_axis=self._img_channel_axis,
fill_mode=self._fill_mode, cval=self._cval)
return in_image
def _calc_gendata_random_transform(self, seed: int = None) -> Tuple[np.ndarray, Dict[str, Any]]:
# compute composition of homographies
if seed is not None:
np.random.seed(seed)
# ****************************************************
if self._rotation_range:
theta = np.deg2rad(np.random.uniform(-self._rotation_range, self._rotation_range))
else:
theta = 0
if self._height_shift_range:
tx = np.random.uniform(-self._height_shift_range, self._height_shift_range)
if np.max(self._height_shift_range) < 1:
tx *= self._size_image[self._img_row_axis]
else:
tx = 0
if self._width_shift_range:
ty = np.random.uniform(-self._width_shift_range, self._width_shift_range)
if np.max(self._width_shift_range) < 1:
ty *= self._size_image[self._img_col_axis]
else:
ty = 0
if self._shear_range:
shear = np.deg2rad(np.random.uniform(-self._shear_range, self._shear_range))
else:
shear = 0
if self._zoom_range[0] == 1 and self._zoom_range[1] == 1:
zx, zy = 1, 1
else:
zx, zy = np.random.uniform(self._zoom_range[0], self._zoom_range[1], 2)
flip_horizontal = (np.random.random() < 0.5) * self._horizontal_flip
flip_vertical = (np.random.random() < 0.5) * self._vertical_flip
channel_shift_intensity = None
if self._channel_shift_range != 0:
channel_shift_intensity = np.random.uniform(-self._channel_shift_range, self._channel_shift_range)
brightness = None
if self._brightness_range is not None:
brightness = np.random.uniform(self._brightness_range[0], self._brightness_range[1])
transform_parameters = {'flip_horizontal': flip_horizontal,
'flip_vertical': flip_vertical,
'channel_shift_intensity': channel_shift_intensity,
'brightness': brightness}
# ****************************************************
# ****************************************************
transform_matrix = None
if theta != 0:
rotation_matrix = np.array([[np.cos(theta), -np.sin(theta), 0],
[np.sin(theta), np.cos(theta), 0],
[0, 0, 1]])
transform_matrix = rotation_matrix
if tx != 0 or ty != 0:
shift_matrix = np.array([[1, 0, tx],
[0, 1, ty],
[0, 0, 1]])
transform_matrix = shift_matrix if transform_matrix is None else np.dot(transform_matrix, shift_matrix)
if shear != 0:
shear_matrix = np.array([[1, -np.sin(shear), 0],
[0, np.cos(shear), 0],
[0, 0, 1]])
transform_matrix = shear_matrix if transform_matrix is None else np.dot(transform_matrix, shear_matrix)
if zx != 1 or zy != 1:
zoom_matrix = np.array([[zx, 0, 0],
[0, zy, 0],
[0, 0, 1]])
transform_matrix = zoom_matrix if transform_matrix is None else np.dot(transform_matrix, zoom_matrix)
if transform_matrix is not None:
h, w = self._size_image[self._img_row_axis], self._size_image[self._img_col_axis]
transform_matrix = self._transform_matrix_offset_center(transform_matrix, h, w)
# ****************************************************
return (transform_matrix, transform_parameters)
def _calc_gendata_inverse_random_transform(self, seed: int = None) -> Tuple[np.ndarray, Dict[str, Any]]:
# compute composition of inverse homographies
if seed is not None:
np.random.seed(seed)
# ****************************************************
if self._rotation_range:
theta = np.deg2rad(np.random.uniform(-self._rotation_range, self._rotation_range))
else:
theta = 0
if self._height_shift_range:
tx = np.random.uniform(-self._height_shift_range, self._height_shift_range)
if self._height_shift_range < 1:
tx *= self._size_image[self._img_row_axis]
else:
tx = 0
if self._width_shift_range:
ty = np.random.uniform(-self._width_shift_range, self._width_shift_range)
if self._width_shift_range < 1:
ty *= self._size_image[self._img_col_axis]
else:
ty = 0
if self._shear_range:
shear = np.deg2rad(np.random.uniform(-self._shear_range, self._shear_range))
else:
shear = 0
if self._zoom_range[0] == 1 and self._zoom_range[1] == 1:
zx, zy = 1, 1
else:
zx, zy = np.random.uniform(self._zoom_range[0], self._zoom_range[1], 2)
flip_horizontal = (np.random.random() < 0.5) * self._horizontal_flip
flip_vertical = (np.random.random() < 0.5) * self._vertical_flip
channel_shift_intensity = None
if self._channel_shift_range != 0:
channel_shift_intensity = np.random.uniform(-self._channel_shift_range, self._channel_shift_range)
brightness = None
if self._brightness_range is not None:
brightness = np.random.uniform(self._brightness_range[0], self._brightness_range[1])
transform_parameters = {'flip_horizontal': flip_horizontal,
'flip_vertical': flip_vertical,
'channel_shift_intensity': channel_shift_intensity,
'brightness': brightness}
# ****************************************************
# ****************************************************
transform_matrix = None
if theta != 0:
rotation_matrix = np.array([[np.cos(theta), np.sin(theta), 0],
[-np.sin(theta), np.cos(theta), 0],
[0, 0, 1]])
transform_matrix = rotation_matrix
if tx != 0 or ty != 0:
shift_matrix = np.array([[1, 0, -tx],
[0, 1, -ty],
[0, 0, 1]])
transform_matrix = shift_matrix if transform_matrix is None else np.dot(transform_matrix, shift_matrix)
if shear != 0:
shear_matrix = np.array([[1, np.tan(shear), 0],
[0, 1.0 / np.cos(shear), 0],
[0, 0, 1]])
transform_matrix = shear_matrix if transform_matrix is None else np.dot(transform_matrix, shear_matrix)
if zx != 1 or zy != 1:
zoom_matrix = np.array([[1.0 / zx, 0, 0],
[0, 1.0 / zy, 0],
[0, 0, 1]])
transform_matrix = zoom_matrix if transform_matrix is None else np.dot(transform_matrix, zoom_matrix)
if transform_matrix is not None:
h, w = self._size_image[self._img_row_axis], self._size_image[self._img_col_axis]
transform_matrix = self._transform_matrix_offset_center(transform_matrix, h, w)
# ****************************************************
return (transform_matrix, transform_parameters)
@staticmethod
def _transform_matrix_offset_center(matrix: np.ndarray, x: int, y: int) -> np.ndarray:
o_x = float(x) / 2 + 0.5
o_y = float(y) / 2 + 0.5
offset_matrix = np.array([[1, 0, o_x], [0, 1, o_y], [0, 0, 1]])
reset_matrix = np.array([[1, 0, -o_x], [0, 1, -o_y], [0, 0, 1]])
transform_matrix = np.dot(np.dot(offset_matrix, matrix), reset_matrix)
return transform_matrix
@staticmethod
def _apply_transform(in_image: np.ndarray, transform_matrix: np.ndarray,
channel_axis: int = 0, fill_mode: str = 'nearest', cval: float = 0.0) -> np.ndarray:
in_image = np.rollaxis(in_image, channel_axis, 0)
final_affine_matrix = transform_matrix[:2, :2]
final_offset = transform_matrix[:2, 2]
        channel_images = [ndi.affine_transform(x_channel, final_affine_matrix, final_offset, order=1,
                                               mode=fill_mode, cval=cval) for x_channel in in_image]
in_image = np.stack(channel_images, axis=0)
in_image = np.rollaxis(in_image, 0, channel_axis + 1)
return in_image
def get_text_description(self) -> str:
message = 'Rigid 2D transformations of images, with parameters...\n'
message += 'rotation (plane_XY) range: \'%s\'...\n' % (self._rotation_range)
message += 'shift (width, height) range: \'(%s, %s)\'...\n' \
% (self._width_shift_range, self._height_shift_range)
message += 'flip (horizontal, vertical): \'(%s, %s)\'...\n' \
% (self._horizontal_flip, self._vertical_flip)
message += 'zoom (min, max) range: \'(%s, %s)\'...\n' % (self._zoom_range[0], self._zoom_range[1])
message += 'shear (plane_XY) range: \'%s\'...\n' % (self._shear_range)
message += 'fill mode, when applied transformation: \'%s\'...\n' % (self._fill_mode)
return message
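# ----------------------------------------------------------------------------
# Illustrative sketch (added; not part of the original module): how the 2D
# pipeline composes a centred rotation homography and applies it channel-wise,
# mirroring _transform_matrix_offset_center and _apply_transform above. The
# helper name `_demo_rigid_rotation_2d` is hypothetical.
def _demo_rigid_rotation_2d(seed: int = 0) -> np.ndarray:
    np.random.seed(seed)
    image = np.random.random((64, 64, 1))        # (rows, cols, channels)
    theta = np.deg2rad(10.0)
    rotation = np.array([[np.cos(theta), -np.sin(theta), 0],
                         [np.sin(theta), np.cos(theta), 0],
                         [0, 0, 1]])
    # shift the rotation centre to the middle of the image
    o_x = image.shape[0] / 2 + 0.5
    o_y = image.shape[1] / 2 + 0.5
    offset = np.array([[1, 0, o_x], [0, 1, o_y], [0, 0, 1]])
    reset = np.array([[1, 0, -o_x], [0, 1, -o_y], [0, 0, 1]])
    matrix = np.dot(np.dot(offset, rotation), reset)
    # apply the affine part per channel, as _apply_transform does
    channels = [ndi.affine_transform(image[..., c], matrix[:2, :2], matrix[:2, 2],
                                     order=1, mode='nearest')
                for c in range(image.shape[-1])]
    return np.stack(channels, axis=-1)
# ----------------------------------------------------------------------------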
class TransformRigidImages3D(TransformRigidImages):
_img_dep_axis = 0
_img_row_axis = 1
_img_col_axis = 2
_img_channel_axis = 3
def __init__(self,
size_image: Tuple[int, int, int],
is_normalize_data: bool = False,
type_normalize_data: str = 'samplewise',
is_zca_whitening: bool = False,
rotation_xy_range: float = 0.0,
rotation_xz_range: float = 0.0,
rotation_yz_range: float = 0.0,
width_shift_range: float = 0.0,
height_shift_range: float = 0.0,
depth_shift_range: float = 0.0,
brightness_range: Tuple[float, float] = None,
shear_xy_range: float = 0.0,
shear_xz_range: float = 0.0,
shear_yz_range: float = 0.0,
zoom_range: Union[float, Tuple[float, float]] = 0.0,
channel_shift_range: float = 0.0,
fill_mode: str = 'nearest',
cval: float = 0.0,
horizontal_flip: bool = False,
vertical_flip: bool = False,
axialdir_flip: bool = False,
rescale_factor: float = None,
preprocessing_function: Callable[[np.ndarray], np.ndarray] = None
) -> None:
self._rotation_xy_range = rotation_xy_range
self._rotation_xz_range = rotation_xz_range
self._rotation_yz_range = rotation_yz_range
self._width_shift_range = width_shift_range
self._height_shift_range = height_shift_range
self._depth_shift_range = depth_shift_range
self._brightness_range = brightness_range
self._shear_xy_range = shear_xy_range
self._shear_xz_range = shear_xz_range
self._shear_yz_range = shear_yz_range
self._channel_shift_range = channel_shift_range
self._fill_mode = fill_mode
self._cval = cval
self._horizontal_flip = horizontal_flip
self._vertical_flip = vertical_flip
self._axialdir_flip = axialdir_flip
if np.isscalar(zoom_range):
self._zoom_range = (1 - zoom_range, 1 + zoom_range)
elif len(zoom_range) == 2:
self._zoom_range = (zoom_range[0], zoom_range[1])
else:
message = '\'zoom_range\' should be a float or a tuple of two floats. Received %s' % (str(zoom_range))
catch_error_exception(message)
if self._brightness_range is not None:
if len(self._brightness_range) != 2:
message = '\'brightness_range\' should be a tuple of two floats. Received %s' % (str(brightness_range))
catch_error_exception(message)
super(TransformRigidImages3D, self).__init__(size_image,
is_normalize_data=is_normalize_data,
type_normalize_data=type_normalize_data,
is_zca_whitening=is_zca_whitening,
rescale_factor=rescale_factor,
preprocessing_function=preprocessing_function)
def _calc_transformed_image(self, in_image: np.ndarray, is_type_input_image: bool = False) -> np.ndarray:
# Apply: 1st: rigid transformations
# 2nd: channel shift intensity / flipping
if self._transform_matrix is not None:
in_image = self._apply_transform(in_image, self._transform_matrix,
channel_axis=self._img_channel_axis,
fill_mode=self._fill_mode, cval=self._cval)
if is_type_input_image and (self._transform_params.get('channel_shift_intensity') is not None):
in_image = self._apply_channel_shift(in_image, self._transform_params['channel_shift_intensity'],
channel_axis=self._img_channel_axis)
if self._transform_params.get('flip_horizontal', False):
in_image = self._flip_axis(in_image, axis=self._img_col_axis)
if self._transform_params.get('flip_vertical', False):
in_image = self._flip_axis(in_image, axis=self._img_row_axis)
if self._transform_params.get('flip_axialdir', False):
in_image = self._flip_axis(in_image, axis=self._img_dep_axis)
if is_type_input_image and (self._transform_params.get('brightness') is not None):
in_image = self._apply_brightness_shift(in_image, self._transform_params['brightness'])
return in_image
def _calc_inverse_transformed_image(self, in_image: np.ndarray, is_type_input_image: bool = False) -> np.ndarray:
# Apply: 1st: channel shift intensity / flipping
# 2nd: rigid transformations
if is_type_input_image and (self._transform_params.get('brightness') is not None):
in_image = self._apply_brightness_shift(in_image, self._transform_params['brightness'])
if self._transform_params.get('flip_axialdir', False):
in_image = self._flip_axis(in_image, axis=self._img_dep_axis)
if self._transform_params.get('flip_vertical', False):
in_image = self._flip_axis(in_image, axis=self._img_row_axis)
if self._transform_params.get('flip_horizontal', False):
in_image = self._flip_axis(in_image, axis=self._img_col_axis)
if is_type_input_image and (self._transform_params.get('channel_shift_intensity') is not None):
in_image = self._apply_channel_shift(in_image, self._transform_params['channel_shift_intensity'],
channel_axis=self._img_channel_axis)
if self._transform_matrix is not None:
in_image = self._apply_transform(in_image, self._transform_matrix,
channel_axis=self._img_channel_axis,
fill_mode=self._fill_mode, cval=self._cval)
return in_image
def _calc_gendata_random_transform(self, seed: int = None) -> Tuple[np.ndarray, Dict[str, Any]]:
# compute composition of homographies
if seed is not None:
np.random.seed(seed)
# ****************************************************
if self._rotation_xy_range:
angle_xy = np.deg2rad(np.random.uniform(-self._rotation_xy_range, self._rotation_xy_range))
else:
angle_xy = 0
if self._rotation_xz_range:
angle_xz = np.deg2rad(np.random.uniform(-self._rotation_xz_range, self._rotation_xz_range))
else:
angle_xz = 0
if self._rotation_yz_range:
angle_yz = np.deg2rad(np.random.uniform(-self._rotation_yz_range, self._rotation_yz_range))
else:
angle_yz = 0
if self._height_shift_range:
tx = np.random.uniform(-self._height_shift_range, self._height_shift_range)
if self._height_shift_range < 1:
tx *= self._size_image[self._img_row_axis]
else:
tx = 0
if self._width_shift_range:
ty = np.random.uniform(-self._width_shift_range, self._width_shift_range)
if self._width_shift_range < 1:
ty *= self._size_image[self._img_col_axis]
else:
ty = 0
if self._depth_shift_range:
tz = np.random.uniform(-self._depth_shift_range, self._depth_shift_range)
if self._depth_shift_range < 1:
tz *= self._size_image[self._img_dep_axis]
else:
tz = 0
if self._shear_xy_range:
shear_xy = np.deg2rad(np.random.uniform(-self._shear_xy_range, self._shear_xy_range))
else:
shear_xy = 0
if self._shear_xz_range:
shear_xz = np.deg2rad(np.random.uniform(-self._shear_xz_range, self._shear_xz_range))
else:
shear_xz = 0
if self._shear_yz_range:
shear_yz = np.deg2rad(np.random.uniform(-self._shear_yz_range, self._shear_yz_range))
else:
shear_yz = 0
if self._zoom_range[0] == 1 and self._zoom_range[1] == 1:
(zx, zy, zz) = (1, 1, 1)
else:
(zx, zy, zz) = np.random.uniform(self._zoom_range[0], self._zoom_range[1], 3)
flip_horizontal = (np.random.random() < 0.5) * self._horizontal_flip
flip_vertical = (np.random.random() < 0.5) * self._vertical_flip
flip_axialdir = (np.random.random() < 0.5) * self._axialdir_flip
channel_shift_intensity = None
if self._channel_shift_range != 0:
channel_shift_intensity = np.random.uniform(-self._channel_shift_range, self._channel_shift_range)
brightness = None
if self._brightness_range is not None:
brightness = np.random.uniform(self._brightness_range[0], self._brightness_range[1])
transform_parameters = {'flip_horizontal': flip_horizontal,
'flip_vertical': flip_vertical,
'flip_axialdir': flip_axialdir,
'channel_shift_intensity': channel_shift_intensity,
'brightness': brightness}
# ****************************************************
# ****************************************************
transform_matrix = None
if angle_xy != 0:
rotation_matrix = np.array([[1, 0, 0, 0],
[0, np.cos(angle_xy), -np.sin(angle_xy), 0],
[0, np.sin(angle_xy), np.cos(angle_xy), 0],
[0, 0, 0, 1]])
transform_matrix = rotation_matrix
if angle_xz != 0:
rotation_matrix = np.array([[np.cos(angle_xz), np.sin(angle_xz), 0, 0],
[-np.sin(angle_xz), np.cos(angle_xz), 0, 0],
[0, 0, 1, 0],
[0, 0, 0, 1]])
transform_matrix = \
rotation_matrix if transform_matrix is None else np.dot(transform_matrix, rotation_matrix)
if angle_yz != 0:
rotation_matrix = np.array([[np.cos(angle_yz), 0, np.sin(angle_yz), 0],
[0, 1, 0, 0],
[-np.sin(angle_yz), 0, np.cos(angle_yz), 0],
[0, 0, 0, 1]])
transform_matrix = \
rotation_matrix if transform_matrix is None else np.dot(transform_matrix, rotation_matrix)
if tx != 0 or ty != 0 or tz != 0:
shift_matrix = np.array([[1, 0, 0, tz],
[0, 1, 0, tx],
[0, 0, 1, ty],
[0, 0, 0, 1]])
transform_matrix = shift_matrix if transform_matrix is None else np.dot(transform_matrix, shift_matrix)
if shear_xy != 0:
shear_matrix = np.array([[1, 0, 0, 0],
[0, 1, -np.sin(shear_xy), 0],
[0, 0, np.cos(shear_xy), 0],
[0, 0, 0, 1]])
transform_matrix = shear_matrix if transform_matrix is None else np.dot(transform_matrix, shear_matrix)
if shear_xz != 0:
shear_matrix = np.array([[np.cos(shear_xz), 0, 0, 0],
[-np.sin(shear_xz), 1, 0, 0],
[0, 0, 1, 0],
[0, 0, 0, 1]])
transform_matrix = shear_matrix if transform_matrix is None else np.dot(transform_matrix, shear_matrix)
if shear_yz != 0:
shear_matrix = np.array([[np.cos(shear_yz), 0, 0, 0],
[0, 1, 0, 0],
[-np.sin(shear_yz), 0, 1, 0],
[0, 0, 0, 1]])
transform_matrix = shear_matrix if transform_matrix is None else np.dot(transform_matrix, shear_matrix)
if zx != 1 or zy != 1 or zz != 1:
zoom_matrix = np.array([[zz, 0, 0, 0],
[0, zx, 0, 0],
[0, 0, zy, 0],
[0, 0, 0, 1]])
transform_matrix = zoom_matrix if transform_matrix is None else np.dot(transform_matrix, zoom_matrix)
if transform_matrix is not None:
(d, h, w) = (self._size_image[self._img_dep_axis],
self._size_image[self._img_row_axis],
self._size_image[self._img_col_axis])
transform_matrix = self._transform_matrix_offset_center(transform_matrix, d, h, w)
# ****************************************************
return (transform_matrix, transform_parameters)
def _calc_gendata_inverse_random_transform(self, seed: int = None) -> Tuple[np.ndarray, Dict[str, Any]]:
# compute composition of inverse homographies
if seed is not None:
np.random.seed(seed)
# ****************************************************
if self._rotation_xy_range:
angle_xy = np.deg2rad(np.random.uniform(-self._rotation_xy_range, self._rotation_xy_range))
else:
angle_xy = 0
if self._rotation_xz_range:
angle_xz = np.deg2rad(np.random.uniform(-self._rotation_xz_range, self._rotation_xz_range))
else:
angle_xz = 0
if self._rotation_yz_range:
angle_yz = np.deg2rad(np.random.uniform(-self._rotation_yz_range, self._rotation_yz_range))
else:
angle_yz = 0
if self._height_shift_range:
tx = np.random.uniform(-self._height_shift_range, self._height_shift_range)
if self._height_shift_range < 1:
tx *= self._size_image[self._img_row_axis]
else:
tx = 0
if self._width_shift_range:
ty = np.random.uniform(-self._width_shift_range, self._width_shift_range)
if self._width_shift_range < 1:
ty *= self._size_image[self._img_col_axis]
else:
ty = 0
if self._depth_shift_range:
tz = np.random.uniform(-self._depth_shift_range, self._depth_shift_range)
if self._depth_shift_range < 1:
tz *= self._size_image[self._img_dep_axis]
else:
tz = 0
if self._shear_xy_range:
shear_xy = np.deg2rad(np.random.uniform(-self._shear_xy_range, self._shear_xy_range))
else:
shear_xy = 0
if self._shear_xz_range:
shear_xz = np.deg2rad(np.random.uniform(-self._shear_xz_range, self._shear_xz_range))
else:
shear_xz = 0
if self._shear_yz_range:
shear_yz = np.deg2rad(np.random.uniform(-self._shear_yz_range, self._shear_yz_range))
else:
shear_yz = 0
if self._zoom_range[0] == 1 and self._zoom_range[1] == 1:
(zx, zy, zz) = (1, 1, 1)
else:
(zx, zy, zz) = np.random.uniform(self._zoom_range[0], self._zoom_range[1], 3)
flip_horizontal = (np.random.random() < 0.5) * self._horizontal_flip
flip_vertical = (np.random.random() < 0.5) * self._vertical_flip
flip_axialdir = (np.random.random() < 0.5) * self._axialdir_flip
channel_shift_intensity = None
if self._channel_shift_range != 0:
channel_shift_intensity = np.random.uniform(-self._channel_shift_range, self._channel_shift_range)
brightness = None
if self._brightness_range is not None:
brightness = np.random.uniform(self._brightness_range[0], self._brightness_range[1])
transform_parameters = {'flip_horizontal': flip_horizontal,
'flip_vertical': flip_vertical,
'flip_axialdir': flip_axialdir,
'channel_shift_intensity': channel_shift_intensity,
'brightness': brightness}
# ****************************************************
# ****************************************************
transform_matrix = None
if angle_xy != 0:
rotation_matrix = np.array([[1, 0, 0, 0],
[0, np.cos(angle_xy), np.sin(angle_xy), 0],
[0, -np.sin(angle_xy), np.cos(angle_xy), 0],
[0, 0, 0, 1]])
transform_matrix = rotation_matrix
if angle_xz != 0:
rotation_matrix = np.array([[np.cos(angle_xz), -np.sin(angle_xz), 0, 0],
                                        [np.sin(angle_xz), np.cos(angle_xz), 0, 0],
                                        [0, 0, 1, 0],
                                        [0, 0, 0, 1]])
            transform_matrix = \
                rotation_matrix if transform_matrix is None else np.dot(transform_matrix, rotation_matrix)
import json
import os
import numpy as np, h5py
import scipy.io as sp
import pandas as pd
from depth import depthlist
from feature_smooth import feature_smooth
from utils import angle_between, cross_validation
def parse_feats(f_in,f_out,f_in_d,depth,oversample):
""" Load """
json_files = os.listdir(f_in)
face_feats_all = np.zeros([2, len(json_files), 210], dtype=np.float64)
pose_feats_all = np.zeros([2, len(json_files), 54], dtype=np.float64)
pose_feats = np.zeros([len(json_files), 66], dtype=np.float64)
for idx in range(0,len(json_files)):
data = json.load(open(f_in + json_files[idx]))
if len(data['people']) > 0:
face_feats_all[0,idx] = data['people'][0]['face_keypoints']
pose_feats_all[0,idx] = data['people'][0]['pose_keypoints']
try:
face_feats_all[1,idx] = data['people'][1]['face_keypoints']
pose_feats_all[1,idx] = data['people'][1]['pose_keypoints']
except IndexError:
pass
else:
face_feats_all[0,idx] = np.zeros([210])
            face_feats_all[1,idx] = np.zeros([210])
from scipy.optimize import leastsq
from copy import deepcopy
import numpy as np
import warnings
def standardize_vector(vector):
if vector[0] != 0:
if vector[0] < 0:
vector = np.array(vector) * -1
vector = vector.tolist()
elif vector[1] != 0:
if vector[1] < 0:
vector = np.array(vector) * -1
vector = vector.tolist()
else:
if vector[2] < 0:
vector = np.array(vector) * -1
vector = vector.tolist()
for i in range(3):
vector[i] = vector[i] + 0
return vector
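# Usage illustration (added): the first nonzero component is forced positive,
# so antiparallel vectors standardize to the same representative:
#     standardize_vector([-1, 2, 0])   # -> [1, -2, 0]
#     standardize_vector([1, -2, 0])   # -> [1, -2, 0]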
def is_rasci_transition(configuration, reference, n_electron=1, max_jump=10):
"""
Determine if a configuration corresponds to a transition of n_electron
:param configuration: dictionary containing the configuration to be analyzed
:param reference: reference configuration (in general lowest energy Slater determinant)
:param n_electron:
:param max_jump: Restrict to transitions with jumps less or equal to max_jump orbitals
:return: True if conditions are met, otherwise False
"""
warnings.warn('This function will be deprecated, use "is_transition" instead', DeprecationWarning)
alpha_diff = [int(i) - int(j) for i, j in zip(configuration['alpha'], reference['alpha'])]
beta_diff = [int(i) - int(j) for i, j in zip(configuration['beta'], reference['beta'])]
ini_alpha = np.where(np.array(alpha_diff) < 0)[0]
fin_alpha = np.where(np.array(alpha_diff) > 0)[0]
ini_beta = np.where(np.array(beta_diff) < 0)[0]
fin_beta = np.where(np.array(beta_diff) > 0)[0]
try:
jump_alpha = np.max(fin_alpha) - np.min(ini_alpha)
except ValueError:
jump_alpha = 0
try:
jump_beta = np.max(fin_beta) - np.min(ini_beta)
except ValueError:
jump_beta = 0
n_alpha = len(fin_alpha)
n_beta = len(fin_beta)
elec_condition = n_alpha + n_beta == n_electron
jump_condition = jump_alpha <= max_jump and jump_beta <= max_jump
return elec_condition and jump_condition
def get_ratio_of_condition_rasci(state, n_electron=1, max_jump=10):
# reference = {'hole': '', 'alpha': '111000', 'beta': '111000', 'part': '', 'amplitude': 0.9777044}
warnings.warn('This function will be deprecated, use "get_ratio_of_condition" instead', DeprecationWarning)
alpha_int = [int(i) for i in state['configurations'][0]['alpha']]
ref_alpha = ''
for i in range(len(alpha_int)):
if i < np.sum(alpha_int):
ref_alpha += '1'
else:
ref_alpha += '0'
reference = {'hole': '', 'alpha': ref_alpha, 'beta': ref_alpha, 'part': ''}
p = 0
for configuration in state['configurations']:
if is_rasci_transition(configuration, reference, n_electron, max_jump):
p += configuration['amplitude']**2
return p
def is_transition(configuration, reference, n_electron=1, max_jump=10):
"""
Determine if a configuration corresponds to a transition of n_electron
:param configuration: dictionary containing the configuration to be analyzed
:param reference: reference configuration (in general lowest energy Slater determinant)
:param n_electron: number of electrons in the transition
:param max_jump: Restrict to transitions with jumps less or equal to max_jump orbitals
:return: True if conditions are met, otherwise False
"""
alpha_diff = [int(i) - int(j) for i, j in zip(configuration['occupations']['alpha'], reference['alpha'])]
beta_diff = [int(i) - int(j) for i, j in zip(configuration['occupations']['beta'], reference['beta'])]
ini_alpha = np.where(np.array(alpha_diff) < 0)[0]
fin_alpha = np.where(np.array(alpha_diff) > 0)[0]
ini_beta = np.where(np.array(beta_diff) < 0)[0]
fin_beta = np.where(np.array(beta_diff) > 0)[0]
try:
jump_alpha = np.max(fin_alpha) - np.min(ini_alpha)
except ValueError:
jump_alpha = 0
try:
        jump_beta = np.max(fin_beta) - np.min(ini_beta)
    except ValueError:
        jump_beta = 0
    n_alpha = len(fin_alpha)
    n_beta = len(fin_beta)
    elec_condition = n_alpha + n_beta == n_electron
    jump_condition = jump_alpha <= max_jump and jump_beta <= max_jump
    return elec_condition and jump_condition
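# Illustrative check (added): a single-electron excitation relative to a
# closed-shell reference, using the dictionary layout from the docstring.
# The occupation strings here are made up:
#     reference = {'alpha': '1100', 'beta': '1100'}
#     config = {'occupations': {'alpha': '1010', 'beta': '1100'}}
#     is_transition(config, reference, n_electron=1)   # -> True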
#%%
# Preprocessing
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
df = pd.read_csv('Pokemon.csv')
df = df.drop(["Unnamed: 0","Name"], axis = 1)
title_mapping = {False: 0, True: 1}
df['Legendary'] = df['Legendary'].map(title_mapping)
title_mapping2 = {'Normal': 1, 'Water': 2,'Psychic':3}
df['Type 1'] = df['Type 1'].map(title_mapping2)
for i in range(8):
df.iloc[:,i+1] = (df.iloc[:,i+2]-df.iloc[:,i+2].mean())/df.iloc[:,i+2].std()
x_train = df.iloc[:120,:]
x_test = df.iloc[120:,:]
#%%
#1. K-nearest-neighbor classifier
def euclideanDistance(data, test, n1, n2):
d1=np.sqrt(np.sum(np.square(data.iloc[n1,1:]-test.iloc[n2,1:])))
return(d1)
def pred(data,test):
eudis = []
predict = []
true = []
for i in range(test.shape[0]):
dis = []
p = []
for j in range(data.shape[0]):
dis.append(euclideanDistance(data, test, j, i))
p.append(data.iloc[j,0])
eudis.append(dis)
predict.append(p)
true.append(test.iloc[i,0])
return(eudis,predict,true)
eudis,predict,true = pred(x_train,x_test)
def select_k(dis,p,k):
pred=[]
for i in range(len(dis)):
idx = np.argsort(dis[i])
pred.append(np.array(p[i])[idx][:k])
pred = np.array(pred)
if k==1:
return(np.array(pred))
else:
pp = []
for j in range(len(pred)):
d = {x:list(pred[j]).count(x) for x in list(pred[j])}
a,b = list(d.keys()),list(d.values())
pp.append(a[b.index(max(b))])
return(np.array(pp))
def accuracy(p,t):
h = 0
for i in range(len(p)):
if p[i] == np.array(t)[i]:
h += 1
return(h/len(p))
acc = []
for k in range(1,11):
acc.append(accuracy(select_k(eudis,predict,k),true))
acc
k = [i for i in range(1,11)]
plt.plot(k,acc)
plt.xlabel('k')
plt.ylabel('accuracy')
plt.title('KNN')
plt.savefig('KNN.png')
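#%%
# (Added sketch.) The pairwise Euclidean distances above can also be computed
# without the double Python loop using numpy broadcasting; the helper name
# `_knn_distances` is hypothetical:
def _knn_distances(train, test):
    # train: (n_train, d) and test: (n_test, d) feature matrices
    diff = test[:, None, :] - train[None, :, :]    # (n_test, n_train, d)
    return np.sqrt(np.sum(diff ** 2, axis=-1))     # (n_test, n_train)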
#%%
#(2).PCA
def pca(df,k):
xtx = np.dot(df.T,df)
    [df_eig_v,df_eig_vec] = np.linalg.eigh(xtx)
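    # (Assumed continuation -- the source file is truncated here.) A standard
    # PCA would sort the eigenpairs in descending order and project the data
    # onto the top-k components, e.g.:
    #     idx = np.argsort(df_eig_v)[::-1][:k]
    #     return np.dot(df, df_eig_vec[:, idx])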
"""
@author: <NAME>
"""
import yfinance as yf
import datetime as dt
import pandas as pd
import numpy as np
from pandas.plotting import table
import matplotlib.pyplot as plt
from scipy.stats import levene
# Download historical data for S&P 500
ticker = "^GSPC"
SnP = yf.download(ticker, start="1991-02-01", end="2018-06-01")
SnP = SnP.drop(['Open','High','Low','Close','Volume'], axis=1)
SnP['Return'] = SnP['Adj Close'].pct_change()
def Ret_everyndays(DF,n):
""" This function takes in the SnP data, calculates the returns every n days, returns a list"""
df = DF.copy().drop('Return', axis = 1).iloc[::n, :]
ret = df['Adj Close'].pct_change().to_list()
return ret
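# Usage note (added): e.g. Ret_everyndays(SnP, 5) gives the series of 5-day
# (weekly) simple returns; its first element is NaN since pct_change has no
# prior observation.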
def MV_Breach(mvg_avg_days,DF):
"""this function takes in the MA days, df of close prices & outputs events when MA was breached
. In order for the moving average to be breached, the previous day’s closing price
has to be ABOVE the moving average and today's close must be BELOW the moving average"""
df = DF.copy().drop("Return", axis=1)
df["Moving Average Price"] = df["Adj Close"].rolling(mvg_avg_days).mean()
last_close_price = df["Adj Close"].iloc[mvg_avg_days-2]
df = df.iloc[mvg_avg_days-2:, ]
df_BreakingDMA = df[(df["Adj Close"].shift(1) > df["Moving Average Price"].shift(1)) & (df["Adj Close"] < df["Moving Average Price"])]
df_BreakingDMA = df_BreakingDMA.reset_index().rename(columns={'Date': f'Date {mvg_avg_days}d MA is breached','Adj Close': 'Closing Price on Day 0'})
df_BreakingDMA = df_BreakingDMA[[f'Date {mvg_avg_days}d MA is breached', 'Moving Average Price', 'Closing Price on Day 0']]
return df_BreakingDMA
def strategyretdata(Price,breachdata,n,N):
""" Extract the close prices 1d,2d,..,nd from the breach date. Then calculate the returns for each of such intervals
taking the price as on breach date as the base price"""
price = Price.copy()
price = price.reset_index()
dict = {}
for i in breachdata[f'Date {N}d MA is breached']:
x = price[price['Date'] == i]['Adj Close'].index.values
S = pd.Series(SnP['Adj Close'][x[0]:x[0] + n])
first_element = S[0]
dict[i] = list(S.apply(lambda y: ((y / first_element) - 1)))
return dict
# Create a DataFrame with SnP returns from every 1d,2d,...,40d
SnP_Returns = pd.DataFrame()
df = pd.DataFrame()
for k in range(1, 41):
Column_Name = str(f'Date - EVERY {k} DAYS')
df[Column_Name] = np.array(Ret_everyndays(SnP, k))
SnP_Returns = pd.concat([SnP_Returns, df], axis=1)
df = pd.DataFrame()
continue
SnP_Returns.drop(index=0, inplace=True)
# Create DataFrame with the Close Prices on the Breach Date
Breach_data_50DMA = MV_Breach(50,SnP) # 50 DMA
Breach_data_100DMA = MV_Breach(100,SnP) # 100 DMA
Breach_data_200DMA = MV_Breach(200,SnP) # 200 DMA
# Create a DataFrame with the Strategy Returns every 1d,2d,....,40d from the Breach Date
Breach_ret_50DMA = pd.DataFrame(dict((k, v) for k, v in strategyretdata(SnP, Breach_data_50DMA, 41, 50).items() if len(v)==41)).transpose().drop(columns=0)
Breach_ret_100DMA = pd.DataFrame(dict((k, v) for k, v in strategyretdata(SnP, Breach_data_100DMA, 41, 100).items() if len(v)==41)).transpose().drop(columns=0)
Breach_ret_200DMA = pd.DataFrame(dict((k, v) for k, v in strategyretdata(SnP, Breach_data_200DMA, 41, 200).items() if len(v)==41)).transpose().drop(columns=0)
# Performing Levene's Test on 50d,100d,200d against S&P_Ret for very 1d,2d,3d,...,40d holding period
P_Values = pd.DataFrame(index=range(1,41), columns=["Levene's test p-value MA 200 ","Levene's test p-value MA 100","Levene's test p-value MA 50"])
for i in range(0, 40):
stat1, p1 = levene(list(Breach_ret_50DMA.iloc[:, i].dropna()), list(SnP_Returns.iloc[:, i].dropna()))
stat2, p2 = levene(list(Breach_ret_100DMA.iloc[:, i].dropna()), list(SnP_Returns.iloc[:, i].dropna()))
stat3, p3 = levene(list(Breach_ret_200DMA.iloc[:, i].dropna()), list(SnP_Returns.iloc[:, i].dropna()))
P_Values.iloc[i, 2] = p1
P_Values.iloc[i, 1] = p2
P_Values.iloc[i, 0] = p3
# Analyzing the p-values for 50d,100d and 200d MA
mu1 = round(np.mean(P_Values.iloc[:, 0]), 2)
sigma1 = round(np.std(P_Values.iloc[:, 0]), 2)
plt.subplot(311)
plt.hist(P_Values.iloc[:,0], 20, density=True)
plt.title("Histogram of 'p-value - MA 200': '$\mu={}$, $\sigma={}$'".format(mu1, sigma1))
plt.xticks([]) # Disables xticks
plt.axvline(x=0.05, color='r', label='p-value of 0.05', linestyle='--', linewidth=1)
plt.legend()
mu2 = round(np.mean(P_Values.iloc[:, 1]), 2)
sigma2 = round(np.std(P_Values.iloc[:, 1]), 2)
"""
Module: LMR_verify_proxy_plot.py
Purpose: Plotting of summary statistics from proxy-based verification. Both proxy
chronologies that were assimilated to create reconstructions and those witheld for
independent verification are considered.
Input: Reads .pckl files containing verification data generated by running the
LMR_verify_proxy.py script.
Originator: <NAME> | Dept. of Atmospheric Sciences, Univ. of Washington
| October 2015
Revisions:
- Histogram plots now include overlays of distributions derived from kernel
(gaussian) density estimation, as well as results from a 2-sided
Kolmogorov-Smirnov significance test on distributions representing results
over two different time periods [<NAME> - U. of Washington, Dec. 2017]
- Added the production of plots showing results per individual proxy records.
[<NAME> - U. of Washington, March 2018]
"""
import os
import numpy as np
import pickle
from time import time
from os.path import join
from scipy import stats
import statsmodels.api as sm
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.colors as colors
from matplotlib.colors import from_levels_and_colors
from mpl_toolkits.basemap import Basemap
# =========================================================================================
# START: set user parameters here
# =========================================================================================
# ------------------------------
# Section 1: Plotting parameters
# ------------------------------
make_plots_hist = True
make_plots_maps = False
make_plots_individual_sites = False
make_pdfs = False
# set the default size of the figure in inches. ['figure.figsize'] = width, height;
plt.rcParams['figure.figsize'] = 10, 8 # that's default image size for this interactive session
plt.rcParams['axes.linewidth'] = 2.0 #set the value globally
plt.rcParams['font.weight'] = 'bold' #set the font weight globally
plt.rcParams['font.size'] = 11 #set the font size globally
#plt.rc('text', usetex=True)
plt.rc('text', usetex=False)
plt.style.use('ggplot')
# Histogram plotting parameters
binwidth = 0.05
CORRrange = [-1,1]
CErange = [-1,1]
CEchangerange = [-1,1]
alpha = 0.25
fcolor = ['blue', 'red']
# -------------------------
# Section 2: Proxy datasets
# -------------------------
#proxies = 'PAGES2kv1'
proxies = 'LMRdb'
# Assign symbol to proxy types for plotting: dependent on proxy database used.
if proxies == 'PAGES2kv1':
# PAGES2kv1 proxies
proxy_verif = {\
'Tree ring_Width' :'o',\
'Tree ring_Density' :'s',\
'Ice core_d18O' :'v',\
'Ice core_d2H' :'^',\
'Ice core_Accumulation' :'D',\
'Coral_d18O' :'p',\
'Coral_Luminescence' :'8',\
'Lake sediment_All' :'<',\
'Marine sediment_All' :'>',\
'Speleothem_All' :'h',\
}
elif proxies == 'LMRdb':
# LMRdb proxies
proxy_verif = {\
'Tree Rings_WoodDensity' :'s',\
'Tree Rings_WidthPages' :'o',\
'Tree Rings_WidthPages2' :'o',\
'Tree Rings_WidthBreit' :'o',\
'Tree Rings_Isotopes' :'*',\
'Corals and Sclerosponges_d18O' :'p',\
'Corals and Sclerosponges_SrCa' :'h',\
'Corals and Sclerosponges_Rates':'D',\
'Ice Cores_d18O' :'v',\
'Ice Cores_dD' :'^',\
'Ice Cores_Accumulation' :'D',\
'Ice Cores_MeltFeature' :'d',\
'Lake Cores_Varve' :'<',\
'Lake Cores_Misc' :'>',\
'Lake Cores_BioMarkers' :'>',\
'Lake Cores_GeoChem' :'^',\
'Marine Cores_d18O' :'H',\
'Bivalve_d18O' :'8',\
'Speleothems_d18O' :'h',\
}
else:
    raise SystemExit('ERROR in the specification of the proxy dataset to be considered. Exiting!')
# Only keep proxy sites for which the linear PSM has a correlation >= than this value
r_crit = 0.0
#r_crit = 0.2
# ------------------------------------
# Section 3: Directories & experiments
# ------------------------------------
#datadir_input = '/home/disk/ekman4/rtardif/LMR/output'
datadir_input = '/home/disk/kalman3/rtardif/LMR/output'
#datadir_input = '/home/disk/kalman3/rtardif/LMR/output/verification_production_runs'
#nexp = 'production_gis_ccsm4_pagesall_0.75'
#nexp = 'production_mlost_ccsm4_pagesall_0.75'
nexp = 'test_py3'
#verif_period = [[1880,2000],[0,1879]]
verif_period = [[1900,2000],[1800,1899]]
# Output directory, where the figs will be dumped.
#datadir_output = datadir_input # if want to keep things tidy
datadir_output = '.' # if want local plots
# =========================================================================================
# END: set user parameters here
# =========================================================================================
# =============================================================================
# <<<<<<<<<<<<<<<<<<<<<<<<<<<<< Main code >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
# =============================================================================
def main():
begin_time = time()
# =============================
# Loading the verification data
# =============================
vtype = {'assim': 'Assimilated proxies', 'verif':'Non-assimilated proxies'}
nbperiods = len(verif_period)
assim_dict = [dict() for x in range(nbperiods)]
verif_dict = [dict() for x in range(nbperiods)]
# loop over verification periods & load data in dictionaries
for p in range(nbperiods):
# Read the pickle files containing summary stats
fname_assim = datadir_input+'/'+nexp+'/'+'verifProxy_'+str(verif_period[p][0])+'to'+str(verif_period[p][1])+\
'/reconstruction_eval_assimilated_proxy_summary.pckl'
fname_verif = datadir_input+'/'+nexp+'/'+'verifProxy_'+str(verif_period[p][0])+'to'+str(verif_period[p][1])+\
'/reconstruction_eval_withheld_proxy_summary.pckl'
infile_assim = open(fname_assim,'rb')
assim_dict[p] = pickle.load(infile_assim)
infile_assim.close()
if os.path.isfile(fname_verif):
infile_verif = open(fname_verif,'rb')
verif_dict[p] = pickle.load(infile_verif)
infile_verif.close()
verif_data = True
else:
verif_data = False
# get list of all proxy types in the assimilated/withheld data
lst = []
for p in range(nbperiods):
a_sites = list(assim_dict[p].keys())
lst = lst + list(set([item[0] for item in a_sites]))
if verif_data:
v_sites = list(verif_dict[p].keys())
lst = lst + list(set([item[0] for item in v_sites]))
master_proxy_types = list(set([item for item in lst]))
master_proxy_types.insert(0,'All')
# ==================
# Now creating plots
# ==================
if datadir_output != '.':
figdir = datadir_output+'/VerifFigs'
if not os.path.isdir(figdir):
os.system('mkdir %s' % figdir)
else:
figdir = '.'
# ============================================================================================================
# 1) Histograms of (recon, proxy) CORRELATION, CE across grand ensemble for all proxy types and per proxy type
# ============================================================================================================
if make_plots_hist:
# loop over proxy types
for proxy in master_proxy_types:
print('Proxies: %s' %proxy)
fig = plt.figure(figsize=(12,8))
irow = 1
for v in list(vtype.keys()): # "assim" & "verif" proxies
if v == 'verif' and not verif_data:
break
ax_master = fig.add_subplot(2,1,irow)
# Turn off axis lines and ticks of the big subplot
ax_master.tick_params(labelcolor=(1.,1.,1., 0.0), top='off', bottom='off', left='off', right='off')
# Removes the white frame
ax_master._frameon = False
ax_master.set_title("%s\n" % vtype[v], fontsize=16, fontweight='bold')
facecolor = fcolor[0]
if v == 'assim':
pos = [1,2,3]
else:
pos = [4,5,6]
bins_corr = np.arange(-1.-binwidth/2, 1.+binwidth/2, binwidth)
bins_ce = np.arange(-2.-binwidth/2, 1.+binwidth/2, binwidth)
# 1) --- Correlation ---
ax = fig.add_subplot(2,3,pos[0])
mean_stat = np.zeros([nbperiods])
std_stat = np.zeros([nbperiods])
prior_tmp = []
stat_comp = []
for p in range(nbperiods):
# pick right dict and associate to "workdict"
dname = v+'_dict'
workdict = eval(dname)
sitetag = list(workdict[p].keys())
if proxy == 'All':
proxy_types = list(set([item[0] for item in sitetag]))
else:
proxy_types = proxy
tmp = [workdict[p][k]['MCensCorr'] for k in sitetag if k[0] in proxy_types and np.abs(workdict[p][k]['PSMinfo']['corr'])>=r_crit]
stat = [item for sublist in tmp for item in sublist] # flatten list of lists
nbdata = len(stat)
mean_stat[p] = np.mean(stat)
std_stat[p] = np.std(stat)
                    results, edges = np.histogram(stat, bins=bins_corr, density=True)
plt.bar(edges[:-1]+binwidth/2,results,binwidth,color=fcolor[p],alpha=alpha,linewidth=0,align="center")
# kernel density estimation
statv = np.asarray(stat)
kde = sm.nonparametric.KDEUnivariate(statv)
nbpts, = statv.shape
if nbpts > 0:
kde.fit(kernel='gau')
plt.plot(kde.support,kde.density,color=fcolor[p],lw=2,label=str(verif_period[p][0])+' to '+str(verif_period[p][1]))
stat_comp.append(stat)
# Accumulate prior stat
tmp = [workdict[p][k]['PriorMCensCorr'] for k in sitetag if k[0] in proxy_types and np.abs(workdict[p][k]['PSMinfo']['corr'])>=r_crit]
prior_tmp.append([item for sublist in tmp for item in sublist]) # flatten list of lists
# Kolmogorov-Smirnov significance testing of difference between distributions from both tested periods
nbdist = len(stat_comp)
if nbdist > 1:
dist_test = stats.ks_2samp(stat_comp[0],stat_comp[1])
#print('Corr: %f %f' %(dist_test.statistic, dist_test.pvalue))
xmind,xmaxd,ymind,ymaxd = plt.axis()
prior_corr = [item for sublist in prior_tmp for item in sublist]
                results, edges = np.histogram(prior_corr, bins=bins_corr, density=True)
plt.plot(edges[:-1]+binwidth,results,linewidth=1,ls='steps',color='black',label='Prior')
plt.xlabel("Correlation",fontweight='bold')
plt.ylabel("Probability density",fontweight='bold')
ymin = 0.0
#ymax = 0.04; nbins = 4
#ymax = 0.05; nbins = 5 # for r_crit = 0.2
#ymax = 0.1; nbins = 5
#ymax = 2.0; nbins = 5
if proxy == 'All':
ymax = 2.0; nbins = 5
else:
ymax = ymaxd
plt.axis((CORRrange[0],CORRrange[1],ymin,ymax))
plt.locator_params(axis = 'y', nbins = nbins)
plt.legend(loc=2,fontsize=9,frameon=False,handlelength=1.2)
xmin,xmax,ymin,ymax = plt.axis()
xpos = xmin+0.025*(xmax-xmin)
ypos = ymin+0.5*(ymax-ymin)
for p in range(nbperiods):
plt.text(xpos,ypos,'Mean = %s' %"{:.2f}".format(mean_stat[p]),fontsize=10,fontweight='bold',color=fcolor[p])
ypos = ypos-0.075*(ymax-ymin)
if nbdist > 1:
plt.text(xpos,ypos,' p-value = %s' %"{:.3f}".format(dist_test.pvalue),fontsize=9,fontweight='bold')
ypos = ypos-0.075*(ymax-ymin)
plt.text(xpos,ypos,'Mean = %s' %"{:.2f}".format(np.mean(prior_corr)),fontsize=10,fontweight='bold')
# 2) --- CE ---
ax = fig.add_subplot(2,3,pos[1])
mean_stat = np.zeros([nbperiods])
std_stat = np.zeros([nbperiods])
prior_tmp = []
stat_comp = []
for p in range(nbperiods):
# pick right dict and associate to "workdict"
dname = v+'_dict'
workdict = eval(dname)
sitetag = list(workdict[p].keys())
if proxy == 'All':
proxy_types = list(set([item[0] for item in sitetag]))
else:
proxy_types = proxy
tmp = [workdict[p][k]['MCensCE'] for k in sitetag if k[0] in proxy_types and np.abs(workdict[p][k]['PSMinfo']['corr'])>=r_crit]
stat = [item for sublist in tmp for item in sublist] # flatten list of lists
nbdata = len(stat)
mean_stat[p] = np.mean(stat)
std_stat[p] = np.std(stat)
# Since CE is not bounded at the lower end, assign values smaller than 1st bin to value of 1st bin
#stat = [bins[0] if x<bins[0] else x for x in stat]
                    results, edges = np.histogram(stat, bins=bins_ce, density=True)
plt.bar(edges[:-1],results,binwidth,color=fcolor[p],alpha=alpha,linewidth=0)
# kernel density estimation
statv = np.asarray(stat)
kde = sm.nonparametric.KDEUnivariate(statv)
nbpts, = statv.shape
if nbpts > 0:
kde.fit(kernel='gau')
plt.plot(kde.support,kde.density,color=fcolor[p],lw=2,label=str(verif_period[p][0])+' to '+str(verif_period[p][1]))
stat_comp.append(stat)
# Accumulate prior stat
tmp = [workdict[p][k]['PriorMCensCE'] for k in sitetag if k[0] in proxy_types and np.abs(workdict[p][k]['PSMinfo']['corr'])>=r_crit]
prior_tmp.append([item for sublist in tmp for item in sublist]) # flatten list of lists
# Kolmogorov-Smirnov significance testing of difference between distributions from both tested periods
nbdist = len(stat_comp)
if nbdist > 1:
dist_test = stats.ks_2samp(stat_comp[0],stat_comp[1])
#print('CE: %f %f' %(dist_test.statistic, dist_test.pvalue))
prior_ce = [item for sublist in prior_tmp for item in sublist]
# Since CE is not bounded at the lower end, assign values smaller than 1st bin to value of 1st bin
prior_ce = [bins_ce[0] if x<bins_ce[0] else x for x in prior_ce]
                results, edges = np.histogram(prior_ce, bins=bins_ce, density=True)
plt.plot(edges[:-1]+binwidth,results,linewidth=1,ls='steps',color='black',label='Prior')
plt.xlabel("Coefficient of efficiency",fontweight='bold')
plt.ylabel("Probability density",fontweight='bold')
xmin,xmax,ymin,ymax = plt.axis()
ymin = 0.0
#ymax = 0.45
#ymax = 0.1 # for r_crit = 0.2
#ymax = 0.5; nbins = 5
ymax = 12.0; nbins = 6
plt.axis((CErange[0],CErange[1],ymin,ymax))
plt.legend(loc=2,fontsize=9,frameon=False,handlelength=1.2)
xmin,xmax,ymin,ymax = plt.axis()
xpos = xmin+0.025*(xmax-xmin)
ypos = ymin+0.5*(ymax-ymin)
for p in range(nbperiods):
plt.text(xpos,ypos,'Mean = %s' %"{:.2f}".format(mean_stat[p]),fontsize=10,fontweight='bold',color=fcolor[p])
ypos = ypos-0.075*(ymax-ymin)
if nbdist > 1:
plt.text(xpos,ypos,' p-value = %s' %"{:.3f}".format(dist_test.pvalue),fontsize=9,fontweight='bold')
ypos = ypos-0.075*(ymax-ymin)
plt.text(xpos,ypos,'Mean = %s' %"{:.2f}".format(np.mean(prior_ce)),fontsize=10,fontweight='bold')
# 3) --- Change in CE from prior to posterior ---
ax = fig.add_subplot(2,3,pos[2])
prior_tmp = []
stat_comp = []
for p in range(nbperiods):
# pick right dict and associate to "workdict"
dname = v+'_dict'
workdict = eval(dname)
sitetag = list(workdict[p].keys())
if proxy == 'All':
proxy_types = list(set([item[0] for item in sitetag]))
else:
proxy_types = proxy
tmpPost = [workdict[p][k]['MCensCE'] for k in sitetag if k[0] in proxy_types and np.abs(workdict[p][k]['PSMinfo']['corr'])>=r_crit]
tmpPrior = [workdict[p][k]['PriorMCensCE'] for k in sitetag if k[0] in proxy_types and np.abs(workdict[p][k]['PSMinfo']['corr'])>=r_crit]
statPost = [item for sublist in tmpPost for item in sublist] # flatten list of lists
statPrior = [item for sublist in tmpPrior for item in sublist] # flatten list of lists
# difference
stat = [statPost[i]-statPrior[i] for i in range(len(statPost))]
nbdata = len(stat)
mean_stat = np.mean(stat)
                    std_stat = np.std(stat)
from vergeml.command import command, CommandPlugin
from vergeml.option import option
from vergeml.plots import load_labels
import os.path
import csv
from vergeml.utils import VergeMLError
import numpy as np
@command('confusion-matrix', descr="Plot a confusion matrix.")
@option('@AI')
@option('normalize', type='bool', default=False, descr="When true normalize the confusion matrix.", short="n", flag=True)
class ConfusionMatrixPlot(CommandPlugin):
def __call__(self, args, env):
# Plotting a confusion matrix needs the model to follow the convention
# - labels.txt in checkpoints
# - predictions.csv in stats
import itertools
import numpy as np
import matplotlib.pyplot as plt
from sklearn.metrics import confusion_matrix
from vergeml.plots import load_labels, load_predictions
try:
labels = load_labels(env)
except FileNotFoundError:
raise VergeMLError("Can't plot confusion matrix - not supported by model.")
nclasses = len(labels)
try:
y_test, y_score = load_predictions(env, nclasses)
except FileNotFoundError:
raise VergeMLError("Can't plot confusion matrix - not supported by model.")
# From:
# https://scikit-learn.org/stable/auto_examples/model_selection/plot_confusion_matrix.html#sphx-glr-auto-examples-model-selection-plot-confusion-matrix-py
def plot_confusion_matrix(cm, classes, AI,
normalize=False,
cmap=plt.cm.Blues): # pylint: disable=E1101
"""
This function prints and plots the confusion matrix.
Normalization can be applied by setting `normalize=True`.
"""
if normalize:
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
plt.title(f"Confusion matrix for @{AI} (normalized)")
else:
plt.title(f'Confusion matrix for @{AI}')
plt.imshow(cm, interpolation='nearest', cmap=cmap)
plt.colorbar()
tick_marks = np.arange(len(classes))
plt.xticks(tick_marks, classes, rotation=45)
plt.yticks(tick_marks, classes)
fmt = '.2f' if normalize else 'd'
thresh = cm.max() / 2.
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
plt.text(j, i, format(cm[i, j], fmt),
horizontalalignment="center",
color="white" if cm[i, j] > thresh else "black")
plt.ylabel('True label')
plt.xlabel('Predicted label')
plt.tight_layout()
        y_test = np.argmax(y_test, axis=1)
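        # (Assumed continuation -- the source file is truncated here.) The
        # usual next steps would be to derive predicted labels, build the
        # matrix and hand it to the plotting helper defined above; the exact
        # argument names are assumptions:
        #     y_pred = np.argmax(y_score, axis=1)
        #     cm = confusion_matrix(y_test, y_pred)
        #     plot_confusion_matrix(cm, labels, args['@AI'],
        #                           normalize=args['normalize'])
        #     plt.show()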
import threading
import time
import cv2
import numpy as np
import serial
import sys, getopt
import paho.mqtt
from PIL import Image
import paho.mqtt.client as mqtt
from paho.mqtt import publish
from video import create_capture
import Adafruit_CharLCD as LCD
import Adafruit_GPIO as GPIO
lcd_rs = 25
lcd_en = 24
lcd_d4 = 23
lcd_d5 = 17
lcd_d6 = 18
lcd_d7 = 22
lcd_backlight = 2
# Define LCD column and row size for 16x2 LCD.
lcd_columns = 16
lcd_rows = 2
lcd = LCD.Adafruit_CharLCD(lcd_rs, lcd_en, lcd_d4, lcd_d5, lcd_d6, lcd_d7, lcd_columns, lcd_rows, lcd_backlight)
ser = serial.Serial(
"/dev/ttyAMA0",
baudrate=115200,
parity=serial.PARITY_NONE,
stopbits=serial.STOPBITS_ONE,
bytesize=serial.EIGHTBITS,
writeTimeout=1,
timeout=10,
rtscts=False,
dsrdtr=False,
xonxoff=False)
serLock = threading.Lock()
# --------------------------------------------------------MQTT----------------------------------------------------------#
def on_connect_cart(client, obj, flags, rc):
if rc == 0:
print("Cart connected with result code " + str(rc))
client.subscribe("cart/room/starting_room_number")
client.subscribe("cart/room/destination_room_number")
client.subscribe("cart/parking")
else:
print("Bad connection returned code = ", rc)
def on_message_cart(client, obj, msg):
# print("Cart new message: " + msg.topic + " " + str(msg.payload))
    roomNumber = msg.payload.decode()  # payload arrives as bytes on Python 3; decode so comparisons like == "331" match
if msg.topic == "cart/room/starting_room_number":
if roomNumber == "331":
# pass a value to micom for scenario 1
serLock.acquire()
try:
                ser.write(b'!')  # pyserial expects bytes on Python 3
finally:
serLock.release()
# print("2 unlock")
print(roomNumber)
else:
print("Unknown roomNumber")
print(roomNumber)
elif msg.topic == "cart/room/destination_room_number":
if roomNumber == "323-1":
# pass a value to micom for scenario 2
serLock.acquire()
try:
                ser.write(b'@')  # bytes for pyserial
finally:
serLock.release()
# print("2 unlock")
print(roomNumber)
elif roomNumber == "250":
# pass a value to micom for scenario 3
serLock.acquire()
try:
                ser.write(b'#')  # bytes for pyserial
finally:
serLock.release()
# print("2 unlock")
print(roomNumber)
else:
print("Unknown roomNumber")
print(roomNumber)
elif msg.topic == "cart/parking":
if roomNumber == "0":
            # pass a value to micom for parking scenario, which is not yet decided
print(roomNumber)
else:
print("Unknown roomNumber")
print(roomNumber)
else:
print("Unknown topic")
def on_publish_cart(client, obj, mid):
print("mid: " + str(mid))
def on_subscribe_cart(client, obj, mid, granted_qos):
print("Subscribed: " + str(mid) + " " + str(granted_qos))
def on_log_cart(client, obj, level, string):
print(string)
# The below lines will be used to publish the topics
# publish.single("elevator/starting_floor_number", "3", hostname="172.16.17.32", port=1883)
# publish.single("elevator/destination_floor_number", "2", hostname="172.16.17.32", port=1883)
# ---------------------------------------------------------------------------------------------------------------------#
# ------------------------------------------Hallway detection-----------------------------------------------------------#
class Line():
def __init__(self):
# was the line detected in the last iteration?
self.detected = False
# smoothing span
self.n_avg = 10
# x values of the last n_avg fits of the line
self.recent_xfitted = []
#average x values of the fitted line over the last n iterations
self.bestx = None
#polynomial coefficients averaged over the last n iterations
self.best_fit = None
self.i = 0
def add(self, recent_xfitted, ploty):
# add in line
self.detected = True
self.recent_xfitted.append(recent_xfitted)
# smooth out last n fits
# keep only n_avg values
if len(self.recent_xfitted) > self.n_avg:
self.recent_xfitted = self.recent_xfitted[(len(self.recent_xfitted) - self.n_avg):]
self.bestx = np.mean(self.recent_xfitted, axis=0)
# find polynomial co-efficients of averaged x values
self.best_fit = np.polyfit(ploty, self.bestx, 2)
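# Minimal usage sketch for Line (the values below are illustrative, not from
# the original file): feed each frame's fitted x-positions to smooth over time.
# line = Line()
# ploty = np.arange(720, dtype=np.float64)
# line.add(np.linspace(200, 220, 720), ploty)   # one frame's lane x-positions
# smoothed_fit = line.best_fit                  # 2nd-order fit of the average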
def perspective_transform(img, src, dst):
h = img.shape[0]
w = img.shape[1]
M = cv2.getPerspectiveTransform(src, dst)
#Minv = cv2.getPerspectiveTransform(dst, src)
warped = cv2.warpPerspective(img, M, (w,h))
return warped, M
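# Usage sketch (the corner points are assumptions, not from the original file):
# src marks the hallway trapezoid in the camera image, dst the rectangle it
# should map to in the bird's-eye view.
# h, w = img.shape[:2]
# src = np.float32([[w*0.45, h*0.63], [w*0.55, h*0.63], [w*0.90, h], [w*0.10, h]])
# dst = np.float32([[w*0.20, 0], [w*0.80, 0], [w*0.80, h], [w*0.20, h]])
# warped, M = perspective_transform(img, src, dst)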
def find_lanes_sliding_window(binary_warped, draw_rect=False):
# Assuming you have created a warped binary image called "binary_warped"
# Take a histogram of the bottom half of the image
histogram = np.sum(binary_warped[int(binary_warped.shape[0]/2):,:], axis=0)
# Find the peak of the left and right halves of the histogram
# These will be the starting point for the left and right lines
    midpoint = int(histogram.shape[0] / 2)  # builtin int; np.int is deprecated
leftx_base = np.argmax(histogram[:midpoint])
    rightx_base = np.argmax(histogram[midpoint:]) + midpoint  # offset back to full-image x coordinates
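    # --- Assumed continuation (the file is truncated here; sketch only, and
    # nwindows/margin/minpix are conventional values, not from the original) ---
    # nwindows, margin, minpix = 9, 100, 50
    # window_height = binary_warped.shape[0] // nwindows
    # nonzeroy, nonzerox = binary_warped.nonzero()
    # leftx_current, left_lane_inds = leftx_base, []
    # for window in range(nwindows):
    #     win_y_low = binary_warped.shape[0] - (window + 1) * window_height
    #     win_y_high = binary_warped.shape[0] - window * window_height
    #     good_left = ((nonzeroy >= win_y_low) & (nonzeroy < win_y_high) &
    #                  (nonzerox >= leftx_current - margin) &
    #                  (nonzerox < leftx_current + margin)).nonzero()[0]
    #     left_lane_inds.append(good_left)
    #     if len(good_left) > minpix:            # re-center the next window
    #         leftx_current = int(np.mean(nonzerox[good_left]))
    # # (the right lane is handled symmetrically from rightx_base), then fit:
    # # left_fit = np.polyfit(nonzeroy[np.concatenate(left_lane_inds)],
    # #                       nonzerox[np.concatenate(left_lane_inds)], 2)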
from __future__ import print_function, division
import numpy as np
import torch
import torch.nn as nn
from torch.autograd import Variable
from geotnf.point_tnf import PointTnf
class TransformedGridLoss(nn.Module):
def __init__(self, geometric_model='affine', use_cuda=True, grid_size=20):
super(TransformedGridLoss, self).__init__()
self.geometric_model = geometric_model
# define virtual grid of points to be transformed
axis_coords = np.linspace(-1,1,grid_size)
self.N = grid_size*grid_size
X,Y = np.meshgrid(axis_coords,axis_coords)
X = np.reshape(X,(1,1,self.N))
Y = np.reshape(Y,(1,1,self.N))
        P = np.concatenate((X,Y),1)
# neural network functions and classes
import numpy as np
import random
import json
import cma
from es import SimpleGA, CMAES, PEPG, OpenES
from env import make_env
def sigmoid(x):
return 1 / (1 + np.exp(-x))
def relu(x):
return np.maximum(x, 0)
def passthru(x):
return x
# useful for discrete actions
def softmax(x):
e_x = np.exp(x - np.max(x))
return e_x / e_x.sum(axis=0)
# useful for discrete actions
def sample(p):
return np.argmax(np.random.multinomial(1, p))
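# Quick sanity examples (illustrative values, not from the original file):
# probs = softmax(np.array([1.0, 2.0, 3.0]))   # non-negative, sums to 1
# action = sample(probs)                       # index drawn from that distribution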
"""
learning the model
"""
class RNNCell:
def __init__(self, input_size, weight, bias):
self.input_size=input_size
self.weight = weight
self.bias = bias
def __call__(self, x, h):
concat = np.concatenate((x, h), axis=1)
hidden = np.matmul(concat, self.weight)+self.bias
        return np.tanh(hidden)
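# Minimal usage sketch (the sizes and random weights are assumptions, not from
# the original file): one recurrent step of the cell defined above.
# input_size, hidden_size = 4, 8
# weight = np.random.randn(input_size + hidden_size, hidden_size) * 0.1
# bias = np.zeros(hidden_size)
# cell = RNNCell(input_size, weight, bias)
# h = np.zeros((1, hidden_size))
# h = cell(np.random.randn(1, input_size), h)   # new hidden state, shape (1, 8)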
#!/usr/bin/python3
'''
DoomScenario.py
Authors: <NAME>
Last Updated: 3/26/17
'''
"""
This script defines the instance of Vizdoom used to train and test
Reinforcement Learning Models.
"""
from vizdoom import DoomGame, Mode, ScreenResolution
import itertools as it
import numpy as np
np.seterr(divide='ignore', invalid='ignore')
from tqdm import tqdm
def softmax(x, t):
'''
Method defines softmax function
'''
    e_x = np.exp((x - np.max(x)) / t)  # temperature belongs inside exp(); dividing exp() by t cancels in the normalization below
return e_x / e_x.sum(axis=0)
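# Illustrative behaviour of the temperature t (example values, not from the
# original file): small t sharpens the distribution, large t flattens it.
# softmax(np.array([1.0, 2.0]), 0.5)  # most of the mass on the larger logit
# softmax(np.array([1.0, 2.0]), 5.0)  # close to [0.5, 0.5]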
class DoomScenario:
"""
DoomScenario class runs instances of Vizdoom according to scenario
configuration (.cfg) files.
Scenario Configuration files for this project are located in
the /src/configs/ folder.
"""
def __init__(self, config_filename):
'''
Method initiates Vizdoom with desired configuration file.
'''
self.config_filename = config_filename
self.game = DoomGame()
self.game.load_config("configs/" + config_filename)
self.game.set_window_visible(False)
self.game.init()
self.res = (self.game.get_screen_height(), self.game.get_screen_width())
self.actions = [list(a) for a in it.product([0, 1], repeat=self.game.get_available_buttons_size())]
self.pbar = None
self.game.new_episode()
def play(self, action, tics):
'''
Method advances state with desired action for a number of tics.
'''
self.game.set_action(action)
self.game.advance_action(tics, True)
if self.pbar: self.pbar.update(int(tics))
def get_processed_state(self, depth_radius, depth_contrast):
'''
Method processes the Vizdoom RGB and depth buffer into
a composite one channel image that can be used by the Models.
depth_radius defines how far the depth buffer sees with 1.0 being
as far as ViZDoom allows.
depth_contrast defines how much of the depth buffer is in the final
processed image as compared to the greyscaled RGB buffer.
**processed = (1-depth_contrast)* grey_buffer + depth_contrast*depth_buffer
'''
state = self.game.get_state()
screen_buffer = np.array(state.screen_buffer).astype('float32')/255
try:
# Grey Scaling
grey_buffer = np.dot(np.transpose(screen_buffer, (1, 2, 0)), [0.21, 0.72, 0.07])
# Depth Radius
depth_buffer = np.array(state.depth_buffer).astype('float32')/255
            depth_buffer[(depth_buffer > depth_radius)] = depth_radius # Affects depth radius
depth_buffer_filtered = (depth_buffer - np.amin(depth_buffer))/ (np.amax(depth_buffer) - np.amin(depth_buffer))
# Depth Contrast
processed_buffer = ((1 - depth_contrast) * grey_buffer) + (depth_contrast* (1- depth_buffer))
            processed_buffer = (processed_buffer - np.amin(processed_buffer))/ (np.amax(processed_buffer) - np.amin(processed_buffer))
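        # NOTE: the file is truncated here; the except/return below is an
        # assumed minimal completion (the fallback behaviour is a guess).
        except Exception:
            processed_buffer = grey_buffer
        return processed_buffer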
# Copyright 2018-2021 Xanadu Quantum Technologies Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Tests for the Hamiltonian class.
"""
import numpy as np
import pytest
import pennylane as qml
from pennylane import numpy as pnp
# Make test data in different interfaces, if installed
COEFFS_PARAM_INTERFACE = [
([-0.05, 0.17], 1.7, "autograd"),
(np.array([-0.05, 0.17]), np.array(1.7), "autograd"),
(pnp.array([-0.05, 0.17], requires_grad=True), pnp.array(1.7, requires_grad=True), "autograd"),
]
try:
from jax import numpy as jnp
COEFFS_PARAM_INTERFACE.append((jnp.array([-0.05, 0.17]), jnp.array(1.7), "jax"))
except ImportError:
pass
try:
    import tensorflow as tf
COEFFS_PARAM_INTERFACE.append(
(tf.Variable([-0.05, 0.17], dtype=tf.double), tf.Variable(1.7, dtype=tf.double), "tf")
)
except ImportError:
pass
try:
import torch
COEFFS_PARAM_INTERFACE.append((torch.tensor([-0.05, 0.17]), torch.tensor([1.7]), "torch"))
except ImportError:
pass
H_ONE_QUBIT = np.array([[1.0, 0.5j], [-0.5j, 2.5]])
H_TWO_QUBITS = np.array(
[[0.5, 1.0j, 0.0, -3j], [-1.0j, -1.1, 0.0, -0.1], [0.0, 0.0, -0.9, 12.0], [3j, -0.1, 12.0, 0.0]]
)
COEFFS = [(0.5, 1.2, -0.7), (2.2, -0.2, 0.0), (0.33,)]
OBSERVABLES = [
(qml.PauliZ(0), qml.PauliY(0), qml.PauliZ(1)),
(qml.PauliX(0) @ qml.PauliZ(1), qml.PauliY(0) @ qml.PauliZ(1), qml.PauliZ(1)),
(qml.Hermitian(H_TWO_QUBITS, [0, 1]),),
]
valid_hamiltonians = [
((1.0,), (qml.Hermitian(H_TWO_QUBITS, [0, 1]),)),
((-0.8,), (qml.PauliZ(0),)),
((0.6,), (qml.PauliX(0) @ qml.PauliX(1),)),
((0.5, -1.6), (qml.PauliX(0), qml.PauliY(1))),
((0.5, -1.6), (qml.PauliX(1), qml.PauliY(1))),
((0.5, -1.6), (qml.PauliX("a"), qml.PauliY("b"))),
((1.1, -0.4, 0.333), (qml.PauliX(0), qml.Hermitian(H_ONE_QUBIT, 2), qml.PauliZ(2))),
((-0.4, 0.15), (qml.Hermitian(H_TWO_QUBITS, [0, 2]), qml.PauliZ(1))),
([1.5, 2.0], [qml.PauliZ(0), qml.PauliY(2)]),
(np.array([-0.1, 0.5]), [qml.Hermitian(H_TWO_QUBITS, [0, 1]), qml.PauliY(0)]),
((0.5, 1.2), (qml.PauliX(0), qml.PauliX(0) @ qml.PauliX(1))),
]
valid_hamiltonians_str = [
" (1.0) [Hermitian0,1]",
" (-0.8) [Z0]",
" (0.6) [X0 X1]",
" (-1.6) [Y1]\n+ (0.5) [X0]",
" (-1.6) [Y1]\n+ (0.5) [X1]",
" (-1.6) [Yb]\n+ (0.5) [Xa]",
" (-0.4) [Hermitian2]\n+ (0.333) [Z2]\n+ (1.1) [X0]",
" (0.15) [Z1]\n+ (-0.4) [Hermitian0,2]",
" (1.5) [Z0]\n+ (2.0) [Y2]",
" (0.5) [Y0]\n+ (-0.1) [Hermitian0,1]",
" (0.5) [X0]\n+ (1.2) [X0 X1]",
]
valid_hamiltonians_repr = [
"<Hamiltonian: terms=1, wires=[0, 1]>",
"<Hamiltonian: terms=1, wires=[0]>",
"<Hamiltonian: terms=1, wires=[0, 1]>",
"<Hamiltonian: terms=2, wires=[0, 1]>",
"<Hamiltonian: terms=2, wires=[1]>",
"<Hamiltonian: terms=2, wires=['a', 'b']>",
"<Hamiltonian: terms=3, wires=[0, 2]>",
"<Hamiltonian: terms=2, wires=[0, 1, 2]>",
"<Hamiltonian: terms=2, wires=[0, 2]>",
"<Hamiltonian: terms=2, wires=[0, 1]>",
"<Hamiltonian: terms=2, wires=[0, 1]>",
]
invalid_hamiltonians = [
((), (qml.PauliZ(0),)),
((), (qml.PauliZ(0), qml.PauliY(1))),
((3.5,), ()),
((1.2, -0.4), ()),
((0.5, 1.2), (qml.PauliZ(0),)),
((1.0,), (qml.PauliZ(0), qml.PauliY(0))),
]
simplify_hamiltonians = [
(
qml.Hamiltonian([1, 1, 1], [qml.PauliX(0) @ qml.Identity(1), qml.PauliX(0), qml.PauliX(1)]),
qml.Hamiltonian([2, 1], [qml.PauliX(0), qml.PauliX(1)]),
),
(
qml.Hamiltonian(
[-1, 1, 1], [qml.PauliX(0) @ qml.Identity(1), qml.PauliX(0), qml.PauliX(1)]
),
qml.Hamiltonian([1], [qml.PauliX(1)]),
),
(
qml.Hamiltonian(
[1, 0.5],
[qml.PauliX(0) @ qml.PauliY(1), qml.PauliY(1) @ qml.Identity(2) @ qml.PauliX(0)],
),
qml.Hamiltonian([1.5], [qml.PauliX(0) @ qml.PauliY(1)]),
),
(
qml.Hamiltonian(
[1, 1, 0.5],
[
qml.Hermitian(np.array([[1, 0], [0, -1]]), "a"),
qml.PauliX("b") @ qml.PauliY(1.3),
qml.PauliY(1.3) @ qml.Identity(-0.9) @ qml.PauliX("b"),
],
),
qml.Hamiltonian(
[1, 1.5],
[qml.Hermitian(np.array([[1, 0], [0, -1]]), "a"), qml.PauliX("b") @ qml.PauliY(1.3)],
),
),
# Simplifies to zero Hamiltonian
(
qml.Hamiltonian(
[1, -0.5, -0.5], [qml.PauliX(0) @ qml.Identity(1), qml.PauliX(0), qml.PauliX(0)]
),
qml.Hamiltonian([], []),
),
(
qml.Hamiltonian(
[1, -1],
[qml.PauliX(4) @ qml.Identity(0) @ qml.PauliX(1), qml.PauliX(4) @ qml.PauliX(1)],
),
qml.Hamiltonian([], []),
),
(
qml.Hamiltonian([0], [qml.Identity(0)]),
qml.Hamiltonian([0], [qml.Identity(0)]),
),
]
equal_hamiltonians = [
(
qml.Hamiltonian([1, 1], [qml.PauliX(0) @ qml.Identity(1), qml.PauliZ(0)]),
qml.Hamiltonian([1, 1], [qml.PauliX(0), qml.PauliZ(0)]),
True,
),
(
qml.Hamiltonian([1, 1], [qml.PauliX(0) @ qml.Identity(1), qml.PauliY(2) @ qml.PauliZ(0)]),
qml.Hamiltonian([1, 1], [qml.PauliX(0), qml.PauliZ(0) @ qml.PauliY(2) @ qml.Identity(1)]),
True,
),
(
qml.Hamiltonian(
[1, 1, 1], [qml.PauliX(0) @ qml.Identity(1), qml.PauliZ(0), qml.Identity(1)]
),
qml.Hamiltonian([1, 1], [qml.PauliX(0), qml.PauliZ(0)]),
False,
),
(qml.Hamiltonian([1], [qml.PauliZ(0) @ qml.PauliX(1)]), qml.PauliZ(0) @ qml.PauliX(1), True),
(qml.Hamiltonian([1], [qml.PauliZ(0)]), qml.PauliZ(0), True),
(
qml.Hamiltonian(
[1, 1, 1],
[
qml.Hermitian(np.array([[1, 0], [0, -1]]), "b") @ qml.Identity(7),
qml.PauliZ(3),
qml.Identity(1.2),
],
),
qml.Hamiltonian(
[1, 1, 1],
[qml.Hermitian(np.array([[1, 0], [0, -1]]), "b"), qml.PauliZ(3), qml.Identity(1.2)],
),
True,
),
(
qml.Hamiltonian([1, 1], [qml.PauliZ(3) @ qml.Identity(1.2), qml.PauliZ(3)]),
qml.Hamiltonian([2], [qml.PauliZ(3)]),
True,
),
]
add_hamiltonians = [
(
qml.Hamiltonian([1, 1.2, 0.1], [qml.PauliX(0), qml.PauliZ(1), qml.PauliX(2)]),
qml.Hamiltonian([0.5, 0.3, 1], [qml.PauliX(0), qml.PauliX(1), qml.PauliX(2)]),
qml.Hamiltonian(
[1.5, 1.2, 1.1, 0.3], [qml.PauliX(0), qml.PauliZ(1), qml.PauliX(2), qml.PauliX(1)]
),
),
(
qml.Hamiltonian(
[1.3, 0.2, 0.7], [qml.PauliX(0) @ qml.PauliX(1), qml.Hadamard(1), qml.PauliX(2)]
),
qml.Hamiltonian(
[0.5, 0.3, 1.6], [qml.PauliX(0), qml.PauliX(1) @ qml.PauliX(0), qml.PauliX(2)]
),
qml.Hamiltonian(
[1.6, 0.2, 2.3, 0.5],
[qml.PauliX(0) @ qml.PauliX(1), qml.Hadamard(1), qml.PauliX(2), qml.PauliX(0)],
),
),
(
qml.Hamiltonian([1, 1], [qml.PauliX(0), qml.Hermitian(np.array([[1, 0], [0, -1]]), 0)]),
qml.Hamiltonian([0.5, 0.5], [qml.PauliX(0), qml.Hermitian(np.array([[1, 0], [0, -1]]), 0)]),
qml.Hamiltonian([1.5, 1.5], [qml.PauliX(0), qml.Hermitian(np.array([[1, 0], [0, -1]]), 0)]),
),
(
qml.Hamiltonian([1, 1.2, 0.1], [qml.PauliX(0), qml.PauliZ(1), qml.PauliX(2)]),
qml.PauliX(0) @ qml.Identity(1),
qml.Hamiltonian([2, 1.2, 0.1], [qml.PauliX(0), qml.PauliZ(1), qml.PauliX(2)]),
),
(
qml.Hamiltonian(
[1.3, 0.2, 0.7], [qml.PauliX(0) @ qml.PauliX(1), qml.Hadamard(1), qml.PauliX(2)]
),
qml.Hadamard(1),
qml.Hamiltonian(
[1.3, 1.2, 0.7], [qml.PauliX(0) @ qml.PauliX(1), qml.Hadamard(1), qml.PauliX(2)]
),
),
(
qml.Hamiltonian([1, 1.2, 0.1], [qml.PauliX("b"), qml.PauliZ(3.1), qml.PauliX(1.6)]),
qml.PauliX("b") @ qml.Identity(5),
qml.Hamiltonian([2, 1.2, 0.1], [qml.PauliX("b"), qml.PauliZ(3.1), qml.PauliX(1.6)]),
),
# Case where arguments coeffs and ops to the Hamiltonian are iterables other than lists
(
qml.Hamiltonian((1, 1.2, 0.1), (qml.PauliX(0), qml.PauliZ(1), qml.PauliX(2))),
qml.Hamiltonian(
np.array([0.5, 0.3, 1]), np.array([qml.PauliX(0), qml.PauliX(1), qml.PauliX(2)])
),
qml.Hamiltonian(
(1.5, 1.2, 1.1, 0.3),
np.array([qml.PauliX(0), qml.PauliZ(1), qml.PauliX(2), qml.PauliX(1)]),
),
),
]
sub_hamiltonians = [
(
qml.Hamiltonian([1, 1.2, 0.1], [qml.PauliX(0), qml.PauliZ(1), qml.PauliX(2)]),
qml.Hamiltonian([0.5, 0.3, 1.6], [qml.PauliX(0), qml.PauliX(1), qml.PauliX(2)]),
qml.Hamiltonian(
[0.5, 1.2, -1.5, -0.3], [qml.PauliX(0), qml.PauliZ(1), qml.PauliX(2), qml.PauliX(1)]
),
),
(
qml.Hamiltonian(
[1.3, 0.2, 1], [qml.PauliX(0) @ qml.PauliX(1), qml.Hadamard(1), qml.PauliX(2)]
),
qml.Hamiltonian(
[0.5, 0.3, 1], [qml.PauliX(0), qml.PauliX(1) @ qml.PauliX(0), qml.PauliX(2)]
),
qml.Hamiltonian(
[1, 0.2, -0.5], [qml.PauliX(0) @ qml.PauliX(1), qml.Hadamard(1), qml.PauliX(0)]
),
),
(
qml.Hamiltonian([1, 1], [qml.PauliX(0), qml.Hermitian(np.array([[1, 0], [0, -1]]), 0)]),
qml.Hamiltonian([0.5, 0.5], [qml.PauliX(0), qml.Hermitian(np.array([[1, 0], [0, -1]]), 0)]),
qml.Hamiltonian([0.5, 0.5], [qml.PauliX(0), qml.Hermitian(np.array([[1, 0], [0, -1]]), 0)]),
),
(
qml.Hamiltonian([1, 1.2, 0.1], [qml.PauliX(0), qml.PauliZ(1), qml.PauliX(2)]),
qml.PauliX(0) @ qml.Identity(1),
qml.Hamiltonian([1.2, 0.1], [qml.PauliZ(1), qml.PauliX(2)]),
),
(
qml.Hamiltonian(
[1.3, 0.2, 0.7], [qml.PauliX(0) @ qml.PauliX(1), qml.Hadamard(1), qml.PauliX(2)]
),
qml.Hadamard(1),
qml.Hamiltonian(
[1.3, -0.8, 0.7], [qml.PauliX(0) @ qml.PauliX(1), qml.Hadamard(1), qml.PauliX(2)]
),
),
(
qml.Hamiltonian([1, 1.2, 0.1], [qml.PauliX("b"), qml.PauliZ(3.1), qml.PauliX(1.6)]),
qml.PauliX("b") @ qml.Identity(1),
qml.Hamiltonian([1.2, 0.1], [qml.PauliZ(3.1), qml.PauliX(1.6)]),
),
# The result is the zero Hamiltonian
(
qml.Hamiltonian([1, 1.2, 0.1], [qml.PauliX(0), qml.PauliZ(1), qml.PauliX(2)]),
qml.Hamiltonian([1, 1.2, 0.1], [qml.PauliX(0), qml.PauliZ(1), qml.PauliX(2)]),
qml.Hamiltonian([], []),
),
(
qml.Hamiltonian([1, 2], [qml.PauliX(4), qml.PauliZ(2)]),
qml.Hamiltonian([1, 2], [qml.PauliX(4), qml.PauliZ(2)]),
qml.Hamiltonian([], []),
),
# Case where arguments coeffs and ops to the Hamiltonian are iterables other than lists
(
qml.Hamiltonian((1, 1.2, 0.1), (qml.PauliX(0), qml.PauliZ(1), qml.PauliX(2))),
qml.Hamiltonian(
np.array([0.5, 0.3, 1.6]), np.array([qml.PauliX(0), qml.PauliX(1), qml.PauliX(2)])
),
qml.Hamiltonian(
(0.5, 1.2, -1.5, -0.3),
np.array([qml.PauliX(0), qml.PauliZ(1), qml.PauliX(2), qml.PauliX(1)]),
),
),
]
mul_hamiltonians = [
(
3,
qml.Hamiltonian([1.5, 0.5], [qml.PauliX(0), qml.PauliZ(1)]),
qml.Hamiltonian([4.5, 1.5], [qml.PauliX(0), qml.PauliZ(1)]),
),
(
-1.3,
qml.Hamiltonian([1, -0.3], [qml.PauliX(0), qml.PauliZ(1) @ qml.PauliZ(2)]),
qml.Hamiltonian([-1.3, 0.39], [qml.PauliX(0), qml.PauliZ(1) @ qml.PauliZ(2)]),
),
(
-1.3,
qml.Hamiltonian(
[1, -0.3],
[qml.Hermitian(np.array([[1, 0], [0, -1]]), "b"), qml.PauliZ(23) @ qml.PauliZ(0)],
),
qml.Hamiltonian(
[-1.3, 0.39],
[qml.Hermitian(np.array([[1, 0], [0, -1]]), "b"), qml.PauliZ(23) @ qml.PauliZ(0)],
),
),
# The result is the zero Hamiltonian
(
0,
qml.Hamiltonian([1], [qml.PauliX(0)]),
qml.Hamiltonian([0], [qml.PauliX(0)]),
),
(
0,
qml.Hamiltonian([1, 1.2, 0.1], [qml.PauliX(0), qml.PauliZ(1), qml.PauliX(2)]),
qml.Hamiltonian([0, 0, 0], [qml.PauliX(0), qml.PauliZ(1), qml.PauliX(2)]),
),
# Case where arguments coeffs and ops to the Hamiltonian are iterables other than lists
(
3,
qml.Hamiltonian((1.5, 0.5), (qml.PauliX(0), qml.PauliZ(1))),
qml.Hamiltonian(np.array([4.5, 1.5]), np.array([qml.PauliX(0), qml.PauliZ(1)])),
),
]
matmul_hamiltonians = [
(
qml.Hamiltonian([1, 1], [qml.PauliX(0), qml.PauliZ(1)]),
qml.Hamiltonian([0.5, 0.5], [qml.PauliZ(2), qml.PauliZ(3)]),
qml.Hamiltonian(
[0.5, 0.5, 0.5, 0.5],
[
qml.PauliX(0) @ qml.PauliZ(2),
qml.PauliX(0) @ qml.PauliZ(3),
qml.PauliZ(1) @ qml.PauliZ(2),
qml.PauliZ(1) @ qml.PauliZ(3),
],
),
),
(
qml.Hamiltonian([0.5, 0.25], [qml.PauliX(0) @ qml.PauliX(1), qml.PauliZ(0)]),
qml.Hamiltonian([1, 1], [qml.PauliX(3) @ qml.PauliZ(2), qml.PauliZ(2)]),
qml.Hamiltonian(
[0.5, 0.5, 0.25, 0.25],
[
qml.PauliX(0) @ qml.PauliX(1) @ qml.PauliX(3) @ qml.PauliZ(2),
qml.PauliX(0) @ qml.PauliX(1) @ qml.PauliZ(2),
qml.PauliZ(0) @ qml.PauliX(3) @ qml.PauliZ(2),
qml.PauliZ(0) @ qml.PauliZ(2),
],
),
),
(
qml.Hamiltonian([1, 1], [qml.PauliX("b"), qml.Hermitian(np.array([[1, 0], [0, -1]]), 0)]),
qml.Hamiltonian([2, 2], [qml.PauliZ(1.2), qml.PauliY("c")]),
qml.Hamiltonian(
[2, 2, 2, 2],
[
qml.PauliX("b") @ qml.PauliZ(1.2),
qml.PauliX("b") @ qml.PauliY("c"),
qml.Hermitian(np.array([[1, 0], [0, -1]]), 0) @ qml.PauliZ(1.2),
qml.Hermitian(np.array([[1, 0], [0, -1]]), 0) @ qml.PauliY("c"),
],
),
),
(
qml.Hamiltonian([1, 1], [qml.PauliX(0), qml.PauliZ(1)]),
qml.PauliX(2),
qml.Hamiltonian([1, 1], [qml.PauliX(0) @ qml.PauliX(2), qml.PauliZ(1) @ qml.PauliX(2)]),
),
# Case where arguments coeffs and ops to the Hamiltonian are iterables other than lists
(
qml.Hamiltonian((1, 1), (qml.PauliX(0), qml.PauliZ(1))),
qml.Hamiltonian(np.array([0.5, 0.5]), np.array([qml.PauliZ(2), qml.PauliZ(3)])),
qml.Hamiltonian(
(0.5, 0.5, 0.5, 0.5),
np.array(
[
qml.PauliX(0) @ qml.PauliZ(2),
qml.PauliX(0) @ qml.PauliZ(3),
qml.PauliZ(1) @ qml.PauliZ(2),
qml.PauliZ(1) @ qml.PauliZ(3),
]
),
),
),
]
big_hamiltonian_coeffs = np.array(
[
-0.04207898,
0.17771287,
0.17771287,
-0.24274281,
-0.24274281,
0.17059738,
0.04475014,
-0.04475014,
-0.04475014,
0.04475014,
0.12293305,
0.16768319,
0.16768319,
0.12293305,
0.17627641,
]
)
big_hamiltonian_ops = [
qml.Identity(wires=[0]),
qml.PauliZ(wires=[0]),
qml.PauliZ(wires=[1]),
qml.PauliZ(wires=[2]),
qml.PauliZ(wires=[3]),
qml.PauliZ(wires=[0]) @ qml.PauliZ(wires=[1]),
qml.PauliY(wires=[0]) @ qml.PauliX(wires=[1]) @ qml.PauliX(wires=[2]) @ qml.PauliY(wires=[3]),
qml.PauliY(wires=[0]) @ qml.PauliY(wires=[1]) @ qml.PauliX(wires=[2]) @ qml.PauliX(wires=[3]),
qml.PauliX(wires=[0]) @ qml.PauliX(wires=[1]) @ qml.PauliY(wires=[2]) @ qml.PauliY(wires=[3]),
qml.PauliX(wires=[0]) @ qml.PauliY(wires=[1]) @ qml.PauliY(wires=[2]) @ qml.PauliX(wires=[3]),
qml.PauliZ(wires=[0]) @ qml.PauliZ(wires=[2]),
qml.PauliZ(wires=[0]) @ qml.PauliZ(wires=[3]),
qml.PauliZ(wires=[1]) @ qml.PauliZ(wires=[2]),
qml.PauliZ(wires=[1]) @ qml.PauliZ(wires=[3]),
qml.PauliZ(wires=[2]) @ qml.PauliZ(wires=[3]),
]
big_hamiltonian = qml.Hamiltonian(big_hamiltonian_coeffs, big_hamiltonian_ops)
big_hamiltonian_grad = (
np.array(
[
[
[6.52084595e-18, -2.11464420e-02, -1.16576858e-02],
[-8.22589330e-18, -5.20597922e-02, -1.85365365e-02],
[-2.73850768e-17, 1.14202988e-01, -5.45041403e-03],
[-1.27514307e-17, -1.10465531e-01, 5.19489457e-02],
],
[
[-2.45428288e-02, 8.38921555e-02, -2.00641818e-17],
[-2.21085973e-02, 7.39332741e-04, -1.25580654e-17],
[9.62058625e-03, -1.51398765e-01, 2.02129847e-03],
[1.10020832e-03, -3.49066271e-01, 2.13669117e-03],
],
]
),
)
def circuit1(param):
"""First Pauli subcircuit"""
qml.RX(param, wires=0)
qml.RY(param, wires=0)
return qml.expval(qml.PauliX(0))
def circuit2(param):
"""Second Pauli subcircuit"""
qml.RX(param, wires=0)
qml.RY(param, wires=0)
return qml.expval(qml.PauliZ(0))
dev = qml.device("default.qubit", wires=2)
class TestHamiltonian:
"""Test the Hamiltonian class"""
@pytest.mark.parametrize("coeffs, ops", valid_hamiltonians)
def test_hamiltonian_valid_init(self, coeffs, ops):
"""Tests that the Hamiltonian object is created with
the correct attributes"""
H = qml.Hamiltonian(coeffs, ops)
assert np.allclose(H.terms[0], coeffs)
assert H.terms[1] == list(ops)
@pytest.mark.parametrize("coeffs, ops", invalid_hamiltonians)
def test_hamiltonian_invalid_init_exception(self, coeffs, ops):
"""Tests that an exception is raised when giving an invalid
combination of coefficients and ops"""
with pytest.raises(ValueError, match="number of coefficients and operators does not match"):
H = qml.Hamiltonian(coeffs, ops)
@pytest.mark.parametrize(
"obs", [[qml.PauliX(0), qml.CNOT(wires=[0, 1])], [qml.PauliZ, qml.PauliZ(0)]]
)
def test_hamiltonian_invalid_observables(self, obs):
"""Tests that an exception is raised when
a complex Hamiltonian is given"""
coeffs = [0.1, 0.2]
with pytest.raises(ValueError, match="observables are not valid"):
qml.Hamiltonian(coeffs, obs)
@pytest.mark.parametrize("coeffs, ops", valid_hamiltonians)
def test_hamiltonian_wires(self, coeffs, ops):
"""Tests that the Hamiltonian object has correct wires."""
H = qml.Hamiltonian(coeffs, ops)
assert set(H.wires) == set([w for op in H.ops for w in op.wires])
@pytest.mark.parametrize("terms, string", zip(valid_hamiltonians, valid_hamiltonians_str))
def test_hamiltonian_str(self, terms, string):
"""Tests that the __str__ function for printing is correct"""
H = qml.Hamiltonian(*terms)
assert H.__str__() == string
@pytest.mark.parametrize("terms, string", zip(valid_hamiltonians, valid_hamiltonians_repr))
def test_hamiltonian_repr(self, terms, string):
"""Tests that the __repr__ function for printing is correct"""
H = qml.Hamiltonian(*terms)
assert H.__repr__() == string
def test_hamiltonian_name(self):
"""Tests the name property of the Hamiltonian class"""
H = qml.Hamiltonian([], [])
assert H.name == "Hamiltonian"
@pytest.mark.parametrize(("old_H", "new_H"), simplify_hamiltonians)
def test_simplify(self, old_H, new_H):
"""Tests the simplify method"""
old_H.simplify()
assert old_H.compare(new_H)
def test_simplify_while_queueing(self):
"""Tests that simplifying a Hamiltonian in a tape context
queues the simplified Hamiltonian."""
with qml.tape.QuantumTape() as tape:
a = qml.PauliX(wires=0)
b = qml.PauliY(wires=1)
c = qml.Identity(wires=2)
d = b @ c
H = qml.Hamiltonian([1.0, 2.0], [a, d])
H.simplify()
# check that H is simplified
assert H.ops == [a, b]
# check that the simplified Hamiltonian is in the queue
assert H in tape._queue
def test_data(self):
"""Tests the obs_data method"""
H = qml.Hamiltonian(
[1, 1, 0.5],
[qml.PauliZ(0), qml.PauliZ(0) @ qml.PauliX(1), qml.PauliX(2) @ qml.Identity(1)],
)
data = H._obs_data()
assert data == {
(1, frozenset([("PauliZ", qml.wires.Wires(0), ())])),
(
1,
frozenset([("PauliZ", qml.wires.Wires(0), ()), ("PauliX", qml.wires.Wires(1), ())]),
),
(0.5, frozenset([("PauliX", qml.wires.Wires(2), ())])),
}
def test_hamiltonian_equal_error(self):
"""Tests that the correct error is raised when compare() is called on invalid type"""
H = qml.Hamiltonian([1], [qml.PauliZ(0)])
with pytest.raises(
ValueError,
match=r"Can only compare a Hamiltonian, and a Hamiltonian/Observable/Tensor.",
):
H.compare([[1, 0], [0, -1]])
@pytest.mark.parametrize(("H1", "H2", "res"), equal_hamiltonians)
def test_hamiltonian_equal(self, H1, H2, res):
"""Tests that equality can be checked between Hamiltonians"""
assert H1.compare(H2) == res
@pytest.mark.parametrize(("H1", "H2", "H"), add_hamiltonians)
def test_hamiltonian_add(self, H1, H2, H):
"""Tests that Hamiltonians are added correctly"""
assert H.compare(H1 + H2)
@pytest.mark.parametrize(("coeff", "H", "res"), mul_hamiltonians)
def test_hamiltonian_mul(self, coeff, H, res):
"""Tests that scalars and Hamiltonians are multiplied correctly"""
assert res.compare(coeff * H)
assert res.compare(H * coeff)
@pytest.mark.parametrize(("H1", "H2", "H"), sub_hamiltonians)
def test_hamiltonian_sub(self, H1, H2, H):
"""Tests that Hamiltonians are subtracted correctly"""
assert H.compare(H1 - H2)
@pytest.mark.parametrize(("H1", "H2", "H"), matmul_hamiltonians)
def test_hamiltonian_matmul(self, H1, H2, H):
"""Tests that Hamiltonians are tensored correctly"""
assert H.compare(H1 @ H2)
def test_hamiltonian_same_wires(self):
"""Test if a ValueError is raised when multiplication between Hamiltonians acting on the
same wires is attempted"""
h1 = qml.Hamiltonian([1, 1], [qml.PauliZ(0), qml.PauliZ(1)])
with pytest.raises(ValueError, match="Hamiltonians can only be multiplied together if"):
h1 @ h1
@pytest.mark.parametrize(("H1", "H2", "H"), add_hamiltonians)
def test_hamiltonian_iadd(self, H1, H2, H):
"""Tests that Hamiltonians are added inline correctly"""
H1 += H2
assert H.compare(H1)
@pytest.mark.parametrize(("coeff", "H", "res"), mul_hamiltonians)
def test_hamiltonian_imul(self, coeff, H, res):
"""Tests that scalars and Hamiltonians are multiplied inline correctly"""
H *= coeff
assert res.compare(H)
@pytest.mark.parametrize(("H1", "H2", "H"), sub_hamiltonians)
def test_hamiltonian_isub(self, H1, H2, H):
"""Tests that Hamiltonians are subtracted inline correctly"""
H1 -= H2
assert H.compare(H1)
def test_arithmetic_errors(self):
"""Tests that the arithmetic operations thrown the correct errors"""
H = qml.Hamiltonian([1], [qml.PauliZ(0)])
A = [[1, 0], [0, -1]]
with pytest.raises(ValueError, match="Cannot tensor product Hamiltonian"):
H @ A
with pytest.raises(ValueError, match="Cannot add Hamiltonian"):
H + A
with pytest.raises(ValueError, match="Cannot multiply Hamiltonian"):
H * A
with pytest.raises(ValueError, match="Cannot subtract"):
H - A
with pytest.raises(ValueError, match="Cannot add Hamiltonian"):
H += A
with pytest.raises(ValueError, match="Cannot multiply Hamiltonian"):
H *= A
with pytest.raises(ValueError, match="Cannot subtract"):
H -= A
def test_hamiltonian_queue(self):
"""Tests that Hamiltonian are queued correctly"""
# Outside of tape
queue = [
qml.Hadamard(wires=1),
qml.PauliX(wires=0),
qml.PauliZ(0),
qml.PauliZ(2),
qml.PauliZ(0) @ qml.PauliZ(2),
qml.PauliX(1),
qml.PauliZ(1),
qml.Hamiltonian(
[1, 3, 1], [qml.PauliX(1), qml.PauliZ(0) @ qml.PauliZ(2), qml.PauliZ(1)]
),
]
H = qml.PauliX(1) + 3 * qml.PauliZ(0) @ qml.PauliZ(2) + qml.PauliZ(1)
with qml.tape.QuantumTape() as tape:
qml.Hadamard(wires=1)
qml.PauliX(wires=0)
qml.expval(H)
assert np.all([q1.compare(q2) for q1, q2 in zip(tape.queue, queue)])
# Inside of tape
queue = [
qml.Hadamard(wires=1),
qml.PauliX(wires=0),
qml.PauliX(1),
qml.PauliZ(0),
qml.PauliZ(2),
qml.PauliZ(0) @ qml.PauliZ(2),
qml.PauliZ(1),
qml.Hamiltonian(
[1, 3, 1], [qml.PauliX(1), qml.PauliZ(0) @ qml.PauliZ(2), qml.PauliZ(1)]
),
]
with qml.tape.QuantumTape() as tape:
qml.Hadamard(wires=1)
qml.PauliX(wires=0)
qml.expval(
qml.Hamiltonian(
[1, 3, 1], [qml.PauliX(1), qml.PauliZ(0) @ qml.PauliZ(2), qml.PauliZ(1)]
)
)
assert np.all([q1.compare(q2) for q1, q2 in zip(tape.queue, queue)])
class TestHamiltonianCoefficients:
"""Test the creation of a Hamiltonian"""
@pytest.mark.parametrize("coeffs", [el[0] for el in COEFFS_PARAM_INTERFACE])
def test_creation_different_coeff_types(self, coeffs):
"""Check that Hamiltonian's coefficients and data attributes are set correctly."""
H = qml.Hamiltonian(coeffs, [qml.PauliX(0), qml.PauliZ(0)])
assert np.allclose(coeffs, H.coeffs)
assert np.allclose([coeffs[i] for i in range(qml.math.shape(coeffs)[0])], H.data)
@pytest.mark.parametrize("coeffs", [el[0] for el in COEFFS_PARAM_INTERFACE])
def test_simplify(self, coeffs):
"""Test that simplify works with different coefficient types."""
H1 = qml.Hamiltonian(coeffs, [qml.PauliX(0), qml.PauliZ(1)])
H2 = qml.Hamiltonian(coeffs, [qml.PauliX(0), qml.Identity(0) @ qml.PauliZ(1)])
H2.simplify()
assert H1.compare(H2)
assert H1.data == H2.data
class TestHamiltonianArithmeticTF:
"""Tests creation of Hamiltonians using arithmetic
operations with TensorFlow tensor coefficients."""
def test_hamiltonian_equal(self):
"""Tests equality"""
tf = pytest.importorskip("tensorflow")
coeffs = tf.Variable([0.5, -1.6])
obs = [qml.PauliX(0), qml.PauliY(1)]
H1 = qml.Hamiltonian(coeffs, obs)
coeffs2 = tf.Variable([-1.6, 0.5])
obs2 = [qml.PauliY(1), qml.PauliX(0)]
H2 = qml.Hamiltonian(coeffs2, obs2)
assert H1.compare(H2)
def test_hamiltonian_add(self):
"""Tests that Hamiltonians are added correctly"""
tf = pytest.importorskip("tensorflow")
coeffs = tf.Variable([0.5, -1.6])
obs = [qml.PauliX(0), qml.PauliY(1)]
H1 = qml.Hamiltonian(coeffs, obs)
coeffs2 = tf.Variable([0.5, -0.4])
H2 = qml.Hamiltonian(coeffs2, obs)
coeffs_expected = tf.Variable([1.0, -2.0])
H = qml.Hamiltonian(coeffs_expected, obs)
assert H.compare(H1 + H2)
H1 += H2
assert H.compare(H1)
def test_hamiltonian_sub(self):
"""Tests that Hamiltonians are subtracted correctly"""
tf = pytest.importorskip("tensorflow")
coeffs = tf.Variable([1.0, -2.0])
obs = [qml.PauliX(0), qml.PauliY(1)]
H1 = qml.Hamiltonian(coeffs, obs)
coeffs2 = tf.Variable([0.5, -0.4])
H2 = qml.Hamiltonian(coeffs2, obs)
coeffs_expected = tf.Variable([0.5, -1.6])
H = qml.Hamiltonian(coeffs_expected, obs)
assert H.compare(H1 - H2)
H1 -= H2
assert H.compare(H1)
def test_hamiltonian_matmul(self):
"""Tests that Hamiltonians are tensored correctly"""
tf = pytest.importorskip("tensorflow")
coeffs = tf.Variable([1.0, 2.0])
obs = [qml.PauliX(0), qml.PauliY(1)]
H1 = qml.Hamiltonian(coeffs, obs)
coeffs2 = tf.Variable([-1.0, -2.0])
obs2 = [qml.PauliX(2), qml.PauliY(3)]
H2 = qml.Hamiltonian(coeffs2, obs2)
coeffs_expected = tf.Variable([-4.0, -2.0, -2.0, -1.0])
obs_expected = [
qml.PauliY(1) @ qml.PauliY(3),
qml.PauliX(0) @ qml.PauliY(3),
qml.PauliX(2) @ qml.PauliY(1),
qml.PauliX(0) @ qml.PauliX(2),
]
H = qml.Hamiltonian(coeffs_expected, obs_expected)
assert H.compare(H1 @ H2)
class TestHamiltonianArithmeticTorch:
"""Tests creation of Hamiltonians using arithmetic
operations with torch tensor coefficients."""
def test_hamiltonian_equal(self):
"""Tests equality"""
torch = pytest.importorskip("torch")
coeffs = torch.tensor([0.5, -1.6])
obs = [qml.PauliX(0), qml.PauliY(1)]
H1 = qml.Hamiltonian(coeffs, obs)
coeffs2 = torch.tensor([-1.6, 0.5])
obs2 = [qml.PauliY(1), qml.PauliX(0)]
H2 = qml.Hamiltonian(coeffs2, obs2)
assert H1.compare(H2)
def test_hamiltonian_add(self):
"""Tests that Hamiltonians are added correctly"""
torch = pytest.importorskip("torch")
coeffs = torch.tensor([0.5, -1.6])
obs = [qml.PauliX(0), qml.PauliY(1)]
H1 = qml.Hamiltonian(coeffs, obs)
coeffs2 = torch.tensor([0.5, -0.4])
H2 = qml.Hamiltonian(coeffs2, obs)
coeffs_expected = torch.tensor([1.0, -2.0])
H = qml.Hamiltonian(coeffs_expected, obs)
assert H.compare(H1 + H2)
H1 += H2
assert H.compare(H1)
def test_hamiltonian_sub(self):
"""Tests that Hamiltonians are subtracted correctly"""
torch = pytest.importorskip("torch")
coeffs = torch.tensor([1.0, -2.0])
obs = [qml.PauliX(0), qml.PauliY(1)]
H1 = qml.Hamiltonian(coeffs, obs)
coeffs2 = torch.tensor([0.5, -0.4])
H2 = qml.Hamiltonian(coeffs2, obs)
coeffs_expected = torch.tensor([0.5, -1.6])
H = qml.Hamiltonian(coeffs_expected, obs)
assert H.compare(H1 - H2)
H1 -= H2
assert H.compare(H1)
def test_hamiltonian_matmul(self):
"""Tests that Hamiltonians are tensored correctly"""
torch = pytest.importorskip("torch")
coeffs = torch.tensor([1.0, 2.0])
obs = [qml.PauliX(0), qml.PauliY(1)]
H1 = qml.Hamiltonian(coeffs, obs)
coeffs2 = torch.tensor([-1.0, -2.0])
obs2 = [qml.PauliX(2), qml.PauliY(3)]
H2 = qml.Hamiltonian(coeffs2, obs2)
coeffs_expected = torch.tensor([-4.0, -2.0, -2.0, -1.0])
obs_expected = [
qml.PauliY(1) @ qml.PauliY(3),
qml.PauliX(0) @ qml.PauliY(3),
qml.PauliX(2) @ qml.PauliY(1),
qml.PauliX(0) @ qml.PauliX(2),
]
H = qml.Hamiltonian(coeffs_expected, obs_expected)
assert H.compare(H1 @ H2)
class TestHamiltonianArithmeticAutograd:
"""Tests creation of Hamiltonians using arithmetic
operations with autograd tensor coefficients."""
def test_hamiltonian_equal(self):
"""Tests equality"""
coeffs = pnp.array([0.5, -1.6])
obs = [qml.PauliX(0), qml.PauliY(1)]
H1 = qml.Hamiltonian(coeffs, obs)
coeffs2 = pnp.array([-1.6, 0.5])
obs2 = [qml.PauliY(1), qml.PauliX(0)]
H2 = qml.Hamiltonian(coeffs2, obs2)
assert H1.compare(H2)
def test_hamiltonian_add(self):
"""Tests that Hamiltonians are added correctly"""
coeffs = pnp.array([0.5, -1.6])
obs = [qml.PauliX(0), qml.PauliY(1)]
H1 = qml.Hamiltonian(coeffs, obs)
coeffs2 = pnp.array([0.5, -0.4])
H2 = qml.Hamiltonian(coeffs2, obs)
coeffs_expected = pnp.array([1.0, -2.0])
H = qml.Hamiltonian(coeffs_expected, obs)
assert H.compare(H1 + H2)
H1 += H2
assert H.compare(H1)
def test_hamiltonian_sub(self):
"""Tests that Hamiltonians are subtracted correctly"""
coeffs = pnp.array([1.0, -2.0])
obs = [qml.PauliX(0), qml.PauliY(1)]
H1 = qml.Hamiltonian(coeffs, obs)
coeffs2 = pnp.array([0.5, -0.4])
H2 = qml.Hamiltonian(coeffs2, obs)
coeffs_expected = pnp.array([0.5, -1.6])
H = qml.Hamiltonian(coeffs_expected, obs)
assert H.compare(H1 - H2)
H1 -= H2
assert H.compare(H1)
def test_hamiltonian_matmul(self):
"""Tests that Hamiltonians are tensored correctly"""
coeffs = pnp.array([1.0, 2.0])
obs = [qml.PauliX(0), qml.PauliY(1)]
H1 = qml.Hamiltonian(coeffs, obs)
coeffs2 = pnp.array([-1.0, -2.0])
obs2 = [qml.PauliX(2), qml.PauliY(3)]
H2 = qml.Hamiltonian(coeffs2, obs2)
coeffs_expected = pnp.array([-4.0, -2.0, -2.0, -1.0])
obs_expected = [
qml.PauliY(1) @ qml.PauliY(3),
qml.PauliX(0) @ qml.PauliY(3),
qml.PauliX(2) @ qml.PauliY(1),
qml.PauliX(0) @ qml.PauliX(2),
]
H = qml.Hamiltonian(coeffs_expected, obs_expected)
assert H.compare(H1 @ H2)
class TestHamiltonianArithmeticJax:
"""Tests creation of Hamiltonians using arithmetic
operations with jax tensor coefficients."""
def test_hamiltonian_equal(self):
"""Tests equality"""
jax = pytest.importorskip("jax")
from jax import numpy as jnp
coeffs = jnp.array([0.5, -1.6])
obs = [qml.PauliX(0), qml.PauliY(1)]
H1 = qml.Hamiltonian(coeffs, obs)
coeffs2 = jnp.array([-1.6, 0.5])
obs2 = [qml.PauliY(1), qml.PauliX(0)]
H2 = qml.Hamiltonian(coeffs2, obs2)
assert H1.compare(H2)
def test_hamiltonian_add(self):
"""Tests that Hamiltonians are added correctly"""
jax = pytest.importorskip("jax")
from jax import numpy as jnp
coeffs = jnp.array([0.5, -1.6])
obs = [qml.PauliX(0), qml.PauliY(1)]
H1 = qml.Hamiltonian(coeffs, obs)
coeffs2 = jnp.array([0.5, -0.4])
H2 = qml.Hamiltonian(coeffs2, obs)
coeffs_expected = jnp.array([1.0, -2.0])
H = qml.Hamiltonian(coeffs_expected, obs)
assert H.compare(H1 + H2)
H1 += H2
assert H.compare(H1)
def test_hamiltonian_sub(self):
"""Tests that Hamiltonians are subtracted correctly"""
jax = pytest.importorskip("jax")
from jax import numpy as jnp
coeffs = jnp.array([1.0, -2.0])
obs = [qml.PauliX(0), qml.PauliY(1)]
H1 = qml.Hamiltonian(coeffs, obs)
coeffs2 = jnp.array([0.5, -0.4])
H2 = qml.Hamiltonian(coeffs2, obs)
coeffs_expected = jnp.array([0.5, -1.6])
H = qml.Hamiltonian(coeffs_expected, obs)
assert H.compare(H1 - H2)
H1 -= H2
assert H.compare(H1)
def test_hamiltonian_matmul(self):
"""Tests that Hamiltonians are tensored correctly"""
jax = pytest.importorskip("jax")
from jax import numpy as jnp
coeffs = jnp.array([1.0, 2.0])
obs = [qml.PauliX(0), qml.PauliY(1)]
H1 = qml.Hamiltonian(coeffs, obs)
coeffs2 = jnp.array([-1.0, -2.0])
obs2 = [qml.PauliX(2), qml.PauliY(3)]
H2 = qml.Hamiltonian(coeffs2, obs2)
coeffs_expected = jnp.array([-4.0, -2.0, -2.0, -1.0])
obs_expected = [
qml.PauliY(1) @ qml.PauliY(3),
qml.PauliX(0) @ qml.PauliY(3),
qml.PauliX(2) @ qml.PauliY(1),
qml.PauliX(0) @ qml.PauliX(2),
]
H = qml.Hamiltonian(coeffs_expected, obs_expected)
assert H.compare(H1 @ H2)
class TestGrouping:
"""Tests for the grouping functionality"""
def test_grouping_is_correct_kwarg(self):
"""Basic test checking that grouping with a kwarg works as expected"""
a = qml.PauliX(0)
b = qml.PauliX(1)
c = qml.PauliZ(0)
obs = [a, b, c]
coeffs = [1.0, 2.0, 3.0]
H = qml.Hamiltonian(coeffs, obs, grouping_type="qwc")
assert H.grouping_indices == [[0, 1], [2]]
def test_grouping_is_correct_compute_grouping(self):
"""Basic test checking that grouping with compute_grouping works as expected"""
a = qml.PauliX(0)
b = qml.PauliX(1)
c = qml.PauliZ(0)
obs = [a, b, c]
coeffs = [1.0, 2.0, 3.0]
H = qml.Hamiltonian(coeffs, obs, grouping_type="qwc")
H.compute_grouping()
assert H.grouping_indices == [[0, 1], [2]]
def test_grouping_for_non_groupable_hamiltonians(self):
"""Test that grouping is computed correctly, even if no observables commute"""
a = qml.PauliX(0)
b = qml.PauliY(0)
c = qml.PauliZ(0)
obs = [a, b, c]
coeffs = [1.0, 2.0, 3.0]
H = qml.Hamiltonian(coeffs, obs, grouping_type="qwc")
assert H.grouping_indices == [[0], [1], [2]]
def test_grouping_is_reset_when_simplifying(self):
"""Tests that calling simplify() resets the grouping"""
obs = [qml.PauliX(0), qml.PauliX(1), qml.PauliZ(0)]
coeffs = [1.0, 2.0, 3.0]
H = qml.Hamiltonian(coeffs, obs, grouping_type="qwc")
assert H.grouping_indices is not None
H.simplify()
assert H.grouping_indices is None
def test_grouping_does_not_alter_queue(self):
"""Tests that grouping is invisible to the queue."""
a = qml.PauliX(0)
b = qml.PauliX(1)
c = qml.PauliZ(0)
obs = [a, b, c]
coeffs = [1.0, 2.0, 3.0]
with qml.tape.QuantumTape() as tape:
H = qml.Hamiltonian(coeffs, obs, grouping_type="qwc")
assert tape.queue == [a, b, c, H]
def test_grouping_method_can_be_set(self):
r"""Tests that the grouping method can be controlled by kwargs.
        This is done by changing the method from the default to 'lf' and checking the result."""
a = qml.PauliX(0)
b = qml.PauliX(1)
c = qml.PauliZ(0)
obs = [a, b, c]
coeffs = [1.0, 2.0, 3.0]
# compute grouping during construction
H2 = qml.Hamiltonian(coeffs, obs, grouping_type="qwc", method="lf")
assert H2.grouping_indices == [[2, 1], [0]]
# compute grouping separately
H3 = qml.Hamiltonian(coeffs, obs, grouping_type=None)
H3.compute_grouping(method="lf")
assert H3.grouping_indices == [[2, 1], [0]]
class TestHamiltonianEvaluation:
"""Test the usage of a Hamiltonian as an observable"""
@pytest.mark.parametrize("coeffs, param, interface", COEFFS_PARAM_INTERFACE)
def test_vqe_forward_different_coeff_types(self, coeffs, param, interface):
"""Check that manually splitting a Hamiltonian expectation has the same
result as passing the Hamiltonian as an observable"""
dev = qml.device("default.qubit", wires=2)
H = qml.Hamiltonian(coeffs, [qml.PauliX(0), qml.PauliZ(0)])
@qml.qnode(dev, interface=interface)
def circuit():
qml.RX(param, wires=0)
qml.RY(param, wires=0)
return qml.expval(H)
@qml.qnode(dev, interface=interface)
def circuit1():
qml.RX(param, wires=0)
qml.RY(param, wires=0)
return qml.expval(qml.PauliX(0))
@qml.qnode(dev, interface=interface)
def circuit2():
qml.RX(param, wires=0)
qml.RY(param, wires=0)
return qml.expval(qml.PauliZ(0))
res = circuit()
res_expected = coeffs[0] * circuit1() + coeffs[1] * circuit2()
assert np.isclose(res, res_expected)
def test_simplify_reduces_tape_parameters(self):
"""Test that simplifying a Hamiltonian reduces the number of parameters on a tape"""
dev = qml.device("default.qubit", wires=2)
@qml.qnode(dev)
def circuit():
qml.RY(0.1, wires=0)
return qml.expval(
qml.Hamiltonian([1.0, 2.0], [qml.PauliX(1), qml.PauliX(1)], simplify=True)
)
circuit()
pars = circuit.qtape.get_parameters(trainable_only=False)
# simplify worked and added 1. and 2.
assert pars == [0.1, 3.0]
@pytest.mark.parametrize("simplify", [True, False])
@pytest.mark.parametrize("group", [None, "qwc"])
class TestHamiltonianDifferentiation:
"""Test that the Hamiltonian coefficients are differentiable"""
def test_vqe_differentiation_paramshift(self, simplify, group):
"""Test the parameter-shift method by comparing the differentiation of linearly combined subcircuits
with the differentiation of a Hamiltonian expectation"""
coeffs = np.array([-0.05, 0.17])
param = np.array(1.7)
# differentiating a circuit with measurement expval(H)
@qml.qnode(dev, diff_method="parameter-shift")
def circuit(coeffs, param):
qml.RX(param, wires=0)
qml.RY(param, wires=0)
return qml.expval(
qml.Hamiltonian(
coeffs,
[qml.PauliX(0), qml.PauliZ(0)],
simplify=simplify,
grouping_type=group,
)
)
grad_fn = qml.grad(circuit)
grad = grad_fn(coeffs, param)
# differentiating a cost that combines circuits with
# measurements expval(Pauli)
half1 = qml.QNode(circuit1, dev, diff_method="parameter-shift")
half2 = qml.QNode(circuit2, dev, diff_method="parameter-shift")
def combine(coeffs, param):
return coeffs[0] * half1(param) + coeffs[1] * half2(param)
grad_fn_expected = qml.grad(combine)
grad_expected = grad_fn_expected(coeffs, param)
assert np.allclose(grad[0], grad_expected[0])
assert np.allclose(grad[1], grad_expected[1])
def test_vqe_differentiation_autograd(self, simplify, group):
"""Test the autograd interface by comparing the differentiation of linearly combined subcircuits
with the differentiation of a Hamiltonian expectation"""
coeffs = pnp.array([-0.05, 0.17], requires_grad=True)
param = pnp.array(1.7, requires_grad=True)
# differentiating a circuit with measurement expval(H)
@qml.qnode(dev, interface="autograd")
def circuit(coeffs, param):
qml.RX(param, wires=0)
qml.RY(param, wires=0)
return qml.expval(
qml.Hamiltonian(
coeffs,
[qml.PauliX(0), qml.PauliZ(0)],
simplify=simplify,
grouping_type=group,
)
)
grad_fn = qml.grad(circuit)
grad = grad_fn(coeffs, param)
# differentiating a cost that combines circuits with
# measurements expval(Pauli)
half1 = qml.QNode(circuit1, dev, interface="autograd")
half2 = qml.QNode(circuit2, dev, interface="autograd")
def combine(coeffs, param):
return coeffs[0] * half1(param) + coeffs[1] * half2(param)
grad_fn_expected = qml.grad(combine)
grad_expected = grad_fn_expected(coeffs, param)
assert np.allclose(grad[0], grad_expected[0])
assert np.allclose(grad[1], grad_expected[1])
def test_vqe_differentiation_jax(self, simplify, group):
"""Test the jax interface by comparing the differentiation of linearly combined subcircuits
with the differentiation of a Hamiltonian expectation"""
jax = pytest.importorskip("jax")
jnp = pytest.importorskip("jax.numpy")
coeffs = jnp.array([-0.05, 0.17])
param = jnp.array(1.7)
# differentiating a circuit with measurement expval(H)
@qml.qnode(dev, interface="jax", diff_method="backprop")
def circuit(coeffs, param):
qml.RX(param, wires=0)
qml.RY(param, wires=0)
return qml.expval(
qml.Hamiltonian(
coeffs,
[qml.PauliX(0), qml.PauliZ(0)],
simplify=simplify,
grouping_type=group,
)
)
grad_fn = jax.grad(circuit)
grad = grad_fn(coeffs, param)
# differentiating a cost that combines circuits with
# measurements expval(Pauli)
half1 = qml.QNode(circuit1, dev, interface="jax", diff_method="backprop")
half2 = qml.QNode(circuit2, dev, interface="jax", diff_method="backprop")
def combine(coeffs, param):
return coeffs[0] * half1(param) + coeffs[1] * half2(param)
grad_fn_expected = jax.grad(combine)
grad_expected = grad_fn_expected(coeffs, param)
        assert np.allclose(grad[0], grad_expected[0])
        assert np.allclose(grad[1], grad_expected[1])
'''
Created on Nov. 11, 2019
Mosaik interface for the Distribution State Estimation.
@file simulator_dse.py
@author <NAME>
@date 2019.11.11
@version 0.1
@company University of Alberta - Computing Science
'''
import mosaik_api
import numpy as np
import pandas as pd
import os
import sys
import csv
from ast import literal_eval
import scipy.io as spio
import math
from pathlib import Path
META = {
'models': {
'Estimator': {
'public': True,
'params': ['idt', 'ymat_file', 'devs_file', 'acc_period', 'max_iter', 'threshold', 'baseS', 'baseV', 'baseNode', 'basePF', 'se_period', 'se_result', 'pseudo_loads', 'verbose'],
'attrs': ['v', 't'],
},
},
}
class DSESim(mosaik_api.Simulator):
def __init__(self):
super().__init__(META)
self.entities = {}
self.next = {}
self.instances = {}
self.devParams = {}
self.data = {}
def init(self, sid, eid_prefix=None, step_size=1, verbose=0):
if eid_prefix is not None:
self.eid_prefix = eid_prefix
self.sid = sid
self.step_size = step_size
self.verbose = verbose
self.cktState = {}
self.MsgCount = 0
return self.meta
def create(self, num, model, idt, ymat_file, devs_file, acc_period, max_iter, threshold, baseS, baseV, baseNode, basePF, se_period, pseudo_loads, se_result):
if (self.verbose > 0): print('simulator_dse::create', num, model, idt)
eid = '%s%s' % (self.eid_prefix, idt)
self.entities[eid] = {}
self.entities[eid]['ymat_file'] = ymat_file
self.entities[eid]['devs_file'] = devs_file
self.entities[eid]['acc_period'] = acc_period
self.entities[eid]['max_iter'] = max_iter
self.entities[eid]['threshold'] = threshold
self.entities[eid]['type'] = model
self.entities[eid]['baseS'] = baseS
self.entities[eid]['baseV'] = baseV
self.entities[eid]['baseI'] = baseS/baseV
self.entities[eid]['baseY'] = baseS/np.power(baseV,2)
self.entities[eid]['baseNode'] = baseNode
self.entities[eid]['basePF'] = basePF
self.entities[eid]['se_period'] = se_period
self.entities[eid]['pseudo_loads'] = pseudo_loads
self.entities[eid]['se_result'] = se_result
self.entities[eid]['vecZ'] = {}
self.entities[eid]['nodes'] = 0
self.entities[eid]['df_devs'] = pd.DataFrame({})
''' read ymat_file and get number of nodes '''
self.entities[eid]['ymat_data'] = np.load(ymat_file)
self.entities[eid]['nodes'] = len(self.entities[eid]['ymat_data'])
self.entities[eid]['ymat_data'] = self.entities[eid]['ymat_data'] / self.entities[eid]['baseY']
if (self.verbose > 0): print('DSESim::create Nodes YMat:', self.entities[eid]['nodes'])
''' get device list '''
self.entities[eid]['df_devs'] = pd.read_csv(devs_file, delimiter = ',', index_col = 'idn')
self.entities[eid]['df_devs']= pd.concat([self.entities[eid]['df_devs'], pd.DataFrame({'SPA':[], # true power phase A
'SQA':[], # reactive power phase A
'SPB':[], # true power phase B
'SQB':[], # reactive power phase B
'SPC':[], # true power phase C
'SQC':[], # reactive power phase C
'VMA':[], # voltage magnitude phase A
'VAA':[], # voltage angle phase A
'VMB':[], # voltage magnitude phase B
'VAB':[], # voltage angle phase B
'VMC':[], # voltage magnitude phase C
'VAC':[], # voltage angle phase C
'IMA':[], # current magnitude phase A
'IAA':[], # current angle phase A
'IMB':[], # current magnitude phase B
'IAB':[], # current angle phase B
'IMC':[], # current magnitude phase C
'IAC':[], # current angle phase C
'TS':[]})] # last time stamp
, sort=False)
if (self.verbose > 1):
print('DSESim::create Entities:')
print(self.entities[eid]['df_devs'])
''' create vecZ and vecZType '''
df_devs = self.entities[eid]['df_devs']
nr_phasors = (df_devs[df_devs['type'] == 'phasor']).shape[0]
self.entities[eid]['vecZ'] = np.zeros((int(self.entities[eid]['nodes']) - 3) + # P values
(int(self.entities[eid]['nodes']) - 3) + # Q values
(nr_phasors*3 ) + # Voltage magnitude
(nr_phasors*3 - 1), # Voltage angles
np.float64)
entities = []
self.data[eid] = {}
self.data[eid]['v'] = 0
self.data[eid]['t'] = 0
entities.append({'eid': eid, 'type': model})
return entities
def step(self, time, inputs):
if (self.verbose > 5): print('simulator_dse::step INPUT', time, inputs)
next_step = time + self.step_size
''' prepare data to be used in get_data '''
#self.data = {}
''' prepare data to be used in get_data and calculate system state '''
''' for each instance '''
for dse_eid, attrs in inputs.items():
attr_v = attrs['v']
df_devs = self.entities[dse_eid]['df_devs']
''' for each smartmeter/phasor '''
for dev_instance, param in attr_v.items():
if (param != None and param != 'null' and param != "None"):
self.MsgCount += 1
                    ''' change to dict because NS-3 needs to transmit strings '''
if isinstance(param, str):
param = literal_eval(param)
dev_idn = (param['IDT']).split("_")[1]
dev_type = param['TYPE']
dev_name = dev_instance.split(".")[1]
''' store values already per-unit '''
if (self.verbose > 1):
print('simulator_dse::step INPUT PROCESSED: ',
'TIME:', time,
'TIME_Sent:', param['TS'],
'IDN:', dev_idn,
'TYPE:', dev_type,
'NAME:', dev_name,
'PARMS:', param)
for dev_param_key in param.keys():
if dev_param_key == 'VA' and dev_type == 'Phasor':
df_devs.at[int(dev_idn), 'VMA'] = param['VA'][0] / self.entities[dse_eid]['baseV']
df_devs.at[int(dev_idn), 'VAA'] = param['VA'][1]
elif dev_param_key == 'VB' and dev_type == 'Phasor':
df_devs.at[int(dev_idn), 'VMB'] = param['VB'][0] / self.entities[dse_eid]['baseV']
df_devs.at[int(dev_idn), 'VAB'] = param['VB'][1]
elif dev_param_key == 'VC' and dev_type == 'Phasor':
df_devs.at[int(dev_idn), 'VMC'] = param['VC'][0] / self.entities[dse_eid]['baseV']
df_devs.at[int(dev_idn), 'VAC'] = param['VC'][1]
elif dev_param_key == 'IA':
df_devs.at[int(dev_idn), 'IMA'] = param['IA'][0] / self.entities[dse_eid]['baseI']
df_devs.at[int(dev_idn), 'IAA'] = param['IA'][1]
elif dev_param_key == 'IB':
df_devs.at[int(dev_idn), 'IMB'] = param['IB'][0] / self.entities[dse_eid]['baseI']
df_devs.at[int(dev_idn), 'IAB'] = param['IB'][1]
elif dev_param_key == 'IC':
df_devs.at[int(dev_idn), 'IMC'] = param['IC'][0] / self.entities[dse_eid]['baseI']
df_devs.at[int(dev_idn), 'IAC'] = param['IC'][1]
elif dev_param_key == 'SPA':
df_devs.at[int(dev_idn), 'SPA'] = param['SPA'] / (self.entities[dse_eid]['baseS']*1000)
df_devs.at[int(dev_idn), 'SQA'] = df_devs.at[int(dev_idn), 'SPA'] * np.tan(np.arccos(self.entities[dse_eid]['basePF'] ))
elif dev_param_key == 'SPB':
df_devs.at[int(dev_idn), 'SPB'] = param['SPB'] / (self.entities[dse_eid]['baseS']*1000)
df_devs.at[int(dev_idn), 'SQB'] = df_devs.at[int(dev_idn), 'SPB'] * np.tan(np.arccos(self.entities[dse_eid]['basePF'] ))
elif dev_param_key == 'SPC':
df_devs.at[int(dev_idn), 'SPC'] = param['SPC'] / (self.entities[dse_eid]['baseS']*1000)
df_devs.at[int(dev_idn), 'SQC'] = df_devs.at[int(dev_idn), 'SPC'] * np.tan(np.arccos(self.entities[dse_eid]['basePF'] ))
elif dev_param_key == 'TS':
df_devs.at[int(dev_idn), 'TS'] = param['TS']
elif ((dev_param_key == 'VA') or (dev_param_key == 'VB') or (dev_param_key == 'VC')) and (dev_type != 'Phasor'):
pass
elif (dev_param_key == 'IDT') or (dev_param_key == 'TYPE'):
pass
else:
raise Exception('dev_param_key value unknown:', dev_param_key, "Device:", dev_name)
if (0 == time % self.entities[dse_eid]['acc_period']):
self.data[dse_eid]['v'] = self.MsgCount
self.data[dse_eid]['t'] = time
self.MsgCount = 0
#(self.entities[dse_eid]['vecZ'], _) = self.createZVectors(dse_eid, len(self.entities[dse_eid]['vecZ']))
# se_period = 1000
# if next_step == 500:
# print("Check the phasors!")
if time > 0 and time % self.entities[dse_eid]['se_period'] == 0:
# if time % se_period == 0:
z, ztype, error_cov = self.get_measurements(df_devs, time)
stop_counter = 0
while stop_counter < 5:
v_wls, iter_number = self.state_estimation(self.entities[dse_eid]['ymat_data'], z, ztype, error_cov,
self.entities[dse_eid]['max_iter'], self.entities[dse_eid]['threshold'])
                    if 1 < iter_number < 10:  # the original 'iter_number > 1 & iter_number < 10' mis-parses: '&' binds tighter than '<'
stop_counter = 5
else:
stop_counter += 1
# array_name = 'v_wls_{}'.format(int(time / self.entities[dse_eid]['se_period']))
array_name = 'v_wls'
# if Path('C:/OpenDSS/DSSE33DetailedMultiPhase/wls_results.mat').is_file():
if time / self.entities[dse_eid]['se_period'] > 1:
mat = spio.loadmat(self.entities[dse_eid]['se_result'], squeeze_me=True)
mat[array_name] = np.vstack((mat[array_name], v_wls))
spio.savemat(self.entities[dse_eid]['se_result'], mat)
else:
spio.savemat(self.entities[dse_eid]['se_result'], {array_name: v_wls})
return next_step
def state_estimation(self, ybus, z, ztype, err_cov, iter_max, threshold):
ztype= np.array(ztype)
n = len(ybus) # number of single phase nodes
g = np.real(ybus) # real part of the admittance matrix
        b = np.imag(ybus)  # imaginary part of the admittance matrix
x = np.concatenate(
([-2 * math.pi / 3, -4 * math.pi / 3],
np.tile([0, -2 * math.pi / 3, -4 * math.pi / 3], math.floor(n / 3) - 1),
         np.ones(n) * (1 + .000001 * np.random.randn(n)))) # our initial guess for the voltage phasors
# x = np.concatenate((np.angle(vtrue[1:]), np.abs(vtrue)))
k = 0
cont = True
while k < iter_max and cont:
v = x[n - 1:] # voltage magnitudes
th = np.concatenate(([0], x[0: n - 1])) # voltage angles. we add a 0 for the reference bus
# calculating the measurement functions h(x)
h = np.zeros(len(z))
for m in range(0, len(z)):
if ztype[m, 0] == 2: # Pi active load demand at node i
i = ztype[m, 1] - 1
for jj in range(n):
h[m] += v[i] * v[jj] * (
g[i, jj] * math.cos(th[i] - th[jj]) + b[i, jj] * math.sin(th[i] - th[jj]))
elif ztype[m, 0] == 4: # Qi reactive load demand at node i
i = ztype[m, 1] - 1
for jj in range(n):
h[m] += v[i] * v[jj] * (
g[i, jj] * math.sin(th[i] - th[jj]) - b[i, jj] * math.cos(th[i] - th[jj]))
elif ztype[m, 0] == 5: # |Vi| voltage phasor magnitude at bus i
i = ztype[m, 1] - 1
h[m] = v[i]
elif ztype[m, 0] == 6: # Theta Vi voltage phasor phase angle at bus i
i = ztype[m, 1] - 1
h[m] = th[i]
elif ztype[m, 0] == 7 or ztype[m, 0] == 8:
i = ztype[m, 1] - 1 # sending node
jj = ztype[m, 2] - 1 # receiving node
ph = ztype[m, 3] - 1 # phase
a1, b1, c1 = 3 * i + [0, 1, 2]
a2, b2, c2 = 3 * jj + [0, 1, 2]
yline = -ybus[np.array([a1, b1, c1])[:, None], np.array([a2, b2, c2])]
gline = np.real(yline)
bline = np.imag(yline)
if ztype[m, 0] == 7: # real part of Iij phasor
h[m] = gline[ph, 0] * (v[a1] * math.cos(th[a1]) - v[a2] * math.cos(th[a2])) - \
bline[ph, 0] * (v[a1] * math.sin(th[a1]) - v[a2] * math.sin(th[a2])) + \
gline[ph, 1] * (v[b1] * math.cos(th[b1]) - v[b2] * math.cos(th[b2])) - \
bline[ph, 1] * (v[b1] * math.sin(th[b1]) - v[b2] * math.sin(th[b2])) + \
gline[ph, 2] * (v[c1] * math.cos(th[c1]) - v[c2] * math.cos(th[c2])) - \
bline[ph, 2] * (v[c1] * math.sin(th[c1]) - v[c2] * math.sin(th[c2]))
else: # imaginary part of Iij phasor
h[m] = gline[ph, 0] * (v[a1] * math.sin(th[a1]) - v[a2] * math.sin(th[a2])) + \
bline[ph, 0] * (v[a1] * math.cos(th[a1]) - v[a2] * math.cos(th[a2])) + \
gline[ph, 1] * (v[b1] * math.sin(th[b1]) - v[b2] * math.sin(th[b2])) + \
bline[ph, 1] * (v[b1] * math.cos(th[b1]) - v[b2] * math.cos(th[b2])) + \
gline[ph, 2] * (v[c1] * math.sin(th[c1]) - v[c2] * math.sin(th[c2])) + \
bline[ph, 2] * (v[c1] * math.cos(th[c1]) - v[c2] * math.cos(th[c2]))
else:
print("Measurement type not defined!")
# print(h-z)
# calculating the jacobian of h
h_jacob = np.zeros([len(z), len(x)])
for m in range(0, len(z)):
if ztype[m, 0] == 2: # Pi active load demand at node i
i = ztype[m, 1] - 1
for jj in range(n):
if jj != i:
if jj > 0:
h_jacob[m, jj - 1] = v[i] * v[jj] * (g[i, jj] * math.sin(th[i] - th[jj]) -
b[i, jj] * math.cos(th[i] - th[jj]))
h_jacob[m, jj + n - 1] = v[i] * (g[i, jj] * math.cos(th[i] - th[jj]) +
b[i, jj] * math.sin(th[i] - th[jj]))
if i > 0:
h_jacob[m, i - 1] = -v[i] ** 2 * b[i, i]
for jj in range(n):
h_jacob[m, i - 1] += v[i] * v[jj] * (-g[i, jj] * math.sin(th[i] - th[jj]) +
b[i, jj] * math.cos(th[i] - th[jj]))
h_jacob[m, i + n - 1] = v[i] * g[i, i]
for jj in range(n):
h_jacob[m, i + n - 1] += v[jj] * (g[i, jj] * math.cos(th[i] - th[jj]) +
b[i, jj] * math.sin(th[i] - th[jj]))
elif ztype[m, 0] == 4: # Qi reactive load demand at node i
i = ztype[m, 1] - 1
for jj in range(n):
if jj != i:
if jj > 0:
h_jacob[m, jj - 1] = v[i] * v[jj] * (-g[i, jj] * math.cos(th[i] - th[jj]) -
b[i, jj] * math.sin(th[i] - th[jj]))
h_jacob[m, jj + n - 1] = v[i] * (g[i, jj] * math.sin(th[i] - th[jj]) -
b[i, jj] * math.cos(th[i] - th[jj]))
if i > 0:
h_jacob[m, i - 1] = -v[i] ** 2 * g[i, i]
for jj in range(n):
h_jacob[m, i - 1] += v[i] * v[jj] * (g[i, jj] * math.cos(th[i] - th[jj]) +
b[i, jj] * math.sin(th[i] - th[jj]))
h_jacob[m, i + n - 1] = -v[i] * b[i, i]
for jj in range(n):
h_jacob[m, i + n - 1] += v[jj] * (g[i, jj] * math.sin(th[i] - th[jj]) -
b[i, jj] * math.cos(th[i] - th[jj]))
elif ztype[m, 0] == 5: # |Vi| voltage phasor magnitude at bus i
i = ztype[m, 1] - 1
h_jacob[m, i + n - 1] = 1
elif ztype[m, 0] == 6: # Theta Vi voltage phasor phase angle at bus i
i = ztype[m, 1] - 1
h_jacob[m, i - 1] = 1
elif ztype[m, 0] == 7 or ztype[m, 0] == 8:
i = ztype[m, 1] - 1 # sending node
jj = ztype[m, 2] - 1 # receiving node
ph = ztype[m, 3] - 1 # phase
a1, b1, c1 = 3 * i + [0, 1, 2]
a2, b2, c2 = 3 * jj + [0, 1, 2]
yline = -ybus[np.array([a1, b1, c1])[:, None], np.array([a2, b2, c2])]
gline = np.real(yline)
bline = np.imag(yline)
if ztype[m, 0] == 7: # real part of Iij phasor
# derivatives with respect to voltage phase angles
if a1 > 0:
h_jacob[m, a1 - 1] = -gline[ph, 0] * v[a1] * math.sin(th[a1]) - bline[ph, 0] * v[
a1] * math.cos(th[a1])
h_jacob[m, b1 - 1] = -gline[ph, 1] * v[b1] * math.sin(th[b1]) - bline[ph, 1] * v[b1] * math.cos(
th[b1])
h_jacob[m, c1 - 1] = -gline[ph, 2] * v[c1] * math.sin(th[c1]) - bline[ph, 2] * v[c1] * math.cos(
th[c1])
h_jacob[m, a2 - 1] = gline[ph, 0] * v[a2] * math.sin(th[a2]) + bline[ph, 0] * v[a2] * math.cos(
th[a2])
h_jacob[m, b2 - 1] = gline[ph, 1] * v[b2] * math.sin(th[b2]) + bline[ph, 1] * v[b2] * math.cos(
th[b2])
h_jacob[m, c2 - 1] = gline[ph, 2] * v[c2] * math.sin(th[c2]) + bline[ph, 2] * v[c2] * math.cos(
th[c2])
# derivatives with respect to voltage magnitudes
h_jacob[m, a1 + n - 1] = gline[ph, 0] * math.cos(th[a1]) - bline[ph, 0] * math.sin(th[a1])
h_jacob[m, b1 + n - 1] = gline[ph, 1] * math.cos(th[b1]) - bline[ph, 1] * math.sin(th[b1])
h_jacob[m, c1 + n - 1] = gline[ph, 2] * math.cos(th[c1]) - bline[ph, 2] * math.sin(th[c1])
h_jacob[m, a2 + n - 1] = -gline[ph, 0] * math.cos(th[a2]) + bline[ph, 0] * math.sin(th[a2])
h_jacob[m, b2 + n - 1] = -gline[ph, 1] * math.cos(th[b2]) + bline[ph, 1] * math.sin(th[b2])
h_jacob[m, c2 + n - 1] = -gline[ph, 2] * math.cos(th[c2]) + bline[ph, 2] * math.sin(th[c2])
else: # imaginary part of Iij phasor
if a1 > 0:
h_jacob[m, a1 - 1] = gline[ph, 0] * v[a1] * math.cos(th[a1]) - bline[ph, 0] * v[
a1] * math.sin(th[a1])
h_jacob[m, b1 - 1] = gline[ph, 1] * v[b1] * math.cos(th[b1]) - bline[ph, 1] * v[b1] * math.sin(
th[b1])
h_jacob[m, c1 - 1] = gline[ph, 2] * v[c1] * math.cos(th[c1]) - bline[ph, 2] * v[c1] * math.sin(
th[c1])
h_jacob[m, a2 - 1] = -gline[ph, 0] * v[a2] * math.cos(th[a2]) + bline[ph, 0] * v[a2] * math.sin(
th[a2])
h_jacob[m, b2 - 1] = -gline[ph, 1] * v[b2] * math.cos(th[b2]) + bline[ph, 1] * v[b2] * math.sin(
th[b2])
h_jacob[m, c2 - 1] = -gline[ph, 2] * v[c2] * math.cos(th[c2]) + bline[ph, 2] * v[c2] * math.sin(
th[c2])
# derivatives with respect to voltage magnitudes
h_jacob[m, a1 + n - 1] = gline[ph, 0] * math.sin(th[a1]) + bline[ph, 0] * math.cos(th[a1])
h_jacob[m, b1 + n - 1] = gline[ph, 1] * math.sin(th[b1]) + bline[ph, 1] * math.cos(th[b1])
h_jacob[m, c1 + n - 1] = gline[ph, 2] * math.sin(th[c1]) + bline[ph, 2] * math.cos(th[c1])
h_jacob[m, a2 + n - 1] = -gline[ph, 0] * math.sin(th[a2]) - bline[ph, 0] * math.cos(th[a2])
h_jacob[m, b2 + n - 1] = -gline[ph, 1] * math.sin(th[b2]) - bline[ph, 1] * math.cos(th[b2])
h_jacob[m, c2 + n - 1] = -gline[ph, 2] * math.sin(th[c2]) - bline[ph, 2] * math.cos(th[c2])
else:
print("Measurement type not defined!")
# the right hand side of the equation
rhs = h_jacob.transpose() @ np.linalg.inv(err_cov) @ (z - h)
# d1 = h_jacob.transpose() @ np.linalg.inv(err_cov)
# d2 = np.linalg.inv(err_cov) @ (z-h)
# saving to mat file
# scipy.io.savemat('C:/Users/<NAME>/Desktop/testArrays.mat', {'d11': d1, 'd22': d2})
# print("Array saved")
# the gain matrix
gain = h_jacob.transpose() @ np.linalg.inv(err_cov) @ h_jacob
delta_x = np.linalg.solve(gain, rhs)
x += delta_x
if np.max( | np.absolute(delta_x) | numpy.absolute |
import sys
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
import numpy as np
from numpy.testing import (assert_, assert_array_equal, assert_allclose,
assert_equal)
from pytest import raises as assert_raises
from scipy.sparse import coo_matrix
from scipy.special import erf
from scipy.integrate._bvp import (modify_mesh, estimate_fun_jac,
estimate_bc_jac, compute_jac_indices,
construct_global_jac, solve_bvp)
def exp_fun(x, y):
return np.vstack((y[1], y[0]))
def exp_fun_jac(x, y):
df_dy = np.empty((2, 2, x.shape[0]))
df_dy[0, 0] = 0
df_dy[0, 1] = 1
df_dy[1, 0] = 1
df_dy[1, 1] = 0
return df_dy
def exp_bc(ya, yb):
return | np.hstack((ya[0] - 1, yb[0])) | numpy.hstack |
import os
import bisect
import warnings
from astropy.timeseries import periodograms
from pkg_resources import resource_stream
import numpy as np
from numpy import sqrt, sum
import matplotlib.pyplot as plt
from astropy.io import fits
from cached_property import cached_property
from .iCCF import Indicators
from .gaussian import gaussfit, RV, RVerror, FWHM, FWHMerror
from .keywords import getRV, getRVarray
from .utils import find_myself
# from .utils import get_orders_mean_wavelength
def read_spectral_format():
sf_red_stream = resource_stream(__name__, 'data/spectral_format_red.dat')
red = np.loadtxt(sf_red_stream)
sf_blue_stream = resource_stream(__name__, 'data/spectral_format_blue.dat')
blue = np.loadtxt(sf_blue_stream)
col_start_wave = 7
col_end_wave = 8
order_wave_range = {}
for i, order in enumerate(blue[::-1]):
order_range = [order[col_start_wave], order[col_end_wave]]
order_wave_range[i] = order_range
for i, order in enumerate(red[::-1], start=i+1):
order_range = [order[col_start_wave], order[col_end_wave]]
order_wave_range[i] = order_range
return order_wave_range
class chromaticRV():
def __init__(self, indicators):
"""
indicators : Indicators or list
Instance or list of instances of iCCF.Indicators
"""
self.order_wave_range = read_spectral_format()
self.wave_starts = [v[0] for v in self.order_wave_range.values()]
self.wave_ends = [v[1] for v in self.order_wave_range.values()]
self._blue_wave_limits = (440, 570)
self._mid_wave_limits = (570, 690)
self._red_wave_limits = (730, 790)
self._slice_policy = 0 # by default use both slices
self.blue_orders = self._find_orders(self._blue_wave_limits)
self.mid_orders = self._find_orders(self._mid_wave_limits)
self.red_orders = self._find_orders(self._red_wave_limits)
self._blueRV = None
self._midRV = None
self._redRV = None
self._blueRVerror = None
self._midRVerror = None
self._redRVerror = None
self.n = len(indicators)
if self.n == 1:
indicators = [indicators, ]
self.I = self.indicators = indicators
# store all but the last CCF for each of the Indicators instances
self.ccfs = [i.HDU[i._hdu_number].data[:-1] for i in self.I]
# store the last CCFs separately
self.ccf = [i.HDU[i._hdu_number].data[-1] for i in self.I]
# try storing the CCF uncertainties as well
self.eccfs = []
for i in self.I:
try:
self.eccfs.append(i.HDU[2].data[:-1])
except IndexError:
self.eccfs.append(None)
def __repr__(self):
bands = ', '.join(map(repr, self.bands))
nb = len(self.bands)
return f'chromaticRV({self.n} CCFs; {nb} bands: {bands} nm)'
@property
def blue_wave_limits(self):
""" Wavelength limits for the blue RV calculations [nm] """
return self._blue_wave_limits
@blue_wave_limits.setter
def blue_wave_limits(self, vals):
assert len(vals) == 2, 'provide two wavelengths (start and end) in nm'
self.blue_orders = self._find_orders(vals)
self._blue_wave_limits = vals
self._blueRV, self._midRV, self._redRV = None, None, None
@property
def mid_wave_limits(self):
""" Wavelength limits for the mid RV calculations [nm] """
return self._mid_wave_limits
@mid_wave_limits.setter
def mid_wave_limits(self, vals):
assert len(vals) == 2, 'provide two wavelengths (start and end) in nm'
self.mid_orders = self._find_orders(vals)
self._mid_wave_limits = vals
self._blueRV, self._midRV, self._redRV = None, None, None
@property
def red_wave_limits(self):
""" Wavelength limits for the red RV calculations [nm] """
return self._red_wave_limits
@red_wave_limits.setter
def red_wave_limits(self, vals):
assert len(vals) == 2, 'provide two wavelengths (start and end) in nm'
self.red_orders = self._find_orders(vals)
self._red_wave_limits = vals
self._blueRV, self._midRV, self._redRV = None, None, None
@property
def slice_policy(self):
""" How to deal with the two order slices.
0: use both slices by adding the corresponding CCFs (default)
1: use only the first slice
2: use only the second slice
"""
return self._slice_policy
@slice_policy.setter
def slice_policy(self, val):
self._slice_policy = val
self.blue_orders = self._find_orders(self._blue_wave_limits)
self.mid_orders = self._find_orders(self._mid_wave_limits)
self.red_orders = self._find_orders(self._red_wave_limits)
self._blueRV, self._midRV, self._redRV = None, None, None
def _find_orders(self, wave_limits):
order_start = bisect.bisect(self.wave_starts, wave_limits[0])
order_end = bisect.bisect(self.wave_ends, wave_limits[1])
order_start = order_start * 2
order_end = order_end * 2 + 1
if self.slice_policy == 0: # using both order slices
step = 1
return slice(order_start, order_end+1, step)
elif self.slice_policy == 1: # using first slice only
step = 2
return slice(order_start, order_end+1, step)
elif self.slice_policy == 2: # using second slice only
step = 2
return slice(order_start+1, order_end+1, step)
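    # Each physical echelle order is stored as two consecutive CCF slices,
    # which is why _find_orders doubles the bisect indices and steps by 2
    # when only the first or the second slice of every order is requested.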
def get_rv(self, orders):
""" Get radial velocity, FWHM and uncertainties for specific orders
orders : int, slice, tuple, array
The CCFs of these orders will be summed to calculate the RV.
If int, only the CCF at that index will be used.
If slice, orders in the slice's range will be used.
If tuple, should have length 2 or 3 and correspond to minimum index,
maximum index and possibly step, for which orders to use
If array, should contain indices of orders to use
"""
if isinstance(orders, int):
orders = slice(orders, orders + 1)
elif isinstance(orders, tuple):
orders = slice(*orders)
rv, rve = [], []
fwhm, fwhme = [], []
for i, full_ccf, full_eccf in zip(self.I, self.ccfs, self.eccfs):
# create the CCF
ccf = full_ccf[orders].sum(axis=0)
if full_eccf is not None:
eccf = np.sqrt(np.square(full_eccf[orders]).sum(axis=0))
else:
eccf = None
# calculate RV and RV error
rv.append(RV(i.rv, ccf, eccf))
rve.append(RVerror(i.rv, ccf, eccf))
# rve.append(np.nan)
# calculate FWHM and FWHM error
fwhm.append(FWHM(i.rv, ccf))
fwhme.append(FWHMerror(i.rv, ccf, eccf))
# if not has_errors:
# warnings.warn(
# 'Cannot access CCF uncertainties to calculate RV error')
# return np.array(rv), None
# else:
return map(np.array, (rv, rve, fwhm, fwhme))
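    # Note on get_rv: per-order CCF uncertainties are combined in quadrature
    # (square root of the summed squared eccf) before being propagated into
    # the RV and FWHM uncertainties of the summed CCF.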
@property
def bands(self):
""" Wavelength limits of blue, mid, and red bands """
b = self.blue_wave_limits, self.mid_wave_limits, self.red_wave_limits
return b
@cached_property
def time(self):
""" BJD of observations """
        return np.fromiter((i.bjd for i in self.I), float, self.n)
@property
def blueRV(self):
if self._blueRV is None:
out = self.get_rv(self.blue_orders)
self._blueRV, self._blueRVerror, self._blueFWHM, self._blueFWHMerror = out
return self._blueRV
@property
def midRV(self):
if self._midRV is None:
out = self.get_rv(self.mid_orders)
self._midRV, self._midRVerror, self._midFWHM, self._midFWHMerror = out
return self._midRV
@property
def redRV(self):
if self._redRV is None:
out = self.get_rv(self.red_orders)
self._redRV, self._redRVerror, self._redFWHM, self._redFWHMerror = out
return self._redRV
@property
def fullRV(self):
        return np.fromiter((i.RV for i in self.I), float, self.n)
@property
def fullRVerror(self):
        return np.fromiter((i.RVerror for i in self.I), float, self.n)
def bin(self, night_indices):
u = np.unique(night_indices)
ccfs = np.array(self.ccfs) # shape: (Nobs, Norders, Nrv)
ccfsb = [ccfs[night_indices == i].mean(axis=0) for i in u]
ccfsb = np.array(ccfsb) # shape: (Nobs_binned, Norders, Nrv)
self.ccfs = ccfsb
eccfs = np.array(self.eccfs) # shape: (Nobs, Norders, Nrv)
eccfsb = [sqrt(sum(eccfs[night_indices == i]**2, axis=0)) for i in u]
eccfsb = np.array(eccfsb) # shape: (Nobs_binned, Norders, Nrv)
self.eccfs = eccfsb
ccf = np.array(self.ccf) # shape: (Nobs, Nrv)
ccfb = [ccf[night_indices == i].mean(axis=0) for i in u]
ccfb = np.array(ccfb) # shape: (Nobs_binned, Nrv)
self.ccf = ccfb
rv = self.I[0].rv
self.indicators = [Indicators(rv, ccf.sum(axis=0)) for ccf in ccfsb]
self.I = self.indicators
self.n = len(self.I)
def plot(self, periodogram=False, mask=None, obs=None):
ncols = 2 if periodogram else 1
fig, axs = plt.subplots(3 + 1, ncols, constrained_layout=True)
axs = axs.ravel()
if periodogram:
indices_plots = np.arange(0, 8, 2)
indices_pers = np.arange(1, 8, 2)
for ax in axs[indices_pers[1:]]:
ax.sharex(axs[indices_pers[0]])
ax.sharey(axs[indices_pers[0]])
else:
indices_plots = np.arange(0, 4)
for ax in axs[indices_plots[1:]]:
ax.sharex(axs[indices_plots[0]])
ax.sharey(axs[indices_plots[0]])
kw = dict(fmt='o', ms=2)
if mask is None:
mask = | np.ones_like(self.time, dtype=bool) | numpy.ones_like |
# ******************************************************************************
# Copyright (c) 2020, Intel Corporation
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of Intel Corporation nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# ******************************************************************************
import pytest
import numpy as np
from numpy.testing import (assert_allclose, assert_equal, assert_almost_equal)
from skipp.transform import AffineTransform
from skipp.transform import resize
from skipp.transform import rotate
from skipp.transform import warp
# Used acronyms
# Intel(R) Integrated Performance Primitives (Intel(R) IPP)
def test_invalid_input():
with pytest.raises(ValueError):
AffineTransform(np.zeros((2, 3)))
with pytest.raises(ValueError):
AffineTransform(matrix= | np.zeros((2, 3)) | numpy.zeros |
from src.AutomaticDifferentiation.AutomaticDifferentiation import AutoDiff
from src.AutomaticDifferentiation.AutomaticDifferentiation import ForwardFunctions
import pytest
import numpy as np
from numpy.testing import assert_almost_equal
class TestFunctions:
##########################################
# __init__, float
def test_init_value(self):
"""check that variables can be initialized correctly"""
value = 2.0
derivative = 1.0
label = 'x'
x = AutoDiff(value, der=derivative, label=label)
assert x.val == value
def test_init_der(self):
"""check that variables can be initialized correctly"""
value = 2.0
derivative = 1.0
label = 'x'
x = AutoDiff(value, der=derivative, label=label)
assert x.der == derivative
def test_init_der_list(self):
"""check that variables can be initialized correctly"""
value = 2.0
derivative = [1.0]
label = 'x'
x = AutoDiff(value, der=derivative, label=label)
assert x.der == derivative
def test_init_der_array(self):
"""check that variables can be initialized correctly"""
value = 2.0
derivative = np.array([1.0])
label = 'x'
x = AutoDiff(value, der=derivative, label=label)
assert x.der == derivative
def test_init_label(self):
"""check that variables can be initialized correctly"""
value = 2.0
derivative = 1.0
label = 'x'
x = AutoDiff(value, der=derivative, label=label)
assert x.label == [label]
def test_init_label_list(self):
"""check that variables can be initialized correctly"""
value = 2.0
derivative = 1.0
label = ['x']
x = AutoDiff(value, der=derivative, label=label)
assert x.label == label
##########################################
# __init__, array
def test_init_value_array(self):
"""check that variables can be initialized correctly"""
value = np.array([1,2,3])
derivative = [1.0, 1.0, 1.0]
label = ['x','y','z']
x = AutoDiff(value, der=derivative, label=label)
assert np.array_equal(x.val, value.reshape(value.shape[0],1))
def test_init_der_2array(self):
"""check that variables can be initialized correctly"""
value = np.array([2.0, 1.0])
derivative = np.array([1.0, 1.0])
label = ['x', 'y']
x = AutoDiff(value, der=derivative, label=label)
assert np.array_equal(x.der, np.array(derivative, dtype=np.float64))
def test_init_der_narray(self):
"""check that variables can be initialized correctly"""
value = np.array([1,2,3])
derivative = [1.0, 1.0, 1.0]
label = ['x','y','z']
x = AutoDiff(value, der=derivative, label=label)
assert np.array_equal(x.der, np.array(derivative, dtype=np.float64))
def test_init_label_array(self):
"""check that variables can be initialized correctly"""
value = np.array([1,2,3])
derivative = [1.0, 1.0, 1.0]
label = ['x','y','z']
x = AutoDiff(value, der=derivative, label=label)
assert x.label == label
##########################################
# function
def test_init_function_label(self):
"""check that functions can be created correctly"""
x = AutoDiff(2, der=1, label="x")
y = AutoDiff(3, der=1, label="y")
f = ForwardFunctions([x.sqrt(), y.sqrt()])
assert f.labels == ['x','y']
def test_init_function_value(self):
"""check that functions can be created correctly"""
x = AutoDiff(2, der=1, label="x")
y = AutoDiff(3, der=1, label="y")
f = ForwardFunctions([x.sqrt(), y.sqrt()])
assert f.values == [[np.sqrt(2)],[np.sqrt(3)]]
def test_init_function_der(self):
"""check that functions can be created correctly"""
x = AutoDiff(2, der=1, label="x")
y = AutoDiff(3, der=1, label="y")
f = ForwardFunctions([x.sqrt(), y.sqrt()])
assert f.jacobians == [[x.sqrt().der[0][0],0],[0, y.sqrt().der[0][0]]]
#TODO: write tests for exceptions in this section
##########################################
# Addition
def test_add_val(self):
"""check that __add__ has been overwritten correctly"""
x = AutoDiff(2.0, der=1, label="x")
y = AutoDiff(3.0, der=1, label="y")
z = AutoDiff(4.0, der=1, label="z")
f = ForwardFunctions([x + y, y + z])
assert f.values == [[5.0],[7.0]]
def test_add_der(self):
"""check that __add__ has been overwritten correctly"""
x = AutoDiff(2.0, der=1, label="x")
y = AutoDiff(3.0, der=1, label="y")
z = AutoDiff(4.0, der=1, label="z")
f = ForwardFunctions([x + y, y + z])
assert f.jacobians == [[1.0,1.0,0],[0, 1.0, 1.0]]
def test_add_label(self):
"""check that __add__ has been overwritten correctly"""
x = AutoDiff(2.0, der=1, label="x")
y = AutoDiff(3.0, der=1, label="y")
z = AutoDiff(4.0, der=1, label="z")
f = ForwardFunctions([x + y, y + z])
assert f.labels == ['x','y','z']
def test_radd_val(self):
"""check that __radd__ has been overwritten correctly"""
x = AutoDiff(2, der=1, label="x")
y = AutoDiff(3, der=1, label="y")
f = ForwardFunctions([3.0 + x, x + y])
assert f.values == [[5.0],[5.0]]
def test_radd_der(self):
"""check that __radd__ has been overwritten correctly"""
x = AutoDiff(2, der=1, label="x")
y = AutoDiff(3, der=1, label="y")
f = ForwardFunctions([3.0 + x, x + y])
assert f.jacobians == [[1.0,0],[1.0,1.0]]
def test_radd_label(self):
"""check that __radd__ has been overwritten correctly"""
x = AutoDiff(2, der=1, label="x")
y = AutoDiff(3, der=1, label="y")
f = ForwardFunctions([3.0 + x, x + y])
assert f.labels == ['x','y']
##########################################
# Subtraction
def test_sub_val(self):
"""check that __add__ has been overwritten correctly"""
x = AutoDiff(2, der=1, label="x")
y = AutoDiff(3, der=1, label="y")
f = ForwardFunctions([x - 3.0, x + y])
assert f.values == [[-1.0], [5.0]]
def test_sub_der(self):
"""check that __add__ has been overwritten correctly"""
x = AutoDiff(2, der=1, label="x")
y = AutoDiff(3, der=1, label="y")
f = ForwardFunctions([x - 3.0, x + y])
assert f.jacobians == [[1.0,0], [1.0, 1.0]]
def test_sub_label(self):
"""check that __add__ has been overwritten correctly"""
x = AutoDiff(2, der=1, label="x")
y = AutoDiff(3, der=1, label="y")
f = ForwardFunctions([x - 3.0, x + y])
assert f.labels == ['x','y']
def test_rsub_val(self):
"""check that __radd__ has been overwritten correctly"""
x = AutoDiff(2, der=1, label="x")
y = AutoDiff(3, der=1, label="y")
f = ForwardFunctions([3.0 - x, x + y])
assert f.values == [[1.0], [5.0]]
def test_rsub_der(self):
"""check that __radd__ has been overwritten correctly"""
x = AutoDiff(2, der=1, label="x")
y = AutoDiff(3, der=1, label="y")
f = ForwardFunctions([3.0 - x, x + y])
assert f.jacobians == [[-1.0,0], [1.0, 1.0]]
def test_rsub_label(self):
"""check that __radd__ has been overwritten correctly"""
x = AutoDiff(2, der=1, label="x")
y = AutoDiff(3, der=1, label="y")
f = ForwardFunctions([3.0 - x, x + y])
assert f.labels == ['x', 'y']
##########################################
# multiplication
def test_mul_val(self):
"""check that __mul__ has been overwritten correctly"""
x = AutoDiff(2, der=1, label="x")
y = AutoDiff(3, der=1, label="y")
f = ForwardFunctions([x * 3, x * y, y * x])
assert f.values == [[6.0],[6.0],[6.0]]
def test_mul_der(self):
"""check that __mul__ has been overwritten correctly"""
x = AutoDiff(2, der=1, label="x")
y = AutoDiff(3, der=1, label="y")
f = ForwardFunctions([x * 3, x * y, y * x])
assert f.jacobians == [[3.0,0],[3.0,2.0],[3.0, 2.0]]
def test_mul_label(self):
"""check that __mul__ has been overwritten correctly"""
x = AutoDiff(2, der=1, label="x")
y = AutoDiff(3, der=1, label="y")
f = ForwardFunctions([x * 3, x * y, y * x])
assert f.labels == ['x','y']
def test_mul_array(self):
"""check that __mul__ has been overwritten correctly"""
X = AutoDiff(np.array([2,2]), der=[1,1], label=["x1","x2"])
Y = AutoDiff(np.array([1,3]), der=[1,1], label=["y1","y2"])
f = ForwardFunctions([X * 3, Y * 4])
assert f.values == [[6.0],[4.0]]
def test_rmul_val(self):
"""check that __rmul__ has been overwritten correctly"""
x = AutoDiff(2, der=1, label="x")
y = AutoDiff(3, der=1, label="y")
f = ForwardFunctions([3 * x, x * y, y * x])
assert f.values == [[6.0],[6.0],[6.0]]
def test_rmul_der(self):
"""check that __rmul__ has been overwritten correctly"""
x = AutoDiff(2, der=1, label="x")
y = AutoDiff(3, der=1, label="y")
f = ForwardFunctions([3 * x, x * y, y * x])
assert f.jacobians == [[3.0,0],[3.0,2.0],[3.0, 2.0]]
def test_rmul_label(self):
"""check that __rmul__ has been overwritten correctly"""
x = AutoDiff(2, der=1, label="x")
y = AutoDiff(3, der=1, label="y")
f = ForwardFunctions([3 * x, x * y, y * x])
assert f.labels == ['x','y']
##########################################
# true division
def test_truediv_val(self):
"""check that __truediv__ has been overwritten correctly"""
x = AutoDiff(2, der=1, label="x")
y = AutoDiff(4, der=1, label="y")
f = ForwardFunctions([x / 2, x / y])
assert f.values == [[1.0],[0.5]]
def test_truediv_der(self):
"""check that __truediv__ has been overwritten correctly"""
x = AutoDiff(2, der=1, label="x")
y = AutoDiff(4, der=1, label="y")
f = ForwardFunctions([x / 2, x / y])
assert f.jacobians == [[0.5, 0],[0.25, -0.125]]
def test_truediv_label(self):
"""check that __truediv__ has been overwritten correctly"""
x = AutoDiff(2, der=1, label="x")
y = AutoDiff(4, der=1, label="y")
f = ForwardFunctions([x / 2, x / y])
assert f.labels == ['x','y']
def test_rtruediv_val(self):
"""check that __rtruediv__ has been overwritten correctly"""
x = AutoDiff(2, der=1, label="x")
y = AutoDiff(3, der=1, label="y")
f = ForwardFunctions([2 / x, x + y])
assert f.values == [[1.0],[5.0]]
def test_rtruediv_der(self):
"""check that __rtruediv__ has been overwritten correctly"""
x = AutoDiff(2, der=1, label="x")
y = AutoDiff(3, der=1, label="y")
f = ForwardFunctions([2 / x, x + y])
assert f.jacobians == [[-0.5,0],[1.0, 1.0]]
def test_rtruediv_label(self):
"""check that __rtruediv__ has been overwritten correctly"""
x = AutoDiff(2, der=1, label="x")
y = AutoDiff(3, der=1, label="y")
f = ForwardFunctions([2 / x, x + y])
assert f.labels == ['x','y']
##########################################
# negation
def test_neg_val(self):
"""check that __truediv__ has been overwritten correctly"""
x = AutoDiff(2, der=1, label="x")
z = AutoDiff(2, der=1, label="z")
f = ForwardFunctions([-x / z, x / z])
assert f.values == [[-1.0], [1.0]]
def test_neg_der(self):
"""check that __truediv__ has been overwritten correctly"""
x = AutoDiff(2, der=1, label="x")
z = AutoDiff(2, der=1, label="z")
f = ForwardFunctions([-x / z, x / z])
assert f.jacobians == [[-0.5, 0.5], [0.5, -0.5]]
def test_neg_label(self):
"""check that __truediv__ has been overwritten correctly"""
x = AutoDiff(2, der=1, label="x")
z = AutoDiff(2, der=1, label="z")
f = ForwardFunctions([-x / z, x / z])
assert f.labels == ['x','z']
##########################################
# power
def test_pow_val(self):
"""check that __mul__ has been overwritten correctly"""
x = AutoDiff(2, der=1, label="x")
y = AutoDiff(3, der=1, label="y")
f = ForwardFunctions([x ** 2, y ** x])
assert f.values == [[4.0],[9.0]]
def test_pow_der(self):
"""check that __mul__ has been overwritten correctly"""
x = AutoDiff(2, der=1, label="x")
y = AutoDiff(1, der=1, label="y")
f = ForwardFunctions([x ** 2, y ** x])
assert f.jacobians == [[4.0,0],[0.0, 2.0]]
def test_pow_label(self):
"""check that __mul__ has been overwritten correctly"""
x = AutoDiff(2, der=1, label="x")
y = AutoDiff(1, der=1, label="y")
f = ForwardFunctions([x ** 2, y ** x])
assert f.labels == ['x','y']
def test_rpow_val(self):
"""check that __rmul__ has been overwritten correctly"""
x = AutoDiff(2, der=1, label="x")
y = AutoDiff(3, der=1, label="y")
f = ForwardFunctions([2 ** x, 2 ** (x + y)])
assert f.values == [[4.0],[32.0]]
def test_rpow_der(self):
"""check that __rmul__ has been overwritten correctly"""
x = AutoDiff(1, der=1, label="x")
y = AutoDiff(1, der=1, label="y")
f = ForwardFunctions([2 ** x, 2 ** (x + y)])
assert f.jacobians == [[np.log(2)*2*1,0],[np.log(2)*4*1.0,np.log(2)*4*1.0]]
def test_rpow_label(self):
"""check that __rmul__ has been overwritten correctly"""
x = AutoDiff(1, der=1, label="x")
y = AutoDiff(1, der=1, label="y")
f = ForwardFunctions([2 ** x, 2 ** (x + y)])
assert f.labels == ['x','y']
##########################################
# sin
def test_sin_val(self):
"""check that sin function has been correctly implemented"""
x = AutoDiff(np.pi / 2, der=1, label="x")
y = AutoDiff(np.pi / 2, der=1, label="y")
f = ForwardFunctions([x.sin(), (x + y).sin()])
e = 1e-8
assert f.values[0] == [1.0] and f.values[1][0] < e
def test_sin_der(self):
"""check that sin function has been correctly implemented"""
x = AutoDiff(np.pi / 2, der=1, label="x")
y = AutoDiff(np.pi / 2, der=1, label="y")
f = ForwardFunctions([x.sin(), (x + y).sin()])
assert_almost_equal(f.jacobians,[[0,0],[-1,-1]])
def test_sin_label(self):
"""check that sin function has been correctly implemented"""
x = AutoDiff(np.pi / 2, der=1, label="x")
y = AutoDiff(np.pi / 2, der=1, label="y")
f = ForwardFunctions([x.sin(), (x + y).sin()])
assert f.labels == ['x','y']
##########################################
# cos
def test_cos_val(self):
"""check that cos function has been correctly implemented"""
x = AutoDiff(np.pi / 2, der=1, label="x")
y = AutoDiff(np.pi / 2, der=1, label="y")
f = ForwardFunctions([x.cos(), (x + y).cos()])
assert_almost_equal(f.values,[[0],[-1]])
def test_cos_der(self):
"""check that cos function has been correctly implemented"""
x = AutoDiff(np.pi / 2, der=1, label="x")
y = AutoDiff(np.pi / 2, der=1, label="y")
f = ForwardFunctions([x.cos(), (x + y).cos()])
assert_almost_equal(f.jacobians,[[-1,0],[0,0]])
def test_cos_label(self):
"""check that cos function has been correctly implemented"""
x = AutoDiff(np.pi / 2, der=1, label="x")
y = AutoDiff(np.pi / 2, der=1, label="y")
f = ForwardFunctions([x.cos(), (x + y).cos()])
assert f.labels == ['x','y']
##########################################
# tan
def test_tan_val(self):
"""check that tan function has been correctly implemented"""
x = AutoDiff(np.pi, der=1, label="x")
y = AutoDiff(np.pi, der=1, label="y")
f = ForwardFunctions([x.tan(), (x + y).tan()])
assert_almost_equal(f.values, [[0],[0]])
def test_tan_der(self):
"""check that tan function has been correctly implemented"""
x = AutoDiff(np.pi, der=1, label="x")
y = AutoDiff(np.pi, der=1, label="y")
f = ForwardFunctions([x.tan(), (x + y).tan()])
assert_almost_equal(f.jacobians,[[1,0],[1,1]])
def test_tan_label(self):
"""check that tan function has been correctly implemented"""
x = AutoDiff(np.pi, der=1, label="x")
y = AutoDiff(np.pi, der=1, label="y")
f = ForwardFunctions([x.tan(), (x + y).tan()])
assert f.labels == ['x','y']
##########################################
# sinh
def test_sinh_val(self):
"""check that sinh function has been correctly implemented"""
x = AutoDiff(2, der=1, label="x")
y = AutoDiff(3, der=1, label="y")
f = ForwardFunctions([x.sinh(), y.sinh()])
assert_almost_equal(f.values, [np.sinh(x.val).tolist()[0],np.sinh(y.val).tolist()[0]])
def test_sinh_der(self):
"""check that sinh function has been correctly implemented"""
x = AutoDiff(2, der=1, label="x")
y = AutoDiff(3, der=1, label="y")
f = ForwardFunctions([x.sinh(), y.sinh()])
xder = np.cosh(x.val).tolist()[0][0]
yder = np.cosh(y.val).tolist()[0][0]
assert_almost_equal(f.jacobians,[[xder,0],[0,yder]])
def test_sinh_label(self):
"""check that sinh function has been correctly implemented"""
x = AutoDiff(2, der=1, label="x")
y = AutoDiff(3, der=1, label="y")
f = ForwardFunctions([x.sinh(), y.sinh()])
assert f.labels == ['x','y']
##########################################
# cosh
def test_cosh_val(self):
"""check that cosh function has been correctly implemented"""
x = AutoDiff(2, der=1, label="x")
y = AutoDiff(3, der=1, label="y")
f = ForwardFunctions([x.cosh(), y.cosh()])
assert_almost_equal(f.values, [np.cosh(x.val).tolist()[0], | np.cosh(y.val) | numpy.cosh |
import pydicom as dicom
import numpy as np
from numpy import interp
import os
import cv2
from skimage import morphology,restoration,transform
from torch.utils.data import DataLoader,Dataset
from torchvision import *
from sklearn.model_selection import train_test_split as tts
from progress.bar import IncrementalBar
from PIL import Image
from keras_preprocessing.image import img_to_array
print("packages imported")
class Progress:
"""
Class to print progress bar instead of printing n times
"""
def __init__(self, value, end,buffer, title='Preprocessing'):
self.title = title
#when calling in a for loop it doesn't include the last number
self.end = end -1
self.buffer = buffer
self.value = value
self.progress()
def progress(self):
maped = int(interp(self.value, [0, self.end], [0, self.buffer]))
print(f'{self.title}: [{"#"*maped}{"-"*(self.buffer - maped)}]{self.value}/{self.end} {((self.value/self.end)*100):.2f}%', end='\r')
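# Minimal usage sketch (illustrative; the helper name and values are not part
# of the original script): the bar is redrawn on every iteration by
# constructing a new Progress instance.
def _progress_demo(n=50):
    for i in range(n):
        Progress(i, n, buffer=20, title='Demo')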
class data_preprocessing():
"""
Class containing all functions used for preprocessing the binary numpy objects
obtained from the dicom files
"""
    def image_preprocessor(self, path_):
        """
        Loads the normalised images saved as a binary numpy (.npy) file,
        preprocesses them with morphological transformations and returns the images in a numpy array.
"""
count=0
processed_array=[]
## loading the normalised images saved as npy file ##
norm_images=np.load(path_,allow_pickle=True)
print("Images loaded from npy,beginning preprocessing now......")
bar=IncrementalBar('Preprocessing',max=norm_images.shape[0])
## iterating through the images to perform operations on each image ##
for i in range(norm_images.shape[0]):
## defining a mask ##
norm_images[i] = norm_images[i] * (np.max(norm_images[i]) - np.min(norm_images[i])) + np.min(norm_images[i])
mask=morphology.opening(norm_images[i], | np.ones((5, 5)) | numpy.ones |
import numpy as np
import os
NUM_LABELS = {'ENZYMES':3, 'COLLAB':0, 'IMDBBINARY':0, 'IMDBMULTI':0, 'MUTAG':7, 'NCI1':37, 'NCI109':38, 'PROTEINS':3, 'PTC':22, 'DD':89}
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
def load_dataset(ds_name):
"""
construct graphs and labels from dataset text in data folder
:param ds_name: name of data set you want to load
:return: two numpy arrays of shape (num_of_graphs).
    the graphs array contains in each entry an ndarray representing the adjacency matrix of a graph of shape (num_vertex, num_vertex, num_vertex_labels)
    the labels array at index i represents the class of graphs[i]
"""
directory = BASE_DIR + "/data/benchmark_graphs/{0}/{0}.txt".format(ds_name)
graphs = []
labels = []
with open(directory, "r") as data:
num_graphs = int(data.readline().rstrip().split(" ")[0])
for i in range(num_graphs):
graph_meta = data.readline().rstrip().split(" ")
num_vertex = int(graph_meta[0])
curr_graph = np.zeros(shape=(num_vertex, num_vertex, NUM_LABELS[ds_name]+1), dtype=np.float32)
labels.append(int(graph_meta[1]))
for j in range(num_vertex):
vertex = data.readline().rstrip().split(" ")
if NUM_LABELS[ds_name] != 0:
curr_graph[j, j, int(vertex[0])+1]= 1.
for k in range(2,len(vertex)):
curr_graph[j, int(vertex[k]), 0] = 1.
curr_graph = noramlize_graph(curr_graph)
graphs.append(curr_graph)
graphs = np.array(graphs)
for i in range(graphs.shape[0]):
graphs[i] = np.transpose(graphs[i], [2,0,1])
return graphs, np.array(labels)
def get_train_val_indexes(num_val, ds_name):
"""
reads the indexes of a specific split to train and validation sets from data folder
:param num_val: number of the split
:param ds_name: name of data set
:return: indexes of the train and test graphs
"""
directory = BASE_DIR + "/data/benchmark_graphs/{0}/10fold_idx".format(ds_name)
train_file = "train_idx-{0}.txt".format(num_val)
train_idx=[]
with open(os.path.join(directory, train_file), 'r') as file:
for line in file:
train_idx.append(int(line.rstrip()))
test_file = "test_idx-{0}.txt".format(num_val)
test_idx = []
with open(os.path.join(directory, test_file), 'r') as file:
for line in file:
test_idx.append(int(line.rstrip()))
return train_idx, test_idx
def get_parameter_split(ds_name):
"""
reads the indexes of a specific split to train and validation sets from data folder
:param ds_name: name of data set
:return: indexes of the train and test graphs
"""
directory = BASE_DIR + "/data/benchmark_graphs/{0}/".format(ds_name)
train_file = "tests_train_split.txt"
train_idx=[]
with open(os.path.join(directory, train_file), 'r') as file:
for line in file:
train_idx.append(int(line.rstrip()))
test_file = "tests_val_split.txt"
test_idx = []
with open(os.path.join(directory, test_file), 'r') as file:
for line in file:
test_idx.append(int(line.rstrip()))
return train_idx, test_idx
def group_same_size(graphs, labels):
"""
group graphs of same size to same array
:param graphs: numpy array of shape (num_of_graphs) of numpy arrays of graphs adjacency matrix
:param labels: numpy array of labels
    :return: two numpy arrays. graphs array in the shape (num of different size graphs) where each entry is a numpy array
    in the shape (number of graphs with this size, num vertex, num vertex, num vertex labels)
    the second array is labels with corresponding shape
"""
sizes = list(map(lambda t: t.shape[1], graphs))
indexes = np.argsort(sizes)
graphs = graphs[indexes]
labels = labels[indexes]
r_graphs = []
r_labels = []
one_size = []
start = 0
size = graphs[0].shape[1]
for i in range(len(graphs)):
if graphs[i].shape[1] == size:
one_size.append(np.expand_dims(graphs[i], axis=0))
else:
r_graphs.append(np.concatenate(one_size, axis=0))
r_labels.append(np.array(labels[start:i]))
start = i
one_size = []
size = graphs[i].shape[1]
one_size.append(np.expand_dims(graphs[i], axis=0))
r_graphs.append(np.concatenate(one_size, axis=0))
r_labels.append(np.array(labels[start:]))
return r_graphs, r_labels
# helper method to shuffle each same size graphs array
def shuffle_same_size(graphs, labels):
r_graphs, r_labels = [], []
for i in range(len(labels)):
curr_graph, curr_labels = shuffle(graphs[i], labels[i])
r_graphs.append(curr_graph)
r_labels.append(curr_labels)
return r_graphs, r_labels
def split_to_batches(graphs, labels, size):
"""
split the same size graphs array to batches of specified size
last batch is in size num_of_graphs_this_size % size
:param graphs: array of arrays of same size graphs
:param labels: the corresponding labels of the graphs
:param size: batch size
    :return: two arrays. graphs array of arrays in size (batch, num vertex, num vertex, num vertex labels)
    and the corresponding labels
"""
r_graphs = []
r_labels = []
for k in range(len(graphs)):
r_graphs = r_graphs + np.split(graphs[k], [j for j in range(size, graphs[k].shape[0], size)])
r_labels = r_labels + np.split(labels[k], [j for j in range(size, labels[k].shape[0], size)])
return np.array(r_graphs), np.array(r_labels)
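# Minimal usage sketch (illustrative; the helper name and synthetic data are
# not part of the original module): four same-size graphs grouped by size and
# split into batches of two.
def _batching_example():
    graphs = np.array([np.zeros((1, 3, 3)) for _ in range(4)])
    labels = np.array([0, 1, 0, 1])
    grouped_graphs, grouped_labels = group_same_size(graphs, labels)
    return split_to_batches(grouped_graphs, grouped_labels, size=2)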
# helper method to shuffle the same way graphs and labels arrays
def shuffle(graphs, labels):
shf = np.arange(labels.shape[0], dtype=np.int32)
np.random.shuffle(shf)
return np.array(graphs)[shf], labels[shf]
def noramlize_graph(curr_graph):
split = np.split(curr_graph, [1], axis=2)
adj = np.squeeze(split[0], axis=2)
deg = np.sqrt(np.sum(adj, 0))
deg = np.divide(1., deg, out=np.zeros_like(deg), where=deg!=0)
normal = np.diag(deg)
norm_adj = np.expand_dims(np.matmul(np.matmul(normal, adj), normal), axis=2)
ones = np.ones(shape=(curr_graph.shape[0], curr_graph.shape[1], curr_graph.shape[2]), dtype=np.float32)
spred_adj = np.multiply(ones, norm_adj)
labels= np.append(np.zeros(shape=(curr_graph.shape[0], curr_graph.shape[1], 1)), split[1], axis=2)
return np.add(spred_adj, labels)
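# Worked example (illustrative; the helper name and toy graph are assumptions):
# channel 0 of the output is the symmetrically normalised adjacency
# D^(-1/2) A D^(-1/2); each label channel is that same normalised adjacency
# plus its one-hot diagonal entries.
def _normalization_example():
    g = np.zeros((2, 2, 2), dtype=np.float32)
    g[0, 1, 0] = g[1, 0, 0] = 1.  # single undirected edge in channel 0
    g[0, 0, 1] = g[1, 1, 1] = 1.  # node labels on the diagonal of channel 1
    return noramlize_graph(g)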
if __name__ == '__main__':
graphs, labels = load_dataset("MUTAG")
a, b = get_train_val_indexes(1, "MUTAG")
print( | np.transpose(graphs[a[0]], [1, 2, 0]) | numpy.transpose |
"""
Utility functions for atmospheric retrieval with ``petitRADTRANS``.
This module was put together from many contributions by <NAME>
(MPIA).
"""
import copy
import sys
from typing import Dict, List, Optional, Tuple, Union
import matplotlib.pyplot as plt
import numpy as np
from scipy.interpolate import interp1d, PchipInterpolator
from scipy.ndimage import gaussian_filter
from typeguard import typechecked
from species.core import constants
@typechecked
def get_line_species() -> List[str]:
"""
Function to get the list of the molecular and atomic line species.
This function is not used anywhere so could be removed.
Returns
-------
list(str)
List with the line species.
"""
return [
"CH4",
"CO",
"CO_all_iso",
"CO_all_iso_HITEMP",
"CO_all_iso_Chubb",
"CO2",
"H2O",
"H2O_HITEMP",
"H2S",
"HCN",
"K",
"K_lor_cut",
"K_allard",
"K_burrows",
"NH3",
"Na",
"Na_lor_cut",
"Na_allard",
"Na_burrows",
"OH",
"PH3",
"TiO",
"TiO_all_Exomol",
"TiO_all_Plez",
"VO",
"VO_Plez",
"FeH",
"H2O_main_iso",
"CH4_main_iso",
]
@typechecked
def pt_ret_model(
temp_3: Optional[np.ndarray],
delta: float,
alpha: float,
tint: float,
press: np.ndarray,
metallicity: float,
c_o_ratio: float,
conv: bool = True,
) -> Tuple[Optional[np.ndarray], Optional[float], Optional[float]]:
"""
Pressure-temperature profile for a self-luminous atmosphere (see
Mollière et al. 2020).
Parameters
----------
temp_3 : np.ndarray, None
Array with three temperature points that are added on top of
the radiative Eddington structure (i.e. above tau = 0.1). The
temperature nodes are connected with a spline interpolation
and a prior is used such that t1 < t2 < t3 < t_connect. The
three temperature points are not used if set to ``None``.
delta : float
Proportionality factor in tau = delta * press_cgs**alpha.
alpha : float
Power law index in
        :math:`\\tau = \\delta \\, P_\\mathrm{cgs}^{\\alpha}`.
For the tau model: use the proximity to the
:math:`\\kappa_\\mathrm{rosseland}` photosphere as prior.
tint : float
Internal temperature for the Eddington model.
press : np.ndarray
Pressure profile (bar).
metallicity : float
Metallicity [Fe/H]. Required for the ``nabla_ad``
interpolation.
c_o_ratio : float
Carbon-to-oxygen ratio. Required for the ``nabla_ad``
interpolation.
conv : bool
Enforce a convective adiabat.
Returns
-------
np.ndarray
Temperature profile (K) for ``press``.
float
Pressure (bar) where the optical depth is 1.
float, None
Pressure (bar) at the radiative-convective boundary.
"""
# Convert pressures from bar to cgs units
press_cgs = press * 1e6
# Calculate the optical depth
tau = delta * press_cgs ** alpha
# Calculate the Eddington temperature
tedd = (3.0 / 4.0 * tint ** 4.0 * (2.0 / 3.0 + tau)) ** 0.25
# Import interpol_abundances here because it slows down importing
# species otherwise. Importing interpol_abundances is only slow
# the first time, which occurs at the start of the run_multinest
# method of AtmosphericRetrieval
if "poor_mans_nonequ_chem" in sys.modules:
from poor_mans_nonequ_chem.poor_mans_nonequ_chem import interpol_abundances
else:
from petitRADTRANS.poor_mans_nonequ_chem.poor_mans_nonequ_chem import interpol_abundances
ab = interpol_abundances(
np.full(tedd.shape[0], c_o_ratio),
np.full(tedd.shape[0], metallicity),
tedd,
press,
)
nabla_ad = ab["nabla_ad"]
# Enforce convective adiabat
if conv:
# Calculate the current, radiative temperature gradient
nab_rad = np.diff(np.log(tedd)) / np.diff(np.log(press_cgs))
# Extend to array of same length as pressure structure
nabla_rad = np.ones_like(tedd)
nabla_rad[0] = nab_rad[0]
nabla_rad[-1] = nab_rad[-1]
nabla_rad[1:-1] = (nab_rad[1:] + nab_rad[:-1]) / 2.0
# Where is the atmosphere convectively unstable?
conv_index = nabla_rad > nabla_ad
if np.argwhere(conv_index).size == 0:
conv_press = None
else:
conv_bound = np.amin(np.argwhere(conv_index))
conv_press = press[conv_bound]
tfinal = None
for i in range(10):
if i == 0:
t_take = copy.copy(tedd)
else:
t_take = copy.copy(tfinal)
ab = interpol_abundances(
np.full(t_take.shape[0], c_o_ratio),
np.full(t_take.shape[0], metallicity),
t_take,
press,
)
nabla_ad = ab["nabla_ad"]
# Calculate the average nabla_ad between the layers
nabla_ad_mean = nabla_ad
nabla_ad_mean[1:] = (nabla_ad[1:] + nabla_ad[:-1]) / 2.0
# What are the increments in temperature due to convection
tnew = nabla_ad_mean[conv_index] * np.mean(np.diff(np.log(press_cgs)))
# What is the last radiative temperature?
tstart = np.log(t_take[~conv_index][-1])
# Integrate and translate to temperature
# from log(temperature)
tnew = np.exp(np.cumsum(tnew) + tstart)
# Add upper radiative and lower covective
# part into one single array
tfinal = copy.copy(t_take)
tfinal[conv_index] = tnew
if np.max(np.abs(t_take - tfinal) / t_take) < 0.01:
# print('n_ad', 1./(1.-nabla_ad[conv_index]))
break
else:
tfinal = tedd
conv_press = None
# Add the three temperature-point P-T description above tau = 0.1
@typechecked
def press_tau(tau: float) -> float:
"""
Function to return the pressure in cgs units at a given
optical depth.
Parameters
----------
tau : float
Optical depth.
Returns
-------
float
Pressure (cgs) at optical depth ``tau``.
"""
return (tau / delta) ** (1.0 / alpha)
# Where is the uppermost pressure of the
# Eddington radiative structure?
p_bot_spline = press_tau(0.1)
if temp_3 is None:
tret = tfinal
else:
for i_intp in range(2):
if i_intp == 0:
# Create the pressure coordinates for the spline
# support nodes at low pressure
support_points_low = np.logspace(
np.log10(press_cgs[0]), np.log10(p_bot_spline), 4
)
# Create the pressure coordinates for the spline
# support nodes at high pressure, the corresponding
# temperatures for these nodes will be taken from
# the radiative-convective solution
support_points_high = 10.0 ** np.arange(
np.log10(p_bot_spline),
np.log10(press_cgs[-1]),
np.diff(np.log10(support_points_low))[0],
)
# Combine into one support node array, don't add
# the p_bot_spline point twice.
support_points = np.zeros(
len(support_points_low) + len(support_points_high) - 1
)
support_points[:4] = support_points_low
support_points[4:] = support_points_high[1:]
else:
# Create the pressure coordinates for the spline
# support nodes at low pressure
support_points_low = np.logspace(
np.log10(press_cgs[0]), np.log10(p_bot_spline), 7
)
# Create the pressure coordinates for the spline
# support nodes at high pressure, the corresponding
# temperatures for these nodes will be taken from
# the radiative-convective solution
support_points_high = np.logspace(
np.log10(p_bot_spline), np.log10(press_cgs[-1]), 7
)
# Combine into one support node array, don't add
# the p_bot_spline point twice.
support_points = np.zeros(
len(support_points_low) + len(support_points_high) - 1
)
support_points[:7] = support_points_low
support_points[7:] = support_points_high[1:]
# Define the temperature values at the node points
t_support = np.zeros_like(support_points)
if i_intp == 0:
tfintp = interp1d(press_cgs, tfinal)
# The temperature at p_bot_spline (from the
# radiative-convective solution)
t_support[len(support_points_low) - 1] = tfintp(p_bot_spline)
# if temp_3 is not None:
# The temperature at pressures below
# p_bot_spline (free parameters)
t_support[: len(support_points_low) - 1] = temp_3
# else:
# t_support[:3] = tfintp(support_points_low[:3])
# The temperature at pressures above p_bot_spline
# (from the radiative-convective solution)
t_support[len(support_points_low) :] = tfintp(
support_points[len(support_points_low) :]
)
else:
tfintp1 = interp1d(press_cgs, tret)
t_support[: len(support_points_low) - 1] = tfintp1(
support_points[: len(support_points_low) - 1]
)
tfintp = interp1d(press_cgs, tfinal)
# The temperature at p_bot_spline (from
# the radiative-convective solution)
t_support[len(support_points_low) - 1] = tfintp(p_bot_spline)
# print('diff', t_connect_calc - tfintp(p_bot_spline))
try:
t_support[len(support_points_low) :] = tfintp(
support_points[len(support_points_low) :]
)
except ValueError:
return None, None, None
# Make the temperature spline interpolation to be returned
# to the user tret = spline(np.log10(support_points),
# t_support, np.log10(press_cgs), order = 3)
cs = PchipInterpolator(np.log10(support_points), t_support)
tret = cs(np.log10(press_cgs))
# Return the temperature, the pressure at tau = 1
# The temperature at the connection point: tfintp(p_bot_spline)
# The last two are needed for the priors on the P-T profile.
return tret, press_tau(1.0) / 1e6, conv_press
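# Inverting the optical-depth parametrization used above,
# tau = delta * P_cgs**alpha, gives the pressure at a given optical depth as
# P = (tau / delta)**(1 / alpha) in cgs units; the 1e6 factor converts the
# tau = 1 pressure back to bar.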
@typechecked
def pt_spline_interp(
knot_press: np.ndarray,
knot_temp: np.ndarray,
pressure: np.ndarray,
pt_smooth: float = 0.3,
) -> np.ndarray:
"""
Function for interpolating the P-T nodes with a PCHIP 1-D monotonic
cubic interpolation. The interpolated temperature is smoothed with
a Gaussian kernel of width 0.3 dex in pressure (see Piette &
Madhusudhan 2020).
Parameters
----------
knot_press : np.ndarray
Pressure knots (bar).
knot_temp : np.ndarray
Temperature knots (K).
pressure : np.ndarray
Pressure points (bar) at which the temperatures is
interpolated.
pt_smooth : float, dict
Standard deviation of the Gaussian kernel that is used for
smoothing the P-T profile, after the temperature nodes
have been interpolated to a higher pressure resolution.
The argument should be given as
:math:`\\log10{P/\\mathrm{bar}}`, with the default value
set to 0.3 dex.
Returns
-------
np.ndarray
Interpolated, smoothed temperature points (K).
"""
pt_interp = PchipInterpolator(np.log10(knot_press), knot_temp)
temp_interp = pt_interp(np.log10(pressure))
log_press = np.log10(pressure)
log_diff = np.mean(np.diff(log_press))
if np.std(np.diff(log_press)) / log_diff > 1e-6:
raise ValueError("Expecting equally spaced pressures in log space.")
temp_interp = gaussian_filter(
temp_interp, sigma=pt_smooth / log_diff, mode="nearest"
)
return temp_interp
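# Minimal usage sketch (illustrative; the helper name and node values are
# assumptions): three temperature nodes interpolated onto an equally
# log-spaced pressure grid and smoothed with the default 0.3 dex kernel.
def _pt_interp_example():
    knot_press = np.array([1e-3, 1e-1, 1e1])       # bar
    knot_temp = np.array([800.0, 1200.0, 2000.0])  # K
    pressure = np.logspace(-3.0, 1.0, 80)          # equally spaced in log10(P)
    return pt_spline_interp(knot_press, knot_temp, pressure, pt_smooth=0.3)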
@typechecked
def create_pt_profile(
cube,
cube_index: Dict[str, float],
pt_profile: str,
pressure: np.ndarray,
knot_press: Optional[np.ndarray],
metallicity: float,
c_o_ratio: float,
pt_smooth: Union[float, Dict[str, float]] = 0.3,
) -> Tuple[np.ndarray, Optional[np.ndarray], Optional[float], Optional[float]]:
"""
Function for creating the P-T profile.
Parameters
----------
cube : LP_c_double
Unit cube.
cube_index : dict
Dictionary with the index of each parameter in the ``cube``.
pt_profile : str
The parametrization for the pressure-temperature profile
('molliere', 'free', 'monotonic', 'eddington').
pressure : np.ndarray
Pressure points (bar) at which the temperatures is
interpolated.
knot_press : np.ndarray, None
Pressure knots (bar), which are required when the argument of
``pt_profile`` is either 'free' or 'monotonic'.
metallicity : float
Metallicity [Fe/H].
c_o_ratio : float
        Carbon-to-oxygen ratio.
pt_smooth : float, dict
Standard deviation of the Gaussian kernel that is used for
smoothing the P-T profile, after the temperature nodes
have been interpolated to a higher pressure resolution.
The argument should be given as
:math:`\\log10{P/\\mathrm{bar}}`, with the default value
set to 0.3 dex.
Returns
-------
np.ndarray
Temperatures (K).
np.ndarray, None
Temperature at the knots (K). A ``None`` is returned if
``pt_profile`` is set to 'molliere' or 'eddington'.
float
Pressure (bar) where the optical depth is 1.
float, None
Pressure (bar) at the radiative-convective boundary.
"""
knot_temp = None
if pt_profile == "molliere":
temp, phot_press, conv_press = pt_ret_model(
np.array(
[cube[cube_index["t1"]], cube[cube_index["t2"]], cube[cube_index["t3"]]]
),
10.0 ** cube[cube_index["log_delta"]],
cube[cube_index["alpha"]],
cube[cube_index["tint"]],
pressure,
metallicity,
c_o_ratio,
)
elif pt_profile == "mod-molliere":
temp, phot_press, conv_press = pt_ret_model(
None,
10.0 ** cube[cube_index["log_delta"]],
cube[cube_index["alpha"]],
cube[cube_index["tint"]],
pressure,
metallicity,
c_o_ratio,
)
elif pt_profile in ["free", "monotonic"]:
knot_temp = []
for i in range(knot_press.shape[0]):
knot_temp.append(cube[cube_index[f"t{i}"]])
knot_temp = np.asarray(knot_temp)
temp = pt_spline_interp(knot_press, knot_temp, pressure, pt_smooth)
phot_press = None
conv_press = None
elif pt_profile == "eddington":
# Eddington approximation
# delta = kappa_ir/gravity
tau = pressure * 1e6 * 10.0 ** cube[cube_index["log_delta"]]
temp = (0.75 * cube[cube_index["tint"]] ** 4.0 * (2.0 / 3.0 + tau)) ** 0.25
phot_press = None
conv_press = None
return temp, knot_temp, phot_press, conv_press
@typechecked
def make_half_pressure_better(
p_base: Dict[str, float], pressure: np.ndarray
) -> Tuple[np.ndarray, np.ndarray]:
"""
Function for reducing the number of pressure layers from 1440 to
~100 (depending on the number of cloud species) with a refinement
around the cloud decks.
Parameters
----------
p_base : dict
Dictionary with the base of the cloud deck for all cloud
species. The keys in the dictionary are included for example
as MgSiO3(c).
pressure : np.ndarray
Pressures (bar) at high resolution (1440 points).
Returns
-------
np.ndarray
Pressures (bar) at lower resolution (60 points) but with a
refinement around the position of the cloud decks.
np.ndarray, None
The indices of the pressures that have been selected from
the input array ``pressure``.
"""
press_plus_index = np.zeros(len(pressure) * 2).reshape(len(pressure), 2)
press_plus_index[:, 0] = pressure
press_plus_index[:, 1] = range(len(pressure))
press_small = press_plus_index[::24, :]
press_plus_index = press_plus_index[::2, :]
indexes_small = press_small[:, 0] > 0.0
indexes = press_plus_index[:, 0] > 0.0
for key, P_cloud in p_base.items():
indexes_small = indexes_small & (
(np.log10(press_small[:, 0] / P_cloud) > 0.05)
| (np.log10(press_small[:, 0] / P_cloud) < -0.3)
)
indexes = indexes & (
(np.log10(press_plus_index[:, 0] / P_cloud) > 0.05)
| (np.log10(press_plus_index[:, 0] / P_cloud) < -0.3)
)
press_cut = press_plus_index[~indexes, :]
press_small_cut = press_small[indexes_small, :]
press_out = np.zeros((len(press_cut) + len(press_small_cut)) * 2).reshape(
(len(press_cut) + len(press_small_cut)), 2
)
press_out[: len(press_small_cut), :] = press_small_cut
press_out[len(press_small_cut) :, :] = press_cut
press_out = np.sort(press_out, axis=0)
return press_out[:, 0], press_out[:, 1].astype("int")
@typechecked
def create_abund_dict(
abund_in: dict,
line_species: list,
chemistry: str,
pressure_grid: str = "smaller",
indices: Optional[np.array] = None,
) -> dict:
"""
Function to update the names in the abundance dictionary.
Parameters
----------
abund_in : dict
Dictionary with the mass fractions.
line_species : list
List with the line species.
chemistry : str
Chemistry type ('equilibrium' or 'free').
pressure_grid : str
The type of pressure grid that is used for the radiative
transfer. Either 'standard', to use 180 layers both for the
atmospheric structure (e.g. when interpolating the abundances)
and 180 layers with the radiative transfer, or 'smaller' to
use 60 (instead of 180) with the radiative transfer, or 'clouds'
to start with 1440 layers but resample to ~100 layers (depending
on the number of cloud species) with a refinement around the
cloud decks. For cloudless atmospheres it is recommended to use
'smaller', which runs faster than 'standard' and provides
        sufficient accuracy. For cloudy atmospheres, one can test with
        'smaller' but it is recommended to use 'clouds' for improved
        accuracy of the fluxes.
indices : np.ndarray, None
Pressure indices from the adaptive refinement in a cloudy
atmosphere. Only required with ``pressure_grid='clouds'``.
Otherwise, the argument can be set to ``None``.
Returns
-------
dict
Dictionary with the updated names of the abundances.
"""
# create a dictionary with the updated abundance names
abund_out = {}
if indices is not None:
for item in line_species:
if chemistry == "equilibrium":
item_replace = item.replace("_R_10", "")
item_replace = item_replace.replace("_R_30", "")
item_replace = item_replace.replace("_all_iso_HITEMP", "")
item_replace = item_replace.replace("_all_iso_Chubb", "")
item_replace = item_replace.replace("_all_iso", "")
item_replace = item_replace.replace("_HITEMP", "")
item_replace = item_replace.replace("_main_iso", "")
item_replace = item_replace.replace("_lor_cut", "")
item_replace = item_replace.replace("_allard", "")
item_replace = item_replace.replace("_burrows", "")
item_replace = item_replace.replace("_all_Plez", "")
item_replace = item_replace.replace("_all_Exomol", "")
item_replace = item_replace.replace("_Plez", "")
abund_out[item] = abund_in[item_replace][indices]
elif chemistry == "free":
abund_out[item] = abund_in[item][indices]
if "Fe(c)" in abund_in:
abund_out["Fe(c)"] = abund_in["Fe(c)"][indices]
if "MgSiO3(c)" in abund_in:
abund_out["MgSiO3(c)"] = abund_in["MgSiO3(c)"][indices]
if "Al2O3(c)" in abund_in:
abund_out["Al2O3(c)"] = abund_in["Al2O3(c)"][indices]
if "Na2S(c)" in abund_in:
abund_out["Na2S(c)"] = abund_in["Na2S(c)"][indices]
if "KCL(c)" in abund_in:
abund_out["KCL(c)"] = abund_in["KCL(c)"][indices]
abund_out["H2"] = abund_in["H2"][indices]
abund_out["He"] = abund_in["He"][indices]
elif pressure_grid == "smaller":
for item in line_species:
if chemistry == "equilibrium":
item_replace = item.replace("_R_10", "")
item_replace = item_replace.replace("_R_30", "")
item_replace = item_replace.replace("_all_iso_HITEMP", "")
item_replace = item_replace.replace("_all_iso_Chubb", "")
item_replace = item_replace.replace("_all_iso", "")
item_replace = item_replace.replace("_HITEMP", "")
item_replace = item_replace.replace("_main_iso", "")
item_replace = item_replace.replace("_lor_cut", "")
item_replace = item_replace.replace("_allard", "")
item_replace = item_replace.replace("_burrows", "")
item_replace = item_replace.replace("_all_Plez", "")
item_replace = item_replace.replace("_all_Exomol", "")
item_replace = item_replace.replace("_Plez", "")
abund_out[item] = abund_in[item_replace][::3]
elif chemistry == "free":
abund_out[item] = abund_in[item][::3]
if "Fe(c)" in abund_in:
abund_out["Fe(c)"] = abund_in["Fe(c)"][::3]
if "MgSiO3(c)" in abund_in:
abund_out["MgSiO3(c)"] = abund_in["MgSiO3(c)"][::3]
if "Al2O3(c)" in abund_in:
abund_out["Al2O3(c)"] = abund_in["Al2O3(c)"][::3]
if "Na2S(c)" in abund_in:
abund_out["Na2S(c)"] = abund_in["Na2S(c)"][::3]
if "KCL(c)" in abund_in:
abund_out["KCL(c)"] = abund_in["KCL(c)"][::3]
abund_out["H2"] = abund_in["H2"][::3]
abund_out["He"] = abund_in["He"][::3]
else:
for item in line_species:
if chemistry == "equilibrium":
item_replace = item.replace("_R_10", "")
item_replace = item_replace.replace("_R_30", "")
item_replace = item_replace.replace("_all_iso_HITEMP", "")
item_replace = item_replace.replace("_all_iso_Chubb", "")
item_replace = item_replace.replace("_all_iso", "")
item_replace = item_replace.replace("_HITEMP", "")
item_replace = item_replace.replace("_main_iso", "")
item_replace = item_replace.replace("_lor_cut", "")
item_replace = item_replace.replace("_allard", "")
item_replace = item_replace.replace("_burrows", "")
item_replace = item_replace.replace("_all_Plez", "")
item_replace = item_replace.replace("_all_Exomol", "")
item_replace = item_replace.replace("_Plez", "")
abund_out[item] = abund_in[item_replace]
elif chemistry == "free":
abund_out[item] = abund_in[item]
if "Fe(c)" in abund_in:
abund_out["Fe(c)"] = abund_in["Fe(c)"]
if "MgSiO3(c)" in abund_in:
abund_out["MgSiO3(c)"] = abund_in["MgSiO3(c)"]
if "Al2O3(c)" in abund_in:
abund_out["Al2O3(c)"] = abund_in["Al2O3(c)"]
if "Na2S(c)" in abund_in:
abund_out["Na2S(c)"] = abund_in["Na2S(c)"]
if "KCL(c)" in abund_in:
abund_out["KCL(c)"] = abund_in["KCL(c)"]
abund_out["H2"] = abund_in["H2"]
abund_out["He"] = abund_in["He"]
# Correction for the nuclear spin degeneracy that was not included
# in the partition function. See Charnay et al. (2018)
if "FeH" in abund_out:
abund_out["FeH"] = abund_out["FeH"] / 2.0
return abund_out
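# Minimal usage sketch for create_abund_dict (hypothetical inputs): with
# equilibrium chemistry the opacity-table suffix is stripped before the
# lookup, and pressure_grid='smaller' selects every third pressure level:
#
#     abund_in = {"H2O": np.ones(180), "H2": np.ones(180), "He": np.ones(180)}
#     abund_out = create_abund_dict(abund_in, ["H2O_HITEMP"], "equilibrium")
#     # abund_out["H2O_HITEMP"].shape == (60,)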
@typechecked
def calc_spectrum_clear(
rt_object,
pressure: np.ndarray,
temperature: np.ndarray,
log_g: float,
c_o_ratio: Optional[float],
metallicity: Optional[float],
p_quench: Optional[float],
log_x_abund: Optional[dict],
chemistry: str,
pressure_grid: str = "smaller",
contribution: bool = False,
) -> Tuple[np.ndarray, np.ndarray, Optional[np.ndarray]]:
"""
Function to simulate an emission spectrum of a clear atmosphere.
The function supports both equilibrium chemistry
(``chemistry='equilibrium'``) and free abundances
(``chemistry='free'``).
Parameters
----------
rt_object : petitRADTRANS.radtrans.Radtrans
Instance of ``Radtrans``.
pressure : np.ndarray
Array with the pressure points (bar).
temperature : np.ndarray
Array with the temperature points (K) corresponding to
``pressure``.
log_g : float
Log10 of the surface gravity (cm s-2).
c_o_ratio : float, None
Carbon-to-oxygen ratio.
metallicity : float, None
Metallicity.
p_quench : float, None
Quenching pressure (bar).
log_x_abund : dict, None
Dictionary with the log10 of the abundances. Only required when
``chemistry='free'``.
chemistry : str
Chemistry type (``'equilibrium'`` or ``'free'``).
pressure_grid : str
The type of pressure grid that is used for the radiative
transfer. Either 'standard', to use 180 layers both for the
atmospheric structure (e.g. when interpolating the abundances)
and 180 layers with the radiative transfer, or 'smaller' to use
60 (instead of 180) with the radiative transfer, or 'clouds' to
start with 1440 layers but resample to ~100 layers (depending
on the number of cloud species) with a refinement around the
cloud decks. For cloudless atmospheres it is recommended to use
'smaller', which runs faster than 'standard' and provides
sufficient accuracy. For cloudy atmospheres, one can test with
'smaller', but it is recommended to use 'clouds' for more
accurate fluxes.
contribution : bool
Calculate the emission contribution.
Returns
-------
np.ndarray
Wavelength (um).
np.ndarray
Flux (W m-2 um-1).
np.ndarray, None
Emission contribution.
"""
# Import interpol_abundances here because it slows down importing
# species otherwise. Importing interpol_abundances is only slow the
# first time, which occurs at the start of the run_multinest method
# of AtmosphericRetrieval
if "poor_mans_nonequ_chem" in sys.modules:
from poor_mans_nonequ_chem.poor_mans_nonequ_chem import interpol_abundances
else:
from petitRADTRANS.poor_mans_nonequ_chem.poor_mans_nonequ_chem import interpol_abundances
if chemistry == "equilibrium":
# Chemical equilibrium
abund_in = interpol_abundances(
np.full(pressure.shape, c_o_ratio),
np.full(pressure.shape, metallicity),
temperature,
pressure,
Pquench_carbon=p_quench,
)
# Mean molecular weight
mmw = abund_in["MMW"]
elif chemistry == "free":
# Free abundances
# Create a dictionary with all mass fractions
abund_in = mass_fractions(log_x_abund)
# Mean molecular weight
mmw = mean_molecular_weight(abund_in)
# Create arrays of constant atmosphere abundance
for item in abund_in:
abund_in[item] *= np.ones_like(pressure)
# Create an array of a constant mean molecular weight
mmw *= np.ones_like(pressure)
# Extract every three levels when pressure_grid is set to 'smaller'
if pressure_grid == "smaller":
temperature = temperature[::3]
pressure = pressure[::3]
mmw = mmw[::3]
abundances = create_abund_dict(
abund_in,
rt_object.line_species,
chemistry,
pressure_grid=pressure_grid,
indices=None,
)
# calculate the emission spectrum
rt_object.calc_flux(
temperature, abundances, 10.0 ** log_g, mmw, contribution=contribution
)
# convert frequency (Hz) to wavelength (cm)
wavel = constants.LIGHT * 1e2 / rt_object.freq
# optionally return the emission contribution
if contribution:
contr_em = rt_object.contr_em
else:
contr_em = None
# return wavelength (micron), flux (W m-2 um-1),
# and emission contribution
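# F_lambda = F_nu * c / lambda**2; the single 1e-7 factor combines
# erg s-1 -> W (1e-7), cm-2 -> m-2 (1e4), and cm-1 -> um-1 (1e-4)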
return (
1e4 * wavel,
1e-7 * rt_object.flux * constants.LIGHT * 1e2 / wavel ** 2.0,
contr_em,
)
@typechecked
def calc_spectrum_clouds(
rt_object,
pressure: np.ndarray,
temperature: np.ndarray,
c_o_ratio: float,
metallicity: float,
p_quench: Optional[float],
log_x_abund: Optional[dict],
log_x_base: Optional[dict],
cloud_dict: Dict[str, Optional[float]],
log_g: float,
chemistry: str,
pressure_grid: str = "smaller",
plotting: bool = False,
contribution: bool = False,
tau_cloud: Optional[float] = None,
cloud_wavel: Optional[Tuple[float, float]] = None,
) -> Tuple[
Optional[np.ndarray], Optional[np.ndarray], Optional[np.ndarray], np.ndarray
]:
"""
Function to simulate an emission spectrum of a cloudy atmosphere.
Parameters
----------
rt_object : petitRADTRANS.radtrans.Radtrans
Instance of ``Radtrans``.
pressure : np.ndarray
Array with the pressure points (bar).
temperature : np.ndarray
Array with the temperature points (K) corresponding to
``pressure``.
c_o_ratio : float
Carbon-to-oxygen ratio.
metallicity : float
Metallicity.
p_quench : float, None
Quenching pressure (bar).
log_x_abund : dict, None
Dictionary with the log10 of the abundances. Only required
when ``chemistry='free'``.
log_x_base : dict, None
Dictionary with the log10 of the mass fractions at the cloud
base. Only required when the ``cloud_dict`` contains ``fsed``,
``log_kzz``, and ``sigma_lnorm``.
cloud_dict : dict
Dictionary with the cloud parameters.
log_g : float
Log10 of the surface gravity (cm s-2).
chemistry : str
Chemistry type (only ``'equilibrium'`` is supported).
pressure_grid : str
The type of pressure grid that is used for the radiative
transfer. Either 'standard', to use 180 layers both for the
atmospheric structure (e.g. when interpolating the abundances)
and 180 layers with the radiative transfer, or 'smaller' to
use 60 (instead of 180) with the radiative transfer, or
'clouds' to start with 1440 layers but resample to ~100 layers
(depending on the number of cloud species) with a refinement
around the cloud decks. For cloudless atmospheres it is
recommended to use 'smaller', which runs faster than 'standard'
and provides sufficient accuracy. For cloudy atmospheres, one
can test with 'smaller', but it is recommended to use 'clouds'
for more accurate fluxes.
plotting : bool
Create plots.
contribution : bool
Calculate the emission contribution.
tau_cloud : float, None
Total cloud optical depth that will be used for scaling the cloud
mass fractions. The mass fractions will not be scaled if the
parameter is set to ``None``.
cloud_wavel : tuple(float, float), None
Tuple with the wavelength range (um) that is used for
calculating the median optical depth of the clouds at the
gas-only photosphere and then scaling the cloud optical
depth to the value of ``log_tau_cloud``. The range of
``cloud_wavel`` should be encompassed by the range of
``wavel_range``. The full wavelength range (i.e.
``wavel_range``) is used if the argument is set to ``None``.
Returns
-------
np.ndarray, None
Wavelength (um).
np.ndarray, None
Flux (W m-2 um-1).
np.ndarray, None
Emission contribution.
np.ndarray
Array with mean molecular weight.
"""
if chemistry == "equilibrium":
# Import interpol_abundances here because it slows down
# importing species otherwise. Importing interpol_abundances
# is only slow the first time, which occurs at the start
# of the run_multinest method of AtmosphericRetrieval
if "poor_mans_nonequ_chem" in sys.modules:
from poor_mans_nonequ_chem.poor_mans_nonequ_chem import interpol_abundances
else:
from petitRADTRANS.poor_mans_nonequ_chem.poor_mans_nonequ_chem import interpol_abundances
# Interpolate the abundances, following chemical equilibrium
abund_in = interpol_abundances(
np.full(pressure.shape, c_o_ratio),
np.full(pressure.shape, metallicity),
temperature,
pressure,
Pquench_carbon=p_quench,
)
# Extract the mean molecular weight
mmw = abund_in["MMW"]
elif chemistry == "free":
# Free abundances
# Create a dictionary with all mass fractions
abund_in = mass_fractions(log_x_abund)
# Mean molecular weight
mmw = mean_molecular_weight(abund_in)
# Create arrays of constant atmosphere abundance
for item in abund_in:
abund_in[item] *= np.ones_like(pressure)
# Create an array of a constant mean molecular weight
mmw *= np.ones_like(pressure)
if log_x_base is not None:
p_base = {}
for item in log_x_base:
p_base_item = find_cloud_deck(
item,
pressure,
temperature,
metallicity,
c_o_ratio,
mmw=np.mean(mmw),
plotting=plotting,
)
abund_in[f"{item}(c)"] = np.zeros_like(temperature)
abund_in[f"{item}(c)"][pressure <= p_base_item] = (
10.0 ** log_x_base[item]
* (pressure[pressure <= p_base_item] / p_base_item)
** cloud_dict["fsed"]
)
p_base[f"{item}(c)"] = p_base_item
# Adaptive pressure refinement around the cloud base
if pressure_grid == "clouds":
_, indices = make_half_pressure_better(p_base, pressure)
else:
indices = None
abundances = create_abund_dict(
abund_in,
rt_object.line_species,
chemistry,
pressure_grid=pressure_grid,
indices=indices,
)
# Create dictionary with sedimentation parameters
# Use the same value for all cloud species
fseds = {}
for item in rt_object.cloud_species:
# The item has the form of e.g. MgSiO3(c)
# For parametrized cloud opacities,
# the number of cloud_species is zero,
# so the fseds dictionary remains empty
fseds[item] = cloud_dict["fsed"]
# Create an array with a constant eddy diffusion coefficient (cm2 s-1)
if "log_kzz" in cloud_dict:
Kzz_use = np.full(pressure.shape, 10.0 ** cloud_dict["log_kzz"])
else:
Kzz_use = None
# Adjust number of atmospheric levels
if pressure_grid == "smaller":
temperature = temperature[::3]
pressure = pressure[::3]
mmw = mmw[::3]
if "log_kzz" in cloud_dict:
Kzz_use = Kzz_use[::3]
elif pressure_grid == "clouds":
temperature = temperature[indices]
pressure = pressure[indices]
mmw = mmw[indices]
if "log_kzz" in cloud_dict:
Kzz_use = Kzz_use[indices]
# Optionally plot the cloud properties
if (
plotting
and Kzz_use is not None
and (
rt_object.wlen_bords_micron[0] != 0.5
and rt_object.wlen_bords_micron[1] != 30.0
)
):
if "CO_all_iso" in abundances:
plt.plot(abundances["CO_all_iso"], pressure, label="CO")
if "CO_all_iso_HITEMP" in abundances:
plt.plot(abundances["CO_all_iso_HITEMP"], pressure, label="CO")
if "CO_all_iso_Chubb" in abundances:
plt.plot(abundances["CO_all_iso_Chubb"], pressure, label="CO")
if "CH4" in abundances:
plt.plot(abundances["CH4"], pressure, label="CH4")
if "H2O" in abundances:
plt.plot(abundances["H2O"], pressure, label="H2O")
if "H2O_HITEMP" in abundances:
plt.plot(abundances["H2O_HITEMP"], pressure, label="H2O")
plt.xlim(1e-10, 1.0)
plt.ylim(pressure[-1], pressure[0])
plt.yscale("log")
plt.xscale("log")
plt.xlabel("Mass fraction")
plt.ylabel("Pressure (bar)")
if p_quench is not None:
plt.axhline(p_quench, ls="--", color="black")
plt.legend(loc="best")
plt.savefig("abundances.pdf", bbox_inches="tight")
plt.clf()
plt.plot(temperature, pressure, "o", ls="none", ms=2.0)
for item in log_x_base:
plt.axhline(
p_base[f"{item}(c)"], label=f"Cloud deck {item}", ls="--", color="black"
)
plt.yscale("log")
plt.ylim(1e3, 1e-6)
plt.xlim(0.0, 4000.0)
plt.savefig("pt_cloud_deck.pdf", bbox_inches="tight")
plt.clf()
for item in log_x_base:
plt.plot(abundances[f"{item}(c)"], pressure)
plt.axhline(p_base[f"{item}(c)"])
plt.yscale("log")
if np.count_nonzero(abundances[f"{item}(c)"]) > 0:
plt.xscale("log")
plt.ylim(1e3, 1e-6)
plt.xlim(1e-10, 1.0)
log_x_base_item = log_x_base[item]
fsed = cloud_dict["fsed"]
log_kzz = cloud_dict["log_kzz"]
plt.title(
f"fsed = {fsed:.2f}, log(Kzz) = {log_kzz:.2f}, "
+ f"X_b = {log_x_base_item:.2f}"
)
plt.savefig(f"{item.lower()}_clouds.pdf", bbox_inches="tight")
plt.clf()
# Turn clouds off
# abundances['MgSiO3(c)'] = np.zeros_like(pressure)
# abundances['Fe(c)'] = np.zeros_like(pressure)
# Re-initialize the pressure layers after make_half_pressure_better
if pressure_grid == "clouds":
rt_object.setup_opa_structure(pressure)
# Width of cloud particle distribution
if "sigma_lnorm" in cloud_dict:
sigma_lnorm = cloud_dict["sigma_lnorm"]
else:
sigma_lnorm = None
if "log_kappa_0" in cloud_dict:
# Cloud model 2
@typechecked
def kappa_abs(wavel_micron: np.ndarray, press_bar: np.ndarray) -> np.ndarray:
p_base = 10.0 ** cloud_dict["log_p_base"] # (bar)
kappa_0 = 10.0 ** cloud_dict["log_kappa_0"] # (cm2 g-1)
# Opacity at 1 um (cm2 g-1) as function of pressure (bar)
# See Eq. 5 in Mollière et al. 2020
kappa_p = kappa_0 * (press_bar / p_base) ** cloud_dict["fsed"]
# Opacity (cm2 g-1) as function of wavelength (um)
# See Eq. 4 in Mollière et al. 2020
kappa_grid, wavel_grid = np.meshgrid(kappa_p, wavel_micron, sparse=True)
kappa_tot = kappa_grid * wavel_grid ** cloud_dict["opa_index"]
kappa_tot[:, press_bar > p_base] = 0.0
# if (
# cloud_dict["opa_knee"] > wavel_micron[0]
# and cloud_dict["opa_knee"] < wavel_micron[-1]
# ):
# indices = np.where(wavel_micron > cloud_dict["opa_knee"])[0]
# for i in range(press_bar.size):
# kappa_tot[indices, i] = (
# kappa_tot[indices[0], i]
# * (wavel_micron[indices] / wavel_micron[indices[0]]) ** -4.0
# )
return (1.0 - cloud_dict["albedo"]) * kappa_tot
@typechecked
def kappa_scat(wavel_micron: np.ndarray, press_bar: np.ndarray) -> np.ndarray:
p_base = 10.0 ** cloud_dict["log_p_base"] # (bar)
kappa_0 = 10.0 ** cloud_dict["log_kappa_0"] # (cm2 g-1)
# Opacity at 1 um (cm2 g-1) as function of pressure (bar)
# See Eq. 5 in Mollière et al. 2020
kappa_p = kappa_0 * (press_bar / p_base) ** cloud_dict["fsed"]
# Opacity (cm2 g-1) as function of wavelength (um)
# See Eq. 4 in Mollière et al. 2020
kappa_grid, wavel_grid = np.meshgrid(kappa_p, wavel_micron, sparse=True)
kappa_tot = kappa_grid * wavel_grid ** cloud_dict["opa_index"]
kappa_tot[:, press_bar > p_base] = 0.0
# if (
# cloud_dict["opa_knee"] > wavel_micron[0]
# and cloud_dict["opa_knee"] < wavel_micron[-1]
# ):
# indices = np.where(wavel_micron > cloud_dict["opa_knee"])[0]
# for i in range(press_bar.size):
# kappa_tot[indices, i] = (
# kappa_tot[indices[0], i]
# * (wavel_micron[indices] / wavel_micron[indices[0]]) ** -4.0
# )
return cloud_dict["albedo"] * kappa_tot
elif "log_kappa_abs" in cloud_dict:
# Powerlaw absorption and scattering opacities
@typechecked
def kappa_abs(wavel_micron: np.ndarray, press_bar: np.ndarray) -> np.ndarray:
p_base = 10.0 ** cloud_dict["log_p_base"] # (bar)
kappa_0 = 10.0 ** cloud_dict["log_kappa_abs"] # (cm2 g-1)
# Opacity at 1 um (cm2 g-1) as function of pressure (bar)
kappa_p = kappa_0 * (press_bar / p_base) ** cloud_dict["fsed"]
# Opacity (cm2 g-1) as function of wavelength (um)
kappa_grid, wavel_grid = np.meshgrid(kappa_p, wavel_micron, sparse=True)
kappa_abs = kappa_grid * wavel_grid ** cloud_dict["opa_abs_index"]
kappa_abs[:, press_bar > p_base] = 0.0
return kappa_abs
@typechecked
def kappa_scat(wavel_micron: np.ndarray, press_bar: np.ndarray) -> np.ndarray:
p_base = 10.0 ** cloud_dict["log_p_base"] # (bar)
kappa_0 = 10.0 ** cloud_dict["log_kappa_sca"] # (cm2 g-1)
# Opacity at 1 um (cm2 g-1) as function of pressure (bar)
kappa_p = kappa_0 * (press_bar / p_base) ** cloud_dict["fsed"]
# Opacity (cm2 g-1) as function of wavelength (um)
kappa_grid, wavel_grid = np.meshgrid(kappa_p, wavel_micron, sparse=True)
kappa_sca = kappa_grid * wavel_grid ** cloud_dict["opa_sca_index"]
kappa_sca[:, press_bar > p_base] = 0.0
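# Beyond the turnover wavelength lambda_ray, impose a Rayleigh-like
# lambda**-4 slope on the scattering opacity, anchored to its value
# at the first wavelength past lambda_ray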
if (
cloud_dict["lambda_ray"] > wavel_micron[0]
and cloud_dict["lambda_ray"] < wavel_micron[-1]
):
indices = np.where(wavel_micron > cloud_dict["lambda_ray"])[0]
for i in range(press_bar.size):
kappa_sca[indices, i] = (
kappa_sca[indices[0], i]
* (wavel_micron[indices] / wavel_micron[indices[0]]) ** -4.0
)
return kappa_sca
elif "log_kappa_gray" in cloud_dict:
# Gray clouds with cloud top
@typechecked
def kappa_abs(wavel_micron: np.ndarray, press_bar: np.ndarray) -> np.ndarray:
p_top = 10.0 ** cloud_dict["log_cloud_top"] # (bar)
kappa_gray = 10.0 ** cloud_dict["log_kappa_gray"] # (cm2 g-1)
opa_abs = np.full((wavel_micron.size, press_bar.size), kappa_gray)
opa_abs[:, press_bar < p_top] = 0.0
return opa_abs
# Add optional scattering opacity
if "albedo" in cloud_dict:
@typechecked
def kappa_scat(wavel_micron: np.ndarray, press_bar: np.ndarray) -> np.ndarray:
# Absorption opacity (cm2 g-1)
opa_abs = kappa_abs(wavel_micron, press_bar)
# Scattering opacity (cm2 g-1)
opa_scat = cloud_dict["albedo"] * opa_abs / (1. - cloud_dict["albedo"])
return opa_scat
else:
kappa_scat = None
else:
kappa_abs = None
kappa_scat = None
# Calculate the emission spectrum
rt_object.calc_flux(
temperature,
abundances,
10.0 ** log_g,
mmw,
sigma_lnorm=sigma_lnorm,
Kzz=Kzz_use,
fsed=fseds,
radius=None,
contribution=contribution,
gray_opacity=None,
Pcloud=None,
kappa_zero=None,
gamma_scat=None,
add_cloud_scat_as_abs=False,
hack_cloud_photospheric_tau=tau_cloud,
give_absorption_opacity=kappa_abs,
give_scattering_opacity=kappa_scat,
cloud_wlen=cloud_wavel,
)
# if (
# hasattr(rt_object, "scaling_physicality")
# and rt_object.scaling_physicality > 1.0
# ):
# # cloud_scaling_factor > 2 * (fsed + 1)
# # Set to None such that -inf will be returned as ln_like
# wavel = None
# f_lambda = None
# contr_em = None
#
# else:
# wavel = 1e6 * constants.LIGHT / rt_object.freq # (um)
#
# # (erg s-1 cm-2 Hz-1) -> (erg s-1 m-2 Hz-1)
# f_lambda = 1e4 * rt_object.flux
#
# # (erg s-1 m-2 Hz-1) -> (erg s-1 m-2 m-1)
# f_lambda *= constants.LIGHT / (1e-6 * wavel) ** 2.0
#
# # (erg s-1 m-2 m-1) -> (erg s-1 m-2 um-1)
# f_lambda *= 1e-6
#
# # (erg s-1 m-2 um-1) -> (W m-2 um-1)
# f_lambda *= 1e-7
#
# # Optionally return the emission contribution
# if contribution:
# contr_em = rt_object.contr_em
# else:
# contr_em = None
if rt_object.flux is None:
wavel = None
f_lambda = None
contr_em = None
else:
wavel = 1e6 * constants.LIGHT / rt_object.freq # (um)
# (erg s-1 cm-2 Hz-1) -> (erg s-1 m-2 Hz-1)
f_lambda = 1e4 * rt_object.flux
# (erg s-1 m-2 Hz-1) -> (erg s-1 m-2 m-1)
f_lambda *= constants.LIGHT / (1e-6 * wavel) ** 2.0
# (erg s-1 m-2 m-1) -> (erg s-1 m-2 um-1)
f_lambda *= 1e-6
# (erg s-1 m-2 um-1) -> (W m-2 um-1)
f_lambda *= 1e-7
# Optionally return the emission contribution
if contribution:
contr_em = rt_object.contr_em
else:
contr_em = None
# if (
# plotting
# and Kzz_use is None
# and hasattr(rt_object, "continuum_opa")
# ):
# plt.plot(wavel, rt_object.continuum_opa[:, 0], label="Total continuum opacity")
# # plt.plot(wavel, rt_object.continuum_opa[:, 0] - rt_object.continuum_opa_scat[:, 0], label="Absorption continuum opacity")
# # plt.plot(wavel, rt_object.continuum_opa_scat[:, 0], label="Scattering continuum opacity")
# plt.xlabel(r"Wavelength ($\mu$m)")
# plt.ylabel("Opacity at smallest pressure")
# plt.yscale("log")
# plt.legend(loc="best")
# plt.savefig("continuum_opacity.pdf", bbox_inches="tight")
# plt.clf()
return wavel, f_lambda, contr_em, mmw
@typechecked
def mass_fractions(log_x_abund: dict) -> dict:
"""
Function to return a dictionary with the mass fractions of
all species.
Parameters
----------
log_x_abund : dict
Dictionary with the log10 of the mass fractions of metals.
Returns
-------
dict
Dictionary with the mass fractions of all species.
"""
# initiate abundance dictionary
abund = {}
# initiate the total mass fraction of the metals
metal_sum = 0.0
for item in log_x_abund:
# add the mass fraction to the dictionary
abund[item] = 10.0 ** log_x_abund[item]
# update the total mass fraction of the metals
metal_sum += abund[item]
# mass fraction of H2 and He
ab_h2_he = 1.0 - metal_sum
# add H2 and He mass fraction to the dictionary
abund["H2"] = ab_h2_he * 0.75
abund["He"] = ab_h2_he * 0.25
return abund
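# Worked example for mass_fractions (hypothetical abundances): log10 mass
# fractions of -3.0 for H2O and -3.5 for CO leave 1 - 1e-3 - 10**-3.5
# ~= 0.9987 for H2 + He, split 3:1 by mass:
#
#     abund = mass_fractions({"H2O": -3.0, "CO": -3.5})
#     # abund["H2"] ~= 0.749 and abund["He"] ~= 0.250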
@typechecked
def calc_metal_ratio(log_x_abund: Dict[str, float]) -> Tuple[float, float, float]:
"""
Function for calculating [C/H], [O/H], and C/O for a given set
of abundances.
Parameters
----------
log_x_abund : dict
Dictionary with the log10 mass fractions.
Returns
-------
float
Carbon-to-hydrogen ratio, relative to solar.
float
Oxygen-to-hydrogen ratio, relative to solar.
float
Carbon-to-oxygen ratio.
"""
# Solar C/H from Asplund et al. (2009)
c_h_solar = 10.0 ** (8.43 - 12.0)
# Solar O/H from Asplund et al. (2009)
o_h_solar = 10.0 ** (8.69 - 12.0)
# Get the atomic masses
masses = atomic_masses()
# Create a dictionary with all mass fractions
abund = mass_fractions(log_x_abund)
# Calculate the mean molecular weight from the input mass fractions
mmw = mean_molecular_weight(abund)
# Initiate the C, H, and O abundance
c_abund = 0.0
o_abund = 0.0
h_abund = 0.0
# Calculate the total C abundance
if "CO" in abund:
c_abund += abund["CO"] * mmw / masses["CO"]
if "CO_all_iso" in abund:
c_abund += abund["CO_all_iso"] * mmw / masses["CO"]
if "CO_all_iso_HITEMP" in abund:
c_abund += abund["CO_all_iso_HITEMP"] * mmw / masses["CO"]
if "CO_all_iso_Chubb" in abund:
c_abund += abund["CO_all_iso_Chubb"] * mmw / masses["CO"]
if "CO2" in abund:
c_abund += abund["CO2"] * mmw / masses["CO2"]
if "CO2_main_iso" in abund:
c_abund += abund["CO2_main_iso"] * mmw / masses["CO2"]
if "CH4" in abund:
c_abund += abund["CH4"] * mmw / masses["CH4"]
if "CH4_main_iso" in abund:
c_abund += abund["CH4_main_iso"] * mmw / masses["CH4"]
# Calculate the total O abundance
if "CO" in abund:
o_abund += abund["CO"] * mmw / masses["CO"]
if "CO_all_iso" in abund:
o_abund += abund["CO_all_iso"] * mmw / masses["CO"]
if "CO_all_iso_HITEMP" in abund:
o_abund += abund["CO_all_iso_HITEMP"] * mmw / masses["CO"]
if "CO_all_iso_Chubb" in abund:
o_abund += abund["CO_all_iso_Chubb"] * mmw / masses["CO"]
if "CO2" in abund:
o_abund += 2.0 * abund["CO2"] * mmw / masses["CO2"]
if "CO2_main_iso" in abund:
o_abund += 2.0 * abund["CO2_main_iso"] * mmw / masses["CO2"]
if "H2O" in abund:
o_abund += abund["H2O"] * mmw / masses["H2O"]
if "H2O_HITEMP" in abund:
o_abund += abund["H2O_HITEMP"] * mmw / masses["H2O"]
if "H2O_main_iso" in abund:
o_abund += abund["H2O_main_iso"] * mmw / masses["H2O"]
# Calculate the total H abundance
h_abund += 2.0 * abund["H2"] * mmw / masses["H2"]
if "CH4" in abund:
h_abund += 4.0 * abund["CH4"] * mmw / masses["CH4"]
if "CH4_main_iso" in abund:
h_abund += 4.0 * abund["CH4_main_iso"] * mmw / masses["CH4"]
if "H2O" in abund:
h_abund += 2.0 * abund["H2O"] * mmw / masses["H2O"]
if "H2O_HITEMP" in abund:
h_abund += 2.0 * abund["H2O_HITEMP"] * mmw / masses["H2O"]
if "H2O_main_iso" in abund:
h_abund += 2.0 * abund["H2O_main_iso"] * mmw / masses["H2O"]
if "NH3" in abund:
h_abund += 3.0 * abund["NH3"] * mmw / masses["NH3"]
if "NH3_main_iso" in abund:
h_abund += 3.0 * abund["NH3_main_iso"] * mmw / masses["NH3"]
if "H2S" in abund:
h_abund += 2.0 * abund["H2S"] * mmw / masses["H2S"]
if "H2S_main_iso" in abund:
h_abund += 2.0 * abund["H2S_main_iso"] * mmw / masses["H2S"]
return (
np.log10(c_abund / h_abund / c_h_solar),
np.log10(o_abund / h_abund / o_h_solar),
c_abund / o_abund,
)
@typechecked
def mean_molecular_weight(abundances: dict) -> float:
"""
Function to calculate the mean molecular weight from the
abundances.
Parameters
----------
abundances : dict
Dictionary with the mass fraction of each species.
Returns
-------
float
Mean molecular weight in atomic mass units.
"""
masses = atomic_masses()
mmw = 0.0
for key in abundances:
if key in ["CO_all_iso", "CO_all_iso_HITEMP", "CO_all_iso_Chubb"]:
mmw += abundances[key] / masses["CO"]
elif key in ["Na_lor_cut", "Na_allard", "Na_burrows"]:
mmw += abundances[key] / masses["Na"]
elif key in ["K_lor_cut", "K_allard", "K_burrows"]:
mmw += abundances[key] / masses["K"]
elif key == "CH4_main_iso":
mmw += abundances[key] / masses["CH4"]
elif key in ["H2O_main_iso", "H2O_HITEMP"]:
mmw += abundances[key] / masses["H2O"]
else:
mmw += abundances[key] / masses[key]
return 1.0 / mmw
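# Worked check for mean_molecular_weight: a pure H2/He mixture with mass
# fractions 0.75/0.25 gives mmw = 1 / (0.75 / 2 + 0.25 / 4) ~= 2.29 amu,
# close to the 2.33 default adopted elsewhere in this module.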
@typechecked
def potassium_abundance(log_x_abund: dict) -> float:
"""
Function to calculate the mass fraction of potassium at a solar
ratio of the sodium and potassium abundances.
Parameters
----------
log_x_abund : dict
Dictionary with the log10 of the mass fractions.
Returns
-------
float
Log10 of the mass fraction of potassium.
"""
# solar volume mixing ratios of Na and K (Asplund et al. 2009)
n_na_solar = 1.60008694353205e-06
n_k_solar = 9.86605611925677e-08
# get the atomic masses
masses = atomic_masses()
# create a dictionary with all mass fractions
x_abund = mass_fractions(log_x_abund)
# calculate the mean molecular weight from the input mass fractions
mmw = mean_molecular_weight(x_abund)
# volume mixing ratio of sodium
if "Na" in log_x_abund:
n_na_abund = x_abund["Na"] * mmw / masses["Na"]
elif "Na_lor_cut" in log_x_abund:
n_na_abund = x_abund["Na_lor_cut"] * mmw / masses["Na"]
elif "Na_allard" in log_x_abund:
n_na_abund = x_abund["Na_allard"] * mmw / masses["Na"]
elif "Na_burrows" in log_x_abund:
n_na_abund = x_abund["Na_burrows"] * mmw / masses["Na"]
# volume mixing ratio of potassium
n_k_abund = n_na_abund * n_k_solar / n_na_solar
return np.log10(n_k_abund * masses["K"] / mmw)
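# Worked example for potassium_abundance: with the solar number ratio
# n_K/n_Na ~= 0.0617 and the mass ratio m_K/m_Na = 39.1/23, the potassium
# mass fraction comes out ~0.98 dex below the sodium mass fraction,
# independent of the mean molecular weight (which cancels).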
@typechecked
def log_x_cloud_base(
c_o_ratio: float, metallicity: float, cloud_fractions: dict
) -> dict:
"""
Function for returning a dictionary with the log10 mass fractions
at the cloud base.
Parameters
----------
c_o_ratio : float
C/O ratio.
metallicity : float
Metallicity, [Fe/H].
cloud_fractions : dict
Dictionary with the log10 mass fractions at the cloud base,
relative to the maximum values allowed from elemental
abundances. The dictionary keys are the cloud species without
the structure and shape index (e.g. Na2S(c) instead of
Na2S(c)_cd).
Returns
-------
dict
Dictionary with the log10 mass fractions at the cloud base.
Compared to the keys of ``cloud_fractions``, the keys in the
returned dictionary are provided without ``(c)`` (e.g. Na2S
instead of Na2S(c)).
"""
log_x_base = {}
for item in cloud_fractions:
# Mass fraction
x_cloud = cloud_mass_fraction(f"{item[:-3]}", metallicity, c_o_ratio)
# Log10 of the mass fraction at the cloud base
log_x_base[f"{item[:-3]}"] = np.log10(10.0 ** cloud_fractions[item] * x_cloud)
return log_x_base
@typechecked
def solar_mixing_ratios() -> dict:
"""
Function which returns the volume mixing ratios for solar elemental
abundances (i.e. [Fe/H] = 0); adopted from Asplund et al. (2009).
Returns
-------
dict
Dictionary with the solar number fractions (i.e. volume
mixing ratios).
"""
n_fracs = {}
n_fracs["H"] = 0.9207539305
n_fracs["He"] = 0.0783688694
n_fracs["C"] = 0.0002478241
n_fracs["N"] = 6.22506056949881e-05
n_fracs["O"] = 0.0004509658
n_fracs["Na"] = 1.60008694353205e-06
n_fracs["Mg"] = 3.66558742055362e-05
n_fracs["Al"] = 2.595e-06
n_fracs["Si"] = 2.9795e-05
n_fracs["P"] = 2.36670201997668e-07
n_fracs["S"] = 1.2137900734604e-05
n_fracs["Cl"] = 2.91167958499589e-07
n_fracs["K"] = 9.86605611925677e-08
n_fracs["Ca"] = 2.01439011429255e-06
n_fracs["Ti"] = 8.20622804366359e-08
n_fracs["V"] = 7.83688694089992e-09
n_fracs["Fe"] = 2.91167958499589e-05
n_fracs["Ni"] = 1.52807116806281e-06
return n_fracs
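# Consistency check: these values imply a solar carbon-to-oxygen ratio of
# n_fracs["C"] / n_fracs["O"] ~= 0.55, the commonly adopted solar value.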
@typechecked
def atomic_masses() -> dict:
"""
Function which returns the atomic and molecular masses.
Returns
-------
dict
Dictionary with the atomic and molecular masses.
"""
masses = {}
# Atoms
masses["H"] = 1.0
masses["He"] = 4.0
masses["C"] = 12.0
masses["N"] = 14.0
masses["O"] = 16.0
masses["Na"] = 23.0
masses["Na_lor_cut"] = 23.0
masses["Na_allard"] = 23.0
masses["Na_burrows"] = 23.0
masses["Mg"] = 24.3
masses["Al"] = 27.0
masses["Si"] = 28.0
masses["P"] = 31.0
masses["S"] = 32.0
masses["Cl"] = 35.45
masses["K"] = 39.1
masses["K_lor_cut"] = 39.1
masses["K_allard"] = 39.1
masses["K_burrows"] = 39.1
masses["Ca"] = 40.0
masses["Ti"] = 47.9
masses["V"] = 51.0
masses["Fe"] = 55.8
masses["Ni"] = 58.7
# Molecules
masses["H2"] = 2.0
masses["H2O"] = 18.0
masses["H2O_HITEMP"] = 18.0
masses["H2O_main_iso"] = 18.0
masses["CH4"] = 16.0
masses["CH4_main_iso"] = 16.0
masses["CO2"] = 44.0
masses["CO2_main_iso"] = 44.0
masses["CO"] = 28.0
masses["CO_all_iso"] = 28.0
masses["CO_all_iso_Chubb"] = 28.0
masses["CO_all_iso_HITEMP"] = 28.0
masses["NH3"] = 17.0
masses["NH3_main_iso"] = 17.0
masses["HCN"] = 27.0
masses["C2H2,acetylene"] = 26.0
masses["PH3"] = 34.0
masses["PH3_main_iso"] = 34.0
masses["H2S"] = 34.0
masses["H2S_main_iso"] = 34.0
masses["VO"] = 67.0
masses["VO_Plez"] = 67.0
masses["TiO"] = 64.0
masses["TiO_all_Exomol"] = 64.0
masses["TiO_all_Plez"] = 64.0
masses["FeH"] = 57.0
masses["FeH_main_iso"] = 57.0
masses["OH"] = 17.0
return masses
@typechecked
def cloud_mass_fraction(
composition: str, metallicity: float, c_o_ratio: float
) -> float:
"""
Function to calculate the mass fraction for a cloud species.
Parameters
----------
composition : str
Cloud composition ('Fe', 'MgSiO3', 'Al2O3', 'Na2S', or 'KCL').
metallicity : float
Metallicity [Fe/H].
c_o_ratio : float
Carbon-to-oxygen ratio.
Returns
-------
float
Mass fraction.
"""
# Solar fractional number densities (i.e. volume mixing ratios; VMR)
nfracs = solar_mixing_ratios()
# Atomic masses
masses = atomic_masses()
# Make a copy of the dictionary with the solar number densities
nfracs_use = copy.copy(nfracs)
# Scale the solar number densities by the [Fe/H], except H and He
for item in nfracs:
if item != "H" and item != "He":
nfracs_use[item] = nfracs[item] * 10.0 ** metallicity
# Adjust the VMR of O with the C/O ratio
nfracs_use["O"] = nfracs_use["C"] / c_o_ratio
if composition == "Fe":
nfrac_cloud = nfracs_use["Fe"]
mass_cloud = masses["Fe"]
elif composition == "MgSiO3":
nfrac_cloud = np.min(
[nfracs_use["Mg"], nfracs_use["Si"], nfracs_use["O"] / 3.0]
)
mass_cloud = masses["Mg"] + masses["Si"] + 3.0 * masses["O"]
elif composition == "Al2O3":
nfrac_cloud = np.min([nfracs_use["Al"] / 2.0, nfracs_use["O"] / 3.0])
mass_cloud = 2.0 * masses["Al"] + 3.0 * masses["O"]
elif composition == "Na2S":
nfrac_cloud = np.min([nfracs_use["Na"] / 2.0, nfracs_use["S"]])
mass_cloud = 2.0 * masses["Na"] + masses["S"]
elif composition == "KCL":
nfrac_cloud = np.min([nfracs_use["K"], nfracs_use["Cl"]])
mass_cloud = masses["K"] + masses["Cl"]
else:
raise ValueError(
f"The '{composition}' composition is not supported by cloud_mass_fraction."
)
# Cloud mass fraction
x_cloud = mass_cloud * nfrac_cloud
mass_norm = 0.0
for item in nfracs_use:
# Sum up the mass fractions of all species
mass_norm += masses[item] * nfracs_use[item]
# Normalize the cloud mass fraction by the total mass fraction
return x_cloud / mass_norm
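# Worked example for cloud_mass_fraction (solar values): for Fe at
# [Fe/H] = 0 and C/O = 0.55, x_cloud ~= 55.8 * 2.91e-5 / 1.25 ~= 1.3e-3,
# i.e. iron condensation can lock up at most ~0.13% of the total mass.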
@typechecked
def find_cloud_deck(
composition: str,
press: np.ndarray,
temp: np.ndarray,
metallicity: float,
c_o_ratio: float,
mmw: float = 2.33,
plotting: bool = False,
) -> float:
"""
Function to find the base of the cloud deck by intersecting the
P-T profile with the saturation vapor pressure.
Parameters
----------
composition : str
Cloud composition ('Fe', 'MgSiO3', 'Al2O3', 'Na2S', or 'KCL').
press : np.ndarray
Pressures (bar).
temp : np.ndarray
Temperatures (K).
metallicity : float
Metallicity [Fe/H].
c_o_ratio : float
Carbon-to-oxygen ratio.
mmw : float
Mean molecular weight.
plotting : bool
Create a plot.
Returns
-------
float
Pressure (bar) at the base of the cloud deck.
"""
if composition == "Fe":
Pc, Tc = return_T_cond_Fe_comb(metallicity, c_o_ratio, mmw)
elif composition == "MgSiO3":
Pc, Tc = return_T_cond_MgSiO3(metallicity, c_o_ratio, mmw)
elif composition == "Al2O3":
Pc, Tc = return_T_cond_Al2O3(metallicity, c_o_ratio, mmw)
elif composition == "Na2S":
Pc, Tc = return_T_cond_Na2S(metallicity, c_o_ratio, mmw)
elif composition == "KCL":
Pc, Tc = return_T_cond_KCl(metallicity, c_o_ratio, mmw)
else:
raise ValueError(
f"The '{composition}' composition is not supported by find_cloud_deck."
)
index = (Pc > 1e-8) & (Pc < 1e5)
Pc, Tc = Pc[index], Tc[index]
tcond_p = interp1d(Pc, Tc)
Tcond_on_input_grid = tcond_p(press)
Tdiff = Tcond_on_input_grid - temp
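# A sign change in the product of consecutive temperature differences
# marks a crossing between the P-T profile and the condensation curve;
# the deepest (highest-pressure) crossing is adopted as the cloud base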
diff_vec = Tdiff[1:] * Tdiff[:-1]
ind_cdf = diff_vec < 0.0
if len(diff_vec[ind_cdf]) > 0:
P_clouds = (press[1:] + press[:-1])[ind_cdf] / 2.0
P_cloud = float(P_clouds[-1])
else:
P_cloud = 1e-8
if plotting:
plt.plot(temp, press)
plt.plot(Tcond_on_input_grid, press)
plt.axhline(P_cloud, color="red", linestyle="--")
plt.yscale("log")
plt.xlim(0.0, 3000.0)
plt.ylim(1e2, 1e-6)
plt.savefig(f"{composition.lower()}_clouds_cdf.pdf", bbox_inches="tight")
plt.clf()
return P_cloud
@typechecked
def scale_cloud_abund(
params: Dict[str, float],
rt_object,
pressure: np.ndarray,
temperature: np.ndarray,
mmw: np.ndarray,
chemistry: str,
abund_in: Dict[str, np.ndarray],
composition: str,
tau_cloud: float,
pressure_grid: str,
) -> float:
"""
Function to scale the mass fraction of a cloud species to the
requested optical depth.
Parameters
----------
params : dict
Dictionary with the model parameters.
rt_object : petitRADTRANS.radtrans.Radtrans
Instance of ``Radtrans``.
pressure : np.ndarray
Array with the pressure points (bar).
temperature : np.ndarray
Array with the temperature points (K) corresponding
to ``pressure``.
mmw : np.ndarray
Array with the mean molecular weights corresponding
to ``pressure``.
chemistry : str
Chemistry type (only ``'equilibrium'`` is supported).
abund_in : dict
Dictionary with arrays that contain the pressure-dependent,
equilibrium mass fractions of the line species.
composition : str
Cloud composition ('Fe(c)', 'MgSiO3(c)', 'Al2O3(c)',
'Na2S(c)', 'KCl(c)').
tau_cloud : float
Optical depth of the clouds. The returned mass fraction is
scaled such that the optical depth at the shortest wavelength
is equal to ``tau_cloud``.
pressure_grid : str
The type of pressure grid that is used for the radiative
transfer. Either 'standard', to use 180 layers both for the
atmospheric structure (e.g. when interpolating the abundances)
and 180 layers with the radiative transfer, or 'smaller' to
use 60 (instead of 180) with the radiative transfer, or
'clouds' to start with 1440 layers but resample to ~100 layers
(depending on the number of cloud species) with a refinement
around the cloud decks. For cloudless atmospheres it is
recommended to use 'smaller', which runs faster than 'standard'
and provides sufficient accuracy. For cloudy atmospheres, one
can test with 'smaller', but it is recommended to use 'clouds'
for more accurate fluxes.
Returns
-------
float
Mass fraction relative to the maximum value allowed from
elemental abundances. The value has been scaled to the
requested optical depth ``tau_cloud`` (at the shortest
wavelength).
"""
# Dictionary with the requested cloud composition and setting the
# log10 of the mass fraction (relative to the maximum value
# allowed from elemental abundances) equal to zero
cloud_fractions = {composition: 0.0}
# Create a dictionary with the log10 of
# the mass fraction at the cloud base
log_x_base = log_x_cloud_base(
params["c_o_ratio"], params["metallicity"], cloud_fractions
)
# Get the pressure (bar) of the cloud base
p_base = find_cloud_deck(
composition[:-3],
pressure,
temperature,
params["metallicity"],
params["c_o_ratio"],
mmw=np.mean(mmw),
plotting=False,
)
# Initialize the cloud abundance in
# the dictionary with mass fractions
abund_in[composition] = np.zeros_like(temperature)
# Set the cloud abundances by scaling
# from the base with the f_sed parameter
abund_in[composition][pressure <= p_base] = (
10.0 ** log_x_base[composition[:-3]]
* (pressure[pressure <= p_base] / p_base) ** params["fsed"]
)
# Adaptive pressure refinement around the cloud base
if pressure_grid == "clouds":
_, indices = make_half_pressure_better({composition: p_base}, pressure)
else:
indices = None
# Update the abundance dictionary
abundances = create_abund_dict(
abund_in,
rt_object.line_species,
chemistry,
pressure_grid=pressure_grid,
indices=indices,
)
# Interpolate the line opacities to the temperature structure
if pressure_grid == "standard":
rt_object.interpolate_species_opa(temperature)
mmw_select = mmw.copy()
if "log_kzz" in params:
kzz_select = np.full(pressure.size, 10.0 ** params["log_kzz"])
else:
# Backward compatibility
kzz_select = np.full(pressure.size, 10.0 ** params["kzz"])
elif pressure_grid == "smaller":
rt_object.interpolate_species_opa(temperature[::3])
mmw_select = mmw[::3]
if "log_kzz" in params:
kzz_select = np.full(pressure[::3].size, 10.0 ** params["log_kzz"])
else:
# Backward compatibility
kzz_select = np.full(pressure[::3].size, 10.0 ** params["kzz"])
elif pressure_grid == "clouds":
# Re-initialize the pressure structure
# after make_half_pressure_better
rt_object.setup_opa_structure(pressure[indices])
rt_object.interpolate_species_opa(temperature[indices])
mmw_select = mmw[indices]
if "log_kzz" in params:
kzz_select = np.full(pressure[indices].size, 10.0 ** params["log_kzz"])
else:
# Backward compatibility
kzz_select = np.full(pressure[indices].size, 10.0 ** params["kzz"])
# Set the continuum opacities to zero because
# calc_cloud_opacity adds to existing opacities
rt_object.continuum_opa = np.zeros_like(rt_object.continuum_opa)
rt_object.continuum_opa_scat = np.zeros_like(rt_object.continuum_opa_scat)
rt_object.continuum_opa_scat_emis = np.zeros_like(rt_object.continuum_opa_scat_emis)
# Calculate the cloud opacities for
# the defined atmospheric structure
rt_object.calc_cloud_opacity(
abundances,
mmw_select,
10.0 ** params["logg"],
params["sigma_lnorm"],
fsed=params["fsed"],
Kzz=kzz_select,
radius=None,
add_cloud_scat_as_abs=False,
)
# Calculate the cloud optical depth and set the tau_cloud attribute
rt_object.calc_tau_cloud(10.0 ** params["logg"])
# Extract the wavelength-averaged optical
# depth at the largest pressure
tau_current = np.mean(rt_object.tau_cloud[0, :, 0, -1])
# Set the continuum opacities again to zero
rt_object.continuum_opa = np.zeros_like(rt_object.continuum_opa)
rt_object.continuum_opa_scat = np.zeros_like(rt_object.continuum_opa_scat)
rt_object.continuum_opa_scat_emis = np.zeros_like(rt_object.continuum_opa_scat_emis)
if tau_current > 0.0:
# Scale the mass fraction
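# The cloud optical depth scales linearly with the cloud mass fraction,
# so the required log10 correction is log10(tau_cloud / tau_current)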
log_x_scaled = np.log10(tau_cloud / tau_current)
else:
log_x_scaled = 100.0
return log_x_scaled
@typechecked
def cube_to_dict(cube, cube_index: Dict[str, float]) -> Dict[str, float]:
"""
Function to convert the parameter cube into a dictionary.
Parameters
----------
cube : LP_c_double
Cube with the parameters.
cube_index : dict
Dictionary with the index of each parameter in the ``cube``.
Returns
-------
dict
Dictionary with the parameters.
"""
params = {}
for key, value in cube_index.items():
params[key] = cube[value]
return params
@typechecked
def list_to_dict(param_list: List[str], sample_val: np.ndarray) -> Dict[str, float]:
"""
Function to convert a list of parameter names and values into a dictionary.
Parameters
----------
param_list : list(str)
List with the parameter labels.
sample_val : np.ndarray
Array with the parameter values, in the same order as
``param_list``.
Returns
-------
dict
Dictionary with the parameters.
"""
sample_dict = {}
for item in param_list:
sample_dict[item] = sample_val[param_list.index(item)]
return sample_dict
@typechecked
def return_T_cond_Fe(
FeH: float, CO: float, MMW: float = 2.33
) -> Tuple[np.ndarray, np.ndarray]:
"""
Function for calculating the saturation pressure for solid Fe.
Parameters
----------
FeH : float
Metallicity.
CO : float
Carbon-to-oxygen ratio.
MMW : float
Mean molecular weight.
Returns
-------
np.ndarray
Saturation pressure (bar).
np.ndarray
Temperature (K).
"""
masses = atomic_masses()
T = np.linspace(100.0, 10000.0, 1000)
# Taken from Ackerman & Marley (2001)
# including erratum (P_vap is in bar, not cgs!)
P_vap = lambda x: np.exp(15.71 - 47664.0 / x)
XFe = cloud_mass_fraction("Fe", FeH, CO)
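# Condensation sets in where the Fe partial pressure, XFe * (MMW / m_Fe) * P,
# reaches the saturation vapor pressure, so the condensation curve is
# P(T) = P_vap(T) / (XFe * MMW / m_Fe)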
return P_vap(T) / (XFe * MMW / masses["Fe"]), T
@typechecked
def return_T_cond_Fe_l(
FeH: float, CO: float, MMW: float = 2.33
) -> Tuple[np.ndarray, np.ndarray]:
"""
Function for calculating the saturation pressure for liquid Fe.
Parameters
----------
FeH : float
Metallicity.
CO : float
Carbon-to-oxygen ratio.
MMW : float
Mean molecular weight.
Returns
-------
np.ndarray
Saturation pressure (bar).
np.ndarray
Temperature (K).
"""
masses = atomic_masses()
T = np.linspace(100.0, 10000.0, 1000)
# Taken from Ackerman & Marley (2001)
# including erratum (P_vap is in bar, not cgs!)
P_vap = lambda x: np.exp(9.86 - 37120.0 / x)
XFe = cloud_mass_fraction("Fe", FeH, CO)
return P_vap(T) / (XFe * MMW / masses["Fe"]), T
import utm as UTM
import unittest
import numpy as np
class UTMTestCase(unittest.TestCase):
def assert_utm_equal(self, a, b):
self.assertTrue(np.allclose(a[0], b[0]))
self.assertTrue(np.allclose(a[1], b[1]))
self.assertEqual(a[2], b[2])
self.assertEqual(a[3].upper(), b[3].upper())
def assert_latlon_equal(self, a, b):
self.assertTrue(np.allclose(a[0], b[0], rtol=1e-4, atol=1e-4))
self.assertTrue(np.allclose(a[1], b[1], rtol=1e-4, atol=1e-4))
class KnownValues(UTMTestCase):
known_values = [
# Aachen, Germany
(
(50.77535, 6.08389),
(294409, 5628898, 32, 'U'),
{'northern': True},
),
# New York, USA
(
(40.71435, -74.00597),
(583960, 4507523, 18, 'T'),
{'northern': True},
),
# Wellington, New Zealand
(
(-41.28646, 174.77624),
(313784, 5427057, 60, 'G'),
{'northern': False},
),
# Capetown, South Africa
(
(-33.92487, 18.42406),
(261878, 6243186, 34, 'H'),
{'northern': False},
),
# Mendoza, Argentina
(
(-32.89018, -68.84405),
(514586, 6360877, 19, 'h'),
{'northern': False},
),
# Fairbanks, Alaska, USA
(
(64.83778, -147.71639),
(466013, 7190568, 6, 'W'),
{'northern': True},
),
# <NAME>, Scotland, UK
(
(56.79680, -5.00601),
(377486, 6296562, 30, 'V'),
{'northern': True},
),
# Latitude 84
(
(84, -5.00601),
(476594, 9328501, 30, 'X'),
{'northern': True},
),
]
def test_from_latlon(self):
lats = np.array([0.0, 3.0, 6.0])
lons = np.array([0.0, 1.0, 3.4])
result = UTM.from_latlon(lats, lons)
self.assert_utm_equal((np.array([166021.44317933032,
277707.83075574087,
544268.12794623]),
np.array([0.0,
331796.29167519242,
663220.7198366751]),
31, 'N'), result)
for latlon, utm, _ in self.known_values:
result = UTM.from_latlon(*[np.array([x]) for x in latlon])
self.assert_utm_equal(utm, result)
def test_to_latlon(self):
result = UTM.to_latlon(np.array([166021.44317933032,
277707.83075574087,
544268.12794623]),
np.array([0.0,
331796.29167519242,
663220.7198366751]),
31, 'N')
self.assert_latlon_equal((np.array([0.0, 3.0, 6.0]),
np.array([0.0, 1.0, 3.4])),
result)
for latlon, utm, utm_kw in self.known_values:
utm = [np.array([x]) for x in utm[:2]] + list(utm[2:])
result = UTM.to_latlon(*utm)
self.assert_latlon_equal(latlon, result)
class BadInput(UTMTestCase):
def test_from_latlon_range_checks(self):
'''from_latlon should fail with out-of-bounds input'''
self.assertRaises(UTM.OutOfRangeError, UTM.from_latlon,
np.array(-100), np.array(0))
self.assertRaises(UTM.OutOfRangeError, UTM.from_latlon,
np.array(-80.1), np.array(0))
for i in range(-8000, 8400):
UTM.from_latlon(np.array(i / 100.0), np.array(0))
self.assertRaises(UTM.OutOfRangeError, UTM.from_latlon,
np.array(84.1), np.array(0))
self.assertRaises(UTM.OutOfRangeError, UTM.from_latlon,
np.array(100), np.array(0))
self.assertRaises(UTM.OutOfRangeError, UTM.from_latlon,
np.array(0), np.array(-300))
self.assertRaises(UTM.OutOfRangeError, UTM.from_latlon,
np.array(0), np.array(-180.1))
for i in range(-18000, 18000):
UTM.from_latlon(np.array(0), np.array(i / 100.0))
self.assertRaises(UTM.OutOfRangeError, UTM.from_latlon,
np.array(0), np.array(180.1))
self.assertRaises(UTM.OutOfRangeError, UTM.from_latlon,
np.array(0), np.array(300))
self.assertRaises(UTM.OutOfRangeError, UTM.from_latlon,
np.array(-100), np.array(-300))
# -*- coding: utf-8 -*-
"""
Created on Wed Aug 3 11:31:57 2016
@author: cpkmanchee
beam_profile.py contains methods for dealing with beam profile images.
All definitions, usages, and calculations are consisten with ISO
standard 11146 (generally dealing with D4sigma beamwidths).
"""
import numpy as np
import scipy.optimize as opt
import uncertainties as un
import glob
import time
from beamtools.constants import h,c,pi
from beamtools.common import normalize
from beamtools.import_data_file import import_data_file as _import
#import matplotlib.pyplot as plt
#from mpl_toolkits.mplot3d import Axes3D
#from matplotlib.gridspec import GridSpec
__all__ = ['gaussian_beamwaist','fitM2',
'get_roi','calculate_beamwidths','flattenrgb',
'calculate_2D_moments','pix2um']
BITS = 8 #image channel intensity resolution
SATLIM = 0.001 #fraction of non-zero pixels allowed to be saturated
PIXSIZE = 1.745 #pixel size in um, measured
def _stop(string = 'error'): raise Exception(string)
def gaussian_beamwaist(z,z0,d0,M2=1,const=0,wl=1.030):
'''Generate gaussian beam profile w(z), along optical axis.
w(z) = w0*(1+((z-z0)/zR)^2)^(1/2)
where zR is the Rayleigh length given by
zR = pi*w0^2/wl
Units for waist and wavelength is um.
Units for optical axis positon is mm.
Inputs:
z = array of position along optical axis in mm
z0 = position of beam waist (focus) in mm
d0 = beam waist in um (diameter)
M2 = beam parameter M^2, unitless
wl = wavelenth in um, default to 1030nm (1.03um)
Outputs:
w = w(z) position dependent beam waist in um. Same size as 'z'
'''
z = 1000*np.asarray(z).astype(float)
z0 = 1000*z0
w0 = d0/2
w = (w0**2 + M2**2*(wl/(np.pi*w0))**2*(z-z0)**2)**(1/2) + const
return w
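# Usage sketch for gaussian_beamwaist (hypothetical values): beam radius
# along the axis for a focus at z0 = 50 mm with waist diameter d0 = 100 um
# and M2 = 1.2:
#
#     z = np.linspace(0, 100, 201)                # position in mm
#     w = gaussian_beamwaist(z, 50, 100, M2=1.2)  # radius in um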
def fitM2(dz, z, wl=1.03E-6):
'''Fit series of beamwaists to gaussian beamwaist.
Follows ISO standard 11146 for fitting.
Returns gaussian beam paramters with uncertainties.
Inputs:
z, position aling optical axis in mm
dz, beamwidth in um, = 2*w, i.e. dz is beam diameter
wl, (optional) wavelength, default = 1030nm
a,b, and c are fit parameters
Outputs:
z0,d0,M2,theta,zR and associated uncertainties.
'''
dz = dz*1E-6
z = z*1E-3
#initial paramters for fit
di = np.min(dz)
zi = z[np.argmin(dz)]
ci = (wl/(np.pi*di))**2
bi = -2*zi*ci
ai = di**2 + ci*zi**2
#fit limits
c_lim = np.array([0, np.inf])
b_lim = np.array([-np.inf,np.inf])
a_lim = np.array([0, np.inf])
p0 = [ai, bi, ci]
limits = ([i.min() for i in [a_lim,b_lim,c_lim]], [i.max() for i in [a_lim,b_lim,c_lim]])
f = lambda z,a,b,c: (a + b*z + c*z**2)**(1/2)
popt,pcov = opt.curve_fit(f,z,dz,p0,bounds=limits)
[a,b,c] = [un.ufloat(popt[i], np.sqrt(pcov[i,i])) for i in range(3)]
z0 = (1E3)*(-b/(2*c))
d0 = (1E6)*((4*a*c-b**2)**(1/2))*(1/(2*c**(1/2)))
M2 = (np.pi/(8*wl))*((4*a*c-b**2)**(1/2))
theta = c**(1/2)
zR = (1E3)*((4*a*c-b**2)**(1/2))*(1/(2*c))
value = [x.nominal_value for x in [z0,d0,M2,theta,zR]]
std = [x.std_dev for x in [z0,d0,M2,theta,zR]]
return value, std
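# Usage sketch for fitM2: given beam diameters dz (um) measured at
# positions z (mm),
#
#     values, stds = fitM2(dz, z)
#     z0, d0, M2, theta, zR = values  # mm, um, (unitless), rad, mm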
def flattenrgb(im, bits=8, satlim=0.001):
'''Flattens rbg array, excluding saturated channels
'''
sat_det = np.zeros(im.shape[2])
Nnnz = np.zeros(im.shape[2])
Nsat = np.zeros(im.shape[2])
data = np.zeros(im[...,0].shape, dtype = 'uint32')
for i in range(im.shape[2]):
Nnnz[i] = (im[:,:,i] != 0).sum()
Nsat[i] = (im[:,:,i] >= 2**bits-1).sum()
if Nsat[i]/Nnnz[i] <= satlim:
data += im[:,:,i]
else:
sat_det[i] = 1
output = normalize(data.astype(float))
return output, sat_det
def calculate_beamwidths(data, error_limit=1E-4, it_limit=5):
'''Calculate beam profile parameters, ISO standard 11146
data = image matrix
data,x,y all same dimensions
'''
errx = 1
erry = 1
itN = 0
d0x = data.shape[1]
d0y = data.shape[0]
roi_new = [d0x/2,d0x,d0y/2,d0y]
full_data = data
while any([i>error_limit for i in [errx,erry]]):
roi = roi_new
data = get_roi(full_data,roi)
moments = calculate_2D_moments(data)
dx = 4*np.sqrt(moments[2])
dy = 4*np.sqrt(moments[3])
errx = np.abs(dx-d0x)/d0x
erry = np.abs(dy-d0y)/d0y
d0x = dx
d0y = dy
roi_new = [moments[0]+roi[0]-roi[1]/2,3*dx,moments[1]+roi[2]-roi[3]/2,3*dy] #[centrex,width,centrey,height]
itN += 1
if itN >= it_limit:
print('exceeded iteration in calculating moments')
break
pixel_scale = [pix2um(1)]*2+ [pix2um(1)**2]*3
moments = pixel_scale*moments
[ax,ay,s2x,s2y,s2xy] = moments
g = np.sign(s2x-s2y)
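# ISO 11146 principal-axis widths:
# d = 2*sqrt(2) * sqrt((s2x + s2y) +/- g*sqrt((s2x - s2y)**2 + 4*s2xy**2)),
# where g carries the sign of s2x - s2y so that dx tracks the major axis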
dx = 2*np.sqrt(2)*((s2x+s2y) + g*((s2x-s2y)**2 + 4*s2xy**2)**(1/2))**(1/2)
dy = 2*np.sqrt(2)*((s2x+s2y) - g*((s2x-s2y)**2 + 4*s2xy**2)**(1/2))**(1/2)
if s2x == s2y:
phi = (np.pi/4)*np.sign(s2xy)
else:
phi = (1/2)*np.arctan(2*s2xy/(s2x-s2y))
beamwidths = [dx,dy,phi]
return beamwidths,roi,moments
def calculate_2D_moments(data, axes_scale=[1,1], calc_2nd_moments = True):
'''
data = 2D data
axes_scale = (optional) scaling factor for x and y
returns first and second moments
first moments are averages in each direction
second moments are variences in x, y and diagonal
'''
x = axes_scale[0]*(np.arange(data.shape[1]))
y = axes_scale[1]*(np.arange(data.shape[0]))
dx,dy = np.meshgrid(np.gradient(x),np.gradient(y))
x,y = np.meshgrid(x,y)
A = np.sum(data*dx*dy)
#first moments (averages)
avgx = np.sum(data*x*dx*dy)/A
avgy = np.sum(data*y*dx*dy)/A
if not calc_2nd_moments:
return np.array([avgx, avgy])
#second moments (variances in x and y, and the xy covariance)
s2x = np.sum(data*(x - avgx)**2*dx*dy)/A
s2y = np.sum(data*(y - avgy)**2*dx*dy)/A
s2xy = np.sum(data*(x - avgx)*(y - avgy)*dx*dy)/A
return np.array([avgx, avgy, s2x, s2y, s2xy])
import math
import numpy as np
import random
class Task(object):
def __init__(self, robot=None):
"""Init of the Task
Args:
desired_velocity: velocity that will achieve the maximum reward
max_desired_velocity: maximum desired velocity
"""
self.starting_command = np.array([1., 0., 0.])
self.sample = Sample_Command()
self.reset(robot)
self.desired_lv = 0.25
self.angle = 0
self.foot_position = np.zeros((3,12))
self.idx = -1
self.r_lv = 0
self.r_av = 0
self.r_s = 0
self.r_br = 0
self.r_bp = 0
self.r_t = 0
def reset(self, robot, command_mode=1):
"""Initializes a new instance of the robot and resets the
desired velocity"""
self.robot = robot
self.command = self.starting_command
# self.command[0] = random.uniform(0,1)
# self.command[1] = random.uniform(-1.0, 1.0)
# print(self.command)
# self.sample.reset(command_mode)
# 3 conditioned
def set_desired_yaw_rate(self, yaw_rate):
"""Sets a new desired yaw rate"""
self.command[1] = yaw_rate
def change_desired_yaw_rate(self, change):
self.command[1] += change
self.command[1] = min(max(self.command[1],-1),1)
# print(self.command[2])
def change_desired_forward_velocity(self, change):
self.command[0] += change
self.command[0] = min(max(self.command[0],0),1)
def enable_command(self):
self.stop_command = False
def get_desired_velocities_and_yaw_rate(self):
"""Get desired direction of the robot CoM and yaw rate"""
# self.command = self.sample.sample_command(
# self.robot.get_base_position()[:2],
# self.robot.get_base_roll_pitch_yaw()[2],
# 1)
# print(self.command)
return self.command
def stop(self, bool):
self.stop_command = bool
def get_reward_distribution(self):
r_lv, r_av, r_s, r_br, r_bp, r_t = self.r_lv, self.r_av, self.r_s, self.r_br, self.r_bp, self.r_t
self.r_lv, self.r_av, self.r_s, self.r_br, self.r_bp, self.r_t = 0, 0, 0, 0, 0, 0
return r_lv, r_av, r_s, r_br, r_bp, r_t
# MY REWARD
def get_reward(self, measurements, action):
"""Get the reward for the current time step
Args:
measurements: The robot's current filtered x,y velocity, roll rate,
pitch rate, and yaw rate.
Returns:
float: reward obtained in the current time step
"""
# print(self.command)
# MY LINEAR VELOCITY
v_pr = np.dot(measurements[0], self.command[0])
if v_pr > self.desired_lv:
r_lv = 1
else:
r_lv = 0.5*math.exp(-15*((measurements[0]-self.command[0]*self.desired_lv)**2))+\
0.5*math.exp(-15*((measurements[1]-self.command[1]*self.desired_lv)**2))
# MY ANGULAR REWARD
v_ar = np.dot(measurements[4], self.command[2])
if v_ar > 0.3:
r_av = 1
else:
r_av = math.exp(-15*((measurements[4]-self.command[2]*0.3)**2))
# TARGET SMOOTHNESS
self.idx = (self.idx + 1)%3
self.foot_position[self.idx, :] = action
r_s = - np.linalg.norm(self.foot_position[self.idx,:]
-2*self.foot_position[self.idx-1,:]
+ self.foot_position[self.idx-2,:], ord=2)
# BASE MOTION REWARD (Roll rate and pitch rate)
r_br = math.exp(-2*(abs(measurements[2])))
r_bp = math.exp(-2*(abs(measurements[3])))
# TORQUE PENALTY
r_t = -self.robot.get_torques()
# PLOTTING
# self.r_lv += 0.07*r_lv
# self.r_av += 0.02*r_av
# self.r_br += 0.005*r_br
# self.r_bp += 0.01*r_bp
# self.r_t -= 0.00002*r_t
# self.r_s -= 0.025*r_s
return 0.05*r_lv + 0.05*r_av + 0.025*r_s + 0.005*r_br + 0.01*r_bp + 0.00002*r_t
def get_num_commands(self):
return self.command.shape[0]
def check_default_terminal_condition(self):
"""Returns true if the robot is in a position that should terminate
the episode, false otherwise"""
roll, pitch, _ = self.robot.get_base_roll_pitch_yaw()
pos = self.robot.get_base_position()
# return False
return abs(roll) > 1 or abs(pitch) > 1 #or pos[2] < 0.22
class Sample_Command():
# Used to train x,y,yaw command conditioned.
def __init__(self):
self.reset()
self._goal_position = np.array([100,0])
def sample_command(self, robot_position, heading_angle, command_mode=0):
dist = self._goal_position - robot_position
direction = np.arctan2(dist[1], dist[0])
direction_body_frame = direction - heading_angle
self._command[0] = math.cos(direction_body_frame)
self._command[1] = math.sin(direction_body_frame)
# self._command[2] = max(min(self._command[2]+0.01*self.desired_yaw,
# 1), -1)
return self._command
def reset(self, command_mode=0):
self._command = np.array([0., 0., 0.])
"""
Tools to process galaxy spectra .fits files from SDSS-II Legacy survey.
Authored by <NAME> 02/13/16
"""
# TODO: add parameter descriptions to SpecProcessor, normalize, and process_fits
from __future__ import absolute_import, print_function, division
import numpy as np
from scipy import interp
import time
import sys
from .io import FitsData
class SpecProcessor(object):
"""
Perform basic processing of raw spectra.
Attributes
----------
loglam_grid: ndarray
Nsamples: integer
galaxy_params: numpy record array
filenames: string, list, or ndarray
spectra_directory: string
Nspectra: integer
"""
def __init__(self, filenames, galaxy_params, spectra_directory=None, n_samples=5000, loglam_grid=None):
if len(galaxy_params) != len(filenames):
sys.exit('filenames and galaxy_params must be same length')
self.galaxy_params = galaxy_params
self.filenames = filenames
self.Nspectra = len(self.filenames)
self.spectra_directory = spectra_directory
if loglam_grid:
self.loglam_grid = loglam_grid
self.Nsamples = len(loglam_grid)
else:
self.loglam_grid = 3.5 + 0.0001 * np.arange(n_samples)
self.Nsamples = n_samples
@staticmethod
def k(wavelength, r_v=3.1):
"""
Calculate A_wavelength/A_V using CCM 1989 extincton law.
Parameters
----------
wavelength: float or ndarray
Wavelength(s) at which to compute the reddening correction.
r_v: float (default=3.1)
R_V value assumed in reddening law.
Returns
-------
k: float or ndarray
Value(s) of k(lambda) at the specified wavelength(s).
"""
x = 1. / (wavelength / 10000.)
# Valid for 1.1 < x < 3.3 - all wavelengths in this code are between 1.35 and 2.7.
y = x - 1.82
a = 1. + 0.17699 * y - 0.50447 * (y ** 2) - 0.02427 * (y ** 3) + 0.72085 * (y ** 4) + 0.01979 * (
y ** 5) - 0.77530 * (y ** 6) + 0.32999 * (y ** 7)
b = 1.41338 * y + 2.28305 * (y ** 2) + 1.07233 * (y ** 3) - 5.38434 * (y ** 4) - 0.62251 * (
y ** 5) + 5.30260 * (y ** 6) - 2.09002 * (y ** 7)
return a + b / r_v
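# Sanity check for k: at 5500 A (the V band), x ~= 1.82 so y ~= 0, giving
# a ~= 1 and b ~= 0, hence k = A_V/A_V ~= 1 as required.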
def deredden(self, log_wavelength, flux, ebv, r_v=3.1):
"""
Correct flux at specified wavelength(s) for reddening using CCM 1989 extinction law.
Parameters
----------
log_wavelength: float or ndarray
Wavelength(s) at which to compute the reddening correction.
flux: float or array-like
Uncorrected flux(es).
ebv: float
Value of E(B-V).
r_v: float (default=3.1)
R_V value assumed in the reddening law.
Returns
-------
flux_corr: float or ndarray
Flux(es) corrected for reddening.
"""
# k returns A_lambda/A_V, so the total extinction is
# A_lambda = k(lambda) * r_v * E(B-V)
return flux * 10 ** (0.4 * self.k(10 ** log_wavelength, r_v) * r_v * ebv)
def normalize(self, spectra, weights):
"""
Normalize the array of spectra to mean value of each spectrum between 4400 and 4450 A
Multiply inverse variances by the square of the normalization
Parameters
----------
spectra: ndarray
weights: ndarray
Returns
-------
spectra: ndarray
weights: ndarray
"""
# TODO: check that mean flux in this window is nonzero!
norm = np.mean(spectra[:, (10 ** self.loglam_grid > 4400.) * (10 ** self.loglam_grid < 4450.)], axis=1)
spectra /= norm[:, None]
weights *= norm[:, None] ** 2
return spectra, weights
def process_fits(self, normalize=False, mask=False, return_id=False, indices=None):
"""
Iterate over all .fits filenames, read in and process spectra.
Check that redshift in header matches redshift in parameters file.
Parameters
----------
normalize: boolean (default=False)
mask: boolean (default=False)
indices: integer, list, or ndarray (default=None)
return_id: boolean (default=False)
Returns
-------
spectra: ndarray
weights: ndarray
id_dict: dictionary
Only returned if return_id=True.
"""
start_time = time.time()
counter = 0
spectra = np.zeros((self.Nspectra, self.Nsamples))
weights = np.zeros((self.Nspectra, self.Nsamples))
redshifts = []
plates = []
mjds = []
fibers = []
if indices is not None:
index_list = indices
else:
            index_list = np.arange(self.Nspectra)
from redback.constants import *
from redback.transient_models.magnetar_models import magnetar_only, basic_magnetar, _evolving_gw_and_em_magnetar
import numpy as np
from astropy.cosmology import Planck18 as cosmo # noqa
from scipy.interpolate import interp1d
from collections import namedtuple
import astropy.units as uu # noqa
import astropy.constants as cc # noqa
from redback.utils import calc_kcorrected_properties, interpolated_barnes_and_kasen_thermalisation_efficiency, \
electron_fraction_from_kappa, citation_wrapper
from redback.sed import blackbody_to_flux_density
def _ejecta_dynamics_and_interaction(time, mej, beta, ejecta_radius, kappa, n_ism,
magnetar_luminosity, pair_cascade_switch, use_gamma_ray_opacity, **kwargs):
"""
:param time: time in source frame
:param mej: ejecta mass in solar masses
:param beta: initial ejecta velocity in c
:param ejecta_radius: initial ejecta radius
:param kappa: opacity
:param n_ism: ism number density
:param magnetar_luminosity: evaluated magnetar luminosity in source frame
:param pair_cascade_switch: whether to account for pair cascade losses
:param use_gamma_ray_opacity: whether to use gamma ray opacity to calculate thermalisation efficiency
:param kwargs: Additional parameters
:param kappa_gamma: Gamma-ray opacity for leakage efficiency, only used if use_gamma_ray_opacity = True
:param thermalisation_efficiency: magnetar thermalisation efficiency only used if use_gamma_ray_opacity = False
    :param ejecta_albedo: ejecta albedo; default is 0.5
:param pair_cascade_fraction: fraction of magnetar luminosity lost to pair cascades; default is 0.05
:return: named tuple with 'lorentz_factor', 'bolometric_luminosity', 'comoving_temperature',
'radius', 'doppler_factor', 'tau', 'time', 'kinetic_energy',
'erad_total', 'thermalisation_efficiency'
"""
mag_lum = magnetar_luminosity
ejecta_albedo = kwargs.get('ejecta_albedo', 0.5)
pair_cascade_fraction = kwargs.get('pair_cascade_fraction', 0.05)
mej = mej * solar_mass
lorentz_factor = []
radius = []
doppler_factor = []
lbol_ejecta = []
lbol_rest = []
comoving_temperature = []
tau = []
teff = []
internal_energy = 0.5 * beta ** 2 * mej * speed_of_light ** 2
comoving_volume = (4 / 3) * np.pi * ejecta_radius ** 3
gamma = 1 / np.sqrt(1 - beta ** 2)
t0_comoving = 1.3
tsigma_comoving = 0.11
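    # The loop below integrates the ejecta dynamics with a simple forward-Euler
    # scheme: derivatives evaluated on the previous step advance gamma, radius,
    # comoving volume and internal energy to the current step.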
for i in range(len(time)):
beta = np.sqrt(1 - 1 / gamma ** 2)
doppler_factor_temp = 1 / (gamma * (1 - beta))
if i > 0:
dt = time[i] - time[i - 1]
gamma = gamma + dgamma_dt * dt
ejecta_radius = ejecta_radius + drdt * dt
comoving_volume = comoving_volume + dcomoving_volume_dt * dt
internal_energy = internal_energy + dinternal_energy_dt * dt
swept_mass = (4 / 3) * np.pi * ejecta_radius ** 3 * n_ism * proton_mass
comoving_pressure = internal_energy / (3 * comoving_volume)
comoving_time = doppler_factor_temp * time[i]
comoving_dvdt = 4 * np.pi * ejecta_radius ** 2 * beta * speed_of_light
rad_denom = (1 / 2) - (1 / 3.141592654) * np.arctan((comoving_time - t0_comoving) / tsigma_comoving)
comoving_radiative_luminosity = (4 * 10 ** 49 * (mej / (2 * 10 ** 33) * 10 ** 2) * rad_denom ** 1.3)
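        # Smoothed broken power law for the radioactive heating of the ejecta; the
        # constants (t0 = 1.3 s, sigma = 0.11 s, exponent 1.3) appear to follow the
        # Korobkin et al. (2012) parameterisation.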
tau_temp = kappa * (mej / comoving_volume) * (ejecta_radius / gamma)
if tau_temp <= 1:
comoving_emitted_luminosity = (internal_energy * speed_of_light) / (ejecta_radius / gamma)
comoving_temp_temperature = (internal_energy / (radiation_constant * comoving_volume)) ** (1./4.)
else:
comoving_emitted_luminosity = (internal_energy * speed_of_light) / (tau_temp * ejecta_radius / gamma)
comoving_temp_temperature = (internal_energy / (radiation_constant * comoving_volume * tau_temp)) ** (1./4.)
emitted_luminosity = comoving_emitted_luminosity * doppler_factor_temp ** 2
vej = ((1 / gamma) ** 2 + 1) ** 0.5 * speed_of_light
if use_gamma_ray_opacity:
kappa_gamma = kwargs["kappa_gamma"]
prefactor = 3 * kappa_gamma * mej / (4 * np.pi * vej**2)
thermalisation_efficiency = 1 - np.exp(-prefactor * time[i] ** -2)
else:
thermalisation_efficiency = kwargs["thermalisation_efficiency"]
drdt = (beta * speed_of_light) / (1 - beta)
dswept_mass_dt = 4 * np.pi * ejecta_radius ** 2 * n_ism * proton_mass * drdt
dedt = thermalisation_efficiency * mag_lum[
i] + doppler_factor_temp ** 2 * comoving_radiative_luminosity - doppler_factor_temp ** 2 * comoving_emitted_luminosity
comoving_dinternal_energydt = thermalisation_efficiency * doppler_factor_temp ** (-2) * mag_lum[
i] + comoving_radiative_luminosity - comoving_emitted_luminosity - comoving_pressure * comoving_dvdt
dcomoving_volume_dt = comoving_dvdt * doppler_factor_temp
dinternal_energy_dt = comoving_dinternal_energydt * doppler_factor_temp
dgamma_dt = (dedt - gamma * doppler_factor_temp * comoving_dinternal_energydt - (
gamma ** 2 - 1) * speed_of_light ** 2 * dswept_mass_dt) / (
mej * speed_of_light ** 2 + internal_energy + 2 * gamma * swept_mass * speed_of_light ** 2)
lorentz_factor.append(gamma)
lbol_ejecta.append(comoving_emitted_luminosity)
lbol_rest.append(emitted_luminosity)
comoving_temperature.append(comoving_temp_temperature)
radius.append(ejecta_radius)
tau.append(tau_temp)
doppler_factor.append(doppler_factor_temp)
teff.append(thermalisation_efficiency)
lorentz_factor = np.array(lorentz_factor)
v0 = ((1/lorentz_factor)**2 + 1)**0.5 * speed_of_light
bolometric_luminosity = np.array(lbol_rest)
radius = np.array(radius)
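    # Optionally suppress the luminosity to account for energy lost to
    # electron-positron pair cascades in the nebula (a Metzger & Piro
    # (2014)-style correction).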
if pair_cascade_switch:
tlife_t = (0.6/(1 - ejecta_albedo))*(pair_cascade_fraction/0.1)**0.5 * (mag_lum/1.0e45)**0.5 \
* (v0/(0.3*speed_of_light))**(0.5) * (time/86400)**(-0.5)
bolometric_luminosity = bolometric_luminosity / (1.0 + tlife_t)
comoving_temperature = (bolometric_luminosity / (4.0 * np.pi * np.array(radius) ** (2.0) * sigma_sb)) ** (0.25)
dynamics_output = namedtuple('dynamics_output', ['lorentz_factor', 'bolometric_luminosity', 'comoving_temperature',
'radius', 'doppler_factor', 'tau', 'time', 'kinetic_energy',
'erad_total', 'thermalisation_efficiency'])
dynamics_output.lorentz_factor = lorentz_factor
dynamics_output.bolometric_luminosity = bolometric_luminosity
dynamics_output.comoving_temperature = np.array(comoving_temperature)
dynamics_output.radius = radius
dynamics_output.doppler_factor = np.array(doppler_factor)
dynamics_output.tau = tau
dynamics_output.time = time
dynamics_output.kinetic_energy = (lorentz_factor - 1)*mej*speed_of_light**2
dynamics_output.erad_total = np.trapz(bolometric_luminosity, x=time)
dynamics_output.thermalisation_efficiency = teff
return dynamics_output
def _comoving_blackbody_to_flux_density(dl, frequency, radius, temperature, doppler_factor):
"""
:param dl: luminosity distance in cm
:param frequency: frequency to calculate in Hz - Must be same length as time array or a single number
:param radius: ejecta radius in cm
:param temperature: comoving temperature in K
:param doppler_factor: doppler_factor
:return: flux_density
"""
## adding units back in to ensure dimensions are correct
frequency = frequency * uu.Hz
radius = radius * uu.cm
dl = dl * uu.cm
temperature = temperature * uu.K
planck = cc.h.cgs
speed_of_light = cc.c.cgs
boltzmann_constant = cc.k_B.cgs
num = 2 * np.pi * planck * frequency ** 3 * radius ** 2
denom = dl ** 2 * speed_of_light ** 2 * doppler_factor ** 2
frac = 1. / (np.exp((planck * frequency) / (boltzmann_constant * temperature * doppler_factor)) - 1)
flux_density = num / denom * frac
return flux_density
def _comoving_blackbody_to_luminosity(frequency, radius, temperature, doppler_factor):
"""
:param frequency: frequency to calculate in Hz - Must be same length as time array or a single number
:param radius: ejecta radius in cm
:param temperature: comoving temperature in K
:param doppler_factor: doppler_factor
:return: luminosity
"""
## adding units back in to ensure dimensions are correct
frequency = frequency * uu.Hz
radius = radius * uu.cm
temperature = temperature * uu.K
planck = cc.h.cgs
speed_of_light = cc.c.cgs
boltzmann_constant = cc.k_B.cgs
num = 8 * np.pi ** 2 * planck * frequency ** 4 * radius ** 2
denom = speed_of_light ** 2 * doppler_factor ** 2
frac = 1. / (np.exp((planck * frequency) / (boltzmann_constant * temperature * doppler_factor)) - 1)
luminosity = num / denom * frac
return luminosity
@citation_wrapper('https://ui.adsabs.harvard.edu/abs/2013ApJ...776L..40Y/abstract')
def basic_mergernova(time, redshift, mej, beta, ejecta_radius, kappa, n_ism, p0, logbp,
mass_ns, theta_pb, thermalisation_efficiency, **kwargs):
"""
:param time: time in observer frame in days
:param redshift: redshift
:param mej: ejecta mass in solar units
:param beta: initial ejecta velocity
:param ejecta_radius: initial ejecta radius
:param kappa: opacity
:param n_ism: ism number density
:param p0: initial spin period in seconds
    :param logbp: log10 polar magnetic field strength in Gauss
:param mass_ns: mass of neutron star in solar masses
:param theta_pb: angle between spin and magnetic field axes
:param thermalisation_efficiency: magnetar thermalisation efficiency
:param kwargs: Additional parameters
:param pair_cascade_switch: whether to account for pair cascade losses, default is False
:param output_format: whether to output flux density or AB magnitude
:param frequency: (frequency to calculate - Must be same length as time array or a single number)
:return: flux density or AB magnitude
"""
pair_cascade_switch = kwargs.get('pair_cascade_switch', False)
frequency = kwargs['frequency']
time_temp = np.geomspace(1e-4, 1e8, 1000, endpoint=True)
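    # Dense log-spaced grid of source-frame times (1e-4 s to ~3 yr) on which the
    # dynamics are solved; observer-frame times are interpolated onto it below.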
dl = cosmo.luminosity_distance(redshift).cgs.value
bp = 10**logbp
magnetar_luminosity = basic_magnetar(time=time_temp, p0=p0, bp=bp, mass_ns=mass_ns, theta_pb=theta_pb)
output = _ejecta_dynamics_and_interaction(time=time_temp, mej=mej,
beta=beta, ejecta_radius=ejecta_radius,
kappa=kappa, n_ism=n_ism, magnetar_luminosity=magnetar_luminosity,
thermalisation_efficiency=thermalisation_efficiency,
pair_cascade_switch=pair_cascade_switch,
use_gamma_ray_opacity=False, **kwargs)
temp_func = interp1d(time_temp, y=output.comoving_temperature)
rad_func = interp1d(time_temp, y=output.radius)
d_func = interp1d(time_temp, y=output.doppler_factor)
# convert to source frame time and frequency
time = time * day_to_s
frequency, time = calc_kcorrected_properties(frequency=frequency, redshift=redshift, time=time)
temp = temp_func(time)
rad = rad_func(time)
df = d_func(time)
flux_density = _comoving_blackbody_to_flux_density(dl=dl, frequency=frequency, radius=rad, temperature=temp,
doppler_factor=df)
if kwargs['output_format'] == 'flux_density':
return flux_density.to(uu.mJy).value
elif kwargs['output_format'] == 'magnitude':
return flux_density.to(uu.ABmag).value
@citation_wrapper('Sarin et al. in prep.')
def general_mergernova(time, redshift, mej, beta, ejecta_radius, kappa, n_ism, l0, tau_sd, nn,
thermalisation_efficiency, **kwargs):
"""
:param time: time in observer frame in days
:param redshift: redshift
:param mej: ejecta mass in solar units
:param beta: initial ejecta velocity
:param ejecta_radius: initial ejecta radius
:param kappa: opacity
:param n_ism: ism number density
:param l0: initial magnetar X-ray luminosity
:param tau_sd: magnetar spin down damping timescale
:param nn: braking index
:param thermalisation_efficiency: magnetar thermalisation efficiency
:param kwargs: Additional parameters
:param pair_cascade_switch: whether to account for pair cascade losses, default is True
    :param ejecta_albedo: ejecta albedo; default is 0.5
:param pair_cascade_fraction: fraction of magnetar luminosity lost to pair cascades; default is 0.05
:param output_format: whether to output flux density or AB magnitude
:param frequency: (frequency to calculate - Must be same length as time array or a single number)
:return: flux density or AB magnitude
"""
frequency = kwargs['frequency']
pair_cascade_switch = kwargs.get('pair_cascade_switch', True)
time_temp = np.geomspace(1e-4, 1e8, 1000, endpoint=True)
dl = cosmo.luminosity_distance(redshift).cgs.value
magnetar_luminosity = magnetar_only(time=time_temp, l0=l0, tau=tau_sd, nn=nn)
output = _ejecta_dynamics_and_interaction(time=time_temp, mej=mej,
beta=beta, ejecta_radius=ejecta_radius,
kappa=kappa, n_ism=n_ism, magnetar_luminosity=magnetar_luminosity,
thermalisation_efficiency=thermalisation_efficiency,
pair_cascade_switch=pair_cascade_switch,
use_gamma_ray_opacity=False, **kwargs)
temp_func = interp1d(time_temp, y=output.comoving_temperature)
rad_func = interp1d(time_temp, y=output.radius)
d_func = interp1d(time_temp, y=output.doppler_factor)
# convert to source frame time and frequency
time = time * day_to_s
frequency, time = calc_kcorrected_properties(frequency=frequency, redshift=redshift, time=time)
temp = temp_func(time)
rad = rad_func(time)
df = d_func(time)
flux_density = _comoving_blackbody_to_flux_density(dl=dl, frequency=frequency, radius=rad, temperature=temp,
doppler_factor=df)
if kwargs['output_format'] == 'flux_density':
return flux_density.to(uu.mJy).value
elif kwargs['output_format'] == 'magnitude':
return flux_density.to(uu.ABmag).value
@citation_wrapper('Sarin et al. in prep.')
def general_mergernova_thermalisation(time, redshift, mej, beta, ejecta_radius, kappa, n_ism, l0, tau_sd, nn,
kappa_gamma, **kwargs):
"""
:param time: time in observer frame in days
:param redshift: redshift
:param mej: ejecta mass in solar units
:param beta: initial ejecta velocity
:param ejecta_radius: initial ejecta radius
:param kappa: opacity
:param n_ism: ism number density
:param l0: initial magnetar X-ray luminosity
:param tau_sd: magnetar spin down damping timescale
:param nn: braking index
:param kappa_gamma: gamma-ray opacity used to calculate magnetar thermalisation efficiency
:param kwargs: Additional parameters
:param pair_cascade_switch: whether to account for pair cascade losses, default is True
    :param ejecta_albedo: ejecta albedo; default is 0.5
:param pair_cascade_fraction: fraction of magnetar luminosity lost to pair cascades; default is 0.05
:param output_format: whether to output flux density or AB magnitude
:param frequency: (frequency to calculate - Must be same length as time array or a single number)
:return: flux density or AB magnitude
"""
frequency = kwargs['frequency']
pair_cascade_switch = kwargs.get('pair_cascade_switch', True)
time_temp = np.geomspace(1e-4, 1e8, 1000, endpoint=True)
dl = cosmo.luminosity_distance(redshift).cgs.value
magnetar_luminosity = magnetar_only(time=time_temp, l0=l0, tau=tau_sd, nn=nn)
output = _ejecta_dynamics_and_interaction(time=time_temp, mej=mej,
beta=beta, ejecta_radius=ejecta_radius,
kappa=kappa, n_ism=n_ism, magnetar_luminosity=magnetar_luminosity,
kappa_gamma=kappa_gamma, pair_cascade_switch=pair_cascade_switch,
use_gamma_ray_opacity=True, **kwargs)
temp_func = interp1d(time_temp, y=output.comoving_temperature)
rad_func = interp1d(time_temp, y=output.radius)
d_func = interp1d(time_temp, y=output.doppler_factor)
# convert to source frame time and frequency
time = time * day_to_s
frequency, time = calc_kcorrected_properties(frequency=frequency, redshift=redshift, time=time)
temp = temp_func(time)
rad = rad_func(time)
df = d_func(time)
flux_density = _comoving_blackbody_to_flux_density(dl=dl, frequency=frequency, radius=rad, temperature=temp,
doppler_factor=df)
if kwargs['output_format'] == 'flux_density':
return flux_density.to(uu.mJy).value
elif kwargs['output_format'] == 'magnitude':
return flux_density.to(uu.ABmag).value
@citation_wrapper('Sarin et al. in prep.')
def general_mergernova_evolution(time, redshift, mej, beta, ejecta_radius, kappa, n_ism, logbint,
logbext, p0, chi0, radius, logmoi, kappa_gamma, **kwargs):
"""
:param time: time in observer frame in days
:param redshift: redshift
:param mej: ejecta mass in solar units
:param beta: initial ejecta velocity
:param ejecta_radius: initial ejecta radius
:param kappa: opacity
:param n_ism: ism number density
:param logbint: log10 internal magnetic field in G
:param logbext: log10 external magnetic field in G
:param p0: spin period in s
:param chi0: initial inclination angle
:param radius: radius of NS in KM
:param logmoi: log10 moment of inertia of NS
:param kappa_gamma: gamma-ray opacity used to calculate magnetar thermalisation efficiency
:param kwargs: Additional parameters
:param pair_cascade_switch: whether to account for pair cascade losses, default is True
    :param ejecta_albedo: ejecta albedo; default is 0.5
:param pair_cascade_fraction: fraction of magnetar luminosity lost to pair cascades; default is 0.05
:param output_format: whether to output flux density or AB magnitude
:param frequency: (frequency to calculate - Must be same length as time array or a single number)
:return: flux density or AB magnitude
"""
frequency = kwargs['frequency']
pair_cascade_switch = kwargs.get('pair_cascade_switch', True)
time_temp = np.geomspace(1e-4, 1e8, 1000, endpoint=True)
dl = cosmo.luminosity_distance(redshift).cgs.value
bint = 10 ** logbint
bext = 10 ** logbext
radius = radius * km_cgs
moi = 10 ** logmoi
output = _evolving_gw_and_em_magnetar(time=time_temp, bint=bint, bext=bext, p0=p0, chi0=chi0, radius=radius, moi=moi)
magnetar_luminosity = output.Edot_d
output = _ejecta_dynamics_and_interaction(time=time_temp, mej=mej,
beta=beta, ejecta_radius=ejecta_radius,
kappa=kappa, n_ism=n_ism, magnetar_luminosity=magnetar_luminosity,
kappa_gamma=kappa_gamma, pair_cascade_switch=pair_cascade_switch,
use_gamma_ray_opacity=True, **kwargs)
temp_func = interp1d(time_temp, y=output.comoving_temperature)
rad_func = interp1d(time_temp, y=output.radius)
d_func = interp1d(time_temp, y=output.doppler_factor)
# convert to source frame time and frequency
time = time * day_to_s
frequency, time = calc_kcorrected_properties(frequency=frequency, redshift=redshift, time=time)
temp = temp_func(time)
rad = rad_func(time)
df = d_func(time)
flux_density = _comoving_blackbody_to_flux_density(dl=dl, frequency=frequency, radius=rad, temperature=temp,
doppler_factor=df)
if kwargs['output_format'] == 'flux_density':
return flux_density.to(uu.mJy).value
elif kwargs['output_format'] == 'magnitude':
return flux_density.to(uu.ABmag).value
def _trapped_magnetar_lum(time, mej, beta, ejecta_radius, kappa, n_ism, l0, tau_sd, nn, thermalisation_efficiency,
**kwargs):
"""
:param time: time in source frame
:param mej: ejecta mass in solar units
:param beta: initial ejecta velocity
:param ejecta_radius: initial ejecta radius
:param kappa: opacity
:param n_ism: ism number density
:param l0: initial magnetar X-ray luminosity
:param tau_sd: magnetar spin down damping timescale
:param nn: braking index
:param thermalisation_efficiency: magnetar thermalisation efficiency
:param kwargs: 'output_format' - whether to output flux density or AB magnitude
:param kwargs: 'frequency' in Hertz to evaluate the mergernova emission - use a typical X-ray frequency
:return: luminosity
"""
time_temp = np.geomspace(1e-4, 1e8, 1000, endpoint=True)
magnetar_luminosity = magnetar_only(time=time_temp, l0=l0, tau=tau_sd, nn=nn)
output = _ejecta_dynamics_and_interaction(time=time_temp, mej=mej,
beta=beta, ejecta_radius=ejecta_radius,
kappa=kappa, n_ism=n_ism, magnetar_luminosity=magnetar_luminosity,
thermalisation_efficiency=thermalisation_efficiency,
pair_cascade_switch=False, use_gamma_ray_opacity=False)
temp_func = interp1d(time_temp, y=output.comoving_temperature)
rad_func = interp1d(time_temp, y=output.radius)
d_func = interp1d(time_temp, y=output.doppler_factor)
tau_func = interp1d(time_temp, y=output.tau)
temp = temp_func(time)
rad = rad_func(time)
df = d_func(time)
optical_depth = tau_func(time)
frequency = kwargs['frequency']
trapped_ejecta_lum = _comoving_blackbody_to_luminosity(frequency=frequency, radius=rad,
temperature=temp, doppler_factor=df)
lsd = magnetar_only(time, l0=l0, tau=tau_sd, nn=nn)
    # Assumed combination: spin-down luminosity attenuated by the ejecta optical
    # depth, plus the trapped, reprocessed thermal emission.
    lum = np.exp(-optical_depth) * lsd + trapped_ejecta_lum
    return lum
import enum
import struct
import numpy as np
import logging
logger = logging.getLogger(__name__)
class ViolationOrigin(enum.Enum):
# If the violation is caused by local vector outside the safe zone.
SafeZone = 0
# If the violation is caused by local vector outside the domain.
Domain = 1
# Faulty safe zone violations indicates the node detected violation that indicates a problem with the local constraints.
# In that case, the coordinator should perform full sync to update the reference point, thresholds, and local constraints.
FaultySafeZone = 2
class MessageType(enum.Enum):
Violation = 0 # violation_origin (FaultySafe, Domain, or FaultySafeZone), local_vector
GetLocalVector = 1 # the last constraint version that the coordinator sent to this node
LocalVectorInfo = 2 # constraint version of the node, local_vector
Sync = 3 # Common: global_vector, node_slack, l_thresh, u_thresh. AutoMon: global_vector, node_slack, l_thresh, u_thresh, neighborhood_size, dc_type, dc_argument, g_func_grad_at_x0, h_func_grad_at_x0
LazySync = 4 # node_slack
DataUpdate = 5 # Single data point
# Message header format: (message_type:unsigned long, node_idx:long, payload_len:unsigned long)
messages_header_format = struct.Struct('! L l L')
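# '!' selects network (big-endian) byte order; with it, 'L' packs a 4-byte
# unsigned int and 'l' a 4-byte signed int, so every header is exactly 12 bytes.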
# This function is called only by the coordinator which can get multiple messages at once (violations and local vectors) during simulation.
def message_to_message_list(messages: bytes):
message_list = []
unique_message_type = None
message = messages
while len(message) > 0:
message_type, node_idx, payload_len = parse_message_header(message)
payload = message[messages_header_format.size: messages_header_format.size + payload_len]
# All messages must be of the same type
if unique_message_type is None:
unique_message_type = message_type # Initialize unique_message_type with the first message in the list
else:
assert message_type == unique_message_type
message_list.append((node_idx, payload))
message = message[messages_header_format.size + payload_len:]
return unique_message_type, message_list
def prepare_message_header(message_type: MessageType, node_idx: int, payload_len: int) -> bytes:
# Do not print log message for DataUpdate messages as it floods the log
if message_type != MessageType.DataUpdate:
logging.debug("Sending message type: " + str(message_type) + " node index: " + str(node_idx) + " payload_len: " + str(payload_len))
header = (message_type.value, node_idx, payload_len)
return messages_header_format.pack(*header)
def parse_message_header(message: bytes):
message_type, node_idx, payload_len = messages_header_format.unpack(message[:messages_header_format.size])
message_type = MessageType(message_type)
# Do not print log message for DataUpdate messages as it floods the log
if message_type != MessageType.DataUpdate:
logging.debug("Received message type: " + str(message_type) + " node index: " + str(node_idx) + " payload_len: " + str(payload_len))
return message_type, node_idx, payload_len
def prepare_message_violation(node_idx: int, constraint_version: int, violation_origin: ViolationOrigin, local_vector: np.ndarray) -> bytes:
payload = (constraint_version, violation_origin.value, *local_vector)
messages_payload_format = struct.Struct('! L L %dd' % local_vector.shape[0])
message = prepare_message_header(MessageType.Violation, node_idx, messages_payload_format.size) + messages_payload_format.pack(*payload)
return message
def prepare_message_get_local_vector(node_idx: int, constraint_version: int) -> bytes:
payload = (constraint_version,)
messages_payload_format = struct.Struct('! L')
message = prepare_message_header(MessageType.GetLocalVector, node_idx, messages_payload_format.size) + messages_payload_format.pack(*payload)
return message
def prepare_message_local_vector_info(node_idx: int, constraint_version: int, local_vector: np.ndarray) -> bytes:
payload = (constraint_version, *local_vector)
messages_payload_format = struct.Struct('! L %dd' % local_vector.shape[0])
message = prepare_message_header(MessageType.LocalVectorInfo, node_idx, messages_payload_format.size) + messages_payload_format.pack(*payload)
return message
def prepare_message_sync(node_idx: int, constraint_version: int, global_vector: np.ndarray, node_slack: np.ndarray, l_thresh: float, u_thresh: float) -> bytes:
x_len = global_vector.shape[0]
payload = (constraint_version, *global_vector, *node_slack, l_thresh, u_thresh)
messages_payload_format = struct.Struct('! L %dd %dd d d' % (x_len, x_len))
message = prepare_message_header(MessageType.Sync, node_idx, messages_payload_format.size) + messages_payload_format.pack(*payload)
return message
def prepare_message_lazy_sync(node_idx: int, constraint_version: int, node_slack: np.ndarray) -> bytes:
payload = (constraint_version, *node_slack)
messages_payload_format = struct.Struct('! L %dd' % node_slack.shape[0])
message = prepare_message_header(MessageType.LazySync, node_idx, messages_payload_format.size) + messages_payload_format.pack(*payload)
return message
def prepare_message_data_update(node_idx: int, data_point: np.ndarray) -> bytes:
payload = (*data_point,)
messages_payload_format = struct.Struct('! %dd' % data_point.shape[0])
message = prepare_message_header(MessageType.DataUpdate, node_idx, messages_payload_format.size) + messages_payload_format.pack(*payload)
return message
def parse_message_violation(payload: bytes, d: int):
messages_payload_format = struct.Struct('! L L %dd' % d)
unpacked_payload = messages_payload_format.unpack(payload)
constraint_version = unpacked_payload[0]
violation_origin = ViolationOrigin(unpacked_payload[1])
local_vector = np.array(unpacked_payload[2:])
return constraint_version, violation_origin, local_vector
def parse_message_get_local_vector(payload: bytes):
messages_payload_format = struct.Struct('! L')
unpacked_payload = messages_payload_format.unpack(payload)
constraint_version = unpacked_payload[0]
return constraint_version
def parse_message_local_vector_info(payload: bytes, d: int):
messages_payload_format = struct.Struct('! L %dd' % d)
unpacked_payload = messages_payload_format.unpack(payload)
constraint_version = unpacked_payload[0]
local_vector = np.array(unpacked_payload[1:])
return constraint_version, local_vector
def parse_message_sync(payload: bytes, d: int):
messages_payload_format = struct.Struct('! L %dd %dd d d' % (d, d))
unpacked_payload = messages_payload_format.unpack(payload)
constraint_version = unpacked_payload[0]
global_vector = np.array(unpacked_payload[1:d + 1])
node_slack = np.array(unpacked_payload[d + 1:2 * d + 1])
l_thresh, u_thresh = unpacked_payload[-2], unpacked_payload[-1]
return constraint_version, global_vector, node_slack, l_thresh, u_thresh
def parse_message_lazy_sync(payload: bytes, d: int):
messages_payload_format = struct.Struct('! L %dd' % d)
unpacked_payload = messages_payload_format.unpack(payload)
constraint_version = unpacked_payload[0]
node_slack = np.array(unpacked_payload[1:])
return constraint_version, node_slack
def parse_message_data_update(payload: bytes, data_point_len: int):
messages_payload_format = struct.Struct('! %dd' % data_point_len)
unpacked_payload = messages_payload_format.unpack(payload)
data_point = np.array(unpacked_payload)
return data_point
if __name__ == "__main__":
input_vector = np.random.randn(4)
node_slack_org = np.random.randn(4)
# MessageType.Violation
message = prepare_message_violation(7, 35, ViolationOrigin.Domain, input_vector)
message_type, node_idx, _ = parse_message_header(message)
payload = message[messages_header_format.size:]
assert node_idx == 7 and message_type == MessageType.Violation
constraint_version, violation_origin, local_vector = parse_message_violation(payload, input_vector.shape[0])
assert constraint_version == 35
assert np.allclose(input_vector, local_vector)
assert violation_origin == ViolationOrigin.Domain
# MessageType.GetLocalVector
constraint_version_org = 10
message = prepare_message_get_local_vector(5, constraint_version_org)
message_type, node_idx, _ = parse_message_header(message)
payload = message[messages_header_format.size:]
assert node_idx == 5 and message_type == MessageType.GetLocalVector
constraint_version = parse_message_get_local_vector(payload)
assert constraint_version == constraint_version_org
# MessageType.LocalVectorInfo
constraint_version_org = 20
message = prepare_message_local_vector_info(1, constraint_version_org, input_vector)
message_type, node_idx, _ = parse_message_header(message)
payload = message[messages_header_format.size:]
assert node_idx == 1 and message_type == MessageType.LocalVectorInfo
constraint_version, local_vector = parse_message_local_vector_info(payload, input_vector.shape[0])
assert np.allclose(input_vector, local_vector)
assert constraint_version == constraint_version_org
# MessageType.Sync
l_thresh_org, u_thresh_org = 6.47, 18.46782
message = prepare_message_sync(9, 106, input_vector, node_slack_org, l_thresh_org, u_thresh_org)
message_type, node_idx, _ = parse_message_header(message)
payload = message[messages_header_format.size:]
assert node_idx == 9 and message_type == MessageType.Sync
constraint_version, global_vector, node_slack, l_thresh, u_thresh = parse_message_sync(payload, input_vector.shape[0])
assert constraint_version == 106
assert np.allclose(input_vector, global_vector)
assert np.allclose(node_slack_org, node_slack)
assert np.allclose(l_thresh_org, l_thresh)
assert np.allclose(u_thresh_org, u_thresh)
# MessageType.LazySync
message = prepare_message_lazy_sync(3, 208, node_slack_org)
message_type, node_idx, _ = parse_message_header(message)
payload = message[messages_header_format.size:]
assert node_idx == 3 and message_type == MessageType.LazySync
constraint_version, node_slack = parse_message_lazy_sync(payload, input_vector.shape[0])
assert constraint_version == 208
assert np.allclose(node_slack_org, node_slack)
# MessageType.DataUpdate
data_point_org = np.random.randn(5)
message = prepare_message_data_update(2, data_point_org)
message_type, node_idx, _ = parse_message_header(message)
payload = message[messages_header_format.size:]
assert node_idx == 2 and message_type == MessageType.DataUpdate
data_point = parse_message_data_update(payload, data_point_org.shape[0])
    assert np.allclose(data_point_org, data_point)
# ===============================================================================
# Copyright 2016 dgketchum
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance
# with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# =================================IMPORTS=======================================
import os
from numpy import linspace, array, insert, ones, divide
from pandas import DataFrame
from ogr import Open
from datetime import datetime
from recharge.time_series_manager import get_etrm_time_series
from recharge.etrm_processes import Processes
# Set start datetime object
SIMULATION_PERIOD = datetime(2000, 1, 1), datetime(2013, 12, 31)
FACTORS = ['Temperature', 'Precipitation', 'Reference ET', 'Total Available Water (TAW)',
'Vegetation Density (NDVI)', 'Soil Ksat']
def round_to_value(number, roundto):
return round(number / roundto) * roundto
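# e.g. round_to_value(0.87, 0.05) -> 0.85 (up to floating-point round-off)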
def get_sensitivity_analysis(extracts, points, statics, initials, pickle=None):
temps = range(-5, 6)
all_pct = [x * 0.1 for x in range(5, 16)]
ndvi_range = linspace(0.9, 1.7, 11)
ndvi_range = array([round_to_value(x, 0.05) for x in ndvi_range])
var_arrs = []
y = 0
for x in range(0, 6):
ones_ = ones((5, 11), dtype=float)
zeros = [x * 0.0 for x in range(5, 16)]
norm_ndvi = array([1.25 for x in zeros])
if y == 0:
arr = insert(ones_, y, temps, axis=0)
arr = insert(arr, 4, norm_ndvi, axis=0)
arr = arr[0:6]
var_arrs.append(arr)
arr = []
elif y == 4:
arr = insert(ones_, 0, zeros, axis=0)
arr = insert(arr, y, ndvi_range, axis=0)
arr = arr[0:6]
var_arrs.append(arr)
print('shape arr: {}'.format(arr.shape))
arr = []
elif y == 5:
arr = insert(ones_, 0, zeros, axis=0)
arr = insert(arr, 4, norm_ndvi, axis=0)
arr = arr[0:5]
arr = insert(arr, y, all_pct, axis=0)
var_arrs.append(arr)
arr = []
else:
arr = insert(ones_, 0, zeros, axis=0)
arr = insert(arr, y, all_pct, axis=0)
            arr = insert(arr, 4, norm_ndvi, axis=0)
import os
import warnings
import re
import datetime
import json
import random
from collections import Counter
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import scipy.stats as stats
from utils import import_data, save_figure
from ipm_paper_part_1 import details_temporal_evolution, plot_one_group, calculate_confidence_interval
warnings.filterwarnings("ignore")
def import_crowdtangle_group_data():
posts_wi_date_df = import_data(folder="crowdtangle_group",
file_name="posts_self_declared_wi_date.csv")
print('\nThere are {} Facebook pages with the last strike date visible on the screenshot.'.\
format(posts_wi_date_df.account_id.nunique()))
posts_wo_date_df = import_data(folder="crowdtangle_group",
file_name="posts_self_declared_wo_date.csv")
list_wo_name = [
'Artists For A Free World',
'<NAME>',
'<NAME>',
'<NAME>',
'New Independence Network',
'Pruden POD & Post',
'PR Conservative',
'Org of Conservative Trump Americans',
'Con Ciencia Indigena',
'Republican Party of Lafayette County',
'The Daily Perspective Podcast',
'Freedom Memes',
'White Dragon Society',
'Robertson Family Values'
]
posts_wo_date_df = posts_wo_date_df[~posts_wo_date_df['account_name'].isin(list_wo_name)]
print('There are {} Facebook pages without the last strike date visible on the screenshot.'.\
format(posts_wo_date_df.account_id.nunique()))
posts_df = pd.concat([posts_wi_date_df, posts_wo_date_df])
posts_df['date'] = pd.to_datetime(posts_df['date'])
return posts_df
def save_figure_4(posts_df, pages_df):
account_name = '<NAME>'
account_id = posts_df[posts_df['account_name']==account_name].account_id.unique()[0]
reduced_distribution_date = pages_df[pages_df['page_name'] == account_name]['date'].values[0]
plt.figure(figsize=(10, 4))
ax = plt.subplot()
plt.title("Engagement metrics for one 'reduced distribution' page ('" + account_name + "')", size="x-large")
plot_one_group(ax, posts_df, account_id, fake_news_dates=[])
xticks = [np.datetime64('2019-01-01'), np.datetime64('2019-03-01'), np.datetime64('2019-05-01'),
np.datetime64('2019-07-01'), np.datetime64('2019-09-01'), np.datetime64('2019-11-01'),
np.datetime64('2020-01-01'), np.datetime64('2020-03-01'),
np.datetime64('2020-07-01'), np.datetime64('2020-09-01'), np.datetime64('2020-11-01'),
np.datetime64(reduced_distribution_date)
]
plt.xticks(xticks, rotation=30, ha='right')
plt.gca().get_xticklabels()[-1].set_color('red')
plt.axvline(x=np.datetime64(reduced_distribution_date),
color='C3', linestyle='--', linewidth=2)
plt.legend()
plt.tight_layout()
save_figure('figure_4', folder='ip&m', dpi=100)
def save_supplementary_figure_2(posts_df, pages_df):
accounts_to_plot = [
'<NAME>',
'Normals Are Pissed',
'Botanica Health',
'<NAME>',
'The PROOF Blog',
"The Rational Capitalist",
'<NAME>',
'POVnow',
"Tell The USA to DUMP Trump",
'Florida Boys TV'
]
fig = plt.figure(figsize=(10, 12))
for idx in range(len(accounts_to_plot)):
ax = plt.subplot(5, 2, idx + 1)
plt.title(accounts_to_plot[idx])
account_id = posts_df[posts_df['account_name']==accounts_to_plot[idx]].account_id.unique()[0]
reduced_distribution_date = pages_df[pages_df['page_name'] == accounts_to_plot[idx]]['date'].values[0]
plot_one_group(ax, posts_df, account_id, fake_news_dates=[])
xticks = [np.datetime64('2019-01-01'), np.datetime64('2019-05-01'), np.datetime64('2019-09-01'),
np.datetime64('2020-01-01'), np.datetime64('2020-05-01'), np.datetime64('2020-09-01'),
np.datetime64(reduced_distribution_date)]
plt.xticks(xticks, rotation=30, ha='right')
plt.gca().get_xticklabels()[-1].set_color('red')
plt.axvline(x=np.datetime64(reduced_distribution_date),
color='C3', linestyle='--', linewidth=2)
if idx == 0:
plt.legend()
plt.tight_layout()
save_figure('supplementary_figure_3', folder='ip&m', dpi=100)
def compute_periods_average(posts_df, pages_df, period_length=7):
before_date = {
'reaction': [],
'share': [],
'comment': [],
'post_nb': []
}
after_date = {
'reaction': [],
'share': [],
'comment': [],
'post_nb': []
}
for account_id in posts_df['account_id'].unique():
account_name = posts_df[posts_df['account_id']==account_id].account_name.unique()[0]
reduced_distribution_date = pages_df[pages_df['page_name'] == account_name]['date'].values[0]
reduced_distribution_date = datetime.datetime.strptime(str(reduced_distribution_date)[:10], '%Y-%m-%d')
posts_df_group = posts_df[posts_df["account_id"] == account_id]
posts_df_group_before = posts_df_group[
(posts_df_group['date'] > reduced_distribution_date - datetime.timedelta(days=period_length)) &
(posts_df_group['date'] < reduced_distribution_date)
]
posts_df_group_after = posts_df_group[
(posts_df_group['date'] > reduced_distribution_date) &
(posts_df_group['date'] < reduced_distribution_date + datetime.timedelta(days=period_length))
]
if (len(posts_df_group_before) > 0) & (len(posts_df_group_after) > 0):
before_date['reaction'].append(np.mean(posts_df_group_before['reaction']))
after_date['reaction'].append(np.mean(posts_df_group_after['reaction']))
before_date['share'].append(np.mean(posts_df_group_before['share']))
after_date['share'].append(np.mean(posts_df_group_after['share']))
before_date['comment'].append(np.mean(posts_df_group_before['comment']))
after_date['comment'].append(np.mean(posts_df_group_after['comment']))
before_date['post_nb'].append(len(posts_df_group_before)/period_length)
after_date['post_nb'].append(len(posts_df_group_after)/period_length)
return before_date, after_date
def print_before_after_statistics(before_date, after_date):
w, p = stats.wilcoxon(before_date['reaction'], after_date['reaction'])
print('\nWilcoxon test between the reactions: w =', w, ', p =', p)
w, p = stats.wilcoxon(before_date['share'], after_date['share'])
print('\nWilcoxon test between the shares: w =', w, ', p =', p)
w, p = stats.wilcoxon(before_date['comment'], after_date['comment'])
print('\nWilcoxon test between the comments: w =', w, ', p =', p)
w, p = stats.wilcoxon(before_date['post_nb'], after_date['post_nb'])
print('\nWilcoxon test between the number of posts: w =', w, ', p =', p)
    print('Mean number of daily posts before and after the date:',
          np.mean(before_date['post_nb']), np.mean(after_date['post_nb']))
def details_bar_plot(ax):
ax.tick_params(axis='x', which='both', length=0)
ax.grid(axis="y", zorder=0)
plt.locator_params(axis='y', nbins=8)
ax.spines['right'].set_visible(False)
ax.spines['left'].set_visible(False)
ax.spines['top'].set_visible(False)
def plot_before_after_bars(before_date, after_date, period_length):
fig = plt.figure(figsize=(10, 4))
gs = fig.add_gridspec(1, 4)
## ENGAGEMENT METRICS
ax = fig.add_subplot(gs[0, 0:3])
width = .25
labels = ['Reactions', 'Shares', 'Comments']
x = np.arange(len(labels))
# Plot the bars
plt.bar(x - width/2, [np.mean(before_date['reaction']), np.mean(before_date['share']),
np.mean(before_date['comment'])],
width, label="{} days before the reduced distribution start date".format(period_length),
color='paleturquoise', edgecolor=[.2, .2, .2], zorder=3)
plt.bar(x + width/2, [np.mean(after_date['reaction']), np.mean(after_date['share']),
np.mean(after_date['comment'])],
width, label="{} days after the reduced distribution start date".format(period_length),
color='navajowhite', edgecolor=[.2, .2, .2], zorder=3)
# Add the error bars
idx = 0
for metric in ['reaction', 'share', 'comment']:
low, high = calculate_confidence_interval(before_date[metric])
plt.errorbar(idx - width/2, np.mean(before_date[metric]),
yerr=[[np.mean(before_date[metric]) - low], [high - np.mean(before_date[metric])]],
color=[.2, .2, .2], zorder=4, linestyle='')
low, high = calculate_confidence_interval(after_date[metric])
plt.errorbar(idx + width/2, np.mean(after_date[metric]),
yerr=[[np.mean(after_date[metric]) - low], [high - np.mean(after_date[metric])]],
color=[.2, .2, .2], zorder=4, linestyle='')
idx += 1
# details
plt.legend(framealpha=1)
plt.title("Averages over {} 'reduced distribution' accounts"\
.format(len(before_date['reaction'])), loc='right', size="x-large")
plt.xticks(x, labels, fontsize='large',)
plt.xlim([-.5, 2.5])
details_bar_plot(ax)
## NUMBER OF POSTS
ax = fig.add_subplot(gs[0, 3])
plt.bar(-width/2, np.mean(before_date['post_nb']),
width, label="{} days before the reduced distribution start date".format(period_length),
color='paleturquoise', edgecolor=[.2, .2, .2], zorder=3)
plt.bar(width/2, np.mean(after_date['post_nb']),
width, label="{} days after the reduced distribution start date".format(period_length),
color='navajowhite', edgecolor=[.2, .2, .2], zorder=3)
low, high = calculate_confidence_interval(before_date['post_nb'])
plt.errorbar(-width/2, np.mean(before_date['post_nb']),
yerr=[[np.mean(before_date['post_nb']) - low], [high - np.mean(before_date['post_nb'])]],
color=[.2, .2, .2], zorder=4, linestyle='')
low, high = calculate_confidence_interval(after_date['post_nb'])
plt.errorbar(width/2, np.mean(after_date['post_nb']),
yerr=[[np.mean(after_date['post_nb']) - low], [high - np.mean(after_date['post_nb'])]],
color=[.2, .2, .2], zorder=4, linestyle='')
plt.xticks([0], ['Number of daily posts'], fontsize='large',)
plt.xlim([-.5, .5])
details_bar_plot(ax)
plt.tight_layout()
if period_length == 7:
save_figure('figure_5', folder='ip&m', dpi=100)
else:
save_figure('supplementary_figure_4', folder='ip&m', dpi=100)
def save_figure_5(posts_df, pages_df, period_length=7):
before_date, after_date = compute_periods_average(posts_df, pages_df, period_length=period_length)
print_before_after_statistics(before_date, after_date)
plot_before_after_bars(before_date, after_date, period_length=period_length)
def print_statistics_screenshot_posts(screenshot_df):
print('\n\nOVERPERFORMING SCORE STATISTICS')
print('The average score is {}.'.format(np.nanmean(screenshot_df['score'].values)))
print('Only {} posts have a positive score.'.format(len(screenshot_df[screenshot_df['score'] > 0])))
w, p = stats.wilcoxon(screenshot_df['score'].values, alternative="less")
print('Wilcoxon test of the overperfoming scores against zero: w =', w, ', p =', p)
def save_all_groups_figures(posts_df, pages_df):
group_index = 0
for account_id in posts_df['account_id'].unique():
if group_index % 10 == 0:
plt.figure(figsize=(12, 14))
ax = plt.subplot(5, 2, group_index % 10 + 1)
account_name = posts_df[posts_df['account_id']==account_id].account_name.unique()[0]
plt.title(account_name, size="x-large")
reduced_distribution_date = pages_df[pages_df['page_name'] == account_name]['date'].values[0]
plot_one_group(ax, posts_df, account_id, fake_news_dates=[])
        xticks = [np.datetime64('2019-01-01'), np.datetime64('2019-05-01'), np.datetime64('2019-09-01'),
                  np.datetime64('2020-01-01'), np.datetime64('2020-05-01'), np.datetime64('2020-09-01'),
                  np.datetime64(reduced_distribution_date)]
# -*- coding: utf-8 -*-
"""
Created on Wed Apr 17 16:20:02 2019
@author: lmassoul032513
"""
import pytest
import pandas as pd
import numpy as np
from copy import deepcopy
from sklearn.utils import check_random_state
from aikit.datasets.datasets import load_dataset, DatasetEnum
from aikit.enums import TypeOfProblem, TypeOfVariables, StepCategories
from aikit.ml_machine.ml_machine import (
AutoMlConfig,
JobConfig,
RandomModelGenerator,
AutoMlResultReader,
MlJobManager,
MlJobRunner,
_create_all_combinations,
random_list_generator,
)
from aikit.ml_machine.model_graph import convert_graph_to_code
from aikit.model_definition import sklearn_model_from_param
from aikit.ml_machine.ml_machine_guider import AutoMlModelGuider
from aikit.ml_machine.data_persister import FolderDataPersister
from aikit.tools.graph_helper import get_terminal_nodes
def loader(num_only=False):
if num_only:
np.random.seed(123)
dfX = pd.DataFrame(np.random.randn(100, 10), columns=["COL_%d" % d for d in range(10)])
y = 1 * (np.random.randn(100) > 0)
return dfX, y
else:
dfX, y, _, _, _ = load_dataset(DatasetEnum.titanic)
return dfX, y
def get_automl_config(num_only):
dfX, y = loader(num_only)
auto_ml_config = AutoMlConfig(dfX, y)
auto_ml_config.guess_everything()
return dfX, y, auto_ml_config
def test_AutoMlConfig_raise_if_wrong_nb_oberservations():
dfX = pd.DataFrame({"a": [0, 1, 2, 3, 4, 5], "b": [0, 10, 20, 30, 40, 50]})
y = np.array([0, 0, 0, 1, 1, 1])
auto_ml_config = AutoMlConfig(dfX, y[0:3])
with pytest.raises(ValueError):
auto_ml_config.guess_everything() # raise because y doesn't have the correct number of observations
def test_AutoMlConfig_raise_multioutput():
dfX = pd.DataFrame({"a": [0, 1, 2, 3, 4, 5], "b": [0, 10, 20, 30, 40, 50]})
y = np.array([0, 0, 0, 1, 1, 1])
    y2d = np.concatenate((y[:, np.newaxis], y[:, np.newaxis]), axis=1)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue May 12 20:10:00 2020
@author: thorius
"""
import os
import sys
from queue import Queue
import numpy as np
import logging
import pyaudio
import time
from tflite_runtime.interpreter import Interpreter
import collections
from scipy import signal
class StreamControl():
def __init__(self,
path_model = './model/E2E_1stage_v8/tflite_non_stream',
name_model = 'non_stream.tflite',
sample_rate = 16000,
chunk_duration = 0.25,
feed_duration = 1.0,
channels = 1,
threshold = 0.5,
time_out = 8):
self.logger = logging.getLogger()
self.logger.setLevel(logging.INFO)
argumentList = sys.argv
self.path_model = path_model
self.name_model = name_model
self.sample_rate = sample_rate
#chunk_duration -- time in second of a chunk
if(len(argumentList) == 2):
self.chunk_duration = float(sys.argv[1])
self.threshold = threshold
elif(len(argumentList) == 3):
self.chunk_duration = float(sys.argv[1])
self.threshold = float(sys.argv[2])
else:
self.chunk_duration = chunk_duration
self.threshold = threshold
        # time in seconds before the keyword listener times out
        self.time_out = time_out
# chanels of audio
self.channels = channels
#feed_duration -- time in second of the input to model
self.feed_duration = feed_duration
self.device_sample_rate = 44100
self.chunk_samples = int(self.device_sample_rate * self.chunk_duration)
self.feed_samples = int(self.device_sample_rate * self.feed_duration)
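        # Capture at the device's native 44.1 kHz; the audio is presumably
        # resampled to self.sample_rate (16 kHz) via scipy.signal before inference.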
# Queue to communiate between the audio callback and main thread
self.q = Queue()
# Data buffer for the input wavform
self.data = np.zeros(self.feed_samples, dtype='int16')
with open(os.path.join(path_model, 'labels.txt'), 'r') as fd:
labels_txt = fd.read()
self.labels = labels_txt.split()
        # feed_duration must be an integer multiple of chunk_duration
        assert float(self.feed_duration / self.chunk_duration) == float(int(self.feed_duration / self.chunk_duration))
self.stream = True
def run(self):
# callback method
def audio_callback(in_data, frame_count, time_info, status):
            data0 = np.frombuffer(in_data, dtype='int16')
import sys,os
import torch
import numpy as np
#sys.path.append(os.path.abspath(__file__).replace('evaluation/eval_ds_utils.py',''))
#import config
#from config import args
#import constants
def cmup_evaluation_act_wise(results,imgpaths,action_names):
actions = []
action_results = []
for imgpath in imgpaths:
actions.append(os.path.basename(imgpath).split('-')[0].split('_')[1])
for action_name in action_names:
action_idx = np.where(np.array(actions)==action_name)[0]
action_results.append('{:.2f}'.format(results[action_idx].mean()))
return action_results
def h36m_evaluation_act_wise(results,imgpaths,action_names):
actions = []
action_results = []
for imgpath in imgpaths:
actions.append(os.path.basename(imgpath).split('.jpg')[0].split('_')[1].split(' ')[0])
for action_name in action_names:
action_idx = np.where(np.array(actions)==action_name)[0]
action_results.append('{:.2f}'.format(results[action_idx].mean()))
return action_results
def pp_evaluation_cam_wise(results,imgpaths):
cam_ids = []
cam_results = []
for imgpath in imgpaths:
cam_ids.append(int(os.path.basename(imgpath).split('_')[1]))
#22 is missing
for camid in list(range(21))+list(range(22,31)):
cam_idx = np.where(np.array(cam_ids)==camid)[0]
cam_results.append('{:.2f}'.format(results[cam_idx].mean()))
return cam_results
def determ_worst_best(VIS_IDX,top_n=2):
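    # Collect the indices of the top_n worst and top_n best samples (by error)
    # for each dataset, for qualitative inspection of predictions.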
sellected_ids, sellected_errors = [], []
if VIS_IDX is not None:
for ds_type in VIS_IDX:
for error, idx in zip(VIS_IDX[ds_type]['error'], VIS_IDX[ds_type]['idx']):
if torch.is_tensor(error):
error, idx = error.cpu().numpy(), idx.cpu().numpy()
worst_id = np.argsort(error)[-top_n:]
sellected_ids.append(idx[worst_id]); sellected_errors.append(error[worst_id])
best_id = np.argsort(error)[:top_n]
sellected_ids.append(idx[best_id]); sellected_errors.append(error[best_id])
if len(sellected_ids)>0 and len(sellected_errors)>0:
sellected_ids = np.concatenate(sellected_ids).tolist()
sellected_errors = np.concatenate(sellected_errors).tolist()
else:
sellected_ids, sellected_errors = [0], [0]
return sellected_ids, sellected_errors
def reorganize_vis_info(vis_ids, vis_errors, org_imgpath, new_imgpath):
vis_ids_new, vis_errors_new = [], []
org_imgpath_dict = {}
for vis_id, vis_error in zip(vis_ids, vis_errors):
imgpath = org_imgpath[vis_id]
if imgpath not in org_imgpath_dict:
org_imgpath_dict[imgpath] = []
org_imgpath_dict[imgpath].append(vis_error)
    new_imgpath = np.array(new_imgpath)
#!/usr/bin/env python
"""Carry out standard MBAR analysis on 1D REMC simulation output.
The exchange variable is assumed to be temperature.
"""
import argparse
import numpy as np
from scipy import interpolate
from origamipy import conditions
from origamipy import biases
from origamipy import files
from origamipy import outputs
from origamipy import decorrelate
from origamipy import mbar_wrapper
from origamipy import utility
def main():
args = parse_args()
system_file = files.JSONStructInpFile(args.system_filename)
staple_lengths = utility.calc_staple_lengths(system_file)
staple_types = utility.calc_num_staple_types(system_file)
num_scaffold_domains = utility.calc_num_scaffold_domains(system_file)
inp_filebase = f'{args.outs_dir}/{args.filebase}'
fileformatter = construct_fileformatter()
all_conditions = conditions.construct_remc_conditions(
args.temps, args.staple_m, fileformatter, staple_lengths)
sim_collections = []
for rep in range(args.reps):
rep_sim_collections = outputs.create_sim_collections(
inp_filebase, all_conditions, rep)
sim_collections.append(rep_sim_collections)
decor_outs = decorrelate.DecorrelatedOutputs(
sim_collections, all_conditions=all_conditions,
rep_conditions_equal=True)
decor_outs.read_decors_from_files()
mbarw = mbar_wrapper.MBARWrapper(decor_outs)
mbarw.perform_mbar()
# Calculate expectations and LFEs for simulations temperatures
all_se_tags = decor_outs.all_series_tags
if args.tags == None:
se_tags = all_se_tags
else:
se_tags = args.tags
out_filebase = f'{args.analysis_dir}/{args.filebase}'
mbarw.calc_all_expectations(out_filebase, all_se_tags, all_conditions)
lfes_filebase = f'{out_filebase}_lfes'
mbarw.calc_all_1d_lfes(lfes_filebase, se_tags, all_conditions)
# Estimate melting temperature
guess_temp = estimate_halfway_temp(
mbarw, args.tag, all_conditions, args.assembled_op)
if args.guess_temp is not None:
guess_temp = args.guess_temp
print('Guess temperature: {:.3f} K'.format(
np.around(guess_temp, decimals=3)))
conds = conditions.SimConditions(
{'temp': guess_temp,
'staple_m': args.staple_m,
'bias': biases.NoBias()},
fileformatter, staple_lengths)
bias = biases.NoBias()
melting_temp = est_melting_temp_and_barrier(
mbarw, fileformatter, staple_lengths, conds, bias, guess_temp,
args.staple_m)
conds = conditions.SimConditions(
{'temp': melting_temp,
'staple_m': args.staple_m,
'bias': biases.NoBias()},
fileformatter, staple_lengths)
# Calculate expectations and LFEs for melting temperature
exps_filebase = f'{out_filebase}-melting'
lfes_filebase = f'{out_filebase}_lfes-melting'
mbarw.calc_all_1d_lfes(lfes_filebase, se_tags, [conds])
mbarw.calc_all_expectations(exps_filebase, all_se_tags, [conds])
# Calculate expectations along OP slices
mbarws = []
all_decor_outs = []
sampled_ops = []
for i in range(1, args.assembled_op + 1):
sim_collections = []
for rep in range(args.reps):
rep_sim_collections = outputs.create_sim_collections(
inp_filebase, all_conditions, rep)
sim_collections.append(rep_sim_collections)
decor_outs = decorrelate.DecorrelatedOutputs(
sim_collections, all_conditions=all_conditions,
rep_conditions_equal=True)
decor_outs.read_decors_from_files(data_only=True)
filtered_count = decor_outs.filter_collections(args.tag, i)
if filtered_count == 0:
continue
sampled_ops.append(i)
all_decor_outs.append(decor_outs)
mbarw = mbar_wrapper.MBARWrapper(decor_outs)
mbarw.perform_mbar()
mbarws.append(mbarw)
all_tags = []
for i in range(1, staple_types + 1):
all_tags.append(f'staples{i}')
all_tags.append(f'staplestates{i}')
for i in range(num_scaffold_domains):
all_tags.append(f'domainstate{i}')
aves, stds = calc_reduced_expectations(
conds, mbarws, all_decor_outs, all_tags)
    # Prepend the sampled order parameter values as the first row; aves is assumed
    # to be (num ops x num tags), hence the transpose.
    aves = np.concatenate([[sampled_ops], np.array(aves).T])
# Modules normally used
import numpy as np
import cv2
def CreateFrame(img, krad, color): # Frame (trick to avoid out-of-bounds access)
height, width, depth = img.shape
if color == "white":
frm = np.ones((height + krad * 2, width + krad * 2, depth))
else:
frm = np.zeros((height + krad * 2, width + krad * 2, depth))
frm[krad:-krad, krad:-krad] = img
return frm
def MedianFilter(img, size):
radius = int(size / 2) # Effect Radius
# Frame (trick to avoid out-of-bounds access)
framed = CreateFrame(img, radius, "black")
shape = img.shape
height = shape[0]
width = shape[1]
fil = np.zeros(img.shape)
#Apply Filter
for i in range (0, height):
for j in range (0, width):
sub = framed[i:i+size, j:j+size]
b = np.median(sub[:, :, 0])
g = np.median(sub[:, :, 1])
r = np.median(sub[:, :, 2])
fil[i, j] = (b, g, r)
return fil
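# Note: for uint8 images, cv2.medianBlur(img, size) (size odd) is an optimized
# equivalent of the pure-NumPy MedianFilter above.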
def BilateralFilter(img, tex, sigma_s, sigma_r):
r = int(np.ceil(3 * sigma_s))
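    # Spatial kernel radius: truncate the Gaussian support at 3 sigma.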
# Image padding
if img.ndim == 3:
img_height = img.shape[0]
img_width = img.shape[1]
I = np.pad(img, ((r, r), (r, r), (0, 0)), 'symmetric').astype(np.float32)
elif img.ndim == 2:
img_height = img.shape[0]
img_width = img.shape[1]
I = np.pad(img, ((r, r), (r, r)), 'symmetric').astype(np.float32)
# Check texture size and do padding
if tex.ndim == 3:
tex_height = tex.shape[0]
tex_width = tex.shape[1]
if tex_height != img_height or tex_width != img_width:
print('The guidance image is not aligned with input image!')
return img
T = np.pad(tex, ((r, r), (r, r), (0, 0)), 'symmetric').astype(np.int32)
elif tex.ndim == 2:
tex_height = tex.shape[0]
tex_width = tex.shape[1]
if tex_height != img_height or tex_width != img_width:
print('The guidance image is not aligned with input image!')
return img
T = np.pad(tex, ((r, r), (r, r)), 'symmetric').astype(np.int32)
# Pre-compute
output = np.zeros_like(img)
scaleFactor_s = 1 / (2 * sigma_s * sigma_s)
scaleFactor_r = 1 / (2 * sigma_r * sigma_r)
# A lookup table for range kernel
LUT = np.exp(-np.arange(256) * np.arange(256) * scaleFactor_r)
# Generate a spatial Gaussian function
    # Second meshgrid argument assumed symmetric with the first (square kernel)
    x, y = np.meshgrid(np.arange(2 * r + 1), np.arange(2 * r + 1))
""" Test the tracking utilities """
# Standard lib
import unittest
# 3rd party
import numpy as np
# Our own imports
from deep_hipsc_tracking import tracking
class TestCorrelateArrays(unittest.TestCase):
def test_matches_identical_centered_complete(self):
src_img = np.random.rand(129, 130)
src_count = np.ones_like(src_img, dtype=np.uint32)
match_img = src_img[1:-1, 1:-1]
search_step = 1 # 1x1 search window
blend_x = -1 # Complete matching in x
blend_y = -1 # Complete matching in y
# Correct start window
yst, yed = 1, 128
xst, xed = 1, 129
rows, cols = match_img.shape
comp_rows, comp_cols = src_img.shape
assert (yed - yst) == rows
assert (xed - xst) == cols
res_y, res_x = tracking.correlate_arrays(
src_img, src_count, match_img,
search_step, blend_x, blend_y,
xst, xed, yst, yed,
rows, cols, comp_rows, comp_cols)
# best_yst1, best_yed1, best_yst2, best_yed2
exp_y = (1, 128, 0, 127)
# best_xst1, best_xed1, best_xst2, best_xed2
exp_x = (1, 129, 0, 128)
self.assertEqual(res_y, exp_y)
self.assertEqual(res_x, exp_x)
np.testing.assert_almost_equal(src_img[res_y[0]:res_y[1], res_x[0]:res_x[1]],
match_img[res_y[2]:res_y[3], res_x[2]:res_x[3]])
def test_matches_identical_centered_partial(self):
src_img = np.random.rand(129, 130)
src_count = np.ones_like(src_img, dtype=np.uint32)
match_img = src_img[1:-1, 1:-1]
search_step = 1
blend_x = 5 # Only match the top 5 columns in x
blend_y = 5 # Only match the top 5 rows in y
# Correct start window
yst, yed = 1, 128
xst, xed = 1, 129
rows, cols = match_img.shape
comp_rows, comp_cols = src_img.shape
assert (yed - yst) == rows
assert (xed - xst) == cols
res_y, res_x = tracking.correlate_arrays(
src_img, src_count, match_img,
search_step, blend_x, blend_y,
xst, xed, yst, yed,
rows, cols, comp_rows, comp_cols)
# best_yst1, best_yed1, best_yst2, best_yed2
exp_y = (1, 128, 0, 127)
# best_xst1, best_xed1, best_xst2, best_xed2
exp_x = (1, 129, 0, 128)
self.assertEqual(res_y, exp_y)
self.assertEqual(res_x, exp_x)
np.testing.assert_almost_equal(src_img[res_y[0]:res_y[1], res_x[0]:res_x[1]],
match_img[res_y[2]:res_y[3], res_x[2]:res_x[3]])
def test_matches_identical_uncentered_partial(self):
src_img = np.random.rand(129, 130)
src_count = np.ones_like(src_img, dtype=np.uint32)
match_img = src_img[2:-2, 2:-2]
search_step = 1
blend_x = 5
blend_y = 5
        # Incorrect start window (deliberately offset by one pixel)
yst, yed = 1, 126
xst, xed = 1, 127
rows, cols = match_img.shape
comp_rows, comp_cols = src_img.shape
assert (yed - yst) == rows
assert (xed - xst) == cols
res_y, res_x = tracking.correlate_arrays(
src_img, src_count, match_img,
search_step, blend_x, blend_y,
xst, xed, yst, yed,
rows, cols, comp_rows, comp_cols)
# best_yst1, best_yed1, best_yst2, best_yed2
exp_y = (2, 127, 0, 125)
# best_xst1, best_xed1, best_xst2, best_xed2
exp_x = (2, 128, 0, 126)
self.assertEqual(res_y, exp_y)
self.assertEqual(res_x, exp_x)
np.testing.assert_almost_equal(src_img[res_y[0]:res_y[1], res_x[0]:res_x[1]],
match_img[res_y[2]:res_y[3], res_x[2]:res_x[3]])
def test_matches_identical_left_right(self):
src_img = np.random.rand(256, 130)
src_count = np.zeros_like(src_img, dtype=np.uint32)
# Only take the first half of the source image
src_count[0:128, 0:130] = 1
# Match with the second half of the image
match_img = src_img[120:247, 1:-1]
search_step = 10
blend_x = 10
blend_y = 10
yst, yed = 116, 243
xst, xed = 0, 128
rows, cols = match_img.shape
comp_rows, comp_cols = src_img.shape
assert (yed - yst) == rows
assert (xed - xst) == cols
res_y, res_x = tracking.correlate_arrays(
src_img, src_count, match_img,
search_step, blend_x, blend_y,
xst, xed, yst, yed,
rows, cols, comp_rows, comp_cols)
# best_yst1, best_yed1, best_yst2, best_yed2
exp_y = (120, 247, 0, 127)
# best_xst1, best_xed1, best_xst2, best_xed2
exp_x = (1, 129, 0, 128)
self.assertEqual(res_y, exp_y)
self.assertEqual(res_x, exp_x)
class TestSmoothVelocity(unittest.TestCase):
def test_linear_smooth_with_resample(self):
tt = np.linspace(1, 10, 10)
xx = np.linspace(-10, 10, tt.shape[0])
yy = | np.linspace(-5, 15, tt.shape[0]) | numpy.linspace |
import logging
logging.basicConfig(level=logging.INFO)
import argparse
import torch
import numpy as np
import cv2
import time
from scipy import spatial
from pytorch3d import transforms
import json
import open3d as o3d
def parseargs():
parser = argparse.ArgumentParser()
parser.add_argument('-r', '--resolution', default=800, type=int, help="View window resolution")
parser.add_argument('-l', '--trajectory-length', default=500, type=int, help="Generated trajectory key-frame count.")
parser.add_argument('-m', '--trajectory-momentum', default=0.95, type=float, help="Generated trajectory movement momentum.")
parser.add_argument('--trajectory-acceleration', default=0.8, type=float,
help="Generated trajectory acceleration standard deviation.")
parser.add_argument('--point-distance', default=10, type=float,
help="Point to camera distance standard deviation.")
parser.add_argument('--points-per-camera', default=15, type=float,
help="Point generated per camera.")
parser.add_argument('--point-visibility-falloff', default=8, type=float,
help="How distant cameras see the point.")
parser.add_argument('--view-noise', default=0.02, type=float,
help="Point location error in radians. (points projected to camera)")
parser.add_argument('--cam-noise', default=100, type=float,
help="Initial camera position noise.")
parser.add_argument('--point-noise', default=100, type=float,
help="Initial point position noise.")
parser.add_argument('--world-shift', default=10, type=float,
help="Initial shift in world position estimate.")
parser.add_argument('--learning-rate', default=5, type=float,
help="ADAM learning rate.")
parser.add_argument('--cam-dist-weight', default=0.25, type=float,
help="Mutual camera distance cost weight.")
parser.add_argument('--cam-dir-weight', default=0.25, type=float,
help="Horizontal camera alinment cost weight.")
parser.add_argument('--json-recording', help='Read viewing directions from json file.')
args = parser.parse_args()
return args
def range_coeff(x, distance, range):
return np.exp(-np.log2(distance / x) ** 2 / range)
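# Illustration (added): the coefficient peaks at x == distance, where
# log2(distance / x) == 0, and decays symmetrically in log2-space:
#
#     range_coeff(8.0, 8.0, 1.0)                                  # -> 1.0
#     range_coeff(4.0, 8.0, 1.0) == range_coeff(16.0, 8.0, 1.0)   # True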
def generate_cam_trajectory(length=30, momentum=0.94, acc_sdev=0.6, acc2_sdev=0.0):
positions = | np.zeros([length, 3], dtype=np.float32) | numpy.zeros |
import numpy
import Shadow
import Shadow.ShadowLibExtensions as sd
import sys
import inspect
import os
try:
import matplotlib.pylab as plt
from matplotlib import collections
except ImportError:
print(sys.exc_info()[1])
pass
#TODO: remove ShadowToolsPrivate
import Shadow.ShadowToolsPrivate as stp
from Shadow.ShadowToolsPrivate import Histo1_Ticket as Histo1_Ticket
from Shadow.ShadowToolsPrivate import plotxy_Ticket as plotxy_Ticket
#A2EV = 50676.89919462
codata_h = numpy.array(6.62606957e-34)
codata_ec = numpy.array(1.602176565e-19)
codata_c = numpy.array(299792458.0)
A2EV = 2.0*numpy.pi/(codata_h*codata_c/codata_ec*1e2)
#TODO: delete. Implemented for beam object
def getshonecol(beam,col):
'''
Extract a column from a shadow file (eg. begin.dat) or a Shadow.Beam instance.
The column are numbered in the fortran convention, i.e. starting from 1.
It returns a numpy.array filled with the values of the chosen column.
    Inputs:
beam : str instance with the name of the shadow file to be loaded. OR
Shadow.Beam initialized instance.
col : int for the chosen columns.
Outputs:
    1-D numpy.array with one entry per ray.
Error:
if an error occurs an ArgsError is raised.
Possible choice for col are:
1 X spatial coordinate [user's unit]
2 Y spatial coordinate [user's unit]
3 Z spatial coordinate [user's unit]
4 Xp direction or divergence [rads]
5 Yp direction or divergence [rads]
6 Zp direction or divergence [rads]
7 X component of the electromagnetic vector (s-polariz)
8 Y component of the electromagnetic vector (s-polariz)
9 Z component of the electromagnetic vector (s-polariz)
10 Lost ray flag
11 Energy [eV]
12 Ray index
13 Optical path length
14 Phase (s-polarization)
15 Phase (p-polarization)
16 X component of the electromagnetic vector (p-polariz)
17 Y component of the electromagnetic vector (p-polariz)
18 Z component of the electromagnetic vector (p-polariz)
19 Wavelength [A]
20 R= SQRT(X^2+Y^2+Z^2)
21 angle from Y axis
    22   the magnitude of the electromagnetic vector
23 |E|^2 (total intensity)
24 total intensity for s-polarization
25 total intensity for p-polarization
26 K = 2 pi / lambda [A^-1]
27 K = 2 pi / lambda * col4 [A^-1]
28 K = 2 pi / lambda * col5 [A^-1]
29 K = 2 pi / lambda * col6 [A^-1]
30 S0-stokes = |Es|^2 + |Ep|^2
31 S1-stokes = |Es|^2 - |Ep|^2
32 S2-stokes = 2 |Es| |Ep| cos(phase_s-phase_p)
33 S3-stokes = 2 |Es| |Ep| sin(phase_s-phase_p)
'''
try: stp.getshonecol_CheckArg(beam,col)
except stp.ArgsError as e: raise e
col=col-1
if isinstance(beam,sd.Beam):
ray = beam.rays
else:
bm = sd.Beam()
bm.load(beam)
ray = bm.rays
if col>=0 and col<18 and col!=10: column = ray[:,col]
if col==10: column = ray[:,col]/A2EV
if col==18: column = 2*numpy.pi*1.0e8/ray[:,10]
if col==19: column = numpy.sqrt(ray[:,0]*ray[:,0]+ray[:,1]*ray[:,1]+ray[:,2]*ray[:,2])
if col==20: column = numpy.arccos(ray[:,4])
if col==21: column = numpy.sqrt(numpy.sum(numpy.array([ ray[:,i]*ray[:,i] for i in [6,7,8,15,16,17] ]),axis=0))
if col==22: column = numpy.sum(numpy.array([ ray[:,i]*ray[:,i] for i in [6,7,8,15,16,17] ]),axis=0)
if col==23: column = numpy.sum(numpy.array([ ray[:,i]*ray[:,i] for i in [6,7,8] ]),axis=0)
if col==24: column = numpy.sum(numpy.array([ ray[:,i]*ray[:,i] for i in [15,16,17] ]),axis=0)
if col==25: column = ray[:,10]*1.0e8
if col==26: column = ray[:,3]*ray[:,10]*1.0e8
if col==27: column = ray[:,4]*ray[:,10]*1.0e8
if col==28: column = ray[:,5]*ray[:,10]*1.0e8
if col==29:
E2s = numpy.sum(numpy.array([ ray[:,i]*ray[:,i] for i in [6,7,8] ]),axis=0)
E2p = numpy.sum(numpy.array([ ray[:,i]*ray[:,i] for i in [15,16,17] ]),axis=0)
column = E2p+E2s
if col==30:
E2s = numpy.sum(numpy.array([ ray[:,i]*ray[:,i] for i in [6,7,8] ]),axis=0)
E2p = numpy.sum(numpy.array([ ray[:,i]*ray[:,i] for i in [15,16,17] ]),axis=0)
column = E2p-E2s
if col==31:
E2s = numpy.sum(numpy.array([ ray[:,i]*ray[:,i] for i in [6,7,8] ]),axis=0)
E2p = numpy.sum(numpy.array([ ray[:,i]*ray[:,i] for i in [15,16,17] ]),axis=0)
Cos = numpy.cos(ray[:,13]-ray[:,14])
column = 2*E2s*E2p*Cos
if col==32:
E2s = numpy.sum(numpy.array([ ray[:,i]*ray[:,i] for i in [6,7,8] ]),axis=0)
E2p = numpy.sum(numpy.array([ ray[:,i]*ray[:,i] for i in [15,16,17] ]),axis=0)
Sin = numpy.sin(ray[:,13]-ray[:,14])
column = 2*E2s*E2p*Sin
return column
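# Hedged usage sketch for getshonecol (added; "begin.dat" is a placeholder
# SHADOW binary file name):
#
#     xp = getshonecol("begin.dat", 4)            # X' divergence of every ray
#     intensity = getshonecol("begin.dat", 23)    # |E|^2 total intensity per ray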
#TODO: delete. Implemented for beam object
def getshcol(beam,col):
'''
Extract multiple columns from a shadow file (eg.'begin.dat') or a Shadow.Beam instance.
The column are numbered in the fortran convention, i.e. starting from 1.
It returns a numpy.array filled with the values of the chosen column.
    Inputs:
beam : str instance with the name of the shadow file to be loaded. OR
Shadow.Beam initialized instance.
col : tuple or list instance of int with the number of columns chosen.
Outputs:
    2-D numpy.array with dimension R x (number of rays), where R is the total number of columns chosen.
Error:
if an error occurs an ArgsError is raised.
Possible choice for col are:
1 X spatial coordinate [user's unit]
2 Y spatial coordinate [user's unit]
3 Z spatial coordinate [user's unit]
4 X' direction or divergence [rads]
5 Y' direction or divergence [rads]
6 Z' direction or divergence [rads]
7 X component of the electromagnetic vector (s-polariz)
8 Y component of the electromagnetic vector (s-polariz)
9 Z component of the electromagnetic vector (s-polariz)
10 Lost ray flag
11 Energy [eV]
12 Ray index
13 Optical path length
14 Phase (s-polarization)
15 Phase (p-polarization)
16 X component of the electromagnetic vector (p-polariz)
17 Y component of the electromagnetic vector (p-polariz)
18 Z component of the electromagnetic vector (p-polariz)
19 Wavelength [A]
20 R= SQRT(X^2+Y^2+Z^2)
21 angle from Y axis
    22   the magnitude of the electromagnetic vector
23 |E|^2 (total intensity)
24 total intensity for s-polarization
25 total intensity for p-polarization
26 K = 2 pi / lambda [A^-1]
27 K = 2 pi / lambda * col4 [A^-1]
28 K = 2 pi / lambda * col5 [A^-1]
29 K = 2 pi / lambda * col6 [A^-1]
30 S0-stokes = |Es|^2 + |Ep|^2
31 S1-stokes = |Es|^2 - |Ep|^2
32 S2-stokes = 2 |Es| |Ep| cos(phase_s-phase_p)
33 S3-stokes = 2 |Es| |Ep| sin(phase_s-phase_p)
'''
try: stp.getshcol_CheckArg(beam,col)
except stp.ArgsError as e: raise e
if isinstance(beam,sd.Beam):
bm = beam
else:
bm = sd.Beam()
bm.load(beam)
ret = []
if isinstance(col, int): return getshonecol(bm,col)
for c in col:
ret.append(getshonecol(bm,c))
return tuple(ret)
def histo1(beam, col, notitle=0, nofwhm=0, bar=0, **kwargs):
"""
Plot the histogram of a column, as calculated by Shadow.Beam.histo1 using matplotlib
    NOTE: This replaces the old histo1, which is still available as histo1_old
:param beam: a Shadow.Beam() instance, or a file name with Shadow binary file
:param col: the Shadow column number (start from 1)
:param notitle: set to 1 to avoid displaying title
:param nofwhm: set to 1 to avoid labeling FWHM value
:param bar: 1=bar plot, 0=line plot
:param kwargs: keywords accepted by Shadow.Beam.histo1()
:return: the dictionary returned by Shadow.beam.histo1() with some keys added.
"""
title = "histo1"
if isinstance(beam,str):
beam1 = sd.Beam()
beam1.load(beam)
title += " - file: "+beam
beam = beam1
tk2 = beam.histo1(col, **kwargs)
h = tk2["histogram"]
bins = tk2["bin_left"]
xrange = tk2["xrange"]
yrange = [0,1.1*numpy.max(h)]
fwhm = tk2["fwhm"]
xtitle = "column %d"%tk2["col"]
ytitle = "counts ("
if tk2["nolost"] == 0:
ytitle += " all rays"
if tk2["nolost"] == 1:
ytitle += " good rays"
if tk2["nolost"] == 2:
ytitle += " lost rays"
if tk2["ref"] == 0:
ytitle += " = weight: number of rays"
else:
if tk2["ref"] == 23:
ytitle += " - weight: intensity"
else:
ytitle += " - weight column: %d"%(tk2["ref"])
ytitle += ")"
if fwhm != None: print ("fwhm = %g" % fwhm)
fig0 = plt.figure()
ax = fig0.add_subplot(111)
ax.set_xlabel(xtitle)
ax.set_ylabel(ytitle)
if notitle != 1: ax.set_title(title)
ax.set_xlim(xrange[0],xrange[1])
ax.set_ylim(yrange[0],yrange[1])
ax.grid(True)
if bar:
l = ax.bar(bins, h, 1.0*(bins[1]-bins[0]),color='blue') #,error_kw=dict(elinewidth=2,ecolor='red'))
else:
l = plt.plot(tk2["bin_path"], tk2["histogram_path"], color='blue') #,error_kw=dict(elinewidth=2,ecolor='red'))
if tk2["fwhm"] != None:
hh = 0.5*numpy.max(tk2["histogram"])
lines = [ [ (tk2["fwhm_coordinates"][0],hh), \
(tk2["fwhm_coordinates"][1],hh) ]]
lc = collections.LineCollection(lines,color='red',linewidths=2)
ax.add_collection(lc)
if nofwhm != 1:
if tk2["fwhm_coordinates"][0] < 0:
shift1 = 0.9
else:
shift1 = 1.0
            ax.annotate('FWHM=%f'%tk2["fwhm"], xy=(shift1*tk2["fwhm_coordinates"][0],1.01*hh))
plt.show()
return tk2
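# Hedged usage sketch for histo1 (added; "star.01" is a placeholder file and
# the keywords are assumed to be accepted by Shadow.Beam.histo1):
#
#     tkt = histo1("star.01", 1, nbins=101, nolost=1, ref=23)
#     print("fwhm:", tkt["fwhm"])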
#TODO: delete. Reimplemented using Shadow.beam.histo1()
def histo1_old(beam,col,xrange=None,yrange=None,nbins=50,nolost=0,ref=0,write=0,title='HISTO1',xtitle=None,ytitle=None,calfwhm=0,noplot=0):
'''
Plot the histogram of a column, simply counting the rays, or weighting with the intensity.
It returns a ShadowTools.Histo1_Ticket which contains the histogram data, and the figure.
    Inputs:
beam : str instance with the name of the shadow file to be loaded, or a Shadow.Beam initialized instance.
col : int for the chosen column.
    Optional Inputs:
xrange : tuple or list of length 2 describing the interval of interest for x, the data read from the chosen column.
yrange : tuple or list of length 2 describing the interval of interest for y, counts or intensity depending on ref.
nbins : number of bins of the histogram.
nolost :
0 All rays
1 Only good rays
2 Only lost rays
ref :
0, None, "no", "NO" or "No": only count the rays
23, "Yes", "YES" or "yes": weight with intensity (look at col=23 |E|^2 total intensity)
other value: use that column as weight
write :
0 don't write any file
1 write the histogram into the file 'HISTO1'.
title : title of the figure, it will appear on top of the window.
xtitle : label for the x axis.
ytitle : label for the y axis.
calfwhm :
0 don't compute the fwhm
1 compute the fwhm
noplot :
0 plot the histogram
1 don't plot the histogram
orientation :
'vertical' x axis for data, y for intensity
'horizontal' y axis for data, x for intensity
plotxy :
0 standalone version
1 to use within plotxy
Outputs:
ShadowTools.Histo1_Ticket instance.
Error:
if an error occurs an ArgsError is raised.
Possible choice for col are:
1 X spatial coordinate [user's unit]
2 Y spatial coordinate [user's unit]
3 Z spatial coordinate [user's unit]
4 X' direction or divergence [rads]
5 Y' direction or divergence [rads]
6 Z' direction or divergence [rads]
7 X component of the electromagnetic vector (s-polariz)
8 Y component of the electromagnetic vector (s-polariz)
9 Z component of the electromagnetic vector (s-polariz)
10 Lost ray flag
11 Energy [eV]
12 Ray index
13 Optical path length
14 Phase (s-polarization)
15 Phase (p-polarization)
16 X component of the electromagnetic vector (p-polariz)
17 Y component of the electromagnetic vector (p-polariz)
18 Z component of the electromagnetic vector (p-polariz)
19 Wavelength [A]
20 R= SQRT(X^2+Y^2+Z^2)
21 angle from Y axis
    22   the magnitude of the electromagnetic vector
23 |E|^2 (total intensity)
24 total intensity for s-polarization
25 total intensity for p-polarization
26 K = 2 pi / lambda [A^-1]
27 K = 2 pi / lambda * col4 [A^-1]
28 K = 2 pi / lambda * col5 [A^-1]
29 K = 2 pi / lambda * col6 [A^-1]
30 S0-stokes = |Es|^2 + |Ep|^2
31 S1-stokes = |Es|^2 - |Ep|^2
32 S2-stokes = 2 |Es| |Ep| cos(phase_s-phase_p)
33 S3-stokes = 2 |Es| |Ep| sin(phase_s-phase_p)
'''
try: stp.Histo1_CheckArg(beam,col,xrange,yrange,nbins,nolost,ref,write,title,xtitle,ytitle,calfwhm,noplot)
except stp.ArgsError as e: raise e
col=col-1
if ref==1: ref = 23
#plot_nicc.ioff()
plt.ioff()
figure = plt.figure()
axHist = figure.add_axes([0.1,0.1,0.8,0.8])
if ytitle!=None:
ytitlesave=ytitle
else:
ytitlesave=None
if ref == None: ref = 0
if ref == "No": ref = 0
if ref == "NO": ref = 0
if ref == "no": ref = 0
if ref == "Yes": ref = 23
if ref == "YES": ref = 23
if ref == "yes": ref = 23
if ref == 1:
print("Shadow.ShadowTools.histo1_old: Warning: weighting with column 1 (X) [not with intensity as may happen in old versions]")
if ref==0:
x, a = getshcol(beam,(col+1,10))
w = numpy.ones(len(x))
else:
x, a, w = getshcol(beam,(col+1,10,ref))
if nolost==0:
t = numpy.where(a!=-3299)
ytitle = 'All rays'
if nolost==1:
t = numpy.where(a==1.0)
ytitle = 'Good rays'
if nolost==2:
t = numpy.where(a!=1.0)
ytitle = 'Lost rays'
if len(t[0])==0:
print ("no rays match the selection, the histogram will not be plotted")
return
if ref==0:
ytitle = 'counts ' + ytitle
h,bins,patches = axHist.hist(x[t],bins=nbins,range=xrange,histtype='step',alpha=0.5)
if yrange==None: yrange = [0.0, numpy.max(h)]
hw=h
if ref>=22:
ytitle = (stp.getLabel(ref-1))[0] + ' ' + ytitle
h,bins = numpy.histogram(x[t],range=xrange,bins=nbins)
hw,bins,patches = axHist.hist(x[t],range=xrange, bins=nbins,histtype='step',alpha=0.5,weights=w[t])
if yrange==None: yrange = [0.0, numpy.max(hw)]
fwhm = None
if calfwhm==1:
fwhm, tf, ti = stp.calcFWHM(hw,bins[1]-bins[0])
axHist.plot([bins[ti],bins[tf+1]],[max(h)*0.5,max(h)*0.5],'x-')
print ("fwhm = %g" % fwhm)
if write==1: stp.Histo1_write(title,bins,h,hw,col,beam,ref-1)
if xtitle==None: xtitle=(stp.getLabel(col))[0]
axHist.set_xlabel(xtitle)
if ytitlesave!=None:
axHist.set_ylabel(ytitlesave)
else:
axHist.set_ylabel(ytitle)
if title!=None: axHist.set_title(title)
if xrange!=None: axHist.set_xlim(xrange)
if yrange!=None: axHist.set_ylim(yrange)
if noplot==0:
plt.show()
ticket = Histo1_Ticket()
ticket.histogram = hw
ticket.bin_center = bins[:-1]+(bins[1]-bins[0])*0.5
ticket.bin_left = bins[:-1]
ticket.figure = figure
ticket.xrange = xrange
ticket.yrange = yrange
ticket.xtitle = xtitle
ticket.ytitle = ytitle
ticket.title = title
ticket.fwhm = fwhm
ticket.intensity = w[t].sum()
return ticket
def plotxy_gnuplot(beam,col_h,col_v,execute=1,ps=0,pdf=0,title="",viewer='okular',**kwargs):
"""
A plotxy implemented for gnuplot.
It uses Shadow.beam.histo2() for calculations.
It creates files for gnuplot (plotxy.gpl and plotxy_*.dat)
It can run gnuplot (system call) and display ps or pdf outputs
:param beam: it can be a SHADOW binary file, an instance of Shadow.Beam() or a dictionary from Shadow.Beam.histo2
:param col_h: the H column for the plot. Irrelevant if beam is a dictionary
:param col_v: the V column for the plot. Irrelevant if beam is a dictionary
:param execute: set to 1 to make a system call to execute gnuplot (default=1)
    :param ps: set to 1 to get postscript output (irrelevant if pdf=1)
    :param pdf: set to 1 for pdf output (takes priority over ps)
:param viewer: set to the ps or pdf viewer (default='okular')
:param kwargs: keywords to be passed to Shadow.beam.histo2()
:return: the dictionary produced by Shadow.beam.histo2 with some keys added
"""
if title == "":
title = "plotxy"
if isinstance(beam,dict):
tkt = beam
col_h = tkt["col_h"]
col_v = tkt["col_v"]
else:
if isinstance(beam,str):
beam1 = sd.Beam()
beam1.load(beam)
title += " - file: "+beam
beam = beam1
tkt = beam.histo2(col_h,col_v,**kwargs)
f = open("plotxy_histtop.dat",'w')
for i in range(tkt["nbins_h"]):
f.write("%12.5f %12.5f \n"%( tkt["bin_h_left"][i], tkt["histogram_h"][i] ))
f.write("%12.5f %12.5f \n"%( tkt["bin_h_right"][i], tkt["histogram_h"][i] ))
f.close()
print("File written to disk: plotxy_histside.dat")
f = open("plotxy_histside.dat",'w')
for i in range(tkt["nbins_v"]):
f.write("%12.5f %12.5f \n"%( tkt["histogram_v"][i], tkt["bin_v_left"][i] ))
f.write("%12.5f %12.5f \n"%( tkt["histogram_v"][i], tkt["bin_v_right"][i] ))
f.close()
print("File written to disk: plotxy_histtop.dat")
f = open("plotxy_grid.dat",'w')
f.write(" # plotxy grid data for plotxy.gpl\n")
f.write(" # Xbin Ybin Weight\n")
for i in range(tkt["nbins_h"]):
for j in range(tkt["nbins_v"]):
f.write("%25.20f %25.20f %25.20f\n"%(tkt["bin_h_center"][i],tkt["bin_v_center"][j], tkt["histogram"][i,j] ))
f.write("\n")
f.close()
print("File written to disk: plotxy_grid.dat")
txt = """
#GnuPlot command file for PLOTXY
#Minimum version: gnuplot 4.2 patchlevel 6
#
{set_terminal}
set multiplot
#
# top histogram
#
set lmargin screen 0.2125
set rmargin screen 0.70
set bmargin screen 0.75
set tmargin screen 0.90
unset xtics
unset x2tics
unset ytics
unset y2tics
unset key
unset xlabel
unset ylabel
unset x2label
unset y2label
set x2tics mirror
set x2label " {title} "
set xrange[ {xrange[0]} : {xrange[1]} ]
set yrange[*:*]
plot "plotxy_histtop.dat" u 1:2 w lines lt -1 notitle
#
# side histogram
#
set lmargin screen 0.10
set rmargin screen 0.2125
set bmargin screen 0.10
set tmargin screen 0.75
unset xtics
unset x2tics
unset ytics
unset y2tics
unset key
unset xlabel
unset ylabel
unset x2label
unset y2label
set ytics
set ylabel "Column {col_v}"
set xrange[*:*]
set yrange[ {yrange[0]} : {yrange[1]} ]
plot "plotxy_histside.dat" u (-$1):2 w lines lt -1 notitle
#
# scattered/contour plot
#
set lmargin screen 0.2125
set rmargin screen 0.70
set bmargin screen 0.10
set tmargin screen 0.75
unset xtics
unset x2tics
unset ytics
unset y2tics
unset key
unset xlabel
unset ylabel
unset x2label
unset y2label
set xlabel "Column {col_h}"
set xrange[ {xrange[0]} : {xrange[1]} ]
set yrange[ {yrange[0]} : {yrange[1]} ]
#
# IF PIXEL UNCOMMENT THIS
#
set pm3d map
set palette gray
splot "./plotxy_grid.dat" u 1:2:3 notitle
#
# info column
#
set obj 10 rect from graph 1.20, graph 1 to graph 1.61, graph 0
set label "{label_id}" at graph 1.21, graph 0.9
set label "{label_good}" at graph 1.21, graph 0.5
set label "TOT = {nrays}" at graph 1.21, graph 0.30
set label "LOST = {lost_rays}" at graph 1.21, graph 0.25
set label "GOOD = {good_rays}" at graph 1.21, graph 0.20
set label "INTENS = {intensity}" at graph 1.21, graph 0.15
set label "{label_weight}" at graph 1.21, graph 0.10
replot
unset multiplot
{set_pause}
"""
    # add keywords to the dictionary to be used in the template
tkt["set_terminal"] = "set terminal x11 size 900,600"
tkt["set_pause"] = "pause -1 'Press <Enter> to end graphic '"
if ps:
tkt["set_terminal"] = "set terminal postscript \n set output 'plotxy.ps' "
tkt["set_pause"] = ""
if pdf:
tkt["set_terminal"] = "set terminal pdf \n set output 'plotxy.pdf' "
tkt["set_pause"] = ""
tkt["title"] = title
tkt["lost_rays"] = tkt["nrays"] - tkt["good_rays"]
tkt["label_id"] = ""
if os.getenv("USER") is None:
pass
else:
tkt["label_id"] += os.getenv("USER")
if os.getenv("HOST") is None:
pass
else:
tkt["label_id"] += "@"+os.getenv("HOST")
if tkt["ref"] == 0:
tkt["label_weight"] = "WEIGHT: RAYS"
else:
if tkt["ref"] == 1 or tkt["ref"] == 23:
tkt["label_weight"] = "WEIGHT: INTENSITY"
else:
tkt["label_weight"] = "WEIGHT: COLUMN %d"%(tkt["ref"])
if tkt["nolost"] == 0:
tkt["label_good"] = "--ALL RAYS"
elif tkt["nolost"] == 1:
tkt["label_good"] = "--GOOD ONLY"
else:
tkt["label_good"] = "--ONLY LOSSES"
txt2 = txt.format_map(tkt)
f = open("plotxy.gpl",'w')
f.write(txt2)
f.close()
print("File written to disk: plotxy.gpl")
if execute:
os.system("gnuplot plotxy.gpl")
if ps:
os.system(viewer+" plotxy.ps")
if pdf:
os.system(viewer+" plotxy.pdf")
return tkt
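# Hedged usage sketch for plotxy_gnuplot (added; needs gnuplot on the PATH,
# "star.01" is a placeholder file):
#
#     tkt = plotxy_gnuplot("star.01", 1, 3, execute=1, pdf=1, viewer="okular")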
def plotxy(beam,col_h,col_v, nofwhm=1, title="", **kwargs):
"""
plotxy implementation using matplotlib.
Calculations are done using Shadow.beam.histo2()
:param beam: it can be a SHADOW binary file, an instance of Shadow.Beam() or a dictionary from Shadow.Beam.histo2
    :param col_h: The column for the H coordinate in the plot (irrelevant if beam is a dictionary)
    :param col_v: The column for the V coordinate in the plot (irrelevant if beam is a dictionary)
    :param nofwhm: set to 0 to label the FWHM value in the plot (default: do not label)
    :param kwargs: keywords passed to Shadow.Beam.histo2
:return: the dictionary returned by Shadow.beam.histo2() with some added keys.
"""
if title == "":
title = "plotxy"
if isinstance(beam,dict):
tkt = beam
col_h = tkt["col_h"]
col_v = tkt["col_v"]
else:
if isinstance(beam,str):
beam1 = sd.Beam()
beam1.load(beam)
title += " - file: "+beam
beam = beam1
tkt = beam.histo2(col_h,col_v,**kwargs)
xtitle = "Column %d"%tkt["col_h"]
ytitle = "Column %d"%tkt["col_v"]
figure = plt.figure(figsize=(12,8),dpi=96)
ratio = 8.0/12.0
rect_scatter = [0.10*ratio, 0.10, 0.65*ratio, 0.65]
rect_histx = [0.10*ratio, 0.77, 0.65*ratio, 0.20]
rect_histy = [0.77*ratio, 0.10, 0.20*ratio, 0.65]
rect_text = [1.00*ratio, 0.10, 1.20*ratio, 0.65]
#
#main plot
#
axScatter = figure.add_axes(rect_scatter)
axScatter.set_xlabel(xtitle)
axScatter.set_ylabel(ytitle)
# axScatter.set_xlim(tkt["xrange"])
# axScatter.set_ylim(tkt["yrange"])
axScatter.axis(xmin=tkt["xrange"][0],xmax=tkt["xrange"][1])
axScatter.axis(ymin=tkt["yrange"][0],ymax=tkt["yrange"][1])
#axScatter.pcolor(tkt["bin_h_edges"], tkt["bin_v_edges"], tkt["histogram"].T)
axScatter.pcolormesh(tkt["bin_h_edges"], tkt["bin_v_edges"], tkt["histogram"].T)
for tt in axScatter.get_xticklabels():
tt.set_size('x-small')
for tt in axScatter.get_yticklabels():
tt.set_size('x-small')
#
#histograms
#
axHistx = figure.add_axes(rect_histx, sharex=axScatter)
axHisty = figure.add_axes(rect_histy, sharey=axScatter)
#for practical purposes, writes the full histogram path
tmp_h_b = []
tmp_h_h = []
for s,t,v in zip(tkt["bin_h_left"],tkt["bin_h_right"],tkt["histogram_h"]):
tmp_h_b.append(s)
tmp_h_h.append(v)
tmp_h_b.append(t)
tmp_h_h.append(v)
tmp_v_b = []
tmp_v_h = []
for s,t,v in zip(tkt["bin_v_left"],tkt["bin_v_right"],tkt["histogram_v"]):
tmp_v_b.append(s)
tmp_v_h.append(v)
tmp_v_b.append(t)
tmp_v_h.append(v)
axHistx.plot(tmp_h_b,tmp_h_h)
axHisty.plot(tmp_v_h,tmp_v_b)
for tl in axHistx.get_xticklabels(): tl.set_visible(False)
for tl in axHisty.get_yticklabels(): tl.set_visible(False)
for tt in axHisty.get_xticklabels():
tt.set_rotation(270)
tt.set_size('x-small')
for tt in axHistx.get_yticklabels():
tt.set_size('x-small')
if tkt["fwhm_h"] != None:
hh = 0.5*numpy.max(tkt["histogram_h"])
lines = [ [ (tkt["fwhm_coordinates_h"][0],hh), \
(tkt["fwhm_coordinates_h"][1],hh) ]]
lc = collections.LineCollection(lines,color='red',linewidths=2)
axHistx.add_collection(lc)
if nofwhm != 1:
if tkt["fwhm_coordinates_h"][0] < 0:
shift1 = 0.9
else:
shift1 = 1.0
axHistx.annotate('FWHM=%f'%tkt["fwhm_h"], xy=(shift1*tkt["fwhm_coordinates_h"][0],1.01*hh))
if tkt["fwhm_v"] != None:
hh = 0.5*numpy.max(tkt["histogram_v"])
lines = [ [ (hh,tkt["fwhm_coordinates_v"][0]), \
(hh,tkt["fwhm_coordinates_v"][1]) ]]
lc = collections.LineCollection(lines,color='green',linewidths=2)
axHisty.add_collection(lc)
if nofwhm != 1:
if tkt["fwhm_coordinates_v"][0] < 0:
shift1 = 0.9
else:
shift1 = 1.0
axHisty.annotate('FWHM=%f'%tkt["fwhm_v"], xy=(shift1*tkt["fwhm_coordinates_v"][0],1.01*hh))
if title!=None:
axHistx.set_title(title)
axText = figure.add_axes(rect_text)
if tkt["nolost"] == 0: axText.text(0.0,0.8,"ALL RAYS")
if tkt["nolost"] == 1: axText.text(0.0,0.8,"GOOD RAYS")
if tkt["nolost"] == 2: axText.text(0.0,0.8,"LOST RAYS")
#tmps = "intensity: %f"%(tkt["intensity"])
axText.text(0.0,0.7,"intensity: %8.2f"%(tkt["intensity"]))
axText.text(0.0,0.6,"total number of rays: "+str(tkt["nrays"]))
axText.text(0.0,0.5,"total good rays: "+str(tkt["good_rays"]))
axText.text(0.0,0.4,"total lost rays: "+str(tkt["nrays"]-tkt["good_rays"]))
calfwhm = 1
if tkt["fwhm_h"] != None:
axText.text(0.0,0.3,"fwhm H: "+str(tkt["fwhm_h"]))
if tkt["fwhm_v"] != None:
axText.text(0.0,0.2,"fwhm V: "+str(tkt["fwhm_v"]))
if isinstance(beam,str): axText.text(0.0,0.1,"FILE: "+beam)
if isinstance(beam,sd.Beam): axText.text(0.0,0.1,"from Shadow.Beam instance")
if tkt["ref"] == 0:
axText.text(0.0,0.0,"WEIGHT: RAYS")
else:
axText.text(0.0,0.0,"WEIGHT: INTENSITY")
axText.set_axis_off()
plt.show()
return tkt
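# Hedged usage sketch for plotxy (added; "star.01" is a placeholder file and
# nbins/nolost are assumed to be accepted by Shadow.Beam.histo2):
#
#     tkt = plotxy("star.01", 1, 3, nbins=101, nolost=1, title="Real space")
#     print("fwhm H/V:", tkt["fwhm_h"], tkt["fwhm_v"])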
#TODO: delete. Reimplemented using Shadow.Beam.histo2()
def plotxy_old(beam,cols1,cols2,nbins=25,nbins_h=None,level=5,xrange=None,yrange=None,nolost=0,title='PLOTXY',xtitle=None,ytitle=None,noplot=0,calfwhm=0,contour=0):
'''
Draw the scatter or contour or pixel-like plot of two columns of a Shadow.Beam instance or of a given shadow file, along with histograms for the intensity on the top and right side.
    Inputs:
beam : str instance with the name of the shadow file to be loaded, or a Shadow.Beam initialized instance.
cols1 : first column.
cols2 : second column.
    Optional Inputs:
       nbins : int for the size of the grid (nbins x nbins). It affects the plot only when the plot is not a scatter plot.
       nbins_h : int for the number of bins for the histograms
       level : int for the number of contour levels to be drawn. It affects the plot only for contour plots.
xrange : tuple or list of length 2 describing the interval of interest for x, the data read from the chosen column.
yrange : tuple or list of length 2 describing the interval of interest for y, counts or intensity depending on ref.
nolost :
0 All rays
1 Only good rays
2 Only lost rays
title : title of the figure, it will appear on top of the window.
xtitle : label for the x axis.
ytitle : label for the y axis.
noplot :
0 plot the histogram
1 don't plot the histogram
calfwhm :
0 don't compute the fwhm
1 compute the fwhm and draw it
2 in addition to calfwhm=1, it computes now the intensity in a
slit of FWHM_h x FWHM_v
contour :
0 scatter plot
1 contour, black & white, only counts (without intensity)
2 contour, black & white, with intensity.
3 contour, colored, only counts (without intensity)
4 contour, colored, with intensity.
5 pixelized, colored, only counts (without intensity)
6 pixelized, colored, with intensity.
Outputs:
ShadowTools.Histo1_Ticket instance.
Error:
if an error occurs an ArgsError is raised.
Possible choice for col are:
1 X spatial coordinate [user's unit]
2 Y spatial coordinate [user's unit]
3 Z spatial coordinate [user's unit]
4 X' direction or divergence [rads]
5 Y' direction or divergence [rads]
6 Z' direction or divergence [rads]
7 X component of the electromagnetic vector (s-polariz)
8 Y component of the electromagnetic vector (s-polariz)
9 Z component of the electromagnetic vector (s-polariz)
10 Lost ray flag
11 Energy [eV]
12 Ray index
13 Optical path length
14 Phase (s-polarization)
15 Phase (p-polarization)
16 X component of the electromagnetic vector (p-polariz)
17 Y component of the electromagnetic vector (p-polariz)
18 Z component of the electromagnetic vector (p-polariz)
19 Wavelength [A]
20 R= SQRT(X^2+Y^2+Z^2)
21 angle from Y axis
    22   the magnitude of the electromagnetic vector
23 |E|^2 (total intensity)
24 total intensity for s-polarization
25 total intensity for p-polarization
26 K = 2 pi / lambda [A^-1]
27 K = 2 pi / lambda * col4 [A^-1]
28 K = 2 pi / lambda * col5 [A^-1]
29 K = 2 pi / lambda * col6 [A^-1]
30 S0-stokes = |Es|^2 + |Ep|^2
31 S1-stokes = |Es|^2 - |Ep|^2
32 S2-stokes = 2 |Es| |Ep| cos(phase_s-phase_p)
33 S3-stokes = 2 |Es| |Ep| sin(phase_s-phase_p)
'''
if nbins_h==None: nbins_h=nbins+1
try:
stp.plotxy_CheckArg(beam,cols1,cols2,nbins,nbins_h,level,xrange,yrange,nolost,title,xtitle,ytitle,noplot,calfwhm,contour)
except stp.ArgsError as e:
raise e
#plot_nicc.ioff()
plt.ioff()
col1,col2,col3,col4 = getshcol(beam,(cols1,cols2,10,23,))
nbins=nbins+1
if xtitle==None: xtitle=(stp.getLabel(cols1-1))[0]
if ytitle==None: ytitle=(stp.getLabel(cols2-1))[0]
if nolost==0: t = numpy.where(col3!=-3299)
if nolost==1: t = numpy.where(col3==1.0)
if nolost==2: t = numpy.where(col3!=1.0)
if xrange==None: xrange = stp.setGoodRange(col1[t])
if yrange==None: yrange = stp.setGoodRange(col2[t])
#print xrange
#print yrange
tx = numpy.where((col1>xrange[0])&(col1<xrange[1]))
ty = numpy.where((col2>yrange[0])&(col2<yrange[1]))
tf = set(list(t[0])) & set(list(tx[0])) & set(list(ty[0]))
t = (numpy.array(sorted(list(tf))),)
if len(t[0])==0:
print ("no point selected")
return None
#figure = pylab.plt.figure(figsize=(12,8),dpi=96)
figure = plt.figure(figsize=(12,8),dpi=96)
ratio = 8.0/12.0
left, width = 0.1*ratio, 0.65*ratio
bottom, height = 0.1, 0.65
bottom_h = bottom+height+0.02
left_h = left+width+0.02*ratio
rect_scatter = [0.10*ratio, 0.10, 0.65*ratio, 0.65]
rect_histx = [0.10*ratio, 0.77, 0.65*ratio, 0.20]
rect_histy = [0.77*ratio, 0.10, 0.20*ratio, 0.65]
rect_text = [1.00*ratio, 0.10, 1.20*ratio, 0.65]
axScatter = figure.add_axes(rect_scatter)
axScatter.set_xlabel(xtitle)
axScatter.set_ylabel(ytitle)
if contour==0:
axScatter.scatter(col1[t],col2[t],s=0.5)
if contour>0 and contour<7:
if contour==1 or contour==3 or contour==5: w = numpy.ones( len(col1) )
if contour==2 or contour==4 or contour==6: w = col4
grid = numpy.zeros(nbins*nbins).reshape(nbins,nbins)
for i in t[0]:
indX = stp.findIndex(col1[i],nbins,xrange[0],xrange[1])
indY = stp.findIndex(col2[i],nbins,yrange[0],yrange[1])
try:
grid[indX][indY] = grid[indX][indY] + w[i]
except IndexError:
pass
X, Y = numpy.mgrid[xrange[0]:xrange[1]:nbins*1.0j,yrange[0]:yrange[1]:nbins*1.0j]
L = numpy.linspace(numpy.amin(grid),numpy.amax(grid),level)
if contour==1 or contour==2: axScatter.contour(X, Y, grid, colors='k', levels=L)
if contour==3 or contour==4: axScatter.contour(X, Y, grid, levels=L)
if contour==5 or contour==6: axScatter.pcolor(X, Y, grid)
#axScatter.set_xlim(xrange)
#axScatter.set_ylim(yrange)
#axScatter.axis(xmin=xrange[0],xmax=xrange[1])
#axScatter.axis(ymin=yrange[0],ymax=yrange[1])
for tt in axScatter.get_xticklabels():
tt.set_size('x-small')
for tt in axScatter.get_yticklabels():
tt.set_size('x-small')
#if ref==0: col4 = numpy.ones(len(col4),dtype=float)
axHistx = figure.add_axes(rect_histx, sharex=axScatter)
axHisty = figure.add_axes(rect_histy, sharey=axScatter)
binx = numpy.linspace(xrange[0],xrange[1],nbins_h)
biny = numpy.linspace(yrange[0],yrange[1],nbins_h)
if contour==0 or contour==1 or contour==3 or contour==5:
hx, binx, patchx = axHistx.hist(col1[t],bins=binx,range=xrange,histtype='step',color='k')
hy, biny, patchy = axHisty.hist(col2[t],bins=biny,range=yrange,orientation='horizontal',histtype='step',color='k')
if contour==2 or contour==4 or contour==6:
hx, binx, patchx = axHistx.hist(col1[t],bins=binx,range=xrange,weights=col4[t],histtype='step',color='b')
hy, biny, patchy = axHisty.hist(col2[t],bins=biny,range=yrange,weights=col4[t],orientation='horizontal',histtype='step',color='b')
for tl in axHistx.get_xticklabels(): tl.set_visible(False)
for tl in axHisty.get_yticklabels(): tl.set_visible(False)
for tt in axHisty.get_xticklabels():
tt.set_rotation(270)
tt.set_size('x-small')
for tt in axHistx.get_yticklabels():
tt.set_size('x-small')
intensityinslit = 0.0
if calfwhm>=1:
fwhmx,txf, txi = stp.calcFWHM(hx,binx[1]-binx[0])
fwhmy,tyf, tyi = stp.calcFWHM(hy,biny[1]-biny[0])
axHistx.plot([binx[txi],binx[txf+1]],[max(hx)*0.5,max(hx)*0.5],'x-')
axHisty.plot([max(hy)*0.5,max(hy)*0.5],[biny[tyi],biny[tyf+1]],'x-')
print ("fwhm horizontal: %g" % fwhmx)
print ("fwhm vertical: %g" % fwhmy)
if calfwhm>=2:
xx1 = binx[txi]
xx2 = binx[txf+1]
yy1 = biny[tyi]
yy2 = biny[tyf+1]
print ("limits horizontal: %g %g " % (binx[txi],binx[txf+1]))
print ("limits vertical: %g %g " % (biny[tyi],biny[tyf+1]))
axScatter.plot([xx1,xx2,xx2,xx1,xx1],[yy1,yy1,yy2,yy2,yy1])
#fwhmx,txf, txi = stp.calcFWHM(hx,binx[1]-binx[0])
#fwhmy,tyf, tyi = stp.calcFWHM(hy,biny[1]-biny[0])
#calculate intensity in slit
if nolost==0: tt = numpy.where(col3!=-3299)
if nolost==1: tt = numpy.where(col3==1.0)
if nolost==2: tt = numpy.where(col3!=1.0)
ttx = numpy.where((col1>=xx1)&(col1<=xx2))
tty = numpy.where((col2>=yy1)&(col2<=yy2))
ttf = set(list(tt[0])) & set(list(ttx[0])) & set(list(tty[0]))
tt = (numpy.array(sorted(list(ttf))),)
if len(tt[0])>0:
intensityinslit = col4[tt].sum()
print ("Intensity in slit: %g ",intensityinslit)
if title!=None:
axHistx.set_title(title)
axText = figure.add_axes(rect_text)
    ntot = len(numpy.where(col3!=-3299)[0])
ngood = len(numpy.where(col3==1)[0])
nbad = ntot - ngood
if nolost==0: axText.text(0.0,0.8,"ALL RAYS")
if nolost==1: axText.text(0.0,0.8,"GOOD RAYS")
if nolost==2: axText.text(0.0,0.8,"LOST RAYS")
tmps = "intensity: "+str(col4[t].sum())
if calfwhm == 2:
tmps=tmps+" (in slit:"+str(intensityinslit)+") "
axText.text(0.0,0.7,tmps)
axText.text(0.0,0.6,"total number of rays: "+str(ntot))
axText.text(0.0,0.5,"total good rays: "+str(ngood))
axText.text(0.0,0.4,"total lost rays: "+str(ntot-ngood))
if calfwhm>=1:
axText.text(0.0,0.3,"fwhm H: "+str(fwhmx))
axText.text(0.0,0.2,"fwhm V: "+str(fwhmy))
if isinstance(beam,str): axText.text(0.0,0.1,"FILE: "+beam)
if isinstance(beam,sd.Beam): axText.text(0.0,0.1,"from Shadow3 Beam instance")
axText.text(0.0,0.0,"DIR: "+os.getcwd())
axText.set_axis_off()
#pylab.plt.draw()
plt.draw()
if noplot==0: figure.show()
ticket = plotxy_Ticket()
ticket.figure = figure
ticket.xrange = xrange
ticket.yrange = yrange
ticket.xtitle = xtitle
ticket.ytitle = ytitle
ticket.title = title
if calfwhm>=1:
ticket.fwhmx = fwhmx
ticket.fwhmy = fwhmy
ticket.intensity = col4[t].sum()
ticket.averagex = numpy.average( col1[t] )
ticket.averagey = numpy.average( col2[t] )
ticket.intensityinslit = intensityinslit
return ticket
#
#focnew
#
def focnew(beam,nolost=1,mode=0,center=[0.0,0.0]):
"""
Implements SHADOW's focnew utility
For scanning the RMS around the focal position, use focnew_scan with focnew results
:param beam: a file name or an instance of Shadow.Beam
:param nolost: 0=all rays, 1=good only, 2=lost only
    :param mode: 0=center at origin, 1=center at barycenter, 2=external center (define it via the center argument)
:param center: [x0,y0] the center coordinates, if mode=2
:return: a python dictionary (ticket) with:
ticket['nolost'] # input flag
ticket['mode'] # input flag
ticket['center_at'] # text of mode: 'Origin','Baricenter' or 'External'
ticket['AX'] # \
ticket['AZ'] # focnew coefficients (to be used by focnew_scan)
ticket['AT'] # /
ticket['x_waist'] # position of waist X
ticket['z_waist'] # position of waist Z
ticket['t_waist'] # position of waist T (averaged)
ticket['text'] = txt # a text with focnew info
"""
NMODE = ['Origin','Baricenter','External']
if isinstance(beam,str):
beam1 = Shadow.Beam()
beam1.load(beam)
else:
beam1 = beam
# get focnew coefficients
ray = numpy.array(beam1.getshcol([1,2,3,4,5,6],nolost=nolost))
#ray = numpy.array(self.getshcol([1,2,3,4,5,6],nolost=nolost))
if mode == 2:
ray[:,0] -= center[0]
ray[:,2] -= center[1]
AX,AZ,AT = _focnew_coeffs(ray,nolost=nolost,mode=mode,center=center)
# store versors
ZBAR = AZ[3]
VZBAR = AZ[5]
#
XBAR = AX[3]
VXBAR = AX[5]
#
TBAR = ZBAR + XBAR
VTBAR = VZBAR + VXBAR
#reset coeffs
if mode != 1:
AZ[3] = 0.0
AZ[4] = 0.0
AZ[5] = 0.0
AX[3] = 0.0
AX[4] = 0.0
AX[5] = 0.0
AT[3] = 0.0
AT[4] = 0.0
AT[5] = 0.0
#get Y coordinate of the three waists
if numpy.abs(AZ[0]-AZ[5]) > 1e-30:
TPARZ = (AZ[4] - AZ[1]) / (AZ[0] - AZ[5])
else:
TPARZ = 0.0
if numpy.abs(AX[0]-AX[5]) > 1e-30:
TPARX = (AX[4] - AX[1]) / (AX[0] - AX[5])
else:
TPARX = 0.0
    if numpy.abs(AT[0]-AT[5]) > 1e-30:
        TPART = (AT[4] - AT[1]) / (AT[0] - AT[5])
else:
TPART = 0.0
#prepare text output
txt = ""
txt += '-----------------------------------------------------------------------------\n'
txt += 'Center at : %s\n'%(NMODE[mode])
txt += 'X = %f Z = %f\n'%(center[0],center[1])
txt += '-----------------------------------------------------------------------------\n'
SIGX = numpy.sqrt(numpy.abs( AX[0] * TPARX**2 + 2.0 * AX[1] * TPARX + AX[2] - ( AX[3] + 2.0 * AX[4] * TPARX + AX[5] * TPARX**2)))
SIGZ = numpy.sqrt(numpy.abs( AZ[0] * TPARZ**2 + 2.0 * AZ[1] * TPARZ + AZ[2] - ( AZ[3] + 2.0 * AZ[4] * TPARZ + AZ[5] * TPARZ**2)))
SIGT = numpy.sqrt(numpy.abs( AT[0] * TPART**2 + 2.0 * AT[1] * TPART + AT[2] - ( AT[3] + 2.0 * AT[4] * TPART + AT[5] * TPART**2)))
SIGX0 = numpy.sqrt(numpy.abs(AX[2] - AX[3]))
SIGZ0 = numpy.sqrt(numpy.abs(AZ[2] - AZ[3]))
SIGT0 = numpy.sqrt(numpy.abs(AT[2] - AT[3]))
# txt += '............. S A G I T T A L ............\n'
txt += '............. X AXIS (column 1) ............\n'
txt += 'X coefficients : %g %g %g\n'%(AX[0],AX[1],AX[2])
txt += 'Center : %g Average versor : %g\n'%(numpy.sqrt(numpy.abs(XBAR)),numpy.sqrt(numpy.abs(VXBAR)))
txt += 'Focus along X at : %g\n'%(TPARX)
txt += 'Waist size at best focus (rms) : %g\n'%(SIGX)
txt += 'Waist size at origin : %g\n'%(SIGX0)
# txt += '............. T A N G E N T I A L .............\n'
txt += '............. Z AXIS (column 3) ............\n'
txt += 'Z coefficients : %g %g %g\n'%(AZ[0],AZ[1],AZ[2])
txt += 'Center : %g Average versor : %g\n'%(numpy.sqrt(numpy.abs(ZBAR)),numpy.sqrt(numpy.abs(VZBAR)))
txt += 'Focus along Z at : %g\n'%(TPARZ)
txt += 'Waist size at best focus (rms) : %g\n'%(SIGZ)
txt += 'Waist size at origin : %g\n'%(SIGZ0)
txt += '............. L E A S T C O N F U S I O N ...............\n'
txt += 'XZ coefficients : %g %g %g\n'%(AT[0],AT[1],AT[2])
txt += 'Center : %g Average versor : %g\n'%(numpy.sqrt(numpy.abs(TBAR)),numpy.sqrt(numpy.abs(VTBAR)))
txt += 'Circle of least confusion : %g\n'%(TPART)
txt += 'Waist size at best focus (rms) : %g\n'%(SIGT)
txt += 'Waist size at origin : %g\n'%(SIGT0)
#store all outputs
ticket = {}
# copy the inputs
ticket['nolost'] = nolost
ticket['mode'] = mode
ticket['center_at'] = NMODE[mode]
# coefficients
ticket['AX'] = AX
ticket['AZ'] = AZ
ticket['AT'] = AT
# position of waists
ticket['x_waist'] = TPARX
ticket['z_waist'] = TPARZ
ticket['t_waist'] = TPART
# text output
ticket['text'] = txt
return ticket
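# Hedged usage sketch for focnew (added; "star.02" is a placeholder file,
# matching the example in the focnew_scan docstring below):
#
#     tkt = focnew("star.02", nolost=1, mode=0)
#     print(tkt["text"])        # formatted report
#     print(tkt["z_waist"])     # Y position of the Z waist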
def focnew_scan(A,x):
"""
Scans the RMS of the beam size using the focnew coefficients
Example:
tkt = focnew("star.02")
try:
import matplotlib.pylab as plt
except:
raise Exception("Cannot load matplotlib")
f2 = plt.figure(2)
        y = numpy.linspace(-10.,10.,101)
plt.plot(y,2.35*focnew_scan(tkt["AX"],y),label="x (column 1)")
plt.plot(y,2.35*focnew_scan(tkt["AZ"],y),label="z (column 3)")
plt.plot(y,2.35*focnew_scan(tkt["AT"],y),label="combined x,z")
plt.legend()
plt.title("FOCNEW SCAN")
plt.xlabel("Y [cm]")
plt.ylabel("2.35*<Z> [cm]")
plt.show()
:param A: array of 6 coefficients
:param x: the abscissas array
:return: the array with RMS values
"""
x1 = numpy.array(x)
y = numpy.sqrt(numpy.abs( A[0] * x1**2 + 2.0 * A[1] * x1 + A[2] - (A[3] + 2.0 * A[4] * x1 + A[5] * x1**2)))
return y
def _focnew_coeffs(ray,nolost=1,mode=0,center=[0.0,0.0]):
"""
Internal use of focnew:
calculate the 6 CHI-Square coefficients for that data array referred to the origin
    e.g., for x we have d = Vx/Vy; the 6 coeffs are: <d**2>, <x d>, <x**2>, <x>**2, <x><d>, <d>**2
    This is a translation of FINDOUT in shadow_kernel.F90
Note that mode=0 and mode=1 give the same output
:param ray: numpy array with ray coordinates and directions
:param nolost:
    :param mode: 0=center at origin, 1=center at barycenter, 2=external center (define it via the center argument)
:param center: [x0,y0] the center coordinates, if mode=2
:return: AX,AZ,AT 6 coeffs arrays for X, Z and AVERAGE directions, respectively
"""
# for col=3
AZ = numpy.zeros(6)
DVECTOR = ray[5,:]/ray[4,:] ### RAY(KOL+3,I)/RAY(5,I)
AZ[0] = (DVECTOR**2).sum() # A1 = A1 + DVECTOR**2
AZ[1] = (ray[2,:]*DVECTOR).sum() # A2 = A2 + RAY(KOL,I)*DVECTOR
AZ[2] = (ray[2,:]**2).sum() # A3 = A3 + RAY(KOL,I)**2
AZ[3] = (ray[2,:]).sum() # A4 = A4 + RAY(KOL,I)
AZ[5] = DVECTOR.sum() # A6 = A6 + DVECTOR
AZ[0] = AZ[0] / ray.shape[1] # A1 = A1/K
AZ[1] = AZ[1] / ray.shape[1] # A2 = A2/K
AZ[2] = AZ[2] / ray.shape[1] # A3 = A3/K
AZ[3] = AZ[3] / ray.shape[1] # A4 = A4/K
AZ[5] = AZ[5] / ray.shape[1] # A6 = A6/K
AZ[4] = AZ[5] * AZ[3] # A5 = A6*A4
AZ[3] = AZ[3]**2 # A4 = A4**2
AZ[5] = AZ[5]**2 # A6 = A6**2
# for col=1
AX = numpy.zeros(6)
DVECTOR = ray[3,:]/ray[4,:] # RAY(KOL+3,I)/RAY(5,I)
AX[0] = (DVECTOR**2).sum() # A1 = A1 + DVECTOR**2
AX[1] = (ray[0,:]*DVECTOR).sum() # A2 = A2 + RAY(KOL,I)*DVECTOR
AX[2] = (ray[0,:]**2).sum() # A3 = A3 + RAY(KOL,I)**2
AX[3] = (ray[0,:]).sum() # A4 = A4 + RAY(KOL,I)
AX[5] = DVECTOR.sum() # A6 = A6 + DVECTOR
AX[0] = AX[0] / ray.shape[1] # A1 = A1/K
AX[1] = AX[1] / ray.shape[1] # A2 = A2/K
AX[2] = AX[2] / ray.shape[1] # A3 = A3/K
AX[3] = AX[3] / ray.shape[1] # A4 = A4/K
AX[5] = AX[5] / ray.shape[1] # A6 = A6/K
AX[4] = AX[5] * AX[3] # A5 = A6*A4
AX[3] = AX[3]**2 # A4 = A4**2
AX[5] = AX[5]**2 # A6 = A6**2
# for T
AT = numpy.zeros(6)
AT[0] = AX[0] + AZ[0]
AT[1] = AX[1] + AZ[1]
AT[2] = AX[2] + AZ[2]
AT[3] = AX[3] + AZ[3]
AT[4] = AX[4] + AZ[4]
AT[5] = AX[5] + AZ[5]
return AX,AZ,AT
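# Shape sanity check for _focnew_coeffs (added; the random rays are purely
# illustrative). Rays are stored as a (6, NRAYS) array of x, y, z, vx, vy, vz:
#
#     ray_demo = numpy.random.rand(6, 1000) + 0.1   # keep vy away from zero
#     AX_d, AZ_d, AT_d = _focnew_coeffs(ray_demo)
#     assert AX_d.shape == AZ_d.shape == AT_d.shape == (6,)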
def ray_prop(beam,nolost=1,ypoints=21,ymin=-1.0,ymax=1.0,xrange=None,zrange=None,xbins=0,zbins=0):
"""
:param beam: a file name or an instance of Shadow.Beam
:param nolost: 0=all rays, 1=good only, 2=lost only
:param ypoints: number of points (planes) where to propagate the beam
    :param ymin: minimum coordinate y (along the beam)
:param ymax: maximum coordinate y (along the beam)
:param xrange: [xmin,xmax] limits in X (for the histograms, i.e. xbins > 0)
:param zrange: [zmin,zmax] limits in Z (for the histograms, i.e. zbins > 0)
:param xbins: number of bins for histograms in X direction. If xbins=0 (default) do not make X histograms
:param zbins: number of bins for histograms in Z direction. If zbins=0 (default) do not make Z histograms
:return: a python dictionary (ticket) with:
ticket['ypoints'] # input
ticket['ymin'] # input
ticket['ymax'] # input
ticket['xbins'] # input
ticket['zbins'] # input
ticket['y'] # array (ypoints) with y values
ticket['x'] # array (ypoints,NRAYS) with x values
ticket['z'] # array (ypoints,NRAYS) with z values
ticket['x_mean'] # array(ypoints) with X mean
ticket['z_mean'] # array(ypoints) with Z mean
ticket['x_wmean'] # array(ypoints) with X weighted (with intensity) mean
ticket['z_wmean'] # array(ypoints) with Z weighted (with intensity) mean
ticket['x_sd'] # array(ypoints) with X standard deviations
ticket['z_sd'] # array(ypoints) with Z standard deviations
            ticket['x_wsd']    # array(ypoints) with X standard deviations (rays weighted with intensity)
            ticket['z_wsd']    # array(ypoints) with Z standard deviations (rays weighted with intensity)
ticket['x_fwhm'] # if xbins>0 array(ypoints) with FWHM for X
ticket['x_wfwhm'] # if xbins>0 array(ypoints) with FWHM (rays weighted with intensity) for X
            ticket['x_bins']   # if xbins>0 array(ypoints,xbins) with X histograms abscissas (at bin center)
            ticket['x_h']      # if xbins>0 array(ypoints,xbins) with X histogram counts
            ticket['x_wh']     # if xbins>0 array(ypoints,xbins) with X histogram counts (weighted with intensity)
            ticket['z_fwhm']   # if zbins>0 array(ypoints) with FWHM for Z
            ticket['z_wfwhm']  # if zbins>0 array(ypoints) with FWHM (rays weighted with intensity) for Z
            ticket['z_bins']   # if zbins>0 array(ypoints,zbins) with Z histograms abscissas (at bin center)
            ticket['z_h']      # if zbins>0 array(ypoints,zbins) with Z histogram counts
            ticket['z_wh']     # if zbins>0 array(ypoints,zbins) with Z histogram counts (weighted with intensity)
"""
if isinstance(beam,str):
beam1 = Shadow.Beam()
beam1.load(beam)
else:
beam1 = beam
rays = beam1.getshcol((1,2,3,4,5,6),nolost=nolost)
rays = numpy.array(rays).T
weights = beam1.getshcol(23,nolost=nolost)
weights = numpy.array(weights)
weights_sum = weights.sum()
s = rays.shape
#define output variables
y = numpy.linspace(ymin,ymax,ypoints)
x_mean = y.copy()
z_mean = y.copy()
x_sd = y.copy()
z_sd = y.copy()
x_wmean = y.copy()
z_wmean = y.copy()
x_wsd = y.copy()
z_wsd = y.copy()
x = numpy.zeros((y.size,s[0]))
z = numpy.zeros((y.size,s[0]))
for i,yi in enumerate(y):
tof = (-rays[:,1].flatten() + yi)/rays[:,4].flatten()
xi = rays[:,0].flatten() + tof*rays[:,3].flatten()
zi = rays[:,2].flatten() + tof*rays[:,5].flatten()
# out[0,i,:] = xi
# out[1,i,:] = zi
x[i,:] = xi
z[i,:] = zi
x_mean[i] = (xi).mean()
z_mean[i] = (zi).mean()
x_sd[i] = xi.std()
z_sd[i] = zi.std()
x_wmean[i] = (xi*weights).sum() / weights_sum
z_wmean[i] = (zi*weights).sum() / weights_sum
x_wsd[i] = numpy.sqrt( ((xi*weights-x_wmean[i])**2).sum() / weights_sum)
z_wsd[i] = numpy.sqrt( ((zi*weights-z_wmean[i])**2).sum() / weights_sum)
# now the histograms
if xrange is None:
xrange = [x.min(),x.max()]
if zrange is None:
zrange = [z.min(),z.max()]
    # first histograms for X
if xbins > 0:
x_fwhm = numpy.zeros(ypoints)
x_wfwhm = | numpy.zeros(ypoints) | numpy.zeros |
# -*- coding: utf-8 -*-
"""
Created on Sat May 14 17:10:46 2016
@author: castaned
"""
import numpy as np
import main_modules as mmod
def xi(i,r1,theta,dr,phi,tstp):
global dth,dr_arr,alambda
dx1 = np.sin(np.deg2rad(i))
# dy1 = 0.
dz1 = np.cos(np.deg2rad(i))
x1 = r1*np.sin(np.deg2rad(theta))*np.cos(np.deg2rad(phi))
y1 = r1*np.sin(np.deg2rad(theta))*np.sin(np.deg2rad(phi))
z1 = r1*np.cos(np.deg2rad(theta))
pert = 0.1*r1
alambda = dr/(r1*np.deg2rad(tstp))
r3 = pert*np.tan(alambda)
r2 = np.sqrt((r1+pert)**2+r3**2)
psi = np.rad2deg(np.arcsin(r3/r2))
th2 = theta-psi
x2 = r2*np.sin(np.deg2rad(th2))*np.cos(np.deg2rad(phi))
y2 = r2*np.sin(np.deg2rad(th2))*np.sin( | np.deg2rad(phi) | numpy.deg2rad |
"""
The Pshpere module contains a class named PShpere which allows the user to
generate synthetic porous media, and to get information about that porous
media
A user can instantiate and use the PSphere() object as follows:
>>> from lb_colloids import PSphere
>>> img = Psphere(dimension=200, radius=20, porosity=0.375, sensitivity=0.01)
>>> # hydraulic radius can be calculated
>>> rh = img.calculate_hydraulic_radius(resolution=1e-06)
>>> # to get a copy of the porous media use
>>> matrix = img.matrix
>>> # save the image
>>> img.save("test_image.png")
"""
import numpy as np
import random
import math
from PIL import Image
import matplotlib.pyplot as plt
class PSphere(object):
"""
Pshpere is a class that allows for the automated generation of
synthetic porous media in two-dimensions. This approach can be
expanded to three dimensions with some effort.
Parameters:
----------
:param int radius: grain size radius
:param float porosity: target porosity for porous media
:param int dimension: the x and y dimension in pixels for the domain
:param float sensitivity: a porosity sensitivity target. This is the allowable range of error for PShpere
"""
def __init__(self, radius=20, porosity=0.5, dimension=256, sensitivity=0.08):
self.radius = radius
self.porosity = porosity
self.sensitivity = sensitivity
self.dimension = dimension
self.matrix = np.ones((dimension, dimension), dtype=bool)
self.matrix_porosity = 0.
self.matrix_rh = 0.
self.particle_space = False
self.pore_space = True
self.percolates = False
good = False
while not good:
self.generate_plane()
self.check_percolation()
self.check_porosity()
print(self.matrix_porosity)
if abs(self.matrix_porosity - self.porosity) <= self.sensitivity:
if self.percolates:
good = True
else:
print("Regenerating porous media")
self.matrix = np.ones((dimension, dimension), dtype=bool)
# self.percolates = False
def get_matrix(self):
matrix = np.invert(self.matrix)
return matrix.astype(bool)
def generate_plane(self):
"""
Main method used to generate a porous media plane by PSphere,
this should not be called by the user
"""
porosity = self.porosity
slice_location = self.dimension / 2
low_bound = slice_location - int(self.radius)
up_bound = slice_location + int(self.radius)
if low_bound <= 0 or up_bound > self.dimension:
            raise AssertionError("Radius too large or slice location incorrect")
relative_radius = self.radius / float(self.dimension)
number_of_spheres = self.iround(-3.0 * np.log(self.porosity) /
(4 * np.pi * relative_radius ** 3))
for i in range(number_of_spheres):
z = 1 + random.uniform(0, self.dimension)
if up_bound > z > low_bound:
x = 1 + int(random.uniform(0, self.dimension))
y = 1 + int(random.uniform(0, self.dimension))
slice_distance = abs(z - slice_location)
slice_radius = | np.sqrt(self.radius ** 2 - slice_distance ** 2) | numpy.sqrt |
import os
import keras
from keras.layers import concatenate
from sklearn.metrics import cohen_kappa_score
import scipy.io
import math
import random
from keras import optimizers
import numpy as np
import scipy.io as spio
from sklearn.metrics import f1_score, accuracy_score
np.random.seed(0)
from keras.preprocessing import sequence
from keras.utils import np_utils
from keras.models import Sequential
from keras.layers import Layer,Dense, Dropout, Input, Activation, TimeDistributed, Reshape
from keras.layers import GRU, Bidirectional
from keras.layers import Conv1D, Conv2D, MaxPooling2D, Flatten, BatchNormalization, LSTM, ZeroPadding2D
from keras.callbacks import History
from keras.models import Model
from keras.layers.noise import GaussianNoise
from collections import Counter
from sklearn.utils import class_weight
from myModel import build_model
from os import listdir
from os.path import isfile, join
import sys
sys.path.append("../..")
from loadData import *
from utils import *
out_dir = './pred/'
data_dir = './../../data/files/'
f_set = './../../data/file_sets.mat'
files_val = []
files_test = []
files_train = []
mat = spio.loadmat(f_set)
tmp = mat['files_val']
for i in range(len(tmp)):
file = [str(''.join(l)) for la in tmp[i] for l in la]
files_val.extend(file)
tmp = mat['files_test']
for i in range(len(tmp)):
file = [str(''.join(l)) for la in tmp[i] for l in la]
files_test.extend(file)
batch_size = 200
n_ep = 4
fs = 200
w_len = 1*fs
data_dim = w_len*2
half_prec = 0.5
prec = 1
n_cl = 4
print("=====================")
print("Reading dataset to predict:")
( data_val, targets_val, N_samples_val) = load_data(data_dir,files_val, w_len)
( data_test, targets_test, N_samples_test) = load_data(data_dir,files_test, w_len)
ordering = 'tf'
keras.backend.common.set_image_dim_ordering(ordering)
sample_list_val = []
for i in range(len(targets_val)):
sample_list_val.append([])
for j in range(len(targets_val[i][0])):
mid = j*prec
# we add the padding size
mid += w_len
wnd_begin = mid-w_len
wnd_end = mid+w_len-1
sample_list_val[i].append([i,j,wnd_begin, wnd_end, 0 ])
sample_list_val2 = []
for i in range(len(targets_val)):
sample_list_val2.append([])
for j in range(len(targets_val[i][1])):
mid = j*prec
# we add the padding size
mid += w_len
wnd_begin = mid-w_len
wnd_end = mid+w_len-1
sample_list_val2[i].append([i,j,wnd_begin, wnd_end, 1 ])
sample_list_test = []
for i in range(len(targets_test)):
sample_list_test.append([])
for j in range(len(targets_test[i][0])):
mid = j*prec
# we add the padding size
mid += w_len
wnd_begin = mid-w_len
wnd_end = mid+w_len-1
sample_list_test[i].append([i,j,wnd_begin, wnd_end, 0 ])
sample_list_test2 = []
for i in range(len(targets_test)):
sample_list_test2.append([])
for j in range(len(targets_test[i][1])):
mid = j*prec
# we add the padding size
mid += w_len
wnd_begin = mid-w_len
wnd_end = mid+w_len-1
sample_list_test2[i].append([i,j,wnd_begin, wnd_end, 1 ])
n_channels = 3
def my_generator(data_train, targets_train, sample_list, shuffle = True):
if shuffle:
random.shuffle(sample_list)
while True:
for batch in batch_generator(sample_list, batch_size):
batch_data1 = []
batch_data2 = []
batch_targets = []
for sample in batch:
[f, s, b, e, c] = sample
sample_label = targets_train[f][c][s]
sample_x1 = data_train[f][c][b:e+1]
sample_x2 = data_train[f][2][b:e+1]
sample_x = np.concatenate( ( sample_x1, sample_x2 ), axis = 2 )
batch_data1.append(sample_x)
batch_targets.append(sample_label)
batch_data1 = np.stack(batch_data1, axis=0)
batch_targets = np.array(batch_targets)
batch_targets = np_utils.to_categorical(batch_targets, n_cl)
batch_data1 = (batch_data1 )/100
batch_data1 = np.clip(batch_data1, -1, 1)
yield [ batch_data1 ], batch_targets
def val_data_to_batch(data, targets):
batch_data = []
batch_targets = []
for j in range(len(targets)):
mid = j*prec
# we add the padding size
mid += w_len
wnd_begin = mid-w_len
wnd_end = mid+w_len-1
b = wnd_begin
e = wnd_end
sample_label = targets[j]
sample_x = data[b:e+1]
batch_data.append(sample_x)
batch_targets.append(sample_label)
    batch_data = np.stack(batch_data, axis=0)
    batch_targets = np_utils.to_categorical(batch_targets, n_cl)
    batch_data = batch_data / 100  # same scaling as the training generator
    batch_data = np.clip(batch_data, -1, 1)  # keep the clipped array (was discarded)
    return batch_data, batch_targets
[cnn_eeg, model] = build_model(data_dim, n_channels, n_cl)
Nadam = optimizers.Nadam( )
model.compile(optimizer=Nadam, loss='categorical_crossentropy', metrics=['accuracy'], sample_weight_mode=None)
model.load_weights('./model.h5')
y_ = []
y = []
O2y_ = []
O2y = []
y_p = []
O2y_p = []
f_list = files_val
for j in range(0,len(f_list)):
    f = f_list[j]
    n_batches = int(math.ceil(len(sample_list_val[j]) / float(batch_size)))
    generator_val = my_generator(data_val, targets_val, sample_list_val[j], shuffle = False)
    scores = model.evaluate_generator( generator_val, n_batches, workers=1)
    generator_val = my_generator(data_val, targets_val, sample_list_val[j], shuffle = False)
    y_pred = model.predict_generator( generator_val, n_batches, workers=1)
    print(y_pred.shape)
y_ = | np.argmax(y_pred, axis=1) | numpy.argmax |
#!/usr/bin/env python3
"""<NAME> 2019
reads in TGSS and NVSS datapoints in a 5° patch of sky
and computes positional matches within 5',
saves feature vectors (individuals and pairs) as .csv
"""
import pandas as pd
import numpy as np
from astropy.io import fits
from tqdm import tqdm_notebook as tqdm
PATCH_SIZE = 5
SEPARATION_LIMIT = 5*1/60
# these five functions could be imported from positional_catalogue.py
def geodesic_dist(p1,p2):
"""arguments are two points on the unit sphere,
with ra and dec given in radians;
returns their geodesic distance, see:
https://en.wikipedia.org/wiki/Great-circle_distance#Formulae"""
ra1,dec1,ra2,dec2 = p1[0],p1[1],p2[0],p2[1]
decdiff = (dec1-dec2)/2
radiff = (ra1-ra2)/2
better_circle = 2*np.arcsin(np.sqrt(np.sin(decdiff)**2
+ np.cos(dec1)*np.cos(dec2) * np.sin(radiff)**2))
r = 1
return better_circle*r
def degdist(p1,p2):
"""calls geodesic_dist on two points,
with ra and dec given in degrees;
returns their separation in degrees"""
return 180/np.pi*geodesic_dist([x*np.pi/180 for x in p1],
[x*np.pi/180 for x in p2])
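# Hedged sanity check (hypothetical values): two points one arcminute apart
# in declination should be ~1/60 of a degree apart.
#   >>> round(degdist([150.0, -35.0], [150.0, -35.0 + 1.0/60]), 5)
#   0.01667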
def deci_deg_to_deg_min_sec(deci_deg):
"""https://stackoverflow.com/questions/2579535\
/convert-dd-decimal-degrees-to-dms-degrees-minutes-seconds-in-python"""
is_positive = (deci_deg >= 0)
deci_deg = abs(deci_deg)
# divmod returns quotient and remainder
minutes,seconds = divmod(deci_deg*3600,60)
degrees,minutes = divmod(minutes,60)
degrees = degrees if is_positive else -degrees
return (degrees,minutes,seconds)
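# Example (hypothetical input): 10.5 decimal degrees -> 10 deg 30 min 0 sec.
#   >>> deci_deg_to_deg_min_sec(10.5)
#   (10.0, 30.0, 0.0)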
def deci_deg_to_hr_min_sec(deci_deg):
"""assume deci_deg +ve"""
deci_hours = deci_deg/15.
schminutes,schmeconds = divmod(deci_hours*3600,60)
hours,schminutes = divmod(schminutes,60)
return (hours,schminutes,schmeconds)
def iau_designation(ra,dec):
"""generate NVSS names as per:
https://heasarc.gsfc.nasa.gov/W3Browse/all/nvss.html
There are four cases where there are pairs of sources which are
so close together that their names would be identical according
to this schema (see below), and the HEASARC has added suffixes
of 'a' (for the source with smaller RA) and 'b' (for the source
    with the larger RA) in such cases in order to differentiate them.
    It was easier just to hard-code this in;
    should really check if the designation already exists and compare
"""
hr,schmin,schmec = deci_deg_to_hr_min_sec(ra)
rhh = str(int(hr)).zfill(2)
rmm = str(int(schmin)).zfill(2)
rss = str(int(schmec - schmec%1)).zfill(2)
deg,minu,sec = deci_deg_to_deg_min_sec(dec)
sgn = '+' if deg>=0 else '-'
ddd = str(int(abs(deg))).zfill(2)
dmm = str(int(minu)).zfill(2)
dss = str(int(sec - sec%1)).zfill(2)
designation = ''.join(('NVSS J',rhh,rmm,rss,sgn,ddd,dmm,dss))
close_pairs = {'NVSS J093731-102001':144.382,
'NVSS J133156-121336':202.987,
'NVSS J160612+000027':241.553,
'NVSS J215552+380029':328.968}
if designation in close_pairs:
if ra < close_pairs[designation]:
designation = ''.join((designation,'a'))
else:
designation = ''.join((designation,'b'))
return designation
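# Example (hypothetical coordinates, not an actual catalogue entry):
#   >>> iau_designation(149.5, -35.25)
#   'NVSS J095800-351500'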
def main():
"""this should really be broken up into separate functions"""
PATCH_DEC = -35
PATCH_RA = 149
    def df_in_patch(df_ra,df_dec):
in_patch = ((PATCH_RA < df_ra) & (df_ra < PATCH_RA+PATCH_SIZE) &
(PATCH_DEC < df_dec) & (df_dec < PATCH_DEC+PATCH_SIZE))
return in_patch
# import TGSS
tgss_df = pd.read_csv('TGSSADR1_7sigma_catalog.tsv',delimiter='\t',
index_col=0,usecols=(0,1,3,5,7,9,11,13))
tgss_df = tgss_df.sort_values(by=['DEC'])
tgss_df['Total_flux'] = tgss_df['Total_flux']*1e-3
tgss_df['Peak_flux'] = tgss_df['Peak_flux']*1e-3
tgss_df = tgss_df[df_in_patch(tgss_df['RA'],tgss_df['DEC'])]
tgss_df.index.names = ['name_TGSS']
tgss_df.columns = ['ra_TGSS','dec_TGSS','integrated_TGSS','peak_TGSS',
'major_ax_TGSS','minor_ax_TGSS','posangle_TGSS']
tgss_df.to_csv('tgss.csv')
# import NVSS
with fits.open('CATALOG.FIT') as hdulist:
data = hdulist[1].data
nvss_data = np.column_stack((data['RA(2000)'],data['DEC(2000)'],data['PEAK INT'],
data['MAJOR AX'],data['MINOR AX'],data['POSANGLE'],
data['Q CENTER'],data['U CENTER'],data['P FLUX'],
data['RES PEAK'],data['RES FLUX']))
nvss_columns = ['RA(2000)','DEC(2000)','PEAK INT','MAJOR AX','MINOR AX','POSANGLE',
'Q CENTER','U CENTER','P FLUX','RES PEAK','RES FLUX']
nvss_df = pd.DataFrame(data = nvss_data, columns = nvss_columns)
nvss_df = nvss_df.sort_values(by=['DEC(2000)']).reset_index(drop = True)
nvss_df = nvss_df[df_in_patch(nvss_df['RA(2000)'],nvss_df['DEC(2000)'])]
nvss_labels = np.array([iau_designation(p[0],p[1]) for p in
nvss_df[['RA(2000)','DEC(2000)']].values])
nvss_df['name_NVSS'] = nvss_labels
nvss_df.set_index('name_NVSS',inplace=True)
nvss_df.columns = ['ra_NVSS','dec_NVSS','peak_NVSS','major_ax_NVSS','minor_ax_NVSS','posangle_NVSS',
'q_centre_NVSS','u_centre_NVSS','polarised_NVSS','res_peak_NVSS','res_flux_NVSS']
nvss_df.to_csv('nvss.csv')
# positional matching, a la positional_catalogue.py
tgss = tgss_df[['ra_TGSS','dec_TGSS']].values
nvss = nvss_df[['ra_NVSS','dec_NVSS']].values
nvss_dec_min = round(nvss[:,1].min(),1)
nvss_dec_max = round(nvss[:,1].max(),1)
patch_matches = []
tqdmbar = tqdm(total=len(tgss))
for i1,p1 in enumerate(tgss):
for i2,p2 in enumerate(nvss):
if (abs((p1[0]-p2[0])*np.cos(p1[1]*np.pi/180)) < SEPARATION_LIMIT
and abs(p1[1]-p2[1]) < SEPARATION_LIMIT):
patch_matches.append((i1,i2))
tqdmbar.postfix = 'matches = {}'.format(len(patch_matches))
tqdmbar.update(1)
patch_matches = np.array(patch_matches)
tmp_patch_matches = []
for i1,i2 in tqdm(patch_matches):
p1,p2 = tgss[i1],nvss[i2]
d = degdist(p1,p2)
if d < SEPARATION_LIMIT:
tmp_patch_matches.append([i1,i2])
patch_matches = np.array(tmp_patch_matches)
patch_cat_columns = np.concatenate((tgss_df.reset_index().columns.values,
nvss_df.reset_index().columns.values))
patch_cat = pd.DataFrame(columns=patch_cat_columns)
FREQ_TGSS,FREQ_NVSS = 150e6,1.4e9
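    # Spectral-index convention assumed below: S ~ nu^(-alpha), so
    # alpha = ln(S_150MHz / S_1400MHz) / ln(1400 / 150).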
for i1,i2 in tqdm(patch_matches):
obj_t = tgss_df.reset_index().iloc[i1]
obj_n = nvss_df.reset_index().iloc[i2]
match_row = {**obj_t,**obj_n}
sepa = degdist((obj_t['ra_TGSS'],obj_t['dec_TGSS']),
(obj_n['ra_NVSS'],obj_n['dec_NVSS']))
match_row['separation'] = sepa
alpha = np.log(obj_t['peak_TGSS']/obj_n['peak_NVSS'])/ | np.log(FREQ_NVSS/FREQ_TGSS) | numpy.log |
import pyspark
from pyspark import SparkConf, SparkContext
import os
import argparse
import numpy as np
import png
from PIL import Image
import scipy
import re
import binascii
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report
import keras
import tensorflow as tf
from keras.utils import to_categorical
from keras.datasets import fashion_mnist
from keras.models import Sequential, Input, Model
from keras.layers import Dense, Dropout, Flatten
from keras.layers import Conv2D, MaxPooling2D
from keras.layers.normalization import BatchNormalization
from keras.layers.advanced_activations import LeakyReLU
from keras.models import load_model
#-----spark configuration-----
conf = SparkConf().setAppName("MlvClassification")
conf = (conf.setMaster('local[*]')
.set('spark.executor.memory', '4g')
.set('spark.driver.memory', '4G')
.set('spark.driver.maxResultSize', '10G'))
sc = SparkContext(conf=conf)
#-----functions-------
#----assigning each filename its corresponding label----
def fname_label_assign(fnames, labels):
filename_label_dict = {}
for filename, label in zip(fnames.value, labels.value):
filename_label_dict[filename] = label
return filename_label_dict
#----making the list of words form byte files----
def pre_process(x, fname_label_dict):
fname = x[0].split('/')[-1][:-6]
label = int(fname_label_dict.value[fname])
word_list = list(filter(lambda x: len(x)==2 and x!='??' and x!='00' and x!='CC', re.split('\r\n| ', x[1])))
return ((fname,label), word_list)
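# Hedged example of the tokenisation above (hypothetical byte-file line):
#   '00401000 8B 4C 24 04 ?? CC'  ->  ['8B', '4C', '24', '04']
# (the 8-character address token, '??', '00' and 'CC' are all filtered out).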
#----making image out of byte files----
def makeImage(rdd, input_type):
img_w = 448
img_h = 448
row = 0
col = 0
image = []
image.append([])
for i in rdd[1]:
intrdd = int(i, 16)
if col <= img_w-1:
image[row].append(intrdd)
col += 1
else:
row += 1
col = 0
image.append([])
image[row].append(intrdd)
col += 1
    if col <= img_w-1:
        for j in range(col, img_w):
            image[row].append(0)  # numeric zero padding (was the string '0')
#------This part save the byte files as gray scale images-----
# image_output = Image.fromarray(np.asarray(image).astype(np.uint8))
# if input_type == 'train':
# imagefile = ('images/train/'+rdd[0][0]+'_'+str(rdd[0][1])+'.png')
# else:
# imagefile = ('images/test/'+rdd[0][0]+'_'+str(rdd[0][1])+'.png')
# image_output.save(imagefile)
    #----resize to a fixed 448x448 frame, add a channel axis and normalize----
    image_np = np.array(image).astype(np.float32)
    image_np.resize(img_w, img_h)                 # pad/truncate in place
    image_np = image_np.reshape(img_w, img_h, 1)  # single grayscale channel
    image_np = image_np/255
    return (image_np, int(rdd[0][1])-1)
#----loading file names and their corresponding labels-----
train_fnames = open('dataset/files/X_train.txt').read().split('\n')
train_labels = open('dataset/files/y_train.txt').read().split('\n')
test_fnames = open('dataset/files/X_small_train.txt').read().split('\n')
test_labels = open('dataset/files/y_small_train.txt').read().split('\n')
#----Broadcasting the file names and labels
train_fnames_broad = sc.broadcast(train_fnames)
train_labels_broad = sc.broadcast(train_labels)
train_fname_label_dict = fname_label_assign(train_fnames_broad, train_labels_broad)
train_fname_label_dict_broad = sc.broadcast(train_fname_label_dict)
test_fnames_broad = sc.broadcast(test_fnames)
test_labels_broad = sc.broadcast(test_labels)
test_fname_label_dict = fname_label_assign(test_fnames_broad, test_labels_broad)
test_fname_label_dict_broad = sc.broadcast(test_fname_label_dict)
train_rdd_files = sc.wholeTextFiles("/run/media/afarahani/dataset/train").repartition(30)
test_rdd_files = sc.wholeTextFiles("dataset/bytes/train").repartition(30)
train_bag_of_docs = train_rdd_files.map(lambda x: pre_process(x, train_fname_label_dict_broad))
test_bag_of_docs = test_rdd_files.map(lambda x: pre_process(x ,test_fname_label_dict_broad))
train_rdd_image = train_bag_of_docs.map(lambda x: makeImage(x, 'train'))
test_rdd_image = test_bag_of_docs.map(lambda x: makeImage(x, 'test'))
train_x = train_rdd_image.map(lambda x: x[0])
test_x = test_rdd_image.map(lambda x: x[0])
train_x = np.array(train_x.collect())
test_x = np.array(test_x.collect())
train_labels = train_rdd_image.map(lambda x: x[1])
test_labels= test_rdd_image.map(lambda x: x[1])
train_labels = np.array(train_labels.collect())
test_labels = np.array(test_labels.collect())
#----Convolutional model ---------
classes = np.unique( | np.array(train_labels) | numpy.array |
import os
import pickle
import numpy as np
import random as rnd
import tensorflow as tf
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import matplotlib.mlab as mlab
import seaborn
from PIL import Image, ImageColor
from collections import namedtuple
# def download_model_weights():
# from pathlib import Path
# import urllib.request
# cwd = os.path.dirname(os.path.abspath(__file__))
# for k in ['model-29.data-00000-of-00001','model-29.index','model-29.meta','translation.pkl']:
# download_dir = Path(cwd)/'handwritten_model/'
# download_dir.mkdir(exist_ok=True,parents=True)
# if (download_dir/f'{k}').exists(): continue
# print(f'file {k} not found, downloading from git repo..')
# urllib.request.urlretrieve(
# f'https://raw.github.com/Belval/TextRecognitionDataGenerator/master/trdg/handwritten_model/{k}',
# download_dir/f'{k}')
# print(f'file {k} saved to disk')
# return cwd
def _sample(e, mu1, mu2, std1, std2, rho):
cov = np.array([[std1 * std1, std1 * std2 * rho], [std1 * std2 * rho, std2 * std2]])
mean = np.array([mu1, mu2])
x, y = np.random.multivariate_normal(mean, cov)
end = np.random.binomial(1, e)
return np.array([x, y, end])
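# Each sampled point is (dx, dy, end_of_stroke): the pen offsets come from a
# bivariate Gaussian parameterised by one mixture component, and `end` is a
# Bernoulli draw on the pen-lift probability e.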
def _split_strokes(points):
points = np.array(points)
strokes = []
b = 0
for e in range(len(points)):
if points[e, 2] == 1.0:
strokes += [points[b : e + 1, :2].copy()]
b = e + 1
return strokes
def _cumsum(points):
sums = | np.cumsum(points[:, :2], axis=0) | numpy.cumsum |
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import matplotlib.patches as mpatches
from .utils import A_to_P, P_to_A
N_times = 500
# Colors
C0 = 'C0'
C1 = 'C1'
def func_Roche_radius(M1, M2, A):
""" Get Roche lobe radius (Eggleton 1983)
Parameters
----------
M1 : float
Primary mass (Msun)
M2 : float
Secondary mass (Msun)
A : float
Orbital separation (any unit)
Returns
-------
Roche radius : float
in units of input, A
"""
q = M1 / M2
return A * 0.49*q**(2.0/3.0) / (0.6*q**(2.0/3.0) + np.log(1.0 + q**(1.0/3.0)))
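# Example (hypothetical equal-mass binary): for q = 1 the Eggleton fit gives
#   func_Roche_radius(1.0, 1.0, 1.0) ~= 0.49 / (0.6 + ln 2) ~= 0.38
# i.e. each star's Roche lobe is about 38% of the orbital separation.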
def evolve_binary(evolve, M1_in, M2_in, P_orb_in, ecc, t_min, t_max,
v1_kick=(0.0, 0.0, 0.0, 0.0), v2_kick=(0.0, 0.0, 0.0, 0.0),
metallicity=0.02, verbose_output=False, model_kwargs={}):
times = np.linspace(t_min, t_max, N_times)
R1_out = | np.array([]) | numpy.array |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Create plots for Figure 4.
"""
# import packages
import numpy as np
import matplotlib.pyplot as plt
import os
import sklearn.linear_model as lm
#######################
# SET DIRECTORY
#######################
loaddir = 'data/fig4/'
savdir = 'figures/fig4/'
if not os.path.exists(savdir):
os.mkdir(savdir)
#######################
# LOAD SIMULATION DATA
#######################
# load main data dictionaries
data = np.load(loaddir+'relearning_results.npy',allow_pickle=True).item()
data0 = np.load(loaddir+'experiment_results.npy',allow_pickle=True).item()
# load parameters
manifold_trials = data0['params']['manifold_trials']
stimulus = data0['stimulus']
target = data0['target']
reduced_dim = data0['decoding']['reduced_dim']
T = data['perturbations']['T']
T_within = data['perturbations']['T_within']
T_outside = data['perturbations']['T_outside']
feedback_within = data['relearning']['feedback_within']
feedback_outside = data['relearning']['feedback_outside']
# load weights
w00 = np.load(loaddir+'W_initial.npy')
w0 = np.load(loaddir+'W_stabilized.npy')
w1w = | np.load(loaddir+'W_within.npy') | numpy.load |
import inspect
import os
import random
from typing import Tuple
import numpy as np
import tensorflow as tf
from scipy import stats
from tensorflow.contrib import layers
from divisivenormalization.regularizers import (
smoothness_regularizer_2d,
group_sparsity_regularizer_2d,
smoothness_regularizer_1d,
)
def inv_elu(x):
"""Inverse elu function."""
y = x.copy()
idx = y < 1.0
y[idx] = np.log(y[idx]) + 1.0
return y
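# `inv_elu` inverts y = elu(x - 1) + 1 (the output nonlinearity below):
# that map equals exp(x - 1) for x < 1 and x itself for x >= 1, so the
# inverse is ln(y) + 1 for y < 1 and the identity otherwise.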
def lin_step(x, a, b):
return tf.minimum(tf.constant(b - a, dtype=tf.float32), tf.nn.relu(x - tf.constant(a, dtype=tf.float32))) / (b - a)
def tent(x, a, b):
z = tf.constant(0, dtype=tf.float32)
d = tf.constant(2 * (b - a), dtype=tf.float32)
a = tf.constant(a, dtype=tf.float32)
return tf.minimum(tf.maximum(x - a, z), tf.maximum(a + d - x, z)) / (b - a)
def output_nonlinearity(x, num_neurons, vmin=-3.0, vmax=6.0, num_bins=10, alpha=0, scope="output_nonlinearity"):
with tf.variable_scope(scope):
elu = tf.nn.elu(x - 1.0) + 1.0
if alpha == -1:
tf.add_to_collection("output_nonlinearity", 0)
return elu
_, neurons = x.get_shape().as_list()
k = int(num_bins / 2)
num_bins = 2 * k
bins = np.linspace(vmin, vmax, num_bins + 1, endpoint=True)
segments = [tent(x, a, b) for a, b in zip(bins[:-2], bins[1:-1])] + [lin_step(x, bins[-2], bins[-1])]
reg = lambda w: smoothness_regularizer_1d(w, weight=alpha, order=2)
a = tf.get_variable(
"weights",
shape=[neurons, num_bins, 1],
dtype=tf.float32,
initializer=tf.constant_initializer(0),
regularizer=reg,
)
a = tf.exp(a)
tf.add_to_collection("output_nonlinearity", a)
v = tf.transpose(tf.concat([tf.reshape(s, [-1, neurons, 1]) for s in segments], axis=2), [1, 0, 2])
multiplier = tf.transpose(tf.reshape(tf.matmul(v, a), [neurons, -1]))
return multiplier * elu
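# The multiplier above is a per-neuron piecewise-linear function of the
# feature value, built from `num_bins` tent basis functions on [vmin, vmax].
# Exponentiating the weights keeps each multiplier positive, and
# smoothness_regularizer_1d (order 2) penalises curvature across bins.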
class Net:
"""Abstract class to be inherited by models."""
def __init__(
self, data=None, log_dir=None, log_hash=None, global_step=None, obs_noise_model="poisson", eval_batches=None
):
self.data = data
log_dir_ = os.path.dirname(os.path.dirname(inspect.stack()[0][1]))
self.log_dir_wo_hash = os.path.join(log_dir_, "train_logs", "tmp") if log_dir is None else log_dir
        if log_hash is None:
log_hash = "%010x" % random.getrandbits(40)
self.log_dir = os.path.join(self.log_dir_wo_hash, log_hash)
self.log_hash = log_hash
        self.global_step = 0 if global_step is None else global_step
self.session = None
self.obs_noise_model = obs_noise_model
self.best_loss = 1e100
self.val_iter_loss = []
self.eval_batches = eval_batches
# placeholders
if data is None:
return
with tf.Graph().as_default() as self.graph:
self.is_training = tf.placeholder(tf.bool)
self.learning_rate = tf.placeholder(tf.float32)
self.images = tf.placeholder(tf.float32, shape=[None, data.px_y, data.px_x, 1])
self.responses = tf.placeholder(tf.float32, shape=[None, data.num_neurons])
self.real_responses = tf.placeholder(tf.float32, shape=[None, data.num_neurons])
def initialize(self):
self.summaries = tf.summary.merge_all()
if self.session is None:
self.session = tf.Session(graph=self.graph)
self.session.run(tf.global_variables_initializer())
self.saver = tf.train.Saver(max_to_keep=100)
self.saver_best = tf.train.Saver(max_to_keep=1)
self.writer = tf.summary.FileWriter(self.log_dir, max_queue=0, flush_secs=0.1)
def __del__(self):
try:
            if self.session is not None:
self.session.close()
self.writer.close()
except:
pass
def close(self):
self.session.close()
def save(self, step=None):
        if step is None:
step = self.global_step
chkp_file = os.path.join(self.log_dir, "model.ckpt")
self.saver.save(self.session, chkp_file, global_step=step)
def save_best(self):
self.saver_best.save(self.session, os.path.join(self.log_dir, "best.ckpt"))
def load(self, log_hash=None, omit_var_by_name=None):
"""Load model.
Args:
log_hash (str, optional): Checkpoint hash. Defaults to None.
omit_var_by_name (list[str], optional): Variables that should not be loaded. Defaults to
None. Example: ["conv0/weights"]
"""
if log_hash is None:
print("WARNING: Restored same model. (specified log hash to load from was None)")
ckpt_path = os.path.join(self.log_dir, "model.ckpt")
else:
ckpt_path = os.path.join(self.log_dir_wo_hash, log_hash, "model.ckpt")
ckpt_var_list = tf.train.list_variables(ckpt_path)
var_list = []
for v in ckpt_var_list:
if omit_var_by_name is not None and v[0] in omit_var_by_name:
continue
var_list.append(self.graph.get_tensor_by_name(v[0] + ":0"))
self.trainable_var_saver = tf.train.Saver(var_list=var_list)
self.trainable_var_saver.restore(self.session, ckpt_path)
def load_best(self):
ckpt_path = os.path.join(self.log_dir, "best.ckpt")
ckpt_var_list = tf.train.list_variables(ckpt_path)
var_list = []
for v in ckpt_var_list:
var_list.append(self.graph.get_tensor_by_name(v[0] + ":0"))
print("load", v[0] + ":0")
self.saver_best = tf.train.Saver(var_list=var_list)
self.saver_best.restore(self.session, ckpt_path)
def train(
self,
max_iter=5000,
learning_rate=3e-4,
batch_size=256,
val_steps=100,
save_steps=1000,
early_stopping_steps=5,
learning_rule_updates=3,
eval_batches=None,
):
self.eval_batches = eval_batches
with self.graph.as_default():
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
imgs_val, res_val, real_resp_val = self.data.val()
not_improved = 0
num_lr_updates = 0
for i in range(self.global_step + 1, self.global_step + max_iter + 1):
# training step
imgs_batch, res_batch, real_batch = self.data.minibatch(batch_size)
self.global_step = i
feed_dict = {
self.images: imgs_batch,
self.responses: res_batch,
self.real_responses: real_batch,
self.is_training: True,
self.learning_rate: learning_rate,
}
self.session.run([self.train_step, update_ops], feed_dict)
# validate/save periodically
if not i % save_steps:
self.save(i)
if not i % val_steps:
result = self.eval(
images=imgs_val,
responses=res_val,
real_responses=real_resp_val,
with_summaries=False,
global_step=i,
learning_rate=learning_rate,
)
if result[0] < self.best_loss:
self.best_loss = result[0]
self.save_best()
not_improved = 0
else:
not_improved += 1
if not_improved == early_stopping_steps:
self.global_step -= early_stopping_steps * val_steps
self.load_best()
not_improved = 0
learning_rate /= 3
print("reducing learning rate to {}".format(learning_rate))
num_lr_updates += 1
if num_lr_updates == learning_rule_updates:
self.load_best()
break
yield (i, result)
def eval(
self,
with_summaries=False,
keep_record_loss=True,
images=None,
responses=None,
real_responses=None,
global_step=None,
learning_rate=None,
):
"""Returns result, where last entry are the predictions."""
if (images is None) or (responses is None):
images, responses, real_responses = self.data.test()
nrep, nim, nneu = responses.shape
images = np.tile(images, [nrep, 1, 1, 1])
responses = responses.reshape([nim * nrep, nneu])
real_responses = real_responses.reshape([nim * nrep, nneu])
if with_summaries:
raise NotImplementedError
ops = self.get_test_ops()
if self.eval_batches is not None:
batches = self.eval_batches
numpts = images.shape[0]
numbatch = int(np.ceil(numpts / batches))
pred_val = []
result = 0
for batch_idx in range(0, numbatch):
if batches * (batch_idx + 1) > numpts:
idx = (batch_idx * batches) + np.arange(0, numpts - (batch_idx * batches))
else:
idx = (batch_idx * batches) + np.arange(0, batches)
feed_dict = {
self.images: images[idx],
self.responses: responses[idx],
self.real_responses: real_responses[idx],
self.is_training: False,
}
res = self.session.run(ops, feed_dict)
pred_val.append(res[-1])
result += np.array([r * len(idx) for r in res[:-1]])
result = [np.float32(r / numpts) for r in result]
pred_val = np.concatenate(pred_val)
result.append(pred_val)
else:
feed_dict = {
self.images: images,
self.responses: responses,
self.real_responses: real_responses,
self.is_training: False,
}
result = self.session.run(ops, feed_dict)
if keep_record_loss:
self.val_iter_loss.append(result[0])
return result
def compute_log_likelihoods(self, prediction, response, real_responses):
self.poisson = tf.reduce_mean(
tf.reduce_sum((prediction - response * tf.log(prediction + 1e-9)) * real_responses, axis=0)
/ tf.reduce_sum(real_responses, axis=0)
)
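    # Masked Poisson negative log-likelihood: `real_responses` zeroes out
    # missing trials, and dividing by its per-neuron sum averages only over
    # observations that actually exist.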
def get_log_likelihood(self):
if self.obs_noise_model == "poisson":
return self.poisson
else:
raise NotImplementedError
def get_test_ops(self):
return [self.get_log_likelihood(), self.total_loss, self.prediction]
def evaluate_corr_vals(self):
"""Computes and returns a vector of correlations between prediction and labels of all neurons
on the validation set."""
im, res, real_res = self.data.val()
result = self.eval(images=im, responses=res, real_responses=real_res)
pred = result[-1]
corrs = []
for i in range(self.data.num_neurons):
# keep only entries corresponding to real_res
r = res[:, i]
p = pred[:, i]
            b = real_res[:, i].astype(bool)  # np.bool is removed in modern NumPy
r = np.compress(b, r)
p = np.compress(b, p)
corr = stats.pearsonr(r, p)[0]
if np.isnan(corr):
print("INFO: corr for neuron " + str(i) + " is nan - replaced by 0")
corr = 0
corrs.append(corr)
return corrs
def evaluate_avg_corr_val(self):
"""Prediction correlation averaged across neurons on validation set."""
avg_corr = np.mean(self.evaluate_corr_vals())
return avg_corr
def evaluate_ve_testset_per_neuron(self):
"""Computes variance explained and explainable variance on the test set per neuron."""
images_test, responses_test, real_responses_test = self.data.test()
nrep, nim, nneu = responses_test.shape
predictions_test = self.prediction.eval(
session=self.session, feed_dict={self.images: images_test, self.is_training: False}
)
predictions_test = | np.tile(predictions_test.T, 4) | numpy.tile |
import copy
from GTac_Data import gtac_data
from data_gen import raw_data_byts_checkout_2, collect_DataPoints
# from data_collect_fingers_five import COLUMNS_RAW_FINGER_DATA, MAG_NUM, COL_INDEX
from gtac_config import COLUMNS_RAW_FINGER_DATA, MAG_NUM, COL_INDEX
# from Handover import collect_DataPoints, find_location, find_mat_value
# from Stably_Gentle_Grasping import find_mat_sum_sec, reactive_pinch
from draw_bubbles_py_3 import setup_scatter_ax, plot_fingertip_2
# from draw_lines2 import update_vals
import serial
import time
import pandas as pd
import numpy as np
import argparse
import matplotlib
# matplotlib.use('TkAgg')
from matplotlib import pyplot as plt
from matplotlib.animation import FuncAnimation
# from GTac_Hand import gtac_hand
window_length = 200
x = np.linspace(0, 199, 200)
y = np.zeros(len(x))
mag_x = []
mag_y = []
mag_z = []
mat_x_0 = [4, 4, 4, 4, 3, 3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1]
mat_y_0 = [1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 3, 4]
mat_x = [1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 3, 4]
mat_y = [1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4]
press_location_r = 2.5
press_location_r_list = []
press_location_c = 2.5
press_location_c_list = []
sum_value = 0
sum_value_list = []
mat_sz = np.zeros(16)
mat_amp_index = 10
pressing_loc_amp_index = 2
mat_loc_index = 0.001
def update_vals(data_frame_array, finger=1, sec=2, window_len=200):
tri_index = finger * 9 + (2 - sec) * 3
sum_r = 0
sum_c = 0
global mag_x, mag_y, mag_z, sum_value, press_location_r, press_location_c, \
sum_value_list, press_location_r_list, press_location_c_list
sum_value = 0
# update magnetic and resistive signals for GTac Bubbles
for i in range(len(mat_x)):
r = i // 4
c = i % 4
index, value = gtac_data.find_mat_value(data_frame_array, finger, sec, r, c)
if value > 20: # threshold to remove noise for obtaining pressing location
sum_r += (r + 1) * value
sum_c += (c + 1) * value
sum_value += value
mat_sz[i] = abs(value * mat_amp_index)
else:
mat_sz[i] = 0
mat_x[i] = c + 1 + data_frame_array[tri_index] * mat_loc_index
mat_y[i] = r + 1 + data_frame_array[tri_index + 1] * mat_loc_index
# update pressing locations
if sum_value != 0:
press_location_r = round(sum_r / sum_value, 1)
press_location_c = round(sum_c / sum_value, 1)
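        # (press_location_r, press_location_c) is the intensity-weighted
        # centroid of the 4x4 taxel grid, i.e. the estimated contact point.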
# update magnetic signals
mag_x.append(data_frame_array[tri_index])
mag_y.append(data_frame_array[tri_index + 1])
mag_z.append(abs(data_frame_array[tri_index + 2]))
sum_value_list.append(sum_value)
press_location_r_list.append(press_location_r - 1)
press_location_c_list.append(press_location_c - 1)
if len(mag_x) > window_len:
mag_x = mag_x[-window_len:]
mag_y = mag_y[-window_len:]
mag_z = mag_z[-window_len:]
sum_value_list = sum_value_list[-window_len:]
press_location_r_list = press_location_r_list[-window_len:]
press_location_c_list = press_location_c_list[-window_len:]
print('r:{};c:{}'.format(press_location_r, press_location_c))
# update vals for plot gaussian
# zarray = gaus2d(x=x_mesh, y=y_mesh,
# mx=press_location_r,
# my=press_location_c,
# sx=1,
# sy=1)
# define normalized 2D gaussian
def gaus2d(x=0, y=0, mx=0, my=0, sx=1, sy=1):
return 1. / (2. * np.pi * sx * sy) * np.exp(-((x - mx)**2. / (2. * sx**2.) + (y - my)**2. / (2. * sy**2.)))
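# gaus2d peaks at 1 / (2*pi*sx*sy) at (mx, my) and integrates to 1, so the
# (currently commented-out) bubble overlay would be a proper density surface.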
def plot_pressing_loc(scat, press_location_r, press_location_c, sec_sum):
scat.set_offsets(np.array([press_location_c, press_location_r]))
scat.set_sizes([sec_sum * pressing_loc_amp_index])
def set_data_sec(f4_ax1_scat_tri_mat, f4_ax1_scat_pre_loc,
f4_ax2_magx, f4_ax2_magy, f4_ax3_magz,
f4_ax3_mat_sum, f4_ax4_center_x, f4_ax4_center_y):
plot_fingertip_2(f4_ax1_scat_tri_mat, mat_x, mat_y, mat_sz)
plot_pressing_loc(f4_ax1_scat_pre_loc,
press_location_r,
press_location_c,
sum_value)
if len(mag_y) == window_length:
# print(len(mag_x),len(mag_y),len(mag_z),len(sum_value_list),len(press_location_c_list),len(press_location_r_list))
f4_ax2_magx.set_ydata(mag_x)
f4_ax2_magy.set_ydata(mag_y)
f4_ax3_magz.set_ydata(mag_z)
f4_ax3_mat_sum.set_ydata(sum_value_list)
f4_ax4_center_x.set_ydata(press_location_c_list)
f4_ax4_center_y.set_ydata(press_location_r_list)
def setup_scatter_ax2(ax):
# rect is the box edge
rect = plt.Rectangle((-1, -1),
5,
5,
ec='none', lw=2, fc='none')
ax.add_patch(rect)
ax.axes.xaxis.set_visible(False)
ax.axes.yaxis.set_visible(False)
scat_base = ax.scatter(mat_x_0, mat_y_0, s=1500, alpha=0.4)
scat_tri_mat = ax.scatter(mat_x, mat_y, s=150, alpha=1)
scat_pre_loc = ax.scatter(press_location_c, press_location_r, s=150, alpha=1)
return scat_tri_mat, scat_pre_loc
def setup_figures():
# prepare the figure
fig4 = plt.figure(figsize=(9, 5), constrained_layout=True)
gs = fig4.add_gridspec(3, 5)
f4_ax1 = fig4.add_subplot(gs[:, :-2],
aspect='equal',
autoscale_on=False,
xlim=(0, 5), ylim=(0, 5))
f4_ax1.set_title('GTac Bubbles')
f4_ax1_scat_tri_mat, f4_ax1_scat_pre_loc = setup_scatter_ax2(f4_ax1)
f4_ax2 = fig4.add_subplot(gs[0, -2:])
f4_ax2.set_title('Shear Force Signals (uT)')
f4_ax2.set_ylim([-500, 500])
f4_ax2_magx = f4_ax2.plot(np.zeros(window_length), label='SA-II x')[0]
f4_ax2_magy = f4_ax2.plot(np.zeros(window_length), label='SA-II y')[0]
# f4_ax3_magz = f4_ax2.plot(np.zeros(window_length), label='mag-z')[0]
f4_ax2.legend(loc=0)
f4_ax3 = fig4.add_subplot(gs[1, -2:])
f4_ax3.set_title('Normal Force Signals')
f4_ax3.set_ylim([0, 2000])
f4_ax3_mat_sum = f4_ax3.plot(np.zeros(window_length),
label='FA-I Sum')[0]
f4_ax3_magz = f4_ax3.plot( | np.zeros(window_length) | numpy.zeros |